Search is not available for this dataset
repo
stringlengths
2
152
file
stringlengths
15
239
code
stringlengths
0
58.4M
file_length
int64
0
58.4M
avg_line_length
float64
0
1.81M
max_line_length
int64
0
12.7M
extension_type
stringclasses
364 values
null
ceph-main/src/test/objectstore/store_test.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <glob.h> #include <stdio.h> #include <string.h> #include <iostream> #include <memory> #include <time.h> #include <sys/mount.h> #include <boost/random/mersenne_twister.hpp> #include <boost/random/uniform_int.hpp> #include <boost/random/binomial_distribution.hpp> #include <fmt/format.h> #include <gtest/gtest.h> #include "os/ObjectStore.h" #if defined(WITH_BLUESTORE) #include "os/bluestore/BlueStore.h" #include "os/bluestore/BlueFS.h" #endif #include "include/Context.h" #include "common/buffer_instrumentation.h" #include "common/ceph_argparse.h" #include "common/admin_socket.h" #include "global/global_init.h" #include "common/ceph_mutex.h" #include "common/Cond.h" #include "common/errno.h" #include "common/options.h" // for the size literals #include "common/pretty_binary.h" #include "include/stringify.h" #include "include/coredumpctl.h" #include "include/unordered_map.h" #include "os/kv.h" #include "store_test_fixture.h" using namespace std; using namespace std::placeholders; typedef boost::mt11213b gen_type; const uint64_t DEF_STORE_TEST_BLOCKDEV_SIZE = 10240000000; #define dout_context g_ceph_context bool smr = false; static bool bl_eq(bufferlist& expected, bufferlist& actual) { if (expected.contents_equal(actual)) return true; unsigned first = 0; if(expected.length() != actual.length()) { cout << "--- buffer lengths mismatch " << std::hex << "expected 0x" << expected.length() << " != actual 0x" << actual.length() << std::dec << std::endl; derr << "--- buffer lengths mismatch " << std::hex << "expected 0x" << expected.length() << " != actual 0x" << 
actual.length() << std::dec << dendl; } auto len = std::min(expected.length(), actual.length()); while ( first<len && expected[first] == actual[first]) ++first; unsigned last = len; while (last > 0 && expected[last-1] == actual[last-1]) --last; if(len > 0) { cout << "--- buffer mismatch between offset 0x" << std::hex << first << " and 0x" << last << ", total 0x" << len << std::dec << std::endl; derr << "--- buffer mismatch between offset 0x" << std::hex << first << " and 0x" << last << ", total 0x" << len << std::dec << dendl; cout << "--- expected:\n"; expected.hexdump(cout); cout << "--- actual:\n"; actual.hexdump(cout); } return false; } template <typename T> int queue_transaction( T &store, ObjectStore::CollectionHandle ch, ObjectStore::Transaction &&t) { if (rand() % 2) { ObjectStore::Transaction t2; t2.append(t); return store->queue_transaction(ch, std::move(t2)); } else { return store->queue_transaction(ch, std::move(t)); } } template <typename T> int collection_list(T &store, ObjectStore::CollectionHandle &c, const ghobject_t& start, const ghobject_t& end, int max, vector<ghobject_t> *ls, ghobject_t *pnext, bool disable_legacy = false) { if (disable_legacy || rand() % 2) { return store->collection_list(c, start, end, max, ls, pnext); } else { return store->collection_list_legacy(c, start, end, max, ls, pnext); } } bool sorted(const vector<ghobject_t> &in) { ghobject_t start; for (vector<ghobject_t>::const_iterator i = in.begin(); i != in.end(); ++i) { if (start > *i) { cout << start << " should follow " << *i << std::endl; return false; } start = *i; } return true; } class StoreTest : public StoreTestFixture, public ::testing::WithParamInterface<const char*> { public: StoreTest() : StoreTestFixture(GetParam()) {} void doCompressionTest(); void doSyntheticTest( int num_ops, uint64_t max_obj, uint64_t max_wr, uint64_t align); }; class StoreTestDeferredSetup : public StoreTest { void SetUp() override { //do nothing } protected: void DeferredSetup() { 
StoreTest::SetUp(); } public: }; class StoreTestSpecificAUSize : public StoreTestDeferredSetup { public: typedef std::function<void( uint64_t num_ops, uint64_t max_obj, uint64_t max_wr, uint64_t align)> MatrixTest; void StartDeferred(size_t min_alloc_size) { SetVal(g_conf(), "bluestore_min_alloc_size", stringify(min_alloc_size).c_str()); DeferredSetup(); } private: // bluestore matrix testing uint64_t max_write = 40 * 1024; uint64_t max_size = 400 * 1024; uint64_t alignment = 0; uint64_t num_ops = 10000; protected: string matrix_get(const char *k) { if (string(k) == "max_write") { return stringify(max_write); } else if (string(k) == "max_size") { return stringify(max_size); } else if (string(k) == "alignment") { return stringify(alignment); } else if (string(k) == "num_ops") { return stringify(num_ops); } else { char *buf; g_conf().get_val(k, &buf, -1); string v = buf; free(buf); return v; } } void matrix_set(const char *k, const char *v) { if (string(k) == "max_write") { max_write = atoll(v); } else if (string(k) == "max_size") { max_size = atoll(v); } else if (string(k) == "alignment") { alignment = atoll(v); } else if (string(k) == "num_ops") { num_ops = atoll(v); } else { SetVal(g_conf(), k, v); } } void do_matrix_choose(const char *matrix[][10], int i, int pos, int num, MatrixTest fn) { if (matrix[i][0]) { int count; for (count = 0; matrix[i][count+1]; ++count) ; for (int j = 1; matrix[i][j]; ++j) { matrix_set(matrix[i][0], matrix[i][j]); do_matrix_choose(matrix, i + 1, pos * count + j - 1, num * count, fn); } } else { cout << "---------------------- " << (pos + 1) << " / " << num << " ----------------------" << std::endl; for (unsigned k=0; matrix[k][0]; ++k) { cout << " " << matrix[k][0] << " = " << matrix_get(matrix[k][0]) << std::endl; } g_ceph_context->_conf.apply_changes(nullptr); fn(num_ops, max_size, max_write, alignment); } } void do_matrix(const char *matrix[][10], MatrixTest fn) { if (strcmp(matrix[0][0], "bluestore_min_alloc_size") == 0) { int 
count; for (count = 0; matrix[0][count+1]; ++count) ; for (size_t j = 1; matrix[0][j]; ++j) { if (j > 1) { TearDown(); } StartDeferred(strtoll(matrix[0][j], NULL, 10)); do_matrix_choose(matrix, 1, j - 1, count, fn); } } else { StartDeferred(0); do_matrix_choose(matrix, 0, 0, 1, fn); } } }; class StoreTestOmapUpgrade : public StoreTestDeferredSetup { protected: void StartDeferred() { DeferredSetup(); } public: struct generator { double r = 3.6; double x = 0.5; double operator()(){ double v = x; x = r * x * (1 - x); return v; } }; std::string generate_monotonic_name(uint32_t SUM, uint32_t i, double r, double x) { generator gen{r, x}; //std::cout << "r=" << r << " x=" << x << std::endl; std::string s; while (SUM > 1) { uint32_t lo = 0; uint32_t hi = 1 + gen() * 10; uint32_t start = ('z' - 'a' + 1 - hi) * gen(); while (hi - lo > 0) { uint32_t mid = (lo + hi + 1 + (SUM&1)) / 2; // round up or down, depending on SUM // std::cout << "SUM=" << SUM << " x=" << gen.x << std::endl; uint32_t mid_val = gen() * (SUM - 1) + 1; // LEFT = lo .. mid - 1 // RIGHT = mid .. 
hi // std::cout << "lo=" << lo << " hi=" << hi << " mid=" << mid // << " SUM=" << SUM << " i=" << i << " x=" << gen.x << " mid_val=" << mid_val << std::endl; if (i < mid_val) { hi = mid - 1; SUM = mid_val; } else { lo = mid; SUM = SUM - mid_val; i = i - mid_val; } } //std::cout << "lo=" << lo << " hi=" << hi // << " SUM=" << SUM << " i=" << i << std::endl; s.push_back('a' + lo + start); // to keep alphabetic order uint32_t cnt = gen() * 8; for (uint32_t j = 0; j < cnt; j++) { s.push_back('a' + ('z' - 'a' + 1) * gen()); } s.push_back('.'); } return s; } std::string gen_string(size_t size, generator& gen) { std::string s; for (size_t i = 0; i < size; i++) { s.push_back('a' + ('z' - 'a' + 1 ) * gen()); } return s; } void make_omap_data(size_t object_count, int64_t poolid, coll_t cid) { int r; ObjectStore::CollectionHandle ch = store->open_collection(cid); for (size_t o = 0; o < object_count; o++) { ObjectStore::Transaction t; std::string oid = generate_monotonic_name(object_count, o, 3.71, 0.5); ghobject_t hoid(hobject_t(oid, "", CEPH_NOSNAP, 0, poolid, "")); t.touch(cid, hoid); generator gen{3.85 + 0.1 * o / object_count, 1 - double(o) / object_count}; map<string, bufferlist> start_set; size_t omap_count = 1 + gen() * 20; bool do_omap_header = gen() > 0.5; if (do_omap_header) { bufferlist header; header.append(gen_string(50, gen)); t.omap_setheader(cid, hoid, header); } for (size_t i = 0; i < omap_count; i++) { std::string name = generate_monotonic_name(omap_count, i, 3.66 + 0.22 * o / object_count, 0.5); bufferlist val; val.append(gen_string(100, gen)); start_set.emplace(name, val); } t.omap_setkeys(cid, hoid, start_set); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } void check_omap_data(size_t object_count, int64_t poolid, coll_t cid) { int r; ObjectStore::CollectionHandle ch = store->open_collection(cid); for (size_t o = 0; o < object_count; o++) { ObjectStore::Transaction t; std::string oid = generate_monotonic_name(object_count, o, 3.71, 
0.5); ghobject_t hoid(hobject_t(oid, "", CEPH_NOSNAP, 0, poolid, "")); generator gen{3.85 + 0.1 * o / object_count, 1 - double(o) / object_count}; bufferlist omap_header; map<string, bufferlist> omap_set; r = store->omap_get(ch, hoid, &omap_header, &omap_set); ASSERT_EQ(r, 0); size_t omap_count = 1 + gen() * 20; bool do_omap_header = gen() > 0.5; if (do_omap_header) { std::string header_str = gen_string(50, gen); ASSERT_EQ(header_str, omap_header.to_str()); } auto it = omap_set.begin(); for (size_t i = 0; i < omap_count; i++) { ASSERT_TRUE(it != omap_set.end()); std::string name = generate_monotonic_name(omap_count, i, 3.66 + 0.22 * o / object_count, 0.5); std::string val_gen = gen_string(100, gen); ASSERT_EQ(it->first, name); ASSERT_EQ(it->second.to_str(), val_gen); ++it; } } } }; TEST_P(StoreTest, collect_metadata) { map<string,string> pm; store->collect_metadata(&pm); if (GetParam() == string("filestore")) { ASSERT_NE(pm.count("filestore_backend"), 0u); ASSERT_NE(pm.count("filestore_f_type"), 0u); ASSERT_NE(pm.count("backend_filestore_partition_path"), 0u); ASSERT_NE(pm.count("backend_filestore_dev_node"), 0u); } } TEST_P(StoreTest, Trivial) { } TEST_P(StoreTest, TrivialRemount) { int r = store->umount(); ASSERT_EQ(0, r); r = store->mount(); ASSERT_EQ(0, r); } TEST_P(StoreTest, TrivialRemountFsck) { if(string(GetParam()) != "bluestore") return; int r = store->umount(); ASSERT_EQ(0, r); r = store->fsck(false); ASSERT_EQ(0, r); r = store->mount(); ASSERT_EQ(0, r); } TEST_P(StoreTest, SimpleRemount) { coll_t cid; ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP))); ghobject_t hoid2(hobject_t(sobject_t("Object 2", CEPH_NOSNAP))); bufferlist bl; bl.append("1234512345"); int r; auto ch = store->create_new_collection(cid); { cerr << "create collection + write" << std::endl; ObjectStore::Transaction t; t.create_collection(cid, 0); t.write(cid, hoid, 0, bl.length(), bl); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } ch.reset(); r = 
store->umount(); ASSERT_EQ(0, r); r = store->mount(); ASSERT_EQ(0, r); ch = store->open_collection(cid); { ObjectStore::Transaction t; t.write(cid, hoid2, 0, bl.length(), bl); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove(cid, hoid2); t.remove_collection(cid); cerr << "remove collection" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } ch.reset(); r = store->umount(); ASSERT_EQ(0, r); r = store->mount(); ASSERT_EQ(0, r); ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); bool exists = store->exists(ch, hoid); ASSERT_TRUE(!exists); } { ObjectStore::Transaction t; t.remove_collection(cid); cerr << "remove collection" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTest, IORemount) { coll_t cid; bufferlist bl; bl.append("1234512345"); int r; auto ch = store->create_new_collection(cid); { cerr << "create collection + objects" << std::endl; ObjectStore::Transaction t; t.create_collection(cid, 0); for (int n=1; n<=100; ++n) { ghobject_t hoid(hobject_t(sobject_t("Object " + stringify(n), CEPH_NOSNAP))); t.write(cid, hoid, 0, bl.length(), bl); } r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } // overwrites { cout << "overwrites" << std::endl; for (int n=1; n<=100; ++n) { ObjectStore::Transaction t; ghobject_t hoid(hobject_t(sobject_t("Object " + stringify(n), CEPH_NOSNAP))); t.write(cid, hoid, 1, bl.length(), bl); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } ch.reset(); r = store->umount(); ASSERT_EQ(0, r); r = store->mount(); ASSERT_EQ(0, r); { ObjectStore::Transaction t; for (int n=1; n<=100; ++n) { ghobject_t hoid(hobject_t(sobject_t("Object " + stringify(n), CEPH_NOSNAP))); t.remove(cid, hoid); } t.remove_collection(cid); auto ch = 
store->open_collection(cid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTest, UnprintableCharsName) { coll_t cid; string name = "funnychars_"; for (unsigned i = 0; i < 256; ++i) { name.push_back(i); } ghobject_t oid(hobject_t(sobject_t(name, CEPH_NOSNAP))); int r; auto ch = store->create_new_collection(cid); { cerr << "create collection + object" << std::endl; ObjectStore::Transaction t; t.create_collection(cid, 0); t.touch(cid, oid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } ch.reset(); r = store->umount(); ASSERT_EQ(0, r); r = store->mount(); ASSERT_EQ(0, r); { cout << "removing" << std::endl; ObjectStore::Transaction t; t.remove(cid, oid); t.remove_collection(cid); auto ch = store->open_collection(cid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTest, FiemapEmpty) { coll_t cid; int r = 0; ghobject_t oid(hobject_t(sobject_t("fiemap_object", CEPH_NOSNAP))); auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); t.touch(cid, oid); t.truncate(cid, oid, 100000); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { bufferlist bl; store->fiemap(ch, oid, 0, 100000, bl); map<uint64_t,uint64_t> m, e; auto p = bl.cbegin(); decode(m, p); cout << " got " << m << std::endl; e[0] = 100000; EXPECT_TRUE(m == e || m.empty()); } { ObjectStore::Transaction t; t.remove(cid, oid); t.remove_collection(cid); cerr << "remove collection" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTest, FiemapHoles) { const uint64_t MAX_EXTENTS = 4000; const uint64_t SKIP_STEP = 65536; coll_t cid; int r = 0; ghobject_t oid(hobject_t(sobject_t("fiemap_object", CEPH_NOSNAP))); bufferlist bl; bl.append("foo"); auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); t.touch(cid, oid); for (uint64_t i = 0; i < MAX_EXTENTS; i++) t.write(cid, oid, 
SKIP_STEP * i, 3, bl); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { //fiemap test from 0 to SKIP_STEP * (MAX_EXTENTS - 1) + 3 bufferlist bl; store->fiemap(ch, oid, 0, SKIP_STEP * (MAX_EXTENTS - 1) + 3, bl); map<uint64_t,uint64_t> m, e; auto p = bl.cbegin(); decode(m, p); cout << " got " << m << std::endl; ASSERT_TRUE(!m.empty()); ASSERT_GE(m[0], 3u); auto last = m.crbegin(); if (m.size() == 1) { ASSERT_EQ(0u, last->first); } else if (m.size() == MAX_EXTENTS) { for (uint64_t i = 0; i < MAX_EXTENTS; i++) { ASSERT_TRUE(m.count(SKIP_STEP * i)); } } ASSERT_GT(last->first + last->second, SKIP_STEP * (MAX_EXTENTS - 1)); } { // fiemap test from SKIP_STEP to SKIP_STEP * (MAX_EXTENTS - 2) + 3 bufferlist bl; store->fiemap(ch, oid, SKIP_STEP, SKIP_STEP * (MAX_EXTENTS - 2) + 3, bl); map<uint64_t,uint64_t> m, e; auto p = bl.cbegin(); decode(m, p); cout << " got " << m << std::endl; ASSERT_TRUE(!m.empty()); // kstore always returns [0, object_size] regardless of offset and length // FIXME: if fiemap logic in kstore is refined if (string(GetParam()) != "kstore") { ASSERT_GE(m[SKIP_STEP], 3u); auto last = m.crbegin(); if (m.size() == 1) { ASSERT_EQ(SKIP_STEP, last->first); } else if (m.size() == MAX_EXTENTS - 2) { for (uint64_t i = 1; i < MAX_EXTENTS - 1; i++) { ASSERT_TRUE(m.count(SKIP_STEP*i)); } } ASSERT_GT(last->first + last->second, SKIP_STEP * (MAX_EXTENTS - 1)); } } { ObjectStore::Transaction t; t.remove(cid, oid); t.remove_collection(cid); cerr << "remove collection" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTest, SimpleMetaColTest) { coll_t cid; int r = 0; { auto ch = store->create_new_collection(cid); ObjectStore::Transaction t; t.create_collection(cid, 0); cerr << "create collection" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; t.remove_collection(cid); cerr << "remove collection" << std::endl; auto ch = 
store->open_collection(cid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { auto ch = store->create_new_collection(cid); ObjectStore::Transaction t; t.create_collection(cid, 0); cerr << "add collection" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; t.remove_collection(cid); cerr << "remove collection" << std::endl; auto ch = store->open_collection(cid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTest, SimplePGColTest) { coll_t cid(spg_t(pg_t(1,2), shard_id_t::NO_SHARD)); int r = 0; { ObjectStore::Transaction t; auto ch = store->create_new_collection(cid); t.create_collection(cid, 4); cerr << "create collection" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; t.remove_collection(cid); cerr << "remove collection" << std::endl; auto ch = store->open_collection(cid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; t.create_collection(cid, 4); cerr << "add collection" << std::endl; auto ch = store->create_new_collection(cid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; t.remove_collection(cid); cerr << "remove collection" << std::endl; auto ch = store->open_collection(cid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTest, SimpleColPreHashTest) { // Firstly we will need to revert the value making sure // collection hint actually works int merge_threshold = g_ceph_context->_conf->filestore_merge_threshold; std::ostringstream oss; if (merge_threshold > 0) { oss << "-" << merge_threshold; SetVal(g_conf(), "filestore_merge_threshold", oss.str().c_str()); } uint32_t pg_num = 128; boost::uniform_int<> pg_id_range(0, pg_num); gen_type rng(time(NULL)); int pg_id = pg_id_range(rng); int objs_per_folder = abs(merge_threshold) * 16 * 
g_ceph_context->_conf->filestore_split_multiple; boost::uniform_int<> folders_range(5, 256); uint64_t expected_num_objs = (uint64_t)objs_per_folder * (uint64_t)folders_range(rng); coll_t cid(spg_t(pg_t(pg_id, 15), shard_id_t::NO_SHARD)); int r; auto ch = store->create_new_collection(cid); { // Create a collection along with a hint ObjectStore::Transaction t; t.create_collection(cid, 5); cerr << "create collection" << std::endl; bufferlist hint; encode(pg_num, hint); encode(expected_num_objs, hint); t.collection_hint(cid, ObjectStore::Transaction::COLL_HINT_EXPECTED_NUM_OBJECTS, hint); cerr << "collection hint" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { // Remove the collection ObjectStore::Transaction t; t.remove_collection(cid); cerr << "remove collection" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTest, SmallBlockWrites) { int r; coll_t cid; auto ch = store->create_new_collection(cid); ghobject_t hoid(hobject_t(sobject_t("foo", CEPH_NOSNAP))); { ObjectStore::Transaction t; t.create_collection(cid, 0); cerr << "Creating collection " << cid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } bufferlist a; bufferptr ap(0x1000); memset(ap.c_str(), 'a', 0x1000); a.append(ap); bufferlist b; bufferptr bp(0x1000); memset(bp.c_str(), 'b', 0x1000); b.append(bp); bufferlist c; bufferptr cp(0x1000); memset(cp.c_str(), 'c', 0x1000); c.append(cp); bufferptr zp(0x1000); zp.zero(); bufferlist z; z.append(zp); { ObjectStore::Transaction t; t.write(cid, hoid, 0, 0x1000, a); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); bufferlist in, exp; r = store->read(ch, hoid, 0, 0x4000, in); ASSERT_EQ(0x1000, r); exp.append(a); ASSERT_TRUE(bl_eq(exp, in)); } { ObjectStore::Transaction t; t.write(cid, hoid, 0x1000, 0x1000, b); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); bufferlist in, exp; r = store->read(ch, hoid, 0, 0x4000, in); 
ASSERT_EQ(0x2000, r); exp.append(a); exp.append(b); ASSERT_TRUE(bl_eq(exp, in)); } { ObjectStore::Transaction t; t.write(cid, hoid, 0x3000, 0x1000, c); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); bufferlist in, exp; r = store->read(ch, hoid, 0, 0x4000, in); ASSERT_EQ(0x4000, r); exp.append(a); exp.append(b); exp.append(z); exp.append(c); ASSERT_TRUE(bl_eq(exp, in)); } { ObjectStore::Transaction t; t.write(cid, hoid, 0x2000, 0x1000, a); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); bufferlist in, exp; r = store->read(ch, hoid, 0, 0x4000, in); ASSERT_EQ(0x4000, r); exp.append(a); exp.append(b); exp.append(a); exp.append(c); ASSERT_TRUE(bl_eq(exp, in)); } { ObjectStore::Transaction t; t.write(cid, hoid, 0, 0x1000, c); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { bufferlist in, exp; r = store->read(ch, hoid, 0, 0x4000, in); ASSERT_EQ(0x4000, r); exp.append(c); exp.append(b); exp.append(a); exp.append(c); ASSERT_TRUE(bl_eq(exp, in)); } { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove_collection(cid); cerr << "Cleaning" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTest, BufferCacheReadTest) { int r; coll_t cid; ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP))); { auto ch = store->open_collection(cid); ASSERT_FALSE(ch); } auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); cerr << "Creating collection " << cid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { bool exists = store->exists(ch, hoid); ASSERT_TRUE(!exists); ObjectStore::Transaction t; t.touch(cid, hoid); cerr << "Creating object " << hoid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); exists = store->exists(ch, hoid); ASSERT_EQ(true, exists); } { ObjectStore::Transaction t; bufferlist bl, newdata; bl.append("abcde"); t.write(cid, hoid, 0, 5, bl); t.write(cid, 
hoid, 10, 5, bl); cerr << "TwinWrite" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); r = store->read(ch, hoid, 0, 15, newdata); ASSERT_EQ(r, 15); { bufferlist expected; expected.append(bl); expected.append_zero(5); expected.append(bl); ASSERT_TRUE(bl_eq(expected, newdata)); } } //overwrite over the same extents { ObjectStore::Transaction t; bufferlist bl, newdata; bl.append("edcba"); t.write(cid, hoid, 0, 5, bl); t.write(cid, hoid, 10, 5, bl); cerr << "TwinWrite" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); r = store->read(ch, hoid, 0, 15, newdata); ASSERT_EQ(r, 15); { bufferlist expected; expected.append(bl); expected.append_zero(5); expected.append(bl); ASSERT_TRUE(bl_eq(expected, newdata)); } } //additional write to an unused region of some blob { ObjectStore::Transaction t; bufferlist bl2, newdata; bl2.append("1234567890"); t.write(cid, hoid, 20, bl2.length(), bl2); cerr << "Append" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); r = store->read(ch, hoid, 0, 30, newdata); ASSERT_EQ(r, 30); { bufferlist expected; expected.append("edcba"); expected.append_zero(5); expected.append("edcba"); expected.append_zero(5); expected.append(bl2); ASSERT_TRUE(bl_eq(expected, newdata)); } } //additional write to an unused region of some blob and partial owerite over existing extents { ObjectStore::Transaction t; bufferlist bl, bl2, bl3, newdata; bl.append("DCB"); bl2.append("1234567890"); bl3.append("BA"); t.write(cid, hoid, 30, bl2.length(), bl2); t.write(cid, hoid, 1, bl.length(), bl); t.write(cid, hoid, 13, bl3.length(), bl3); cerr << "TripleWrite" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); r = store->read(ch, hoid, 0, 40, newdata); ASSERT_EQ(r, 40); { bufferlist expected; expected.append("eDCBa"); expected.append_zero(5); expected.append("edcBA"); expected.append_zero(5); expected.append(bl2); expected.append(bl2); 
ASSERT_TRUE(bl_eq(expected, newdata)); } } } void StoreTest::doCompressionTest() { int r; coll_t cid; ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP))); { auto ch = store->open_collection(cid); ASSERT_FALSE(ch); } auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); cerr << "Creating collection " << cid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { bool exists = store->exists(ch, hoid); ASSERT_TRUE(!exists); ObjectStore::Transaction t; t.touch(cid, hoid); cerr << "Creating object " << hoid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); exists = store->exists(ch, hoid); ASSERT_EQ(true, exists); } std::string data; data.resize(0x10000 * 4); for(size_t i = 0;i < data.size(); i++) data[i] = i / 256; { ObjectStore::Transaction t; bufferlist bl, newdata; bl.append(data); t.write(cid, hoid, 0, bl.length(), bl); cerr << "CompressibleData (4xAU) Write" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); r = store->read(ch, hoid, 0, data.size() , newdata); ASSERT_EQ(r, (int)data.size()); { bufferlist expected; expected.append(data); ASSERT_TRUE(bl_eq(expected, newdata)); } newdata.clear(); r = store->read(ch, hoid, 0, 711 , newdata); ASSERT_EQ(r, 711); { bufferlist expected; expected.append(data.substr(0,711)); ASSERT_TRUE(bl_eq(expected, newdata)); } newdata.clear(); r = store->read(ch, hoid, 0xf00f, data.size(), newdata); ASSERT_EQ(r, int(data.size() - 0xf00f) ); { bufferlist expected; expected.append(data.substr(0xf00f)); ASSERT_TRUE(bl_eq(expected, newdata)); } { struct store_statfs_t statfs; int r = store->statfs(&statfs); ASSERT_EQ(r, 0); ASSERT_EQ(statfs.data_stored, (unsigned)data.size()); ASSERT_LE(statfs.data_compressed, (unsigned)data.size()); ASSERT_EQ(statfs.data_compressed_original, (unsigned)data.size()); ASSERT_LE(statfs.data_compressed_allocated, (unsigned)data.size()); } } std::string data2; 
data2.resize(0x10000 * 4 - 0x9000); for(size_t i = 0;i < data2.size(); i++) data2[i] = (i+1) / 256; { ObjectStore::Transaction t; bufferlist bl, newdata; bl.append(data2); t.write(cid, hoid, 0x8000, bl.length(), bl); cerr << "CompressibleData partial overwrite" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); r = store->read(ch, hoid, 0, 0x10000, newdata); ASSERT_EQ(r, (int)0x10000); { bufferlist expected; expected.append(data.substr(0, 0x8000)); expected.append(data2.substr(0, 0x8000)); ASSERT_TRUE(bl_eq(expected, newdata)); } newdata.clear(); r = store->read(ch, hoid, 0x9000, 711 , newdata); ASSERT_EQ(r, 711); { bufferlist expected; expected.append(data2.substr(0x1000,711)); ASSERT_TRUE(bl_eq(expected, newdata)); } newdata.clear(); r = store->read(ch, hoid, 0x0, 0x40000, newdata); ASSERT_EQ(r, int(0x40000) ); { bufferlist expected; expected.append(data.substr(0, 0x8000)); expected.append(data2.substr(0, 0x37000)); expected.append(data.substr(0x3f000, 0x1000)); ASSERT_TRUE(bl_eq(expected, newdata)); } } data2.resize(0x3f000); for(size_t i = 0;i < data2.size(); i++) data2[i] = (i+2) / 256; { ObjectStore::Transaction t; bufferlist bl, newdata; bl.append(data2); t.write(cid, hoid, 0, bl.length(), bl); cerr << "CompressibleData partial overwrite, two extents overlapped, single one to be removed" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); r = store->read(ch, hoid, 0, 0x3e000 - 1, newdata); ASSERT_EQ(r, (int)0x3e000 - 1); { bufferlist expected; expected.append(data2.substr(0, 0x3e000 - 1)); ASSERT_TRUE(bl_eq(expected, newdata)); } newdata.clear(); r = store->read(ch, hoid, 0x3e000-1, 0x2001, newdata); ASSERT_EQ(r, 0x2001); { bufferlist expected; expected.append(data2.substr(0x3e000-1, 0x1001)); expected.append(data.substr(0x3f000, 0x1000)); ASSERT_TRUE(bl_eq(expected, newdata)); } newdata.clear(); r = store->read(ch, hoid, 0x0, 0x40000, newdata); ASSERT_EQ(r, int(0x40000) ); { bufferlist expected; 
expected.append(data2.substr(0, 0x3f000)); expected.append(data.substr(0x3f000, 0x1000)); ASSERT_TRUE(bl_eq(expected, newdata)); } } data.resize(0x1001); for(size_t i = 0;i < data.size(); i++) data[i] = (i+3) / 256; { ObjectStore::Transaction t; bufferlist bl, newdata; bl.append(data); t.write(cid, hoid, 0x3f000-1, bl.length(), bl); cerr << "Small chunk partial overwrite, two extents overlapped, single one to be removed" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); r = store->read(ch, hoid, 0x3e000, 0x2000, newdata); ASSERT_EQ(r, (int)0x2000); { bufferlist expected; expected.append(data2.substr(0x3e000, 0x1000 - 1)); expected.append(data.substr(0, 0x1001)); ASSERT_TRUE(bl_eq(expected, newdata)); } } { ObjectStore::Transaction t; t.remove(cid, hoid); cerr << "Cleaning object" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } //force fsck ch.reset(); EXPECT_EQ(store->umount(), 0); ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly EXPECT_EQ(store->mount(), 0); ch = store->open_collection(cid); auto settingsBookmark = BookmarkSettings(); SetVal(g_conf(), "bluestore_compression_min_blob_size", "262144"); g_ceph_context->_conf.apply_changes(nullptr); { data.resize(0x10000*6); for(size_t i = 0;i < data.size(); i++) data[i] = i / 256; ObjectStore::Transaction t; bufferlist bl, newdata; bl.append(data); t.write(cid, hoid, 0, bl.length(), bl); cerr << "CompressibleData large blob" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } //force fsck ch.reset(); EXPECT_EQ(store->umount(), 0); ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly EXPECT_EQ(store->mount(), 0); ch = store->open_collection(cid); { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove_collection(cid); cerr << "Cleaning" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTest, CompressionTest) { if (string(GetParam()) != "bluestore") return; if (smr) { 
cout << "TODO: need to adjust statfs check for smr" << std::endl; return; } SetVal(g_conf(), "bluestore_compression_algorithm", "snappy"); SetVal(g_conf(), "bluestore_compression_mode", "force"); g_ceph_context->_conf.apply_changes(nullptr); doCompressionTest(); SetVal(g_conf(), "bluestore_compression_algorithm", "zlib"); SetVal(g_conf(), "bluestore_compression_mode", "aggressive"); g_ceph_context->_conf.apply_changes(nullptr); doCompressionTest(); } TEST_P(StoreTest, SimpleObjectTest) { int r; coll_t cid; ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP))); { auto ch = store->open_collection(cid); ASSERT_FALSE(ch); } auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); cerr << "Creating collection " << cid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { bool exists = store->exists(ch, hoid); ASSERT_TRUE(!exists); ObjectStore::Transaction t; t.touch(cid, hoid); cerr << "Creating object " << hoid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); exists = store->exists(ch, hoid); ASSERT_EQ(true, exists); } { ObjectStore::Transaction t; t.remove(cid, hoid); t.touch(cid, hoid); cerr << "Remove then create" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; bufferlist bl, orig; bl.append("abcde"); orig = bl; t.remove(cid, hoid); t.write(cid, hoid, 0, 5, bl); cerr << "Remove then create" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); bufferlist in; r = store->read(ch, hoid, 0, 5, in); ASSERT_EQ(5, r); ASSERT_TRUE(bl_eq(orig, in)); } { ObjectStore::Transaction t; bufferlist bl, exp; bl.append("abcde"); exp = bl; exp.append(bl); t.write(cid, hoid, 5, 5, bl); cerr << "Append" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); bufferlist in; r = store->read(ch, hoid, 0, 10, in); ASSERT_EQ(10, r); ASSERT_TRUE(bl_eq(exp, in)); } { 
ObjectStore::Transaction t; bufferlist bl, exp; bl.append("abcdeabcde"); exp = bl; t.write(cid, hoid, 0, 10, bl); cerr << "Full overwrite" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); bufferlist in; r = store->read(ch, hoid, 0, 10, in); ASSERT_EQ(10, r); ASSERT_TRUE(bl_eq(exp, in)); } { ObjectStore::Transaction t; bufferlist bl; bl.append("abcde"); t.write(cid, hoid, 3, 5, bl); cerr << "Partial overwrite" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); bufferlist in, exp; exp.append("abcabcdede"); r = store->read(ch, hoid, 0, 10, in); ASSERT_EQ(10, r); in.hexdump(cout); ASSERT_TRUE(bl_eq(exp, in)); } { { ObjectStore::Transaction t; bufferlist bl; bl.append("fghij"); t.truncate(cid, hoid, 0); t.write(cid, hoid, 5, 5, bl); cerr << "Truncate + hole" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; bufferlist bl; bl.append("abcde"); t.write(cid, hoid, 0, 5, bl); cerr << "Reverse fill-in" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } bufferlist in, exp; exp.append("abcdefghij"); r = store->read(ch, hoid, 0, 10, in); ASSERT_EQ(10, r); in.hexdump(cout); ASSERT_TRUE(bl_eq(exp, in)); } { ObjectStore::Transaction t; bufferlist bl; bl.append("abcde01234012340123401234abcde01234012340123401234abcde01234012340123401234abcde01234012340123401234"); t.write(cid, hoid, 0, bl.length(), bl); cerr << "larger overwrite" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); bufferlist in; r = store->read(ch, hoid, 0, bl.length(), in); ASSERT_EQ((int)bl.length(), r); in.hexdump(cout); ASSERT_TRUE(bl_eq(bl, in)); } { bufferlist bl; bl.append("abcde01234012340123401234abcde01234012340123401234abcde01234012340123401234abcde01234012340123401234"); //test: offset=len=0 mean read all data bufferlist in; r = store->read(ch, hoid, 0, 0, in); ASSERT_EQ((int)bl.length(), r); in.hexdump(cout); ASSERT_TRUE(bl_eq(bl, 
in)); } { //verifying unaligned csums std::string s1("1"), s2(0x1000, '2'), s3("00"); { ObjectStore::Transaction t; bufferlist bl; bl.append(s1); bl.append(s2); t.truncate(cid, hoid, 0); t.write(cid, hoid, 0x1000-1, bl.length(), bl); cerr << "Write unaligned csum, stage 1" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } bufferlist in, exp1, exp2, exp3; exp1.append(s1); exp2.append(s2); exp3.append(s3); r = store->read(ch, hoid, 0x1000-1, 1, in); ASSERT_EQ(1, r); ASSERT_TRUE(bl_eq(exp1, in)); in.clear(); r = store->read(ch, hoid, 0x1000, 0x1000, in); ASSERT_EQ(0x1000, r); ASSERT_TRUE(bl_eq(exp2, in)); { ObjectStore::Transaction t; bufferlist bl; bl.append(s3); t.write(cid, hoid, 1, bl.length(), bl); cerr << "Write unaligned csum, stage 2" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } in.clear(); r = store->read(ch, hoid, 1, 2, in); ASSERT_EQ(2, r); ASSERT_TRUE(bl_eq(exp3, in)); in.clear(); r = store->read(ch, hoid, 0x1000-1, 1, in); ASSERT_EQ(1, r); ASSERT_TRUE(bl_eq(exp1, in)); in.clear(); r = store->read(ch, hoid, 0x1000, 0x1000, in); ASSERT_EQ(0x1000, r); ASSERT_TRUE(bl_eq(exp2, in)); } { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove_collection(cid); cerr << "Cleaning" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } #if defined(WITH_BLUESTORE) TEST_P(StoreTestSpecificAUSize, ReproBug41901Test) { if(string(GetParam()) != "bluestore") return; if (smr) { cout << "SKIP (smr)" << std::endl; return; } SetVal(g_conf(), "bluestore_max_blob_size", "524288"); SetVal(g_conf(), "bluestore_debug_enforce_settings", "hdd"); g_conf().apply_changes(nullptr); StartDeferred(65536); int r; coll_t cid; ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP))); const PerfCounters* logger = store->get_perf_counters(); auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); cerr << "Creating collection " << cid << 
std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { bool exists = store->exists(ch, hoid); ASSERT_TRUE(!exists); ObjectStore::Transaction t; t.touch(cid, hoid); cerr << "Creating object " << hoid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); exists = store->exists(ch, hoid); ASSERT_EQ(true, exists); } { ObjectStore::Transaction t; bufferlist bl, orig; string s(4096, 'a'); bl.append(s); t.write(cid, hoid, 0x11000, bl.length(), bl); cerr << "write1" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; bufferlist bl, orig; string s(4096 * 3, 'a'); bl.append(s); t.write(cid, hoid, 0x15000, bl.length(), bl); cerr << "write2" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } ASSERT_EQ(logger->get(l_bluestore_write_small), 2u); ASSERT_EQ(logger->get(l_bluestore_write_small_unused), 1u); { ObjectStore::Transaction t; bufferlist bl, orig; string s(4096 * 2, 'a'); bl.append(s); t.write(cid, hoid, 0xe000, bl.length(), bl); cerr << "write3" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } ASSERT_EQ(logger->get(l_bluestore_write_small), 3u); ASSERT_EQ(logger->get(l_bluestore_write_small_unused), 2u); { ObjectStore::Transaction t; bufferlist bl, orig; string s(4096, 'a'); bl.append(s); t.write(cid, hoid, 0xf000, bl.length(), bl); t.write(cid, hoid, 0x10000, bl.length(), bl); cerr << "write3" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } ASSERT_EQ(logger->get(l_bluestore_write_small), 5u); ASSERT_EQ(logger->get(l_bluestore_write_small_unused), 2u); { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove_collection(cid); cerr << "Cleaning" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTestSpecificAUSize, BluestoreStatFSTest) { if(string(GetParam()) != "bluestore") return; if (smr) { cout << "TODO: fix this for smr" 
<< std::endl; return; } SetVal(g_conf(), "bluestore_block_db_path", ""); StartDeferred(65536); SetVal(g_conf(), "bluestore_compression_mode", "force"); SetVal(g_conf(), "bluestore_max_blob_size", "524288"); // just a big number to disble gc SetVal(g_conf(), "bluestore_gc_enable_total_threshold", "100000"); SetVal(g_conf(), "bluestore_fsck_on_umount", "true"); g_conf().apply_changes(nullptr); int r; int poolid = 4373; coll_t cid = coll_t(spg_t(pg_t(0, poolid), shard_id_t::NO_SHARD)); ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP), string(), 0, poolid, string())); ghobject_t hoid2 = hoid; hoid2.hobj.snap = 1; { auto ch = store->open_collection(cid); ASSERT_FALSE(ch); } auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); cerr << "Creating collection " << cid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { bool exists = store->exists(ch, hoid); ASSERT_TRUE(!exists); ObjectStore::Transaction t; t.touch(cid, hoid); cerr << "Creating object " << hoid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); exists = store->exists(ch, hoid); ASSERT_EQ(true, exists); } { struct store_statfs_t statfs; int r = store->statfs(&statfs); ASSERT_EQ(r, 0); ASSERT_EQ( 0u, statfs.allocated); ASSERT_EQ( 0u, statfs.data_stored); ASSERT_EQ(g_conf()->bluestore_block_size, statfs.total); ASSERT_TRUE(statfs.available > 0u && statfs.available < g_conf()->bluestore_block_size); struct store_statfs_t statfs_pool; bool per_pool_omap; r = store->pool_statfs(poolid, &statfs_pool, &per_pool_omap); ASSERT_EQ(r, 0); ASSERT_EQ( 0u, statfs_pool.allocated); ASSERT_EQ( 0u, statfs_pool.data_stored); //force fsck ch.reset(); EXPECT_EQ(store->umount(), 0); ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly EXPECT_EQ(store->mount(), 0); ch = store->open_collection(cid); } { ObjectStore::Transaction t; bufferlist bl; bl.append("abcde"); t.write(cid, hoid, 0, 5, bl); cerr << "Append 
5 bytes" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); struct store_statfs_t statfs; int r = store->statfs(&statfs); ASSERT_EQ(r, 0); ASSERT_EQ(5, statfs.data_stored); ASSERT_EQ(0x10000, statfs.allocated); ASSERT_EQ(0, statfs.data_compressed); ASSERT_EQ(0, statfs.data_compressed_original); ASSERT_EQ(0, statfs.data_compressed_allocated); struct store_statfs_t statfs_pool; bool per_pool_omap; r = store->pool_statfs(poolid, &statfs_pool, &per_pool_omap); ASSERT_EQ(r, 0); ASSERT_EQ(5, statfs_pool.data_stored); ASSERT_EQ(0x10000, statfs_pool.allocated); ASSERT_EQ(0, statfs_pool.data_compressed); ASSERT_EQ(0, statfs_pool.data_compressed_original); ASSERT_EQ(0, statfs_pool.data_compressed_allocated); // accessing unknown pool r = store->pool_statfs(poolid + 1, &statfs_pool, &per_pool_omap); ASSERT_EQ(r, 0); ASSERT_EQ(0, statfs_pool.data_stored); ASSERT_EQ(0, statfs_pool.allocated); ASSERT_EQ(0, statfs_pool.data_compressed); ASSERT_EQ(0, statfs_pool.data_compressed_original); ASSERT_EQ(0, statfs_pool.data_compressed_allocated); //force fsck ch.reset(); EXPECT_EQ(store->umount(), 0); ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly EXPECT_EQ(store->mount(), 0); ch = store->open_collection(cid); } { ObjectStore::Transaction t; std::string s(0x30000, 'a'); bufferlist bl; bl.append(s); t.write(cid, hoid, 0x10000, bl.length(), bl); cerr << "Append 0x30000 compressible bytes" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); struct store_statfs_t statfs; int r = store->statfs(&statfs); ASSERT_EQ(r, 0); ASSERT_EQ(0x30005, statfs.data_stored); ASSERT_EQ(0x30000, statfs.allocated); ASSERT_LE(statfs.data_compressed, 0x10000); ASSERT_EQ(0x20000, statfs.data_compressed_original); ASSERT_EQ(statfs.data_compressed_allocated, 0x10000); struct store_statfs_t statfs_pool; bool per_pool_omap; r = store->pool_statfs(poolid, &statfs_pool, &per_pool_omap); ASSERT_EQ(r, 0); ASSERT_EQ(0x30005, statfs_pool.data_stored); 
ASSERT_EQ(0x30000, statfs_pool.allocated); ASSERT_LE(statfs_pool.data_compressed, 0x10000); ASSERT_EQ(0x20000, statfs_pool.data_compressed_original); ASSERT_EQ(statfs_pool.data_compressed_allocated, 0x10000); //force fsck ch.reset(); EXPECT_EQ(store->umount(), 0); ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly EXPECT_EQ(store->mount(), 0); ch = store->open_collection(cid); } { ObjectStore::Transaction t; t.zero(cid, hoid, 1, 3); t.zero(cid, hoid, 0x20000, 9); cerr << "Punch hole at 1~3, 0x20000~9" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); struct store_statfs_t statfs; int r = store->statfs(&statfs); ASSERT_EQ(r, 0); ASSERT_EQ(0x30005 - 3 - 9, statfs.data_stored); ASSERT_EQ(0x30000, statfs.allocated); ASSERT_LE(statfs.data_compressed, 0x10000); ASSERT_EQ(0x20000 - 9, statfs.data_compressed_original); ASSERT_EQ(statfs.data_compressed_allocated, 0x10000); struct store_statfs_t statfs_pool; bool per_pool_omap; r = store->pool_statfs(poolid, &statfs_pool, &per_pool_omap); ASSERT_EQ(r, 0); ASSERT_EQ(0x30005 - 3 - 9, statfs_pool.data_stored); ASSERT_EQ(0x30000, statfs_pool.allocated); ASSERT_LE(statfs_pool.data_compressed, 0x10000); ASSERT_EQ(0x20000 - 9, statfs_pool.data_compressed_original); ASSERT_EQ(statfs_pool.data_compressed_allocated, 0x10000); //force fsck ch.reset(); EXPECT_EQ(store->umount(), 0); ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly EXPECT_EQ(store->mount(), 0); ch = store->open_collection(cid); } { ObjectStore::Transaction t; std::string s(0x1000, 'b'); bufferlist bl; bl.append(s); t.write(cid, hoid, 1, bl.length(), bl); t.write(cid, hoid, 0x10001, bl.length(), bl); cerr << "Overwrite first and second(compressible) extents" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); struct store_statfs_t statfs; int r = store->statfs(&statfs); ASSERT_EQ(r, 0); ASSERT_EQ(0x30001 - 9 + 0x1000, statfs.data_stored); ASSERT_EQ(0x40000, statfs.allocated); 
ASSERT_LE(statfs.data_compressed, 0x10000); ASSERT_EQ(0x20000 - 9 - 0x1000, statfs.data_compressed_original); ASSERT_EQ(statfs.data_compressed_allocated, 0x10000); struct store_statfs_t statfs_pool; bool per_pool_omap; r = store->pool_statfs(poolid, &statfs_pool, &per_pool_omap); ASSERT_EQ(r, 0); ASSERT_EQ(0x30001 - 9 + 0x1000, statfs_pool.data_stored); ASSERT_EQ(0x40000, statfs_pool.allocated); ASSERT_LE(statfs_pool.data_compressed, 0x10000); ASSERT_EQ(0x20000 - 9 - 0x1000, statfs_pool.data_compressed_original); ASSERT_EQ(statfs_pool.data_compressed_allocated, 0x10000); //force fsck ch.reset(); EXPECT_EQ(store->umount(), 0); ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly EXPECT_EQ(store->mount(), 0); ch = store->open_collection(cid); } { ObjectStore::Transaction t; std::string s(0x10000, 'c'); bufferlist bl; bl.append(s); t.write(cid, hoid, 0x10000, bl.length(), bl); t.write(cid, hoid, 0x20000, bl.length(), bl); t.write(cid, hoid, 0x30000, bl.length(), bl); cerr << "Overwrite compressed extent with 3 uncompressible ones" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); struct store_statfs_t statfs; int r = store->statfs(&statfs); ASSERT_EQ(r, 0); ASSERT_EQ(0x30000 + 0x1001, statfs.data_stored); ASSERT_EQ(0x40000, statfs.allocated); ASSERT_LE(statfs.data_compressed, 0); ASSERT_EQ(0, statfs.data_compressed_original); ASSERT_EQ(0, statfs.data_compressed_allocated); struct store_statfs_t statfs_pool; bool per_pool_omap; r = store->pool_statfs(poolid, &statfs_pool, &per_pool_omap); ASSERT_EQ(r, 0); ASSERT_EQ(0x30000 + 0x1001, statfs_pool.data_stored); ASSERT_EQ(0x40000, statfs_pool.allocated); ASSERT_LE(statfs_pool.data_compressed, 0); ASSERT_EQ(0, statfs_pool.data_compressed_original); ASSERT_EQ(0, statfs_pool.data_compressed_allocated); //force fsck ch.reset(); EXPECT_EQ(store->umount(), 0); ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly EXPECT_EQ(store->mount(), 0); ch = store->open_collection(cid); } { 
ObjectStore::Transaction t; t.zero(cid, hoid, 0, 0x40000); cerr << "Zero object" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); struct store_statfs_t statfs; int r = store->statfs(&statfs); ASSERT_EQ(r, 0); ASSERT_EQ(0u, statfs.allocated); ASSERT_EQ(0u, statfs.data_stored); ASSERT_EQ(0u, statfs.data_compressed_original); ASSERT_EQ(0u, statfs.data_compressed); ASSERT_EQ(0u, statfs.data_compressed_allocated); struct store_statfs_t statfs_pool; bool per_pool_omap; r = store->pool_statfs(poolid, &statfs_pool, &per_pool_omap); ASSERT_EQ(r, 0); ASSERT_EQ(0u, statfs_pool.allocated); ASSERT_EQ(0u, statfs_pool.data_stored); ASSERT_EQ(0u, statfs_pool.data_compressed_original); ASSERT_EQ(0u, statfs_pool.data_compressed); ASSERT_EQ(0u, statfs_pool.data_compressed_allocated); //force fsck ch.reset(); EXPECT_EQ(store->umount(), 0); ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly EXPECT_EQ(store->mount(), 0); ch = store->open_collection(cid); } { ObjectStore::Transaction t; std::string s(0x10000, 'c'); bufferlist bl; bl.append(s); bl.append(s); bl.append(s); bl.append(s.substr(0, 0x10000-2)); t.write(cid, hoid, 0, bl.length(), bl); cerr << "Yet another compressible write" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); struct store_statfs_t statfs; r = store->statfs(&statfs); ASSERT_EQ(r, 0); ASSERT_EQ(0x40000 - 2, statfs.data_stored); ASSERT_EQ(0x30000, statfs.allocated); ASSERT_LE(statfs.data_compressed, 0x10000); ASSERT_EQ(0x20000, statfs.data_compressed_original); ASSERT_EQ(0x10000, statfs.data_compressed_allocated); struct store_statfs_t statfs_pool; bool per_pool_omap; r = store->pool_statfs(poolid, &statfs_pool, &per_pool_omap); ASSERT_EQ(r, 0); ASSERT_EQ(0x40000 - 2, statfs_pool.data_stored); ASSERT_EQ(0x30000, statfs_pool.allocated); ASSERT_LE(statfs_pool.data_compressed, 0x10000); ASSERT_EQ(0x20000, statfs_pool.data_compressed_original); ASSERT_EQ(0x10000, statfs_pool.data_compressed_allocated); 
//force fsck ch.reset(); EXPECT_EQ(store->umount(), 0); ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly EXPECT_EQ(store->mount(), 0); ch = store->open_collection(cid); } { struct store_statfs_t statfs; r = store->statfs(&statfs); ASSERT_EQ(r, 0); struct store_statfs_t statfs_pool; bool per_pool_omap; r = store->pool_statfs(poolid, &statfs_pool, &per_pool_omap); ASSERT_EQ(r, 0); ObjectStore::Transaction t; t.clone(cid, hoid, hoid2); cerr << "Clone compressed objecte" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); struct store_statfs_t statfs2; r = store->statfs(&statfs2); ASSERT_EQ(r, 0); ASSERT_GT(statfs2.data_stored, statfs.data_stored); ASSERT_EQ(statfs2.allocated, statfs.allocated); ASSERT_GT(statfs2.data_compressed, statfs.data_compressed); ASSERT_GT(statfs2.data_compressed_original, statfs.data_compressed_original); ASSERT_EQ(statfs2.data_compressed_allocated, statfs.data_compressed_allocated); struct store_statfs_t statfs2_pool; r = store->pool_statfs(poolid, &statfs2_pool, &per_pool_omap); ASSERT_EQ(r, 0); ASSERT_GT(statfs2_pool.data_stored, statfs_pool.data_stored); ASSERT_EQ(statfs2_pool.allocated, statfs_pool.allocated); ASSERT_GT(statfs2_pool.data_compressed, statfs_pool.data_compressed); ASSERT_GT(statfs2_pool.data_compressed_original, statfs_pool.data_compressed_original); ASSERT_EQ(statfs2_pool.data_compressed_allocated, statfs_pool.data_compressed_allocated); } { // verify no auto poolid2 = poolid + 1; coll_t cid2 = coll_t(spg_t(pg_t(20, poolid2), shard_id_t::NO_SHARD)); ghobject_t hoid(hobject_t(sobject_t("Object 2", CEPH_NOSNAP), string(), 0, poolid2, string())); auto ch = store->create_new_collection(cid2); { struct store_statfs_t statfs1_pool; bool per_pool_omap; int r = store->pool_statfs(poolid, &statfs1_pool, &per_pool_omap); ASSERT_EQ(r, 0); cerr << "Creating second collection " << cid2 << std::endl; ObjectStore::Transaction t; t.create_collection(cid2, 0); r = queue_transaction(store, ch, 
std::move(t)); ASSERT_EQ(r, 0); t = ObjectStore::Transaction(); bufferlist bl; bl.append("abcde"); t.write(cid2, hoid, 0, 5, bl); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); struct store_statfs_t statfs2_pool; r = store->pool_statfs(poolid2, &statfs2_pool, &per_pool_omap); ASSERT_EQ(r, 0); ASSERT_EQ(5, statfs2_pool.data_stored); ASSERT_EQ(0x10000, statfs2_pool.allocated); ASSERT_EQ(0, statfs2_pool.data_compressed); ASSERT_EQ(0, statfs2_pool.data_compressed_original); ASSERT_EQ(0, statfs2_pool.data_compressed_allocated); struct store_statfs_t statfs1_pool_again; r = store->pool_statfs(poolid, &statfs1_pool_again, &per_pool_omap); ASSERT_EQ(r, 0); // adjust 'available' since it has changed statfs1_pool_again.available = statfs1_pool.available; ASSERT_EQ(statfs1_pool_again, statfs1_pool); t = ObjectStore::Transaction(); t.remove(cid2, hoid); t.remove_collection(cid2); cerr << "Cleaning" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } { // verify ops on temporary object auto poolid3 = poolid + 2; coll_t cid3 = coll_t(spg_t(pg_t(20, poolid3), shard_id_t::NO_SHARD)); ghobject_t hoid3(hobject_t(sobject_t("Object 3", CEPH_NOSNAP), string(), 0, poolid3, string())); ghobject_t hoid3_temp; hoid3_temp.hobj = hoid3.hobj.make_temp_hobject("Object 3 temp"); auto ch3 = store->create_new_collection(cid3); { struct store_statfs_t statfs1_pool; bool per_pool_omap; int r = store->pool_statfs(poolid, &statfs1_pool, &per_pool_omap); ASSERT_EQ(r, 0); cerr << "Creating third collection " << cid3 << std::endl; ObjectStore::Transaction t; t.create_collection(cid3, 0); r = queue_transaction(store, ch3, std::move(t)); ASSERT_EQ(r, 0); t = ObjectStore::Transaction(); bufferlist bl; bl.append("abcde"); t.write(cid3, hoid3_temp, 0, 5, bl); r = queue_transaction(store, ch3, std::move(t)); ASSERT_EQ(r, 0); struct store_statfs_t statfs3_pool; r = store->pool_statfs(poolid3, &statfs3_pool, &per_pool_omap); ASSERT_EQ(r, 0); ASSERT_EQ(5, 
statfs3_pool.data_stored); ASSERT_EQ(0x10000, statfs3_pool.allocated); ASSERT_EQ(0, statfs3_pool.data_compressed); ASSERT_EQ(0, statfs3_pool.data_compressed_original); ASSERT_EQ(0, statfs3_pool.data_compressed_allocated); struct store_statfs_t statfs1_pool_again; r = store->pool_statfs(poolid, &statfs1_pool_again, &per_pool_omap); ASSERT_EQ(r, 0); // adjust 'available' since it has changed statfs1_pool_again.available = statfs1_pool.available; ASSERT_EQ(statfs1_pool_again, statfs1_pool); //force fsck ch.reset(); ch3.reset(); EXPECT_EQ(store->umount(), 0); EXPECT_EQ(store->mount(), 0); ch = store->open_collection(cid); ch3 = store->open_collection(cid3); t = ObjectStore::Transaction(); t.collection_move_rename( cid3, hoid3_temp, cid3, hoid3); r = queue_transaction(store, ch3, std::move(t)); ASSERT_EQ(r, 0); struct store_statfs_t statfs3_pool_again; r = store->pool_statfs(poolid3, &statfs3_pool_again, &per_pool_omap); ASSERT_EQ(r, 0); ASSERT_EQ(statfs3_pool_again, statfs3_pool); //force fsck ch.reset(); ch3.reset(); EXPECT_EQ(store->umount(), 0); EXPECT_EQ(store->mount(), 0); ch = store->open_collection(cid); ch3 = store->open_collection(cid3); t = ObjectStore::Transaction(); t.remove(cid3, hoid3); t.remove_collection(cid3); cerr << "Cleaning" << std::endl; r = queue_transaction(store, ch3, std::move(t)); ASSERT_EQ(r, 0); } } { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove(cid, hoid2); t.remove_collection(cid); cerr << "Cleaning" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); struct store_statfs_t statfs; r = store->statfs(&statfs); ASSERT_EQ(r, 0); ASSERT_EQ( 0u, statfs.allocated); ASSERT_EQ( 0u, statfs.data_stored); ASSERT_EQ( 0u, statfs.data_compressed_original); ASSERT_EQ( 0u, statfs.data_compressed); ASSERT_EQ( 0u, statfs.data_compressed_allocated); struct store_statfs_t statfs_pool; bool per_pool_omap; r = store->pool_statfs(poolid, &statfs_pool, &per_pool_omap); ASSERT_EQ(r, 0); ASSERT_EQ( 0u, statfs_pool.allocated); 
ASSERT_EQ( 0u, statfs_pool.data_stored); ASSERT_EQ( 0u, statfs_pool.data_compressed_original); ASSERT_EQ( 0u, statfs_pool.data_compressed); ASSERT_EQ( 0u, statfs_pool.data_compressed_allocated); } } TEST_P(StoreTestSpecificAUSize, BluestoreFragmentedBlobTest) { if(string(GetParam()) != "bluestore") return; if (smr) { cout << "TODO: fix this for smr" << std::endl; return; } SetVal(g_conf(), "bluestore_block_db_path", ""); StartDeferred(0x10000); int r; coll_t cid; ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP))); auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); cerr << "Creating collection " << cid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { bool exists = store->exists(ch, hoid); ASSERT_TRUE(!exists); ObjectStore::Transaction t; t.touch(cid, hoid); cerr << "Creating object " << hoid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); exists = store->exists(ch, hoid); ASSERT_EQ(true, exists); } { struct store_statfs_t statfs; int r = store->statfs(&statfs); ASSERT_EQ(r, 0); ASSERT_EQ(g_conf()->bluestore_block_size, statfs.total); ASSERT_EQ(0u, statfs.allocated); ASSERT_EQ(0u, statfs.data_stored); ASSERT_TRUE(statfs.available > 0u && statfs.available < g_conf()->bluestore_block_size); } std::string data; data.resize(0x10000 * 3); { ObjectStore::Transaction t; for(size_t i = 0;i < data.size(); i++) data[i] = i / 256 + 1; bufferlist bl, newdata; bl.append(data); t.write(cid, hoid, 0, bl.length(), bl); t.zero(cid, hoid, 0x10000, 0x10000); cerr << "Append 3*0x10000 bytes and punch a hole 0x10000~10000" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); struct store_statfs_t statfs; int r = store->statfs(&statfs); ASSERT_EQ(r, 0); ASSERT_EQ(0x20000, statfs.data_stored); ASSERT_EQ(0x20000, statfs.allocated); r = store->read(ch, hoid, 0, data.size(), newdata); ASSERT_EQ(r, (int)data.size()); { bufferlist expected; 
expected.append(data.substr(0, 0x10000)); expected.append(string(0x10000, 0)); expected.append(data.substr(0x20000, 0x10000)); ASSERT_TRUE(bl_eq(expected, newdata)); } newdata.clear(); r = store->read(ch, hoid, 1, data.size()-2, newdata); ASSERT_EQ(r, (int)data.size()-2); { bufferlist expected; expected.append(data.substr(1, 0x10000-1)); expected.append(string(0x10000, 0)); expected.append(data.substr(0x20000, 0x10000 - 1)); ASSERT_TRUE(bl_eq(expected, newdata)); } newdata.clear(); } //force fsck ch.reset(); EXPECT_EQ(store->umount(), 0); ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly EXPECT_EQ(store->mount(), 0); ch = store->open_collection(cid); { ObjectStore::Transaction t; std::string data2(3, 'b'); bufferlist bl, newdata; bl.append(data2); t.write(cid, hoid, 0x20000, bl.length(), bl); cerr << "Write 3 bytes after the hole" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); struct store_statfs_t statfs; int r = store->statfs(&statfs); ASSERT_EQ(r, 0); ASSERT_EQ(0x20000, statfs.allocated); ASSERT_EQ(0x20000, statfs.data_stored); r = store->read(ch, hoid, 0x20000-1, 21, newdata); ASSERT_EQ(r, (int)21); { bufferlist expected; expected.append(string(0x1, 0)); expected.append(string(data2)); expected.append(data.substr(0x20003, 21-4)); ASSERT_TRUE(bl_eq(expected, newdata)); } newdata.clear(); } //force fsck ch.reset(); EXPECT_EQ(store->umount(), 0); ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly EXPECT_EQ(store->mount(), 0); ch = store->open_collection(cid); { ObjectStore::Transaction t; std::string data2(3, 'a'); bufferlist bl, newdata; bl.append(data2); t.write(cid, hoid, 0x10000+1, bl.length(), bl); cerr << "Write 3 bytes to the hole" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); struct store_statfs_t statfs; int r = store->statfs(&statfs); ASSERT_EQ(r, 0); ASSERT_EQ(0x30000, statfs.allocated); ASSERT_EQ(0x20003, statfs.data_stored); r = store->read(ch, hoid, 0x10000-1, 0x10000+22, 
newdata);
    ASSERT_EQ(r, (int)0x10000+22);
    {
      // Expected layout around the hole after the 3-byte write at
      // 0x10001: last original byte, one zero, the new 'a' bytes,
      // zeros, the earlier 3 'b' bytes at 0x20000, then original data.
      bufferlist expected;
      expected.append(data.substr(0x10000-1, 1));
      expected.append(string(0x1, 0));
      expected.append(data2);
      expected.append(string(0x10000-4, 0));
      expected.append(string(0x3, 'b'));
      expected.append(data.substr(0x20004, 21-3));
      ASSERT_TRUE(bl_eq(expected, newdata));
    }
    newdata.clear();
  }
  {
    ObjectStore::Transaction t;
    bufferlist bl, newdata;
    bl.append(string(0x30000, 'c'));
    t.write(cid, hoid, 0, 0x30000, bl);
    t.zero(cid, hoid, 0, 0x10000);
    t.zero(cid, hoid, 0x20000, 0x10000);
    cerr << "Rewrite an object and create two holes at the beginning and the end" << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
    // Only the middle 0x10000 remains allocated/stored after punching
    // both edges.  (inner 'r' deliberately shadows the outer one)
    struct store_statfs_t statfs;
    int r = store->statfs(&statfs);
    ASSERT_EQ(r, 0);
    ASSERT_EQ(0x10000, statfs.allocated);
    ASSERT_EQ(0x10000, statfs.data_stored);
    r = store->read(ch, hoid, 0, 0x30000, newdata);
    ASSERT_EQ(r, (int)0x30000);
    {
      bufferlist expected;
      expected.append(string(0x10000, 0));
      expected.append(string(0x10000, 'c'));
      expected.append(string(0x10000, 0));
      ASSERT_TRUE(bl_eq(expected, newdata));
    }
    newdata.clear();
  }
  //force fsck
  ch.reset();
  EXPECT_EQ(store->umount(), 0);
  ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly
  EXPECT_EQ(store->mount(), 0);
  ch = store->open_collection(cid);
  {
    ObjectStore::Transaction t;
    t.remove(cid, hoid);
    t.remove_collection(cid);
    cerr << "Cleaning" << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
    // Everything removed: all usage counters must drop back to zero.
    struct store_statfs_t statfs;
    r = store->statfs(&statfs);
    ASSERT_EQ(r, 0);
    ASSERT_EQ( 0u, statfs.allocated);
    ASSERT_EQ( 0u, statfs.data_stored);
    ASSERT_EQ( 0u, statfs.data_compressed_original);
    ASSERT_EQ( 0u, statfs.data_compressed);
    ASSERT_EQ( 0u, statfs.data_compressed_allocated);
  }
}
#endif

// Many small (4k) writes: sequential appends to object 'a' and random
// single-block writes to object 'b'; only checks that each transaction
// commits successfully.
TEST_P(StoreTest, ManySmallWrite) {
  int r;
  coll_t cid;
  ghobject_t a(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
  ghobject_t b(hobject_t(sobject_t("Object 2", CEPH_NOSNAP)));
  auto ch = store->create_new_collection(cid);
  {
ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    cerr << "Creating collection " << cid << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  // 100 sequential 4k writes to 'a', then 100 random 4k writes to 'b'
  // within a 4MB span; each transaction must commit cleanly.
  bufferlist bl;
  bufferptr bp(4096);
  bp.zero();
  bl.append(bp);
  for (int i=0; i<100; ++i) {
    ObjectStore::Transaction t;
    t.write(cid, a, i*4096, 4096, bl, 0);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  for (int i=0; i<100; ++i) {
    ObjectStore::Transaction t;
    t.write(cid, b, (rand() % 1024)*4096, 4096, bl, 0);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  {
    ObjectStore::Transaction t;
    t.remove(cid, a);
    t.remove(cid, b);
    t.remove_collection(cid);
    cerr << "Cleaning" << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
}

// Issue overlapping small writes to the same blocks from within one
// transaction, from two transactions queued together, and from two
// pipelined batches; after both batches commit, the read must succeed.
TEST_P(StoreTest, MultiSmallWriteSameBlock) {
  int r;
  coll_t cid;
  ghobject_t a(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
  auto ch = store->create_new_collection(cid);
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    cerr << "Creating collection " << cid << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  bufferlist bl;
  bl.append("short");
  C_SaferCond c, d;
  // touch same block in both same transaction, tls, and pipelined txns
  {
    ObjectStore::Transaction t, u;
    t.write(cid, a, 0, 5, bl, 0);
    t.write(cid, a, 5, 5, bl, 0);
    t.write(cid, a, 4094, 5, bl, 0);
    t.write(cid, a, 9000, 5, bl, 0);
    u.write(cid, a, 10, 5, bl, 0);
    u.write(cid, a, 7000, 5, bl, 0);
    t.register_on_commit(&c);
    vector<ObjectStore::Transaction> v = {t, u};
    store->queue_transactions(ch, v);
  }
  {
    ObjectStore::Transaction t, u;
    t.write(cid, a, 40, 5, bl, 0);
    t.write(cid, a, 45, 5, bl, 0);
    t.write(cid, a, 4094, 5, bl, 0);
    t.write(cid, a, 6000, 5, bl, 0);
    u.write(cid, a, 610, 5, bl, 0);
    u.write(cid, a, 11000, 5, bl, 0);
    t.register_on_commit(&d);
    vector<ObjectStore::Transaction> v = {t, u};
    store->queue_transactions(ch, v);
  }
  // wait for both batches to commit before reading back
  c.wait();
  d.wait();
  {
    bufferlist bl2;
    r = store->read(ch, a, 0, 16000, bl2);
    ASSERT_GE(r, 0);
  }
{
    ObjectStore::Transaction t;
    t.remove(cid, a);
    t.remove_collection(cid);
    cerr << "Cleaning" << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
}

// Truncate an object up to 3000 bytes, then write one 4k block at
// offset 4096; the untouched front (0-4095) must read back as zeros and
// the written block as 0x01 bytes.
TEST_P(StoreTest, SmallSkipFront) {
  int r;
  coll_t cid;
  ghobject_t a(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
  auto ch = store->create_new_collection(cid);
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    cerr << "Creating collection " << cid << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  {
    ObjectStore::Transaction t;
    t.touch(cid, a);
    t.truncate(cid, a, 3000);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  {
    bufferlist bl;
    bufferptr bp(4096);
    memset(bp.c_str(), 1, 4096);
    bl.append(bp);
    ObjectStore::Transaction t;
    t.write(cid, a, 4096, 4096, bl);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  {
    bufferlist bl;
    ASSERT_EQ(8192, store->read(ch, a, 0, 8192, bl));
    for (unsigned i=0; i<4096; ++i)
      ASSERT_EQ(0, bl[i]);
    for (unsigned i=4096; i<8192; ++i)
      ASSERT_EQ(1, bl[i]);
  }
  {
    ObjectStore::Transaction t;
    t.remove(cid, a);
    t.remove_collection(cid);
    cerr << "Cleaning" << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
}

// Append to an object in min_alloc/3-sized chunks with a remount in
// between (per the test name, to defeat any cached tail state); the
// final read must return all three chunks concatenated.
TEST_P(StoreTest, AppendDeferredVsTailCache) {
  int r;
  coll_t cid;
  ghobject_t a(hobject_t(sobject_t("fooo", CEPH_NOSNAP)));
  auto ch = store->create_new_collection(cid);
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    cerr << "Creating collection " << cid << std::endl;
    r = store->queue_transaction(ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  // deliberately sub-allocation-unit sized chunks
  unsigned min_alloc = g_conf()->bluestore_min_alloc_size;
  unsigned size = min_alloc / 3;
  bufferptr bpa(size);
  memset(bpa.c_str(), 1, bpa.length());
  bufferlist bla;
  bla.append(bpa);
  {
    ObjectStore::Transaction t;
    t.write(cid, a, 0, bla.length(), bla, 0);
    r = store->queue_transaction(ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  // force cached tail to clear ...
{
    // remount so any in-memory tail state is dropped
    // (inner 'r' deliberately shadows the outer one)
    ch.reset();
    int r = store->umount();
    ASSERT_EQ(0, r);
    r = store->mount();
    ASSERT_EQ(0, r);
    ch = store->open_collection(cid);
  }
  // append second chunk (0x02 bytes) right after the first
  bufferptr bpb(size);
  memset(bpb.c_str(), 2, bpb.length());
  bufferlist blb;
  blb.append(bpb);
  {
    ObjectStore::Transaction t;
    t.write(cid, a, bla.length(), blb.length(), blb, 0);
    r = store->queue_transaction(ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  // append third chunk (0x03 bytes)
  bufferptr bpc(size);
  memset(bpc.c_str(), 3, bpc.length());
  bufferlist blc;
  blc.append(bpc);
  {
    ObjectStore::Transaction t;
    t.write(cid, a, bla.length() + blb.length(), blc.length(), blc, 0);
    r = store->queue_transaction(ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  // read back must match the concatenation of all three chunks
  bufferlist final;
  final.append(bla);
  final.append(blb);
  final.append(blc);
  bufferlist actual;
  {
    ASSERT_EQ((int)final.length(),
              store->read(ch, a, 0, final.length(), actual));
    ASSERT_TRUE(bl_eq(final, actual));
  }
  {
    ObjectStore::Transaction t;
    t.remove(cid, a);
    t.remove_collection(cid);
    cerr << "Cleaning" << std::endl;
    r = store->queue_transaction(ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
}

// Write past the end of a cloned (shared-block) object; the implicitly
// zeroed gap must read back as zeros even though the truncated last
// block still contains stale bytes on disk.
TEST_P(StoreTest, AppendZeroTrailingSharedBlock) {
  int r;
  coll_t cid;
  ghobject_t a(hobject_t(sobject_t("fooo", CEPH_NOSNAP)));
  ghobject_t b = a;
  b.hobj.snap = 1;
  auto ch = store->create_new_collection(cid);
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    cerr << "Creating collection " << cid << std::endl;
    r = store->queue_transaction(ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  unsigned min_alloc = g_conf()->bluestore_min_alloc_size;
  unsigned size = min_alloc / 3;
  bufferptr bpa(size);
  memset(bpa.c_str(), 1, bpa.length());
  bufferlist bla;
  bla.append(bpa);
  // make sure there is some trailing gunk in the last block
  {
    bufferlist bt;
    bt.append(bla);
    bt.append("BADBADBADBAD");
    ObjectStore::Transaction t;
    t.write(cid, a, 0, bt.length(), bt, 0);
    r = store->queue_transaction(ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  {
    ObjectStore::Transaction t;
    t.truncate(cid, a, size);
    r = store->queue_transaction(ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  // clone
  {
    ObjectStore::Transaction t;
t.clone(cid, a, b); r = store->queue_transaction(ch, std::move(t)); ASSERT_EQ(r, 0); } // append with implicit zeroing bufferptr bpb(size); memset(bpb.c_str(), 2, bpb.length()); bufferlist blb; blb.append(bpb); { ObjectStore::Transaction t; t.write(cid, a, min_alloc * 3, blb.length(), blb, 0); r = store->queue_transaction(ch, std::move(t)); ASSERT_EQ(r, 0); } bufferlist final; final.append(bla); bufferlist zeros; zeros.append_zero(min_alloc * 3 - size); final.append(zeros); final.append(blb); bufferlist actual; { ASSERT_EQ((int)final.length(), store->read(ch, a, 0, final.length(), actual)); final.hexdump(cout); actual.hexdump(cout); ASSERT_TRUE(bl_eq(final, actual)); } { ObjectStore::Transaction t; t.remove(cid, a); t.remove(cid, b); t.remove_collection(cid); cerr << "Cleaning" << std::endl; r = store->queue_transaction(ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTest, SmallSequentialUnaligned) { int r; coll_t cid; ghobject_t a(hobject_t(sobject_t("Object 1", CEPH_NOSNAP))); auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); cerr << "Creating collection " << cid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } bufferlist bl; int len = 1000; bufferptr bp(len); bp.zero(); bl.append(bp); for (int i=0; i<1000; ++i) { ObjectStore::Transaction t; t.write(cid, a, i*len, len, bl, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; t.remove(cid, a); t.remove_collection(cid); cerr << "Cleaning" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTest, ManyBigWrite) { int r; coll_t cid; ghobject_t a(hobject_t(sobject_t("Object 1", CEPH_NOSNAP))); ghobject_t b(hobject_t(sobject_t("Object 2", CEPH_NOSNAP))); auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); cerr << "Creating collection " << cid << std::endl; r = queue_transaction(store, ch, 
std::move(t)); ASSERT_EQ(r, 0); } bufferlist bl; bufferptr bp(4 * 1048576); bp.zero(); bl.append(bp); for (int i=0; i<10; ++i) { ObjectStore::Transaction t; t.write(cid, a, i*4*1048586, 4*1048576, bl, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } // aligned for (int i=0; i<10; ++i) { ObjectStore::Transaction t; t.write(cid, b, (rand() % 256)*4*1048576, 4*1048576, bl, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } // unaligned for (int i=0; i<10; ++i) { ObjectStore::Transaction t; t.write(cid, b, (rand() % (256*4096))*1024, 4*1048576, bl, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } // do some zeros for (int i=0; i<10; ++i) { ObjectStore::Transaction t; t.zero(cid, b, (rand() % (256*4096))*1024, 16*1048576); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; t.remove(cid, a); t.remove(cid, b); t.remove_collection(cid); cerr << "Cleaning" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTest, BigWriteBigZero) { int r; coll_t cid; ghobject_t a(hobject_t(sobject_t("foo", CEPH_NOSNAP))); auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } bufferlist bl; bufferptr bp(1048576); memset(bp.c_str(), 'b', bp.length()); bl.append(bp); bufferlist s; bufferptr sp(4096); memset(sp.c_str(), 's', sp.length()); s.append(sp); { ObjectStore::Transaction t; t.write(cid, a, 0, bl.length(), bl); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; t.zero(cid, a, bl.length() / 4, bl.length() / 2); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; t.write(cid, a, bl.length() / 2, s.length(), s); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; t.remove(cid, a); 
t.remove_collection(cid);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
}

// Write two 512 KiB fragments with a hole between them and verify the hole
// reads back as zeros; then write a fragment straddling the hole boundary.
TEST_P(StoreTest, MiscFragmentTests) {
  int r;
  coll_t cid;
  ghobject_t a(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
  auto ch = store->create_new_collection(cid);
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    cerr << "Creating collection " << cid << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  bufferlist bl;
  bufferptr bp(524288);
  bp.zero();
  bl.append(bp);
  {
    // Fragment 1: [0, 512K)
    ObjectStore::Transaction t;
    t.write(cid, a, 0, 524288, bl, 0);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  {
    // Fragment 2: [1M, 1M+512K) — leaves a hole over [512K, 1M)
    ObjectStore::Transaction t;
    t.write(cid, a, 1048576, 524288, bl, 0);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  {
    // Read 1 KiB from inside the hole; must be all zeros.
    bufferlist inbl;
    int r = store->read(ch, a, 524288 + 131072, 1024, inbl);
    ASSERT_EQ(r, 1024);
    ASSERT_EQ(inbl.length(), 1024u);
    ASSERT_TRUE(inbl.is_zero());
  }
  {
    // Write a fragment that straddles the hole/fragment-2 boundary.
    ObjectStore::Transaction t;
    t.write(cid, a, 1048576 - 4096, 524288, bl, 0);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  {
    ObjectStore::Transaction t;
    t.remove(cid, a);
    t.remove_collection(cid);
    cerr << "Cleaning" << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
}

// Verify how zero() affects st_size: zeroing inside the object must not
// change its size, while zeroing past the end must extend it (continues
// past this chunk).
TEST_P(StoreTest, ZeroVsObjectSize) {
  int r;
  coll_t cid;
  struct stat stat;
  ghobject_t hoid(hobject_t(sobject_t("foo", CEPH_NOSNAP)));
  auto ch = store->create_new_collection(cid);
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    cerr << "Creating collection " << cid << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  bufferlist a;
  a.append("stuff");
  {
    ObjectStore::Transaction t;
    t.write(cid, hoid, 0, 5, a);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  ASSERT_EQ(0, store->stat(ch, hoid, &stat));
  ASSERT_EQ(5, stat.st_size);
  {
    // Zero a range strictly inside the object; size must stay 5.
    ObjectStore::Transaction t;
    t.zero(cid, hoid, 1, 2);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  ASSERT_EQ(0, store->stat(ch, hoid,
&stat)); ASSERT_EQ(5, stat.st_size); { ObjectStore::Transaction t; t.zero(cid, hoid, 3, 200); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } ASSERT_EQ(0, store->stat(ch, hoid, &stat)); ASSERT_EQ(203, stat.st_size); { ObjectStore::Transaction t; t.zero(cid, hoid, 100000, 200); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } ASSERT_EQ(0, store->stat(ch, hoid, &stat)); ASSERT_EQ(100200, stat.st_size); } TEST_P(StoreTest, ZeroLengthWrite) { int r; coll_t cid; ghobject_t hoid(hobject_t(sobject_t("foo", CEPH_NOSNAP))); auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); t.touch(cid, hoid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; bufferlist empty; t.write(cid, hoid, 1048576, 0, empty); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } struct stat stat; r = store->stat(ch, hoid, &stat); ASSERT_EQ(0, r); ASSERT_EQ(0, stat.st_size); bufferlist newdata; r = store->read(ch, hoid, 0, 1048576, newdata); ASSERT_EQ(0, r); } TEST_P(StoreTest, ZeroLengthZero) { int r; coll_t cid; ghobject_t hoid(hobject_t(sobject_t("foo", CEPH_NOSNAP))); auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); t.touch(cid, hoid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(0, r); } { ObjectStore::Transaction t; t.zero(cid, hoid, 1048576, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(0, r); } struct stat stat; r = store->stat(ch, hoid, &stat); ASSERT_EQ(0, r); ASSERT_EQ(0, stat.st_size); bufferlist newdata; r = store->read(ch, hoid, 0, 1048576, newdata); ASSERT_EQ(0, r); } TEST_P(StoreTest, SimpleAttrTest) { int r; coll_t cid; ghobject_t hoid(hobject_t(sobject_t("attr object 1", CEPH_NOSNAP))); bufferlist val, val2; val.append("value"); val.append("value2"); { auto ch = store->open_collection(cid); ASSERT_FALSE(ch); } auto ch = store->create_new_collection(cid); 
{ ObjectStore::Transaction t; t.create_collection(cid, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { bool empty; int r = store->collection_empty(ch, &empty); ASSERT_EQ(0, r); ASSERT_TRUE(empty); } { bufferptr bp; r = store->getattr(ch, hoid, "nofoo", bp); ASSERT_EQ(-ENOENT, r); } { ObjectStore::Transaction t; t.touch(cid, hoid); t.setattr(cid, hoid, "foo", val); t.setattr(cid, hoid, "bar", val2); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { bool empty; int r = store->collection_empty(ch, &empty); ASSERT_EQ(0, r); ASSERT_TRUE(!empty); } { bufferptr bp; r = store->getattr(ch, hoid, "nofoo", bp); ASSERT_EQ(-ENODATA, r); r = store->getattr(ch, hoid, "foo", bp); ASSERT_EQ(0, r); bufferlist bl; bl.append(bp); ASSERT_TRUE(bl_eq(val, bl)); map<string,bufferptr,less<>> bm; r = store->getattrs(ch, hoid, bm); ASSERT_EQ(0, r); } { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove_collection(cid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTest, SimpleListTest) { int r; coll_t cid(spg_t(pg_t(0, 1), shard_id_t(1))); auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); cerr << "Creating collection " << cid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } set<ghobject_t> all; { ObjectStore::Transaction t; for (int i=0; i<200; ++i) { string name("object_"); name += stringify(i); ghobject_t hoid(hobject_t(sobject_t(name, CEPH_NOSNAP)), ghobject_t::NO_GEN, shard_id_t(1)); hoid.hobj.pool = 1; all.insert(hoid); t.touch(cid, hoid); cerr << "Creating object " << hoid << std::endl; } r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { set<ghobject_t> saw; vector<ghobject_t> objects; ghobject_t next, current; while (!next.is_max()) { int r = collection_list(store, ch, current, ghobject_t::get_max(), 50, &objects, &next); ASSERT_EQ(r, 0); ASSERT_TRUE(sorted(objects)); cout << " got " << 
objects.size() << " next " << next << std::endl;
      // Track every object returned and flag duplicates across pages.
      for (vector<ghobject_t>::iterator p = objects.begin(); p != objects.end(); ++p) {
        if (saw.count(*p)) {
          cout << "got DUP " << *p << std::endl;
        } else {
          //cout << "got new " << *p << std::endl;
        }
        saw.insert(*p);
      }
      objects.clear();
      current = next;
    }
    // Paged listing must return exactly the set of created objects.
    ASSERT_EQ(saw.size(), all.size());
    ASSERT_EQ(saw, all);
  }
  {
    ObjectStore::Transaction t;
    for (set<ghobject_t>::iterator p = all.begin(); p != all.end(); ++p)
      t.remove(cid, *p);
    t.remove_collection(cid);
    cerr << "Cleaning" << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
}

// Verify that collection listing treats the 'end' bound as exclusive:
// the end object itself must never appear in the results.
TEST_P(StoreTest, ListEndTest) {
  int r;
  coll_t cid(spg_t(pg_t(0, 1), shard_id_t(1)));
  auto ch = store->create_new_collection(cid);
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    cerr << "Creating collection " << cid << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  set<ghobject_t> all;
  {
    // Populate 200 objects in pool 1, shard 1.
    ObjectStore::Transaction t;
    for (int i=0; i<200; ++i) {
      string name("object_");
      name += stringify(i);
      ghobject_t hoid(hobject_t(sobject_t(name, CEPH_NOSNAP)), ghobject_t::NO_GEN, shard_id_t(1));
      hoid.hobj.pool = 1;
      all.insert(hoid);
      t.touch(cid, hoid);
      cerr << "Creating object " << hoid << std::endl;
    }
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  {
    // List up to 500 entries bounded by "object_100"; the bound object
    // itself must be excluded.
    ghobject_t end(hobject_t(sobject_t("object_100", CEPH_NOSNAP)), ghobject_t::NO_GEN, shard_id_t(1));
    end.hobj.pool = 1;
    vector<ghobject_t> objects;
    ghobject_t next;
    int r = collection_list(store, ch, ghobject_t(), end, 500, &objects, &next);
    ASSERT_EQ(r, 0);
    for (auto &p : objects) {
      ASSERT_NE(p, end);
    }
  }
  {
    ObjectStore::Transaction t;
    for (set<ghobject_t>::iterator p = all.begin(); p != all.end(); ++p)
      t.remove(cid, *p);
    t.remove_collection(cid);
    cerr << "Cleaning" << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
}

// List an object whose hash is 0xffffffff in the meta (pool -1) collection
// (continues past this chunk).
TEST_P(StoreTest, List_0xfffffff_Hash_Test_in_meta) {
  int r = 0;
  coll_t cid;
  auto ch = store->create_new_collection(cid);
  {
ObjectStore::Transaction t; t.create_collection(cid, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; ghobject_t hoid(hobject_t(sobject_t("obj", CEPH_NOSNAP), "", UINT32_C(0xffffffff), -1, "nspace")); t.touch(cid, hoid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { vector<ghobject_t> objects; r = collection_list(store, ch, ghobject_t(), ghobject_t::get_max(), INT_MAX, &objects, nullptr, true); ASSERT_EQ(r, 0); ASSERT_EQ(objects.size(), 1); } } TEST_P(StoreTest, List_0xfffffff_Hash_Test_in_PG) { int r = 0; const int64_t poolid = 1; coll_t cid(spg_t(pg_t(0, poolid), shard_id_t::NO_SHARD)); auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; ghobject_t hoid(hobject_t(sobject_t("obj", CEPH_NOSNAP), "", UINT32_C(0xffffffff), poolid, "nspace")); t.touch(cid, hoid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { vector<ghobject_t> objects; r = collection_list(store, ch, ghobject_t(), ghobject_t::get_max(), INT_MAX, &objects, nullptr, true); ASSERT_EQ(r, 0); ASSERT_EQ(objects.size(), 1); } } TEST_P(StoreTest, Sort) { { hobject_t a(sobject_t("a", CEPH_NOSNAP)); hobject_t b = a; ASSERT_EQ(a, b); b.oid.name = "b"; ASSERT_NE(a, b); ASSERT_TRUE(a < b); a.pool = 1; b.pool = 2; ASSERT_TRUE(a < b); a.pool = 3; ASSERT_TRUE(a > b); } { ghobject_t a(hobject_t(sobject_t("a", CEPH_NOSNAP))); ghobject_t b(hobject_t(sobject_t("b", CEPH_NOSNAP))); a.hobj.pool = 1; b.hobj.pool = 1; ASSERT_TRUE(a < b); a.hobj.pool = -3; ASSERT_TRUE(a < b); a.hobj.pool = 1; b.hobj.pool = -3; ASSERT_TRUE(a > b); } } TEST_P(StoreTest, MultipoolListTest) { int r; int poolid = 4373; coll_t cid = coll_t(spg_t(pg_t(0, poolid), shard_id_t::NO_SHARD)); auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); cerr << "Creating 
collection " << cid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } set<ghobject_t> all, saw; { ObjectStore::Transaction t; for (int i=0; i<200; ++i) { string name("object_"); name += stringify(i); ghobject_t hoid(hobject_t(sobject_t(name, CEPH_NOSNAP))); if (rand() & 1) hoid.hobj.pool = -2 - poolid; else hoid.hobj.pool = poolid; all.insert(hoid); t.touch(cid, hoid); cerr << "Creating object " << hoid << std::endl; } r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { vector<ghobject_t> objects; ghobject_t next, current; while (!next.is_max()) { int r = collection_list(store, ch, current, ghobject_t::get_max(), 50, &objects, &next); ASSERT_EQ(r, 0); cout << " got " << objects.size() << " next " << next << std::endl; for (vector<ghobject_t>::iterator p = objects.begin(); p != objects.end(); ++p) { saw.insert(*p); } objects.clear(); current = next; } ASSERT_EQ(saw, all); } { ObjectStore::Transaction t; for (set<ghobject_t>::iterator p = all.begin(); p != all.end(); ++p) t.remove(cid, *p); t.remove_collection(cid); cerr << "Cleaning" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTest, SimpleCloneTest) { int r; coll_t cid; SetDeathTestStyle("threadsafe"); auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); cerr << "Creating collection " << cid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP), "key", 123, -1, "")); bufferlist small, large, xlarge, newdata, attr; small.append("small"); large.append("large"); xlarge.append("xlarge"); { ObjectStore::Transaction t; t.touch(cid, hoid); t.setattr(cid, hoid, "attr1", small); t.setattr(cid, hoid, "attr2", large); t.setattr(cid, hoid, "attr3", xlarge); t.write(cid, hoid, 0, small.length(), small); t.write(cid, hoid, 10, small.length(), small); cerr << "Creating object and set attr " << 
hoid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } ghobject_t hoid2(hobject_t(sobject_t("Object 2", CEPH_NOSNAP), "key", 123, -1, "")); ghobject_t hoid3(hobject_t(sobject_t("Object 3", CEPH_NOSNAP))); { ObjectStore::Transaction t; t.clone(cid, hoid, hoid2); t.setattr(cid, hoid2, "attr2", small); t.rmattr(cid, hoid2, "attr1"); t.write(cid, hoid, 10, large.length(), large); t.setattr(cid, hoid, "attr1", large); t.setattr(cid, hoid, "attr2", small); cerr << "Clone object and rm attr" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); r = store->read(ch, hoid, 10, 5, newdata); ASSERT_EQ(r, 5); ASSERT_TRUE(bl_eq(large, newdata)); newdata.clear(); r = store->read(ch, hoid, 0, 5, newdata); ASSERT_EQ(r, 5); ASSERT_TRUE(bl_eq(small, newdata)); newdata.clear(); r = store->read(ch, hoid2, 10, 5, newdata); ASSERT_EQ(r, 5); ASSERT_TRUE(bl_eq(small, newdata)); r = store->getattr(ch, hoid2, "attr2", attr); ASSERT_EQ(r, 0); ASSERT_TRUE(bl_eq(small, attr)); attr.clear(); r = store->getattr(ch, hoid2, "attr3", attr); ASSERT_EQ(r, 0); ASSERT_TRUE(bl_eq(xlarge, attr)); attr.clear(); r = store->getattr(ch, hoid, "attr1", attr); ASSERT_EQ(r, 0); ASSERT_TRUE(bl_eq(large, attr)); } { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove(cid, hoid2); ASSERT_EQ(0, queue_transaction(store, ch, std::move(t))); } { bufferlist final; bufferptr p(16384); memset(p.c_str(), 1, p.length()); bufferlist pl; pl.append(p); final.append(p); ObjectStore::Transaction t; t.write(cid, hoid, 0, pl.length(), pl); t.clone(cid, hoid, hoid2); bufferptr a(4096); memset(a.c_str(), 2, a.length()); bufferlist al; al.append(a); final.append(a); t.write(cid, hoid, pl.length(), a.length(), al); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); bufferlist rl; ASSERT_EQ((int)final.length(), store->read(ch, hoid, 0, final.length(), rl)); ASSERT_TRUE(bl_eq(rl, final)); } { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove(cid, hoid2); 
ASSERT_EQ(0, queue_transaction(store, ch, std::move(t))); } { bufferlist final; bufferptr p(16384); memset(p.c_str(), 111, p.length()); bufferlist pl; pl.append(p); final.append(p); ObjectStore::Transaction t; t.write(cid, hoid, 0, pl.length(), pl); t.clone(cid, hoid, hoid2); bufferptr z(4096); z.zero(); final.append(z); bufferptr a(4096); memset(a.c_str(), 112, a.length()); bufferlist al; al.append(a); final.append(a); t.write(cid, hoid, pl.length() + z.length(), a.length(), al); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); bufferlist rl; ASSERT_EQ((int)final.length(), store->read(ch, hoid, 0, final.length(), rl)); ASSERT_TRUE(bl_eq(rl, final)); } { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove(cid, hoid2); ASSERT_EQ(0, queue_transaction(store, ch, std::move(t))); } { bufferlist final; bufferptr p(16000); memset(p.c_str(), 5, p.length()); bufferlist pl; pl.append(p); final.append(p); ObjectStore::Transaction t; t.write(cid, hoid, 0, pl.length(), pl); t.clone(cid, hoid, hoid2); bufferptr z(1000); z.zero(); final.append(z); bufferptr a(8000); memset(a.c_str(), 6, a.length()); bufferlist al; al.append(a); final.append(a); t.write(cid, hoid, 17000, a.length(), al); ASSERT_EQ(0, queue_transaction(store, ch, std::move(t))); bufferlist rl; ASSERT_EQ((int)final.length(), store->read(ch, hoid, 0, final.length(), rl)); /*cout << "expected:\n"; final.hexdump(cout); cout << "got:\n"; rl.hexdump(cout);*/ ASSERT_TRUE(bl_eq(rl, final)); } { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove(cid, hoid2); ASSERT_EQ(0, queue_transaction(store, ch, std::move(t))); } { bufferptr p(1048576); memset(p.c_str(), 3, p.length()); bufferlist pl; pl.append(p); ObjectStore::Transaction t; t.write(cid, hoid, 0, pl.length(), pl); t.clone(cid, hoid, hoid2); bufferptr a(65536); memset(a.c_str(), 4, a.length()); bufferlist al; al.append(a); t.write(cid, hoid, a.length(), a.length(), al); ASSERT_EQ(0, queue_transaction(store, ch, std::move(t))); bufferlist rl; 
bufferlist final; final.substr_of(pl, 0, al.length()); final.append(al); bufferlist end; end.substr_of(pl, al.length()*2, pl.length() - al.length()*2); final.append(end); ASSERT_EQ((int)final.length(), store->read(ch, hoid, 0, final.length(), rl)); /*cout << "expected:\n"; final.hexdump(cout); cout << "got:\n"; rl.hexdump(cout);*/ ASSERT_TRUE(bl_eq(rl, final)); } { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove(cid, hoid2); ASSERT_EQ(0, queue_transaction(store, ch, std::move(t))); } { bufferptr p(65536); memset(p.c_str(), 7, p.length()); bufferlist pl; pl.append(p); ObjectStore::Transaction t; t.write(cid, hoid, 0, pl.length(), pl); t.clone(cid, hoid, hoid2); bufferptr a(4096); memset(a.c_str(), 8, a.length()); bufferlist al; al.append(a); t.write(cid, hoid, 32768, a.length(), al); ASSERT_EQ(0, queue_transaction(store, ch, std::move(t))); bufferlist rl; bufferlist final; final.substr_of(pl, 0, 32768); final.append(al); bufferlist end; end.substr_of(pl, final.length(), pl.length() - final.length()); final.append(end); ASSERT_EQ((int)final.length(), store->read(ch, hoid, 0, final.length(), rl)); /*cout << "expected:\n"; final.hexdump(cout); cout << "got:\n"; rl.hexdump(cout);*/ ASSERT_TRUE(bl_eq(rl, final)); } { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove(cid, hoid2); ASSERT_EQ(0, queue_transaction(store, ch, std::move(t))); } { bufferptr p(65536); memset(p.c_str(), 9, p.length()); bufferlist pl; pl.append(p); ObjectStore::Transaction t; t.write(cid, hoid, 0, pl.length(), pl); t.clone(cid, hoid, hoid2); bufferptr a(4096); memset(a.c_str(), 10, a.length()); bufferlist al; al.append(a); t.write(cid, hoid, 33768, a.length(), al); ASSERT_EQ(0, queue_transaction(store, ch, std::move(t))); bufferlist rl; bufferlist final; final.substr_of(pl, 0, 33768); final.append(al); bufferlist end; end.substr_of(pl, final.length(), pl.length() - final.length()); final.append(end); ASSERT_EQ((int)final.length(), store->read(ch, hoid, 0, final.length(), rl)); 
/*cout << "expected:\n"; final.hexdump(cout); cout << "got:\n"; rl.hexdump(cout);*/ ASSERT_TRUE(bl_eq(rl, final)); } { //verify if non-empty collection is properly handled after store reload ch.reset(); r = store->umount(); ASSERT_EQ(r, 0); r = store->mount(); ASSERT_EQ(r, 0); ch = store->open_collection(cid); ObjectStore::Transaction t; t.remove_collection(cid); cerr << "Invalid rm coll" << std::endl; PrCtl unset_dumpable; EXPECT_DEATH(queue_transaction(store, ch, std::move(t)), ""); } { ObjectStore::Transaction t; t.touch(cid, hoid3); //new record in db r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; //verify if non-empty collection is properly handled when there are some pending removes and live records in db cerr << "Invalid rm coll again" << std::endl; ch.reset(); r = store->umount(); ASSERT_EQ(r, 0); r = store->mount(); ASSERT_EQ(r, 0); ch = store->open_collection(cid); t.remove(cid, hoid); t.remove(cid, hoid2); t.remove_collection(cid); PrCtl unset_dumpable; EXPECT_DEATH(queue_transaction(store, ch, std::move(t)), ""); } { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove(cid, hoid2); t.remove(cid, hoid3); t.remove_collection(cid); cerr << "Cleaning" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTest, OmapSimple) { int r; coll_t cid; auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); cerr << "Creating collection " << cid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } ghobject_t hoid(hobject_t(sobject_t("omap_obj", CEPH_NOSNAP), "key", 123, -1, "")); bufferlist small; small.append("small"); map<string,bufferlist> km; km["foo"] = small; km["bar"].append("asdfjkasdkjdfsjkafskjsfdj"); bufferlist header; header.append("this is a header"); { ObjectStore::Transaction t; t.touch(cid, hoid); t.omap_setkeys(cid, hoid, km); t.omap_setheader(cid, hoid, header); cerr << "Creating 
object and set omap " << hoid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } // get header, keys { bufferlist h; map<string,bufferlist> r; store->omap_get(ch, hoid, &h, &r); ASSERT_TRUE(bl_eq(header, h)); ASSERT_EQ(r.size(), km.size()); cout << "r: " << r << std::endl; } // test iterator with seek_to_first { map<string,bufferlist> r; ObjectMap::ObjectMapIterator iter = store->get_omap_iterator(ch, hoid); for (iter->seek_to_first(); iter->valid(); iter->next()) { r[iter->key()] = iter->value(); } cout << "r: " << r << std::endl; ASSERT_EQ(r.size(), km.size()); } // test iterator with initial lower_bound { map<string,bufferlist> r; ObjectMap::ObjectMapIterator iter = store->get_omap_iterator(ch, hoid); for (iter->lower_bound(string()); iter->valid(); iter->next()) { r[iter->key()] = iter->value(); } cout << "r: " << r << std::endl; ASSERT_EQ(r.size(), km.size()); } { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove_collection(cid); cerr << "Cleaning" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTest, OmapCloneTest) { int r; coll_t cid; auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); cerr << "Creating collection " << cid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP), "key", 123, -1, "")); bufferlist small; small.append("small"); map<string,bufferlist> km; km["foo"] = small; km["bar"].append("asdfjkasdkjdfsjkafskjsfdj"); bufferlist header; header.append("this is a header"); { ObjectStore::Transaction t; t.touch(cid, hoid); t.omap_setkeys(cid, hoid, km); t.omap_setheader(cid, hoid, header); cerr << "Creating object and set omap " << hoid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } ghobject_t hoid2(hobject_t(sobject_t("Object 2", CEPH_NOSNAP), "key", 123, -1, "")); { ObjectStore::Transaction t; 
t.clone(cid, hoid, hoid2); cerr << "Clone object" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { map<string,bufferlist> r; bufferlist h; store->omap_get(ch, hoid2, &h, &r); ASSERT_TRUE(bl_eq(header, h)); ASSERT_EQ(r.size(), km.size()); } { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove(cid, hoid2); t.remove_collection(cid); cerr << "Cleaning" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTest, SimpleCloneRangeTest) { int r; coll_t cid; auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); cerr << "Creating collection " << cid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP))); hoid.hobj.pool = -1; bufferlist small, newdata; small.append("small"); { ObjectStore::Transaction t; t.write(cid, hoid, 10, 5, small); cerr << "Creating object and write bl " << hoid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } ghobject_t hoid2(hobject_t(sobject_t("Object 2", CEPH_NOSNAP))); hoid2.hobj.pool = -1; { ObjectStore::Transaction t; t.clone_range(cid, hoid, hoid2, 10, 5, 10); cerr << "Clone range object" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); r = store->read(ch, hoid2, 10, 5, newdata); ASSERT_EQ(r, 5); ASSERT_TRUE(bl_eq(small, newdata)); } { ObjectStore::Transaction t; t.truncate(cid, hoid, 1024*1024); t.clone_range(cid, hoid, hoid2, 0, 1024*1024, 0); cerr << "Clone range object" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); struct stat stat, stat2; r = store->stat(ch, hoid, &stat); r = store->stat(ch, hoid2, &stat2); ASSERT_EQ(stat.st_size, stat2.st_size); ASSERT_EQ(1024*1024, stat2.st_size); } { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove(cid, hoid2); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } #if 
defined(WITH_BLUESTORE) TEST_P(StoreTest, BlueStoreUnshareBlobTest) { if (string(GetParam()) != "bluestore") return; if (smr) { cout << "SKIP: non-deterministic behavior with smr" << std::endl; return; } int r; coll_t cid; auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); cerr << "Creating collection " << cid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP))); hoid.hobj.pool = -1; ghobject_t hoid2(hobject_t(sobject_t("Object 1", CEPH_NOSNAP))); hoid2.hobj.pool = -1; hoid2.generation = 2; { // check if blob is unshared properly bufferlist data, newdata; data.append(string(8192, 'a')); ObjectStore::Transaction t; t.write(cid, hoid, 0, data.length(), data); cerr << "Creating object and write 8K " << hoid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); ObjectStore::Transaction t2; t2.clone_range(cid, hoid, hoid2, 0, 4096, 0); cerr << "Clone range object" << std::endl; r = queue_transaction(store, ch, std::move(t2)); ASSERT_EQ(r, 0); data.clear(); data.append(string(4096, 'b')); ObjectStore::Transaction t3; t3.write(cid, hoid, 0, data.length(), data); cerr << "Writing 4k to source object " << hoid << std::endl; r = queue_transaction(store, ch, std::move(t3)); ASSERT_EQ(r, 0); { // this trims hoid one out of onode cache EXPECT_EQ(store->umount(), 0); EXPECT_EQ(store->mount(), 0); ch = store->open_collection(cid); } ObjectStore::Transaction t4; t4.remove(cid, hoid2); cerr << "Deleting dest object" << hoid2 << std::endl; r = queue_transaction(store, ch, std::move(t4)); ASSERT_EQ(r, 0); { // this ensures remove operation submitted to kv store EXPECT_EQ(store->umount(), 0); EXPECT_EQ(store->mount(), 0); ch = store->open_collection(cid); } bufferlist resdata; r = store->read(ch, hoid, 0, 0x2000, resdata); ASSERT_EQ(r, 0x2000); { BlueStore* bstore = dynamic_cast<BlueStore*> (store.get()); auto* kv = 
bstore->get_kv(); // to be inline with BlueStore.cc const string PREFIX_SHARED_BLOB = "X"; size_t cnt = 0; auto it = kv->get_iterator(PREFIX_SHARED_BLOB); ceph_assert(it); for (it->lower_bound(string()); it->valid(); it->next()) { ++cnt; } ASSERT_EQ(cnt, 0); } } { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove_collection(cid); cerr << "Cleaning" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTest, BlueStoreUnshareBlobBugTest) { if (string(GetParam()) != "bluestore") return; int r; coll_t cid; auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); cerr << "Creating collection " << cid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP))); hoid.hobj.pool = -1; ghobject_t hoid2(hobject_t(sobject_t("Object 1", CEPH_NOSNAP))); hoid2.hobj.pool = -1; hoid2.generation = 2; { // check if blob is unshared properly bufferlist data, newdata; data.append(string(8192, 'a')); ObjectStore::Transaction t; t.write(cid, hoid, 0, data.length(), data); cerr << "Creating object and write 8K " << hoid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); ObjectStore::Transaction t2; t2.clone_range(cid, hoid, hoid2, 0, 4096, 0); cerr << "Clone range object" << std::endl; r = queue_transaction(store, ch, std::move(t2)); ASSERT_EQ(r, 0); data.clear(); data.append(string(4096, 'b')); ObjectStore::Transaction t3; t3.write(cid, hoid, 0, data.length(), data); cerr << "Writing 4k to source object " << hoid << std::endl; r = queue_transaction(store, ch, std::move(t3)); ASSERT_EQ(r, 0); { // this trims hoid one out of onode cache EXPECT_EQ(store->umount(), 0); EXPECT_EQ(store->mount(), 0); ch = store->open_collection(cid); } ObjectStore::Transaction t4; t4.write(cid, hoid2, 0, data.length(), data); cerr << "Writing 4k to second object " << hoid2 << std::endl; r = 
  // Tail of the preceding BlueStore shared-blob test (its opening is above this
  // chunk): submit the final transaction, verify the data reads back, then walk
  // the shared-blob keyspace in the KV store directly and count the records.
  queue_transaction(store, ch, std::move(t4));
  ASSERT_EQ(r, 0);
  bufferlist resdata;
  r = store->read(ch, hoid, 0, 0x2000, resdata);
  ASSERT_EQ(r, 0x2000);
  {
    BlueStore* bstore = dynamic_cast<BlueStore*> (store.get());
    auto* kv = bstore->get_kv();

    // to be inline with BlueStore.cc
    const string PREFIX_SHARED_BLOB = "X";
    size_t cnt = 0;
    // Iterate the whole shared-blob prefix and count surviving entries.
    auto it = kv->get_iterator(PREFIX_SHARED_BLOB);
    ceph_assert(it);
    for (it->lower_bound(string()); it->valid(); it->next()) {
      ++cnt;
    }
    // This shows a bug in unsharing a blob,
    // after writing to 0x0~1000 to hoid2 share blob at hoid should be
    //unshared but it doesn't in the current implementation
    ASSERT_EQ(cnt, 1);
  }
  }
  {
    // Tear down: remove both objects and the collection.
    ObjectStore::Transaction t;
    t.remove(cid, hoid);
    t.remove(cid, hoid2);
    t.remove_collection(cid);
    cerr << "Cleaning" << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
}
#endif

// Create and remove a single object whose name is several hundred characters
// long, exercising the store's long-object-name handling.
TEST_P(StoreTest, SimpleObjectLongnameTest) {
  int r;
  coll_t cid;
  auto ch = store->create_new_collection(cid);
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    cerr << "Creating collection " << cid << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  ghobject_t hoid(hobject_t(sobject_t("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaObjectaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 1", CEPH_NOSNAP)));
  {
    ObjectStore::Transaction t;
    t.touch(cid, hoid);
    cerr << "Creating object " << hoid << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  {
    ObjectStore::Transaction t;
    t.remove(cid, hoid);
    t.remove_collection(cid);
    cerr << "Cleaning" << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
}

// Build a deterministic long object name ("object id <i> " followed by 500
// 'a's).  The hash is forced to i % 2 so many objects share the same hash
// value, which drives collection splitting in LongnameSplitTest below.
ghobject_t generate_long_name(unsigned i)
{
  stringstream name;
  name << "object id " << i << " ";
  for (unsigned j = 0; j < 500; ++j) name << 'a';
  ghobject_t hoid(hobject_t(sobject_t(name.str(), CEPH_NOSNAP)));
  hoid.hobj.set_hash(i % 2);
  return hoid;
}

// Create 320 long-named objects (only 2 distinct hash values), then trigger a
// collection split via collection_move_rename and clean everything up.
TEST_P(StoreTest, LongnameSplitTest) {
  int r;
  coll_t cid;
  auto ch = store->create_new_collection(cid);
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    cerr << "Creating collection " << cid << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(0, r);
  }
  for (unsigned i = 0; i < 320; ++i) {
    ObjectStore::Transaction t;
    ghobject_t hoid = generate_long_name(i);
    t.touch(cid, hoid);
    cerr << "Creating object " << hoid << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(0, r);
  }

  // Rename the last object to a generation-qualified variant of itself.
  ghobject_t test_obj = generate_long_name(319);
  ghobject_t test_obj_2 = test_obj;
  test_obj_2.generation = 0;
  {
    ObjectStore::Transaction t;
    // should cause a split
    t.collection_move_rename(
      cid, test_obj,
      cid, test_obj_2);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(0, r);
  }

  for (unsigned i = 0; i < 319; ++i) {
    ObjectStore::Transaction t;
    ghobject_t hoid = generate_long_name(i);
    t.remove(cid, hoid);
    cerr << "Removing object " << hoid << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(0, r);
  }
  {
    ObjectStore::Transaction t;
    t.remove(cid, test_obj_2);
    t.remove_collection(cid);
    cerr << "Cleaning" << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(0, r);
  }
}

// Create 2000 objects with long names, then verify stat() and paginated
// collection listing see exactly the created set.  (Continues past this
// chunk boundary.)
TEST_P(StoreTest, ManyObjectTest) {
  int NUM_OBJS = 2000;
  int r = 0;
  coll_t cid;
  string base = "";
  for (int i = 0; i < 100; ++i) base.append("aaaaa");
  set<ghobject_t> created;
  auto ch = store->create_new_collection(cid);
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  for (int i = 0; i < NUM_OBJS; ++i) {
    if (!(i % 5)) {
      cerr << "Object " << i << std::endl;
    }
    ObjectStore::Transaction t;
    char buf[100];
    snprintf(buf, sizeof(buf), "%d", i);
    ghobject_t hoid(hobject_t(sobject_t(string(buf) + base, CEPH_NOSNAP)));
    t.touch(cid, hoid);
    created.insert(hoid);
    r =
queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } for (set<ghobject_t>::iterator i = created.begin(); i != created.end(); ++i) { struct stat buf; ASSERT_TRUE(!store->stat(ch, *i, &buf)); } set<ghobject_t> listed, listed2; vector<ghobject_t> objects; r = collection_list(store, ch, ghobject_t(), ghobject_t::get_max(), INT_MAX, &objects, 0); ASSERT_EQ(r, 0); cerr << "objects.size() is " << objects.size() << std::endl; for (vector<ghobject_t> ::iterator i = objects.begin(); i != objects.end(); ++i) { listed.insert(*i); ASSERT_TRUE(created.count(*i)); } ASSERT_TRUE(listed.size() == created.size()); ghobject_t start, next; objects.clear(); r = collection_list( store, ch, ghobject_t::get_max(), ghobject_t::get_max(), 50, &objects, &next ); ASSERT_EQ(r, 0); ASSERT_TRUE(objects.empty()); objects.clear(); listed.clear(); ghobject_t start2, next2; while (1) { r = collection_list(store, ch, start, ghobject_t::get_max(), 50, &objects, &next); ASSERT_TRUE(sorted(objects)); ASSERT_EQ(r, 0); listed.insert(objects.begin(), objects.end()); if (objects.size() < 50) { ASSERT_TRUE(next.is_max()); break; } objects.clear(); start = next; } cerr << "listed.size() is " << listed.size() << std::endl; ASSERT_TRUE(listed.size() == created.size()); if (listed2.size()) { ASSERT_EQ(listed.size(), listed2.size()); } for (set<ghobject_t>::iterator i = listed.begin(); i != listed.end(); ++i) { ASSERT_TRUE(created.count(*i)); } for (set<ghobject_t>::iterator i = created.begin(); i != created.end(); ++i) { ObjectStore::Transaction t; t.remove(cid, *i); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } cerr << "cleaning up" << std::endl; { ObjectStore::Transaction t; t.remove_collection(cid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } class ObjectGenerator { public: virtual ghobject_t create_object(gen_type *gen) = 0; virtual ~ObjectGenerator() {} }; class MixedGenerator : public ObjectGenerator { public: unsigned seq; int64_t poolid; explicit 
MixedGenerator(int64_t p) : seq(0), poolid(p) {} ghobject_t create_object(gen_type *gen) override { char buf[100]; snprintf(buf, sizeof(buf), "OBJ_%u", seq); string name(buf); if (seq % 2) { for (unsigned i = 0; i < 300; ++i) { name.push_back('a'); } } ++seq; return ghobject_t( hobject_t( name, string(), rand() & 2 ? CEPH_NOSNAP : rand(), (((seq / 1024) % 2) * 0xF00 ) + (seq & 0xFF), poolid, "")); } }; class SyntheticWorkloadState { struct Object { bufferlist data; map<string, bufferlist> attrs; }; public: static const unsigned max_in_flight = 16; static const unsigned max_objects = 3000; static const unsigned max_attr_size = 5; static const unsigned max_attr_name_len = 100; static const unsigned max_attr_value_len = 1024 * 64; coll_t cid; unsigned write_alignment; unsigned max_object_len, max_write_len; unsigned in_flight; map<ghobject_t, Object> contents; set<ghobject_t> available_objects; set<ghobject_t>::iterator next_available_object; set<ghobject_t> in_flight_objects; ObjectGenerator *object_gen; gen_type *rng; ObjectStore *store; ObjectStore::CollectionHandle ch; ceph::mutex lock = ceph::make_mutex("State lock"); ceph::condition_variable cond; struct EnterExit { const char *msg; explicit EnterExit(const char *m) : msg(m) { //cout << pthread_self() << " enter " << msg << std::endl; } ~EnterExit() { //cout << pthread_self() << " exit " << msg << std::endl; } }; class C_SyntheticOnReadable : public Context { public: SyntheticWorkloadState *state; ghobject_t hoid; C_SyntheticOnReadable(SyntheticWorkloadState *state, ghobject_t hoid) : state(state), hoid(hoid) {} void finish(int r) override { std::lock_guard locker{state->lock}; EnterExit ee("onreadable finish"); ASSERT_TRUE(state->in_flight_objects.count(hoid)); ASSERT_EQ(r, 0); state->in_flight_objects.erase(hoid); if (state->contents.count(hoid)) state->available_objects.insert(hoid); --(state->in_flight); state->cond.notify_all(); bufferlist r2; r = state->store->read(state->ch, hoid, 0, 
state->contents[hoid].data.length(), r2); ceph_assert(bl_eq(state->contents[hoid].data, r2)); state->cond.notify_all(); } }; class C_SyntheticOnStash : public Context { public: SyntheticWorkloadState *state; ghobject_t oid, noid; C_SyntheticOnStash(SyntheticWorkloadState *state, ghobject_t oid, ghobject_t noid) : state(state), oid(oid), noid(noid) {} void finish(int r) override { std::lock_guard locker{state->lock}; EnterExit ee("stash finish"); ASSERT_TRUE(state->in_flight_objects.count(oid)); ASSERT_EQ(r, 0); state->in_flight_objects.erase(oid); if (state->contents.count(noid)) state->available_objects.insert(noid); --(state->in_flight); bufferlist r2; r = state->store->read( state->ch, noid, 0, state->contents[noid].data.length(), r2); ceph_assert(bl_eq(state->contents[noid].data, r2)); state->cond.notify_all(); } }; class C_SyntheticOnClone : public Context { public: SyntheticWorkloadState *state; ghobject_t oid, noid; C_SyntheticOnClone(SyntheticWorkloadState *state, ghobject_t oid, ghobject_t noid) : state(state), oid(oid), noid(noid) {} void finish(int r) override { std::lock_guard locker{state->lock}; EnterExit ee("clone finish"); ASSERT_TRUE(state->in_flight_objects.count(oid)); ASSERT_EQ(r, 0); state->in_flight_objects.erase(oid); if (state->contents.count(oid)) state->available_objects.insert(oid); if (state->contents.count(noid)) state->available_objects.insert(noid); --(state->in_flight); bufferlist r2; r = state->store->read(state->ch, noid, 0, state->contents[noid].data.length(), r2); ceph_assert(bl_eq(state->contents[noid].data, r2)); state->cond.notify_all(); } }; static void filled_byte_array(bufferlist& bl, size_t size) { static const char alphanum[] = "0123456789" "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "abcdefghijklmnopqrstuvwxyz"; if (!size) { return; } bufferptr bp(size); for (unsigned int i = 0; i < size - 1; i++) { // severely limit entropy so we can compress... 
bp[i] = alphanum[rand() % 10]; //(sizeof(alphanum) - 1)]; } bp[size - 1] = '\0'; bl.append(bp); } SyntheticWorkloadState(ObjectStore *store, ObjectGenerator *gen, gen_type *rng, coll_t cid, unsigned max_size, unsigned max_write, unsigned alignment) : cid(cid), write_alignment(alignment), max_object_len(max_size), max_write_len(max_write), in_flight(0), next_available_object(available_objects.end()), object_gen(gen), rng(rng), store(store) {} int init() { ObjectStore::Transaction t; ch = store->create_new_collection(cid); t.create_collection(cid, 0); return queue_transaction(store, ch, std::move(t)); } void shutdown() { ghobject_t next; while (1) { vector<ghobject_t> objects; int r = collection_list(store, ch, next, ghobject_t::get_max(), 10, &objects, &next); ceph_assert(r >= 0); if (objects.size() == 0) break; ObjectStore::Transaction t; std::map<std::string, ceph::buffer::list> attrset; for (vector<ghobject_t>::iterator p = objects.begin(); p != objects.end(); ++p) { t.remove(cid, *p); } queue_transaction(store, ch, std::move(t)); } ObjectStore::Transaction t; t.remove_collection(cid); queue_transaction(store, ch, std::move(t)); } void statfs(store_statfs_t& stat) { store->statfs(&stat); } ghobject_t get_uniform_random_object(std::unique_lock<ceph::mutex>& locker) { cond.wait(locker, [this] { return in_flight < max_in_flight && !available_objects.empty(); }); boost::uniform_int<> choose(0, available_objects.size() - 1); int index = choose(*rng); set<ghobject_t>::iterator i = available_objects.begin(); for ( ; index > 0; --index, ++i) ; ghobject_t ret = *i; return ret; } ghobject_t get_next_object(std::unique_lock<ceph::mutex>& locker) { cond.wait(locker, [this] { return in_flight < max_in_flight && !available_objects.empty(); }); if (next_available_object == available_objects.end()) { next_available_object = available_objects.begin(); } ghobject_t ret = *next_available_object; ++next_available_object; return ret; } void 
  // Block (on the caller-held lock) until the in-flight op count drops below
  // max_in_flight.  (The 'void' return type is on the preceding line.)
  wait_for_ready(std::unique_lock<ceph::mutex>& locker) {
    cond.wait(locker, [this] { return in_flight < max_in_flight; });
  }

  // Block until every queued op has completed (in_flight drains to 0).
  void wait_for_done() {
    std::unique_lock locker{lock};
    cond.wait(locker, [this] { return in_flight == 0; });
  }

  // True while the total object population (available + in-flight) is below
  // the max_objects cap.
  bool can_create() {
    return (available_objects.size() + in_flight_objects.size()) < max_objects;
  }

  // True when at least one object exists to operate on.
  bool can_unlink() {
    return (available_objects.size() + in_flight_objects.size()) > 0;
  }

  // Build a random combination of CEPH_OSD_ALLOC_HINT_FLAG_* bits; each
  // flag pair (write, read, lifetime, compressibility) is chosen
  // independently, with roughly half the draws setting neither flag.
  unsigned get_random_alloc_hints() {
    unsigned f = 0;
    {
      boost::uniform_int<> u(0, 3);
      switch (u(*rng)) {
      case 1:
        f |= CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_WRITE;
        break;
      case 2:
        f |= CEPH_OSD_ALLOC_HINT_FLAG_RANDOM_WRITE;
        break;
      }
    }
    {
      boost::uniform_int<> u(0, 3);
      switch (u(*rng)) {
      case 1:
        f |= CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_READ;
        break;
      case 2:
        f |= CEPH_OSD_ALLOC_HINT_FLAG_RANDOM_READ;
        break;
      }
    }
    {
      // append_only, immutable
      boost::uniform_int<> u(0, 4);
      f |= u(*rng) << 4;
    }
    {
      boost::uniform_int<> u(0, 3);
      switch (u(*rng)) {
      case 1:
        f |= CEPH_OSD_ALLOC_HINT_FLAG_SHORTLIVED;
        break;
      case 2:
        f |= CEPH_OSD_ALLOC_HINT_FLAG_LONGLIVED;
        break;
      }
    }
    {
      boost::uniform_int<> u(0, 3);
      switch (u(*rng)) {
      case 1:
        f |= CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
        break;
      case 2:
        f |= CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
        break;
      }
    }
    return f;
  }

  // Create a brand-new object (touch + random alloc hint: expected object
  // size 2^17..2^22, expected write size 2^12..2^17) and queue the
  // transaction.  Returns the queue_transaction status, or -ENOSPC when the
  // object cap is reached.
  int touch() {
    std::unique_lock locker{lock};
    EnterExit ee("touch");
    if (!can_create())
      return -ENOSPC;
    wait_for_ready(locker);
    ghobject_t new_obj = object_gen->create_object(rng);
    available_objects.erase(new_obj);
    ObjectStore::Transaction t;
    t.touch(cid, new_obj);
    boost::uniform_int<> u(17, 22);
    boost::uniform_int<> v(12, 17);
    t.set_alloc_hint(cid, new_obj,
                     1ull << u(*rng),
                     1ull << v(*rng),
                     get_random_alloc_hints());
    ++in_flight;
    in_flight_objects.insert(new_obj);
    if (!contents.count(new_obj))
      contents[new_obj] = Object();
    t.register_on_applied(new C_SyntheticOnReadable(this, new_obj));
    int status = store->queue_transaction(ch, std::move(t));
    return status;
  }

  // Rename a random (preferably non-empty; up to 20 picks) object to a
  // generation-bumped variant of itself via collection_move_rename, carrying
  // its tracked data/attrs over to the new name.
  int stash() {
    std::unique_lock locker{lock};
    EnterExit ee("stash");
    if (!can_unlink())
      return -ENOENT;
    if (!can_create())
      return -ENOSPC;
    wait_for_ready(locker);

    ghobject_t old_obj;
    int max = 20;
    do {
      old_obj = get_uniform_random_object(locker);
    } while (--max && !contents[old_obj].data.length());
    available_objects.erase(old_obj);
    ghobject_t new_obj = old_obj;
    new_obj.generation++;
    available_objects.erase(new_obj);

    ObjectStore::Transaction t;
    t.collection_move_rename(cid, old_obj, cid, new_obj);
    ++in_flight;
    in_flight_objects.insert(old_obj);

    // Mirror the rename in the in-memory expected-contents map.
    contents[new_obj].attrs = contents[old_obj].attrs;
    contents[new_obj].data = contents[old_obj].data;
    contents.erase(old_obj);
    t.register_on_applied(new C_SyntheticOnStash(this, old_obj, new_obj));
    int status = store->queue_transaction(ch, std::move(t));
    return status;
  }

  // Clone a random (preferably non-empty) object to a freshly generated name
  // whose hash is forced to match the source's, then queue the clone.
  int clone() {
    std::unique_lock locker{lock};
    EnterExit ee("clone");
    if (!can_unlink())
      return -ENOENT;
    if (!can_create())
      return -ENOSPC;
    wait_for_ready(locker);

    ghobject_t old_obj;
    int max = 20;
    do {
      old_obj = get_uniform_random_object(locker);
    } while (--max && !contents[old_obj].data.length());
    available_objects.erase(old_obj);

    ghobject_t new_obj = object_gen->create_object(rng);
    // make the hash match
    new_obj.hobj.set_hash(old_obj.hobj.get_hash());
    available_objects.erase(new_obj);

    ObjectStore::Transaction t;
    t.clone(cid, old_obj, new_obj);
    ++in_flight;
    in_flight_objects.insert(old_obj);

    // Clone keeps the source; copy (not move) its expected contents.
    contents[new_obj].attrs = contents[old_obj].attrs;
    contents[new_obj].data = contents[old_obj].data;

    t.register_on_applied(new C_SyntheticOnClone(this, old_obj, new_obj));
    int status = store->queue_transaction(ch, std::move(t));
    return status;
  }

  // Clone a byte range from a random non-empty source object into another
  // object.  (The body continues past this chunk boundary.)
  int clone_range() {
    std::unique_lock locker{lock};
    EnterExit ee("clone_range");
    if (!can_unlink())
      return -ENOENT;
    if (!can_create())
      return -ENOSPC;
    wait_for_ready(locker);

    ghobject_t old_obj;
    int max = 20;
    do {
      old_obj = get_uniform_random_object(locker);
    } while (--max && !contents[old_obj].data.length());
    bufferlist &srcdata = contents[old_obj].data;
    if (srcdata.length() == 0) {
      // Gave up finding a non-empty source after 20 tries; no-op.
      return 0;
    }
available_objects.erase(old_obj); ghobject_t new_obj = get_uniform_random_object(locker); available_objects.erase(new_obj); boost::uniform_int<> u1(0, max_object_len - max_write_len); boost::uniform_int<> u2(0, max_write_len); uint64_t srcoff = u1(*rng); // make src and dst offsets match, since that's what the osd does uint64_t dstoff = srcoff; //u1(*rng); uint64_t len = u2(*rng); if (write_alignment) { srcoff = round_up_to(srcoff, write_alignment); dstoff = round_up_to(dstoff, write_alignment); len = round_up_to(len, write_alignment); } if (srcoff > srcdata.length() - 1) { srcoff = srcdata.length() - 1; } if (srcoff + len > srcdata.length()) { len = srcdata.length() - srcoff; } if (0) cout << __func__ << " from " << srcoff << "~" << len << " (size " << srcdata.length() << ") to " << dstoff << "~" << len << std::endl; ObjectStore::Transaction t; t.clone_range(cid, old_obj, new_obj, srcoff, len, dstoff); ++in_flight; in_flight_objects.insert(old_obj); bufferlist bl; if (srcoff < srcdata.length()) { if (srcoff + len > srcdata.length()) { bl.substr_of(srcdata, srcoff, srcdata.length() - srcoff); } else { bl.substr_of(srcdata, srcoff, len); } } bufferlist& dstdata = contents[new_obj].data; if (dstdata.length() <= dstoff) { if (bl.length() > 0) { dstdata.append_zero(dstoff - dstdata.length()); dstdata.append(bl); } } else { bufferlist value; ceph_assert(dstdata.length() > dstoff); dstdata.cbegin().copy(dstoff, value); value.append(bl); if (value.length() < dstdata.length()) dstdata.cbegin(value.length()).copy( dstdata.length() - value.length(), value); value.swap(dstdata); } t.register_on_applied(new C_SyntheticOnClone(this, old_obj, new_obj)); int status = store->queue_transaction(ch, std::move(t)); return status; } int write() { std::unique_lock locker{lock}; EnterExit ee("write"); if (!can_unlink()) return -ENOENT; wait_for_ready(locker); ghobject_t new_obj = get_uniform_random_object(locker); available_objects.erase(new_obj); ObjectStore::Transaction t; 
boost::uniform_int<> u1(0, max_object_len - max_write_len); boost::uniform_int<> u2(0, max_write_len); uint64_t offset = u1(*rng); uint64_t len = u2(*rng); bufferlist bl; if (write_alignment) { offset = round_up_to(offset, write_alignment); len = round_up_to(len, write_alignment); } filled_byte_array(bl, len); bufferlist& data = contents[new_obj].data; if (data.length() <= offset) { if (len > 0) { data.append_zero(offset-data.length()); data.append(bl); } } else { bufferlist value; ceph_assert(data.length() > offset); data.cbegin().copy(offset, value); value.append(bl); if (value.length() < data.length()) data.cbegin(value.length()).copy( data.length()-value.length(), value); value.swap(data); } t.write(cid, new_obj, offset, len, bl); ++in_flight; in_flight_objects.insert(new_obj); t.register_on_applied(new C_SyntheticOnReadable(this, new_obj)); int status = store->queue_transaction(ch, std::move(t)); return status; } int truncate() { std::unique_lock locker{lock}; EnterExit ee("truncate"); if (!can_unlink()) return -ENOENT; wait_for_ready(locker); ghobject_t obj = get_uniform_random_object(locker); available_objects.erase(obj); ObjectStore::Transaction t; boost::uniform_int<> choose(0, max_object_len); size_t len = choose(*rng); if (write_alignment) { len = round_up_to(len, write_alignment); } t.truncate(cid, obj, len); ++in_flight; in_flight_objects.insert(obj); bufferlist& data = contents[obj].data; if (data.length() <= len) { data.append_zero(len - data.length()); } else { bufferlist bl; data.cbegin().copy(len, bl); bl.swap(data); } t.register_on_applied(new C_SyntheticOnReadable(this, obj)); int status = store->queue_transaction(ch, std::move(t)); return status; } int zero() { std::unique_lock locker{lock}; EnterExit ee("zero"); if (!can_unlink()) return -ENOENT; wait_for_ready(locker); ghobject_t new_obj = get_uniform_random_object(locker); available_objects.erase(new_obj); ObjectStore::Transaction t; boost::uniform_int<> u1(0, max_object_len - 
max_write_len); boost::uniform_int<> u2(0, max_write_len); uint64_t offset = u1(*rng); uint64_t len = u2(*rng); if (write_alignment) { offset = round_up_to(offset, write_alignment); len = round_up_to(len, write_alignment); } if (len > 0) { auto& data = contents[new_obj].data; if (data.length() < offset + len) { data.append_zero(offset+len-data.length()); } bufferlist n; n.substr_of(data, 0, offset); n.append_zero(len); if (data.length() > offset + len) data.cbegin(offset + len).copy(data.length() - offset - len, n); data.swap(n); } t.zero(cid, new_obj, offset, len); ++in_flight; in_flight_objects.insert(new_obj); t.register_on_applied(new C_SyntheticOnReadable(this, new_obj)); int status = store->queue_transaction(ch, std::move(t)); return status; } void read() { EnterExit ee("read"); boost::uniform_int<> u1(0, max_object_len/2); boost::uniform_int<> u2(0, max_object_len); uint64_t offset = u1(*rng); uint64_t len = u2(*rng); if (offset > len) swap(offset, len); ghobject_t obj; bufferlist expected; int r; { std::unique_lock locker{lock}; EnterExit ee("read locked"); if (!can_unlink()) return ; wait_for_ready(locker); obj = get_uniform_random_object(locker); expected = contents[obj].data; } bufferlist bl, result; if (0) cout << " obj " << obj << " size " << expected.length() << " offset " << offset << " len " << len << std::endl; r = store->read(ch, obj, offset, len, result); if (offset >= expected.length()) { ASSERT_EQ(r, 0); } else { size_t max_len = expected.length() - offset; if (len > max_len) len = max_len; ceph_assert(len == result.length()); ASSERT_EQ(len, result.length()); expected.cbegin(offset).copy(len, bl); ASSERT_EQ(r, (int)len); ASSERT_TRUE(bl_eq(bl, result)); } } int setattrs() { std::unique_lock locker{lock}; EnterExit ee("setattrs"); if (!can_unlink()) return -ENOENT; wait_for_ready(locker); ghobject_t obj = get_uniform_random_object(locker); available_objects.erase(obj); ObjectStore::Transaction t; boost::uniform_int<> u0(1, max_attr_size); 
boost::uniform_int<> u1(4, max_attr_name_len); boost::uniform_int<> u2(4, max_attr_value_len); boost::uniform_int<> u3(0, 100); uint64_t size = u0(*rng); uint64_t name_len; map<string, bufferlist, less<>> attrs; set<string> keys; for (map<string, bufferlist>::iterator it = contents[obj].attrs.begin(); it != contents[obj].attrs.end(); ++it) keys.insert(it->first); while (size--) { bufferlist name, value; uint64_t get_exist = u3(*rng); uint64_t value_len = u2(*rng); filled_byte_array(value, value_len); if (get_exist < 50 && keys.size()) { set<string>::iterator k = keys.begin(); attrs[*k] = value; contents[obj].attrs[*k] = value; keys.erase(k); } else { name_len = u1(*rng); filled_byte_array(name, name_len); attrs[name.c_str()] = value; contents[obj].attrs[name.c_str()] = value; } } t.setattrs(cid, obj, attrs); ++in_flight; in_flight_objects.insert(obj); t.register_on_applied(new C_SyntheticOnReadable(this, obj)); int status = store->queue_transaction(ch, std::move(t)); return status; } int set_fixed_attrs(size_t entries, size_t key_size, size_t val_size) { std::unique_lock locker{ lock }; EnterExit ee("setattrs"); if (!can_unlink()) return -ENOENT; wait_for_ready(locker); ghobject_t obj = get_next_object(locker); available_objects.erase(obj); ObjectStore::Transaction t; map<string, bufferlist, less<>> attrs; set<string> keys; while (entries--) { bufferlist name, value; filled_byte_array(value, val_size); filled_byte_array(name, key_size); attrs[name.c_str()] = value; contents[obj].attrs[name.c_str()] = value; } t.setattrs(cid, obj, attrs); ++in_flight; in_flight_objects.insert(obj); t.register_on_applied(new C_SyntheticOnReadable(this, obj)); int status = store->queue_transaction(ch, std::move(t)); return status; } void getattrs() { EnterExit ee("getattrs"); ghobject_t obj; map<string, bufferlist> expected; { std::unique_lock locker{lock}; EnterExit ee("getattrs locked"); if (!can_unlink()) return ; wait_for_ready(locker); int retry = 10; do { obj = 
get_uniform_random_object(locker); if (!--retry) return ; } while (contents[obj].attrs.empty()); expected = contents[obj].attrs; } map<string, bufferlist, less<>> attrs; int r = store->getattrs(ch, obj, attrs); ASSERT_TRUE(r == 0); ASSERT_TRUE(attrs.size() == expected.size()); for (map<string, bufferlist>::iterator it = expected.begin(); it != expected.end(); ++it) { ASSERT_TRUE(bl_eq(attrs[it->first], it->second)); } } void getattr() { EnterExit ee("getattr"); ghobject_t obj; int r; int retry; map<string, bufferlist> expected; { std::unique_lock locker{lock}; EnterExit ee("getattr locked"); if (!can_unlink()) return ; wait_for_ready(locker); retry = 10; do { obj = get_uniform_random_object(locker); if (!--retry) return ; } while (contents[obj].attrs.empty()); expected = contents[obj].attrs; } boost::uniform_int<> u(0, expected.size()-1); retry = u(*rng); map<string, bufferlist>::iterator it = expected.begin(); while (retry) { retry--; ++it; } bufferlist bl; r = store->getattr(ch, obj, it->first, bl); ASSERT_EQ(r, 0); ASSERT_TRUE(bl_eq(it->second, bl)); } int rmattr() { std::unique_lock locker{lock}; EnterExit ee("rmattr"); if (!can_unlink()) return -ENOENT; wait_for_ready(locker); ghobject_t obj; int retry = 10; do { obj = get_uniform_random_object(locker); if (!--retry) return 0; } while (contents[obj].attrs.empty()); boost::uniform_int<> u(0, contents[obj].attrs.size()-1); retry = u(*rng); map<string, bufferlist>::iterator it = contents[obj].attrs.begin(); while (retry) { retry--; ++it; } available_objects.erase(obj); ObjectStore::Transaction t; t.rmattr(cid, obj, it->first); contents[obj].attrs.erase(it->first); ++in_flight; in_flight_objects.insert(obj); t.register_on_applied(new C_SyntheticOnReadable(this, obj)); int status = store->queue_transaction(ch, std::move(t)); return status; } void fsck(bool deep) { std::unique_lock locker{lock}; EnterExit ee("fsck"); cond.wait(locker, [this] { return in_flight == 0; }); ch.reset(); store->umount(); int r = 
store->fsck(deep); ceph_assert(r == 0 || r == -EOPNOTSUPP); store->mount(); ch = store->open_collection(cid); } void scan() { std::unique_lock locker{lock}; EnterExit ee("scan"); cond.wait(locker, [this] { return in_flight == 0; }); vector<ghobject_t> objects; set<ghobject_t> objects_set, objects_set2; ghobject_t next, current; while (1) { //cerr << "scanning..." << std::endl; int r = collection_list(store, ch, current, ghobject_t::get_max(), 100, &objects, &next); ASSERT_EQ(r, 0); ASSERT_TRUE(sorted(objects)); objects_set.insert(objects.begin(), objects.end()); objects.clear(); if (next.is_max()) break; current = next; } if (objects_set.size() != available_objects.size()) { for (set<ghobject_t>::iterator p = objects_set.begin(); p != objects_set.end(); ++p) if (available_objects.count(*p) == 0) { cerr << "+ " << *p << std::endl; ceph_abort(); } for (set<ghobject_t>::iterator p = available_objects.begin(); p != available_objects.end(); ++p) if (objects_set.count(*p) == 0) cerr << "- " << *p << std::endl; //cerr << " objects_set: " << objects_set << std::endl; //cerr << " available_set: " << available_objects << std::endl; ceph_abort_msg("badness"); } ASSERT_EQ(objects_set.size(), available_objects.size()); for (set<ghobject_t>::iterator i = objects_set.begin(); i != objects_set.end(); ++i) { ASSERT_GT(available_objects.count(*i), (unsigned)0); } int r = collection_list(store, ch, ghobject_t(), ghobject_t::get_max(), INT_MAX, &objects, 0); ASSERT_EQ(r, 0); objects_set2.insert(objects.begin(), objects.end()); ASSERT_EQ(objects_set2.size(), available_objects.size()); for (set<ghobject_t>::iterator i = objects_set2.begin(); i != objects_set2.end(); ++i) { ASSERT_GT(available_objects.count(*i), (unsigned)0); if (available_objects.count(*i) == 0) { cerr << "+ " << *i << std::endl; } } } void stat() { EnterExit ee("stat"); ghobject_t hoid; uint64_t expected; { std::unique_lock locker{lock}; EnterExit ee("stat lock1"); if (!can_unlink()) return ; hoid = 
get_uniform_random_object(locker); in_flight_objects.insert(hoid); available_objects.erase(hoid); ++in_flight; expected = contents[hoid].data.length(); } struct stat buf; int r = store->stat(ch, hoid, &buf); ASSERT_EQ(0, r); ceph_assert((uint64_t)buf.st_size == expected); ASSERT_TRUE((uint64_t)buf.st_size == expected); { std::lock_guard locker{lock}; EnterExit ee("stat lock2"); --in_flight; cond.notify_all(); in_flight_objects.erase(hoid); available_objects.insert(hoid); } } int unlink() { std::unique_lock locker{lock}; EnterExit ee("unlink"); if (!can_unlink()) return -ENOENT; ghobject_t to_remove = get_uniform_random_object(locker); ObjectStore::Transaction t; t.remove(cid, to_remove); ++in_flight; available_objects.erase(to_remove); in_flight_objects.insert(to_remove); contents.erase(to_remove); t.register_on_applied(new C_SyntheticOnReadable(this, to_remove)); int status = store->queue_transaction(ch, std::move(t)); return status; } void print_internal_state() { std::lock_guard locker{lock}; cerr << "available_objects: " << available_objects.size() << " in_flight_objects: " << in_flight_objects.size() << " total objects: " << in_flight_objects.size() + available_objects.size() << " in_flight " << in_flight << std::endl; } }; void StoreTest::doSyntheticTest( int num_ops, uint64_t max_obj, uint64_t max_wr, uint64_t align) { MixedGenerator gen(555); gen_type rng(time(NULL)); coll_t cid(spg_t(pg_t(0,555), shard_id_t::NO_SHARD)); SetVal(g_conf(), "bluestore_fsck_on_mount", "false"); SetVal(g_conf(), "bluestore_fsck_on_umount", "false"); g_ceph_context->_conf.apply_changes(nullptr); SyntheticWorkloadState test_obj(store.get(), &gen, &rng, cid, max_obj, max_wr, align); test_obj.init(); for (int i = 0; i < num_ops/10; ++i) { if (!(i % 500)) cerr << "seeding object " << i << std::endl; test_obj.touch(); } for (int i = 0; i < num_ops; ++i) { if (!(i % 1000)) { cerr << "Op " << i << std::endl; test_obj.print_internal_state(); } boost::uniform_int<> true_false(0, 999); int 
val = true_false(rng); if (val > 998) { test_obj.fsck(true); } else if (val > 997) { test_obj.fsck(false); } else if (val > 970) { test_obj.scan(); } else if (val > 950) { test_obj.stat(); } else if (val > 850) { test_obj.zero(); } else if (val > 800) { test_obj.unlink(); } else if (val > 550) { test_obj.write(); } else if (val > 500) { test_obj.clone(); } else if (val > 450) { test_obj.clone_range(); } else if (val > 300) { test_obj.stash(); } else if (val > 100) { test_obj.read(); } else { test_obj.truncate(); } } test_obj.wait_for_done(); test_obj.shutdown(); } TEST_P(StoreTest, Synthetic) { doSyntheticTest(10000, 400*1024, 40*1024, 0); } #if defined(WITH_BLUESTORE) TEST_P(StoreTestSpecificAUSize, SyntheticMatrixSharding) { if (string(GetParam()) != "bluestore") return; const char *m[][10] = { { "bluestore_min_alloc_size", "4096", 0 }, // must be the first! { "num_ops", "50000", 0 }, { "max_write", "65536", 0 }, { "max_size", "262144", 0 }, { "alignment", "4096", 0 }, { "bluestore_max_blob_size", "65536", 0 }, { "bluestore_extent_map_shard_min_size", "60", 0 }, { "bluestore_extent_map_shard_max_size", "300", 0 }, { "bluestore_extent_map_shard_target_size", "150", 0 }, { "bluestore_default_buffered_read", "true", 0 }, { "bluestore_default_buffered_write", "true", 0 }, { 0 }, }; do_matrix(m, std::bind(&StoreTest::doSyntheticTest, this, _1, _2, _3, _4)); } TEST_P(StoreTestSpecificAUSize, ZipperPatternSharded) { if(string(GetParam()) != "bluestore") return; StartDeferred(4096); int r; coll_t cid; ghobject_t a(hobject_t(sobject_t("Object 1", CEPH_NOSNAP))); auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); cerr << "Creating collection " << cid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } bufferlist bl; int len = 4096; bufferptr bp(len); bp.zero(); bl.append(bp); for (int i=0; i<1000; ++i) { ObjectStore::Transaction t; t.write(cid, a, i*2*len, len, bl, 0); r = 
queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } for (int i=0; i<1000; ++i) { ObjectStore::Transaction t; t.write(cid, a, i*2*len + 1, len, bl, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; t.remove(cid, a); t.remove_collection(cid); cerr << "Cleaning" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTestSpecificAUSize, SyntheticMatrixCsumAlgorithm) { if (string(GetParam()) != "bluestore") return; const char *m[][10] = { { "bluestore_min_alloc_size", "65536", 0 }, // must be the first! { "max_write", "65536", 0 }, { "max_size", "1048576", 0 }, { "alignment", "16", 0 }, { "bluestore_csum_type", "crc32c", "crc32c_16", "crc32c_8", "xxhash32", "xxhash64", "none", 0 }, { "bluestore_default_buffered_write", "false", 0 }, { 0 }, }; do_matrix(m, std::bind(&StoreTest::doSyntheticTest, this, _1, _2, _3, _4)); } TEST_P(StoreTestSpecificAUSize, SyntheticMatrixCsumVsCompression) { if (string(GetParam()) != "bluestore") return; const char *m[][10] = { { "bluestore_min_alloc_size", "4096", "16384", 0 }, //to be the first! { "max_write", "131072", 0 }, { "max_size", "262144", 0 }, { "alignment", "512", 0 }, { "bluestore_compression_mode", "force", 0}, { "bluestore_compression_algorithm", "snappy", "zlib", 0 }, { "bluestore_csum_type", "crc32c", 0 }, { "bluestore_default_buffered_read", "true", "false", 0 }, { "bluestore_default_buffered_write", "true", "false", 0 }, { "bluestore_sync_submit_transaction", "false", 0 }, { 0 }, }; do_matrix(m, std::bind(&StoreTest::doSyntheticTest, this, _1, _2, _3, _4)); } TEST_P(StoreTestSpecificAUSize, SyntheticMatrixCompression) { if (string(GetParam()) != "bluestore") return; const char *m[][10] = { { "bluestore_min_alloc_size", "4096", "65536", 0 }, // to be the first! 
{ "max_write", "1048576", 0 }, { "max_size", "4194304", 0 }, { "alignment", "65536", 0 }, { "bluestore_compression_mode", "force", "aggressive", "passive", "none", 0}, { "bluestore_default_buffered_write", "false", 0 }, { "bluestore_sync_submit_transaction", "true", 0 }, { 0 }, }; do_matrix(m, std::bind(&StoreTest::doSyntheticTest, this, _1, _2, _3, _4)); } TEST_P(StoreTestSpecificAUSize, SyntheticMatrixCompressionAlgorithm) { if (string(GetParam()) != "bluestore") return; const char *m[][10] = { { "bluestore_min_alloc_size", "4096", "65536", 0 }, // to be the first! { "max_write", "1048576", 0 }, { "max_size", "4194304", 0 }, { "alignment", "65536", 0 }, { "bluestore_compression_algorithm", "zlib", "snappy", 0 }, { "bluestore_compression_mode", "force", 0 }, { "bluestore_default_buffered_write", "false", 0 }, { 0 }, }; do_matrix(m, std::bind(&StoreTest::doSyntheticTest, this, _1, _2, _3, _4)); } TEST_P(StoreTestSpecificAUSize, SyntheticMatrixNoCsum) { if (string(GetParam()) != "bluestore") return; const char *m[][10] = { { "bluestore_min_alloc_size", "4096", "65536", 0 }, // to be the first! { "max_write", "65536", 0 }, { "max_size", "1048576", 0 }, { "alignment", "512", 0 }, { "bluestore_max_blob_size", "262144", 0 }, { "bluestore_compression_mode", "force", "none", 0}, { "bluestore_csum_type", "none", 0}, { "bluestore_default_buffered_read", "true", "false", 0 }, { "bluestore_default_buffered_write", "true", 0 }, { "bluestore_sync_submit_transaction", "true", "false", 0 }, { 0 }, }; do_matrix(m, std::bind(&StoreTest::doSyntheticTest, this, _1, _2, _3, _4)); } TEST_P(StoreTestSpecificAUSize, SyntheticMatrixPreferDeferred) { if (string(GetParam()) != "bluestore") return; const char *m[][10] = { { "bluestore_min_alloc_size", "4096", "65536", 0 }, // to be the first! 
{ "max_write", "65536", 0 }, { "max_size", "1048576", 0 }, { "alignment", "512", 0 }, { "bluestore_max_blob_size", "262144", 0 }, { "bluestore_compression_mode", "force", "none", 0}, { "bluestore_prefer_deferred_size", "32768", "0", 0}, { 0 }, }; do_matrix(m, std::bind(&StoreTest::doSyntheticTest, this, _1, _2, _3, _4)); } #endif // WITH_BLUESTORE TEST_P(StoreTest, AttrSynthetic) { MixedGenerator gen(447); gen_type rng(time(NULL)); coll_t cid(spg_t(pg_t(0,447),shard_id_t::NO_SHARD)); SyntheticWorkloadState test_obj(store.get(), &gen, &rng, cid, 40*1024, 4*1024, 0); test_obj.init(); for (int i = 0; i < 500; ++i) { if (!(i % 10)) cerr << "seeding object " << i << std::endl; test_obj.touch(); } for (int i = 0; i < 1000; ++i) { if (!(i % 100)) { cerr << "Op " << i << std::endl; test_obj.print_internal_state(); } boost::uniform_int<> true_false(0, 99); int val = true_false(rng); if (val > 97) { test_obj.scan(); } else if (val > 93) { test_obj.stat(); } else if (val > 75) { test_obj.rmattr(); } else if (val > 47) { test_obj.setattrs(); } else if (val > 45) { test_obj.clone(); } else if (val > 37) { test_obj.stash(); } else if (val > 30) { test_obj.getattrs(); } else { test_obj.getattr(); } } test_obj.wait_for_done(); test_obj.shutdown(); } TEST_P(StoreTest, HashCollisionTest) { int64_t poolid = 11; coll_t cid(spg_t(pg_t(0,poolid),shard_id_t::NO_SHARD)); int r; auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } string base = ""; for (int i = 0; i < 100; ++i) base.append("aaaaa"); set<ghobject_t> created; for (int n = 0; n < 10; ++n) { char nbuf[100]; sprintf(nbuf, "n%d", n); for (int i = 0; i < 1000; ++i) { char buf[100]; sprintf(buf, "%d", i); if (!(i % 100)) { cerr << "Object n" << n << " "<< i << std::endl; } ghobject_t hoid(hobject_t(string(buf) + base, string(), CEPH_NOSNAP, 0, poolid, string(nbuf))); { ObjectStore::Transaction t; t.touch(cid, 
hoid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } created.insert(hoid); } } vector<ghobject_t> objects; r = collection_list(store, ch, ghobject_t(), ghobject_t::get_max(), INT_MAX, &objects, 0); ASSERT_EQ(r, 0); set<ghobject_t> listed(objects.begin(), objects.end()); cerr << "listed.size() is " << listed.size() << " and created.size() is " << created.size() << std::endl; ASSERT_TRUE(listed.size() == created.size()); objects.clear(); listed.clear(); ghobject_t current, next; while (1) { r = collection_list(store, ch, current, ghobject_t::get_max(), 60, &objects, &next); ASSERT_EQ(r, 0); ASSERT_TRUE(sorted(objects)); for (vector<ghobject_t>::iterator i = objects.begin(); i != objects.end(); ++i) { if (listed.count(*i)) cerr << *i << " repeated" << std::endl; listed.insert(*i); } if (objects.size() < 50) { ASSERT_TRUE(next.is_max()); break; } objects.clear(); current = next; } cerr << "listed.size() is " << listed.size() << std::endl; ASSERT_TRUE(listed.size() == created.size()); for (set<ghobject_t>::iterator i = listed.begin(); i != listed.end(); ++i) { ASSERT_TRUE(created.count(*i)); } for (set<ghobject_t>::iterator i = created.begin(); i != created.end(); ++i) { ObjectStore::Transaction t; t.remove(cid, *i); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } ObjectStore::Transaction t; t.remove_collection(cid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } TEST_P(StoreTest, HashCollisionSorting) { bool disable_legacy = (string(GetParam()) == "bluestore"); char buf121664318_1[] = {18, -119, -121, -111, 0}; char buf121664318_2[] = {19, 127, -121, 32, 0}; char buf121664318_3[] = {19, -118, 15, 19, 0}; char buf121664318_4[] = {28, 27, -116, -113, 0}; char buf121664318_5[] = {28, 27, -115, -124, 0}; char buf121666222_1[] = {18, -119, -120, -111, 0}; char buf121666222_2[] = {19, 127, -120, 32, 0}; char buf121666222_3[] = {19, -118, 15, 30, 0}; char buf121666222_4[] = {29, 17, -126, -113, 0}; char 
buf121666222_5[] = {29, 17, -125, -124, 0}; std::map<uint32_t, std::vector<std::string>> object_names = { {121664318, {{buf121664318_1}, {buf121664318_2}, {buf121664318_3}, {buf121664318_4}, {buf121664318_5}}}, {121666222, {{buf121666222_1}, {buf121666222_2}, {buf121666222_3}, {buf121666222_4}, {buf121666222_5}}}}; int64_t poolid = 111; coll_t cid = coll_t(spg_t(pg_t(0, poolid), shard_id_t::NO_SHARD)); auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); int r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } std::set<ghobject_t> created; for (auto &[hash, names] : object_names) { for (auto &name : names) { ghobject_t hoid(hobject_t(sobject_t(name, CEPH_NOSNAP), string(), hash, poolid, string())); ASSERT_EQ(hash, hoid.hobj.get_hash()); ObjectStore::Transaction t; t.touch(cid, hoid); int r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); created.insert(hoid); } } vector<ghobject_t> objects; int r = collection_list(store, ch, ghobject_t(), ghobject_t::get_max(), INT_MAX, &objects, 0, disable_legacy); ASSERT_EQ(r, 0); ASSERT_EQ(created.size(), objects.size()); auto it = objects.begin(); for (auto &hoid : created) { ASSERT_EQ(hoid, *it); it++; } for (auto i = created.begin(); i != created.end(); i++) { auto j = i; for (j++; j != created.end(); j++) { std::set<ghobject_t> created_sub(i, j); objects.clear(); ghobject_t next; r = collection_list(store, ch, *i, ghobject_t::get_max(), created_sub.size(), &objects, &next, disable_legacy); ASSERT_EQ(r, 0); ASSERT_EQ(created_sub.size(), objects.size()); it = objects.begin(); for (auto &hoid : created_sub) { ASSERT_EQ(hoid, *it); it++; } if (j == created.end()) { ASSERT_TRUE(next.is_max()); } else { ASSERT_EQ(*j, next); } } } for (auto i = created.begin(); i != created.end(); i++) { auto j = i; for (j++; j != created.end(); j++) { std::set<ghobject_t> created_sub(i, j); objects.clear(); ghobject_t next; r = collection_list(store, ch, *i, *j, 
INT_MAX, &objects, &next, disable_legacy); ASSERT_EQ(r, 0); ASSERT_EQ(created_sub.size(), objects.size()); it = objects.begin(); for (auto &hoid : created_sub) { ASSERT_EQ(hoid, *it); it++; } if (j == created.end()) { ASSERT_TRUE(next.is_max()); } else { ASSERT_EQ(*j, next); } } } } TEST_P(StoreTest, ScrubTest) { int64_t poolid = 111; coll_t cid(spg_t(pg_t(0, poolid),shard_id_t(1))); int r; auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } string base = "aaaaa"; set<ghobject_t> created; for (int i = 0; i < 1000; ++i) { char buf[100]; sprintf(buf, "%d", i); if (!(i % 5)) { cerr << "Object " << i << std::endl; } ghobject_t hoid(hobject_t(string(buf) + base, string(), CEPH_NOSNAP, i, poolid, ""), ghobject_t::NO_GEN, shard_id_t(1)); { ObjectStore::Transaction t; t.touch(cid, hoid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } created.insert(hoid); } // Add same hobject_t but different generation { ghobject_t hoid1(hobject_t("same-object", string(), CEPH_NOSNAP, 0, poolid, ""), ghobject_t::NO_GEN, shard_id_t(1)); ghobject_t hoid2(hobject_t("same-object", string(), CEPH_NOSNAP, 0, poolid, ""), (gen_t)1, shard_id_t(1)); ghobject_t hoid3(hobject_t("same-object", string(), CEPH_NOSNAP, 0, poolid, ""), (gen_t)2, shard_id_t(1)); ObjectStore::Transaction t; t.touch(cid, hoid1); t.touch(cid, hoid2); t.touch(cid, hoid3); r = queue_transaction(store, ch, std::move(t)); created.insert(hoid1); created.insert(hoid2); created.insert(hoid3); ASSERT_EQ(r, 0); } vector<ghobject_t> objects; r = collection_list(store, ch, ghobject_t(), ghobject_t::get_max(), INT_MAX, &objects, 0); ASSERT_EQ(r, 0); set<ghobject_t> listed(objects.begin(), objects.end()); cerr << "listed.size() is " << listed.size() << " and created.size() is " << created.size() << std::endl; ASSERT_TRUE(listed.size() == created.size()); objects.clear(); listed.clear(); ghobject_t 
current, next; while (1) { r = collection_list(store, ch, current, ghobject_t::get_max(), 60, &objects, &next); ASSERT_EQ(r, 0); ASSERT_TRUE(sorted(objects)); for (vector<ghobject_t>::iterator i = objects.begin(); i != objects.end(); ++i) { if (listed.count(*i)) cerr << *i << " repeated" << std::endl; listed.insert(*i); } if (objects.size() < 50) { ASSERT_TRUE(next.is_max()); break; } objects.clear(); current = next.get_boundary(); } cerr << "listed.size() is " << listed.size() << std::endl; ASSERT_TRUE(listed.size() == created.size()); for (set<ghobject_t>::iterator i = listed.begin(); i != listed.end(); ++i) { ASSERT_TRUE(created.count(*i)); } for (set<ghobject_t>::iterator i = created.begin(); i != created.end(); ++i) { ObjectStore::Transaction t; t.remove(cid, *i); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } ObjectStore::Transaction t; t.remove_collection(cid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } TEST_P(StoreTest, OMapTest) { coll_t cid; ghobject_t hoid(hobject_t("tesomap", "", CEPH_NOSNAP, 0, 0, "")); auto ch = store->create_new_collection(cid); int r; { ObjectStore::Transaction t; t.create_collection(cid, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } map<string, bufferlist> attrs; { ObjectStore::Transaction t; t.touch(cid, hoid); t.omap_clear(cid, hoid); map<string, bufferlist> start_set; t.omap_setkeys(cid, hoid, start_set); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } for (int i = 0; i < 100; i++) { if (!(i%5)) { std::cout << "On iteration " << i << std::endl; } ObjectStore::Transaction t; bufferlist bl; map<string, bufferlist> cur_attrs; r = store->omap_get(ch, hoid, &bl, &cur_attrs); ASSERT_EQ(r, 0); for (map<string, bufferlist>::iterator j = attrs.begin(); j != attrs.end(); ++j) { bool correct = cur_attrs.count(j->first) && string(cur_attrs[j->first].c_str()) == string(j->second.c_str()); if (!correct) { std::cout << j->first << " is present in cur_attrs 
" << cur_attrs.count(j->first) << " times " << std::endl; if (cur_attrs.count(j->first) > 0) { std::cout << j->second.c_str() << " : " << cur_attrs[j->first].c_str() << std::endl; } } ASSERT_EQ(correct, true); } ASSERT_EQ(attrs.size(), cur_attrs.size()); char buf[100]; snprintf(buf, sizeof(buf), "%d", i); bl.clear(); bufferptr bp(buf, strlen(buf) + 1); bl.append(bp); map<string, bufferlist> to_add; to_add.insert(pair<string, bufferlist>("key-" + string(buf), bl)); attrs.insert(pair<string, bufferlist>("key-" + string(buf), bl)); t.omap_setkeys(cid, hoid, to_add); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } int i = 0; while (attrs.size()) { if (!(i%5)) { std::cout << "removal: On iteration " << i << std::endl; } ObjectStore::Transaction t; bufferlist bl; map<string, bufferlist> cur_attrs; r = store->omap_get(ch, hoid, &bl, &cur_attrs); ASSERT_EQ(r, 0); for (map<string, bufferlist>::iterator j = attrs.begin(); j != attrs.end(); ++j) { bool correct = cur_attrs.count(j->first) && string(cur_attrs[j->first].c_str()) == string(j->second.c_str()); if (!correct) { std::cout << j->first << " is present in cur_attrs " << cur_attrs.count(j->first) << " times " << std::endl; if (cur_attrs.count(j->first) > 0) { std::cout << j->second.c_str() << " : " << cur_attrs[j->first].c_str() << std::endl; } } ASSERT_EQ(correct, true); } string to_remove = attrs.begin()->first; t.omap_rmkey(cid, hoid, to_remove); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); attrs.erase(to_remove); ++i; } { bufferlist bl1; bl1.append("omap_header"); ObjectStore::Transaction t; t.omap_setheader(cid, hoid, bl1); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); t = ObjectStore::Transaction(); bufferlist bl2; bl2.append("value"); map<string, bufferlist> to_add; to_add.insert(pair<string, bufferlist>("key", bl2)); t.omap_setkeys(cid, hoid, to_add); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); bufferlist bl3; map<string, bufferlist> 
cur_attrs; r = store->omap_get(ch, hoid, &bl3, &cur_attrs); ASSERT_EQ(r, 0); ASSERT_EQ(cur_attrs.size(), size_t(1)); ASSERT_TRUE(bl_eq(bl1, bl3)); set<string> keys; r = store->omap_get_keys(ch, hoid, &keys); ASSERT_EQ(r, 0); ASSERT_EQ(keys.size(), size_t(1)); } // test omap_clear, omap_rmkey_range { { map<string,bufferlist> to_set; for (int n=0; n<10; ++n) { to_set[stringify(n)].append("foo"); } bufferlist h; h.append("header"); ObjectStore::Transaction t; t.remove(cid, hoid); t.touch(cid, hoid); t.omap_setheader(cid, hoid, h); t.omap_setkeys(cid, hoid, to_set); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; t.omap_rmkeyrange(cid, hoid, "3", "7"); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { bufferlist hdr; map<string,bufferlist> m; store->omap_get(ch, hoid, &hdr, &m); ASSERT_EQ(6u, hdr.length()); ASSERT_TRUE(m.count("2")); ASSERT_TRUE(!m.count("3")); ASSERT_TRUE(!m.count("6")); ASSERT_TRUE(m.count("7")); ASSERT_TRUE(m.count("8")); //cout << m << std::endl; ASSERT_EQ(6u, m.size()); } { ObjectStore::Transaction t; t.omap_clear(cid, hoid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { bufferlist hdr; map<string,bufferlist> m; store->omap_get(ch, hoid, &hdr, &m); ASSERT_EQ(0u, hdr.length()); ASSERT_EQ(0u, m.size()); } } ObjectStore::Transaction t; t.remove(cid, hoid); t.remove_collection(cid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } TEST_P(StoreTest, OMapIterator) { coll_t cid; ghobject_t hoid(hobject_t("tesomap", "", CEPH_NOSNAP, 0, 0, "")); int count = 0; auto ch = store->create_new_collection(cid); int r; { ObjectStore::Transaction t; t.create_collection(cid, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } map<string, bufferlist> attrs; { ObjectStore::Transaction t; t.touch(cid, hoid); t.omap_clear(cid, hoid); map<string, bufferlist> start_set; t.omap_setkeys(cid, hoid, start_set); r = queue_transaction(store, ch, 
std::move(t)); ASSERT_EQ(r, 0); } ObjectMap::ObjectMapIterator iter; bool correct; //basic iteration for (int i = 0; i < 100; i++) { if (!(i%5)) { std::cout << "On iteration " << i << std::endl; } bufferlist bl; // FileStore may deadlock two active iterators over the same data iter = ObjectMap::ObjectMapIterator(); iter = store->get_omap_iterator(ch, hoid); for (iter->seek_to_first(), count=0; iter->valid(); iter->next(), count++) { string key = iter->key(); bufferlist value = iter->value(); correct = attrs.count(key) && (string(value.c_str()) == string(attrs[key].c_str())); if (!correct) { if (attrs.count(key) > 0) { std::cout << "key " << key << "in omap , " << value.c_str() << " : " << attrs[key].c_str() << std::endl; } else std::cout << "key " << key << "should not exists in omap" << std::endl; } ASSERT_EQ(correct, true); } ASSERT_EQ((int)attrs.size(), count); // FileStore may deadlock an active iterator vs queue_transaction iter = ObjectMap::ObjectMapIterator(); char buf[100]; snprintf(buf, sizeof(buf), "%d", i); bl.clear(); bufferptr bp(buf, strlen(buf) + 1); bl.append(bp); map<string, bufferlist> to_add; to_add.insert(pair<string, bufferlist>("key-" + string(buf), bl)); attrs.insert(pair<string, bufferlist>("key-" + string(buf), bl)); ObjectStore::Transaction t; t.omap_setkeys(cid, hoid, to_add); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } iter = store->get_omap_iterator(ch, hoid); //lower bound string bound_key = "key-5"; iter->lower_bound(bound_key); correct = bound_key <= iter->key(); if (!correct) { std::cout << "lower bound, bound key is " << bound_key << " < iter key is " << iter->key() << std::endl; } ASSERT_EQ(correct, true); //upper bound iter->upper_bound(bound_key); correct = iter->key() > bound_key; if (!correct) { std::cout << "upper bound, bound key is " << bound_key << " >= iter key is " << iter->key() << std::endl; } ASSERT_EQ(correct, true); // FileStore may deadlock an active iterator vs queue_transaction iter = 
ObjectMap::ObjectMapIterator(); { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove_collection(cid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTest, XattrTest) { coll_t cid; ghobject_t hoid(hobject_t("tesomap", "", CEPH_NOSNAP, 0, 0, "")); bufferlist big; for (unsigned i = 0; i < 10000; ++i) { big.append('\0'); } bufferlist small; for (unsigned i = 0; i < 10; ++i) { small.append('\0'); } int r; auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); t.touch(cid, hoid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } map<string, bufferlist> attrs; { ObjectStore::Transaction t; t.setattr(cid, hoid, "attr1", small); attrs["attr1"] = small; t.setattr(cid, hoid, "attr2", big); attrs["attr2"] = big; t.setattr(cid, hoid, "attr3", small); attrs["attr3"] = small; t.setattr(cid, hoid, "attr1", small); attrs["attr1"] = small; t.setattr(cid, hoid, "attr4", big); attrs["attr4"] = big; t.setattr(cid, hoid, "attr3", big); attrs["attr3"] = big; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } map<string, bufferptr, less<>> aset; store->getattrs(ch, hoid, aset); ASSERT_EQ(aset.size(), attrs.size()); for (map<string, bufferptr>::iterator i = aset.begin(); i != aset.end(); ++i) { bufferlist bl; bl.push_back(i->second); ASSERT_TRUE(attrs[i->first] == bl); } { ObjectStore::Transaction t; t.rmattr(cid, hoid, "attr2"); attrs.erase("attr2"); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } aset.clear(); store->getattrs(ch, hoid, aset); ASSERT_EQ(aset.size(), attrs.size()); for (map<string, bufferptr>::iterator i = aset.begin(); i != aset.end(); ++i) { bufferlist bl; bl.push_back(i->second); ASSERT_TRUE(attrs[i->first] == bl); } bufferptr bp; r = store->getattr(ch, hoid, "attr2", bp); ASSERT_EQ(r, -ENODATA); r = store->getattr(ch, hoid, "attr3", bp); ASSERT_EQ(r, 0); bufferlist bl2; bl2.push_back(bp); ASSERT_TRUE(bl2 == attrs["attr3"]); 
ObjectStore::Transaction t; t.remove(cid, hoid); t.remove_collection(cid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } void colsplittest( ObjectStore *store, unsigned num_objects, unsigned common_suffix_size, bool clones ) { coll_t cid(spg_t(pg_t(0,52),shard_id_t::NO_SHARD)); coll_t tid(spg_t(pg_t(1<<common_suffix_size,52),shard_id_t::NO_SHARD)); auto ch = store->create_new_collection(cid); auto tch = store->create_new_collection(tid); int r = 0; { ObjectStore::Transaction t; t.create_collection(cid, common_suffix_size); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } bufferlist small; small.append("small"); { ObjectStore::Transaction t; for (uint32_t i = 0; i < (2 - (int)clones)*num_objects; ++i) { stringstream objname; objname << "obj" << i; ghobject_t a(hobject_t( objname.str(), "", CEPH_NOSNAP, i<<common_suffix_size, 52, "")); t.write(cid, a, 0, small.length(), small, CEPH_OSD_OP_FLAG_FADVISE_WILLNEED); if (clones) { objname << "-clone"; ghobject_t b(hobject_t( objname.str(), "", CEPH_NOSNAP, i<<common_suffix_size, 52, "")); t.clone(cid, a, b); } if (i % 100) { r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); t = ObjectStore::Transaction(); } } r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; t.create_collection(tid, common_suffix_size + 1); t.split_collection(cid, common_suffix_size+1, 1<<common_suffix_size, tid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } ch->flush(); // check vector<ghobject_t> objects; r = collection_list(store, ch, ghobject_t(), ghobject_t::get_max(), INT_MAX, &objects, 0); ASSERT_EQ(r, 0); ASSERT_EQ(objects.size(), num_objects); for (vector<ghobject_t>::iterator i = objects.begin(); i != objects.end(); ++i) { ASSERT_EQ(!!(i->hobj.get_hash() & (1<<common_suffix_size)), 0u); } objects.clear(); r = collection_list(store, tch, ghobject_t(), ghobject_t::get_max(), INT_MAX, &objects, 0); ASSERT_EQ(r, 0); 
ASSERT_EQ(objects.size(), num_objects);
  for (vector<ghobject_t>::iterator i = objects.begin();
       i != objects.end();
       ++i) {
    // Every object listed in the child (split-off) collection must have the
    // split bit of its hash set.
    ASSERT_EQ(!(i->hobj.get_hash() & (1<<common_suffix_size)), 0u);
  }

  // merge them again!
  {
    ObjectStore::Transaction t;
    t.merge_collection(tid, cid, common_suffix_size);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }

  // check and clean up
  ObjectStore::Transaction t;
  {
    vector<ghobject_t> objects;
    r = collection_list(store, ch, ghobject_t(), ghobject_t::get_max(),
                        INT_MAX, &objects, 0);
    ASSERT_EQ(r, 0);
    ASSERT_EQ(objects.size(), num_objects * 2); // both halves
    unsigned size = 0;
    for (vector<ghobject_t>::iterator i = objects.begin();
         i != objects.end();
         ++i) {
      t.remove(cid, *i);
      // Submit the transaction every ~100 removes so it does not grow
      // unbounded.
      if (++size > 100) {
        size = 0;
        r = queue_transaction(store, ch, std::move(t));
        ASSERT_EQ(r, 0);
        t = ObjectStore::Transaction();
      }
    }
  }
  t.remove_collection(cid);
  r = queue_transaction(store, ch, std::move(t));
  ASSERT_EQ(r, 0);
  ch->flush();
  // The merged-away child collection must be gone.
  ASSERT_TRUE(!store->collection_exists(tid));
}

// Split/merge round-trips at several object counts and hash-prefix sizes,
// with and without clones.
TEST_P(StoreTest, ColSplitTest0) {
  colsplittest(store.get(), 10, 5, false);
}
TEST_P(StoreTest, ColSplitTest1) {
  colsplittest(store.get(), 10000, 11, false);
}
TEST_P(StoreTest, ColSplitTest1Clones) {
  colsplittest(store.get(), 10000, 11, true);
}
TEST_P(StoreTest, ColSplitTest2) {
  colsplittest(store.get(), 100, 7, false);
}
TEST_P(StoreTest, ColSplitTest2Clones) {
  colsplittest(store.get(), 100, 7, true);
}
#if 0
TEST_P(StoreTest, ColSplitTest3) {
  colsplittest(store.get(), 100000, 25);
}
#endif

// Merge two sibling pg collections whose object counts differ wildly and
// verify every object survives the merge.
void test_merge_skewed(ObjectStore *store,
                       unsigned base, unsigned bits,
                       unsigned anum, unsigned bnum)
{
  cout << __func__ << " 0x" << std::hex << base << std::dec
       << " bits " << bits << " anum " << anum << " bnum " << bnum
       << std::endl;
  /*
    make merge source pgs have radically different # of objects in them,
    which should trigger different splitting in filestore, and verify that
    post-merge all objects are accessible.
*/ int r; coll_t a(spg_t(pg_t(base, 0), shard_id_t::NO_SHARD)); coll_t b(spg_t(pg_t(base | (1<<bits), 0), shard_id_t::NO_SHARD)); auto cha = store->create_new_collection(a); auto chb = store->create_new_collection(b); { ObjectStore::Transaction t; t.create_collection(a, bits + 1); r = queue_transaction(store, cha, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; t.create_collection(b, bits + 1); r = queue_transaction(store, chb, std::move(t)); ASSERT_EQ(r, 0); } bufferlist small; small.append("small"); string suffix = "ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooaaaaaaaaaa"; set<ghobject_t> aobjects, bobjects; { // fill a ObjectStore::Transaction t; for (unsigned i = 0; i < 1000; ++i) { string objname = "a" + stringify(i) + suffix; ghobject_t o(hobject_t( objname, "", CEPH_NOSNAP, i<<(bits+1) | base, 52, "")); aobjects.insert(o); t.write(a, o, 0, small.length(), small, 0); if (i % 100) { r = queue_transaction(store, cha, std::move(t)); ASSERT_EQ(r, 0); t = ObjectStore::Transaction(); } } r = queue_transaction(store, cha, std::move(t)); ASSERT_EQ(r, 0); } { // fill b ObjectStore::Transaction t; for (unsigned i = 0; i < 10; ++i) { string objname = "b" + stringify(i) + suffix; ghobject_t o(hobject_t( objname, "", CEPH_NOSNAP, (i<<(base+1)) | base | (1<<bits), 52, "")); bobjects.insert(o); t.write(b, o, 0, small.length(), small, 0); if (i % 100) { r = queue_transaction(store, chb, std::move(t)); ASSERT_EQ(r, 0); t = ObjectStore::Transaction(); } } r = queue_transaction(store, chb, std::move(t)); ASSERT_EQ(r, 0); } // merge b->a { ObjectStore::Transaction t; t.merge_collection(b, a, bits); r = queue_transaction(store, cha, std::move(t)); ASSERT_EQ(r, 0); } // verify { vector<ghobject_t> got; collection_list(store, cha, ghobject_t(), ghobject_t::get_max(), INT_MAX, &got, 0); set<ghobject_t> 
gotset; for (auto& o : got) { ASSERT_TRUE(aobjects.count(o) || bobjects.count(o)); gotset.insert(o); } // check both listing and stat-ability (different code paths!) struct stat st; for (auto& o : aobjects) { ASSERT_TRUE(gotset.count(o)); int r = store->stat(cha, o, &st, false); ASSERT_EQ(r, 0); } for (auto& o : bobjects) { ASSERT_TRUE(gotset.count(o)); int r = store->stat(cha, o, &st, false); ASSERT_EQ(r, 0); } } // clean up { ObjectStore::Transaction t; for (auto &o : aobjects) { t.remove(a, o); } r = queue_transaction(store, cha, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; for (auto &o : bobjects) { t.remove(a, o); } t.remove_collection(a); r = queue_transaction(store, cha, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTest, MergeSkewed) { if (string(GetParam()) != "filestore") return; // this is sufficient to exercise merges with different hashing levels test_merge_skewed(store.get(), 0xf, 4, 10, 10000); test_merge_skewed(store.get(), 0xf, 4, 10000, 10); /* // this covers a zillion variations that all boil down to the same thing for (unsigned base = 3; base < 0x1000; base *= 5) { unsigned bits; unsigned t = base; for (bits = 0; t; t >>= 1) { ++bits; } for (unsigned b = bits; b < bits + 10; b += 3) { for (auto anum : { 10, 1000, 10000 }) { for (auto bnum : { 10, 1000, 10000 }) { if (anum == bnum) { continue; } test_merge_skewed(store.get(), base, b, anum, bnum); } } } } */ } /** * This test tests adding two different groups * of objects, each with 1 common prefix and 1 * different prefix. We then remove half * in order to verify that the merging correctly * stops at the common prefix subdir. 
See bug #5273. */
// Creates objects under two hash prefixes (0xA1 / 0xB1), removes part of the
// 0xA1 group, and verifies existence bookkeeping stays correct throughout.
TEST_P(StoreTest, TwoHash) {
  coll_t cid;
  int r;
  auto ch = store->create_new_collection(cid);
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  std::cout << "Making objects" << std::endl;
  for (int i = 0; i < 360; ++i) {
    ObjectStore::Transaction t;
    ghobject_t o;
    o.hobj.pool = -1;
    if (i < 8) {
      // Only the first 8 iterations also create an object in the 0xA1 group.
      o.hobj.set_hash((i << 16) | 0xA1);
      t.touch(cid, o);
    }
    o.hobj.set_hash((i << 16) | 0xB1);
    t.touch(cid, o);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  std::cout << "Removing half" << std::endl;
  for (int i = 1; i < 8; ++i) {
    ObjectStore::Transaction t;
    ghobject_t o;
    o.hobj.pool = -1;
    o.hobj.set_hash((i << 16) | 0xA1);
    t.remove(cid, o);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  std::cout << "Checking" << std::endl;
  for (int i = 1; i < 8; ++i) {
    ObjectStore::Transaction t;
    ghobject_t o;
    o.hobj.set_hash((i << 16) | 0xA1);
    o.hobj.pool = -1;
    bool exists = store->exists(ch, o);
    ASSERT_EQ(exists, false);
  }
  {
    // The i == 0 member of the 0xA1 group was never removed (the removal
    // loop starts at i = 1) and must still exist.
    ghobject_t o;
    o.hobj.set_hash(0xA1);
    o.hobj.pool = -1;
    bool exists = store->exists(ch, o);
    ASSERT_EQ(exists, true);
  }
  std::cout << "Cleanup" << std::endl;
  for (int i = 0; i < 360; ++i) {
    ObjectStore::Transaction t;
    ghobject_t o;
    o.hobj.set_hash((i << 16) | 0xA1);
    o.hobj.pool = -1;
    t.remove(cid, o);
    o.hobj.set_hash((i << 16) | 0xB1);
    t.remove(cid, o);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  ObjectStore::Transaction t;
  t.remove_collection(cid);
  r = queue_transaction(store, ch, std::move(t));
  ASSERT_EQ(r, 0);
}

// Exercises collection_move_rename, including renaming onto an existing
// destination and recreating the source object in the same transaction.
TEST_P(StoreTest, Rename) {
  coll_t cid(spg_t(pg_t(0, 2122),shard_id_t::NO_SHARD));
  ghobject_t srcoid(hobject_t("src_oid", "", CEPH_NOSNAP, 0, 0, ""));
  ghobject_t dstoid(hobject_t("dest_oid", "", CEPH_NOSNAP, 0, 0, ""));
  bufferlist a, b;
  a.append("foo");
  b.append("bar");
  auto ch = store->create_new_collection(cid);
  int r;
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    t.write(cid, srcoid, 0,
a.length(), a);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  ASSERT_TRUE(store->exists(ch, srcoid));
  {
    // Rename src -> dst, then immediately recreate src with new content and
    // an xattr inside the very same transaction.
    ObjectStore::Transaction t;
    t.collection_move_rename(cid, srcoid, cid, dstoid);
    t.write(cid, srcoid, 0, b.length(), b);
    t.setattr(cid, srcoid, "attr", b);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  ASSERT_TRUE(store->exists(ch, srcoid));
  ASSERT_TRUE(store->exists(ch, dstoid));
  {
    // dst must hold the original payload ("foo"); src the rewritten one
    // ("bar").
    bufferlist bl;
    store->read(ch, srcoid, 0, 3, bl);
    ASSERT_TRUE(bl_eq(b, bl));
    store->read(ch, dstoid, 0, 3, bl);
    ASSERT_TRUE(bl_eq(a, bl));
  }
  {
    // Rename onto a destination that is removed in the same transaction.
    ObjectStore::Transaction t;
    t.remove(cid, dstoid);
    t.collection_move_rename(cid, srcoid, cid, dstoid);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  ASSERT_TRUE(store->exists(ch, dstoid));
  ASSERT_FALSE(store->exists(ch, srcoid));
  {
    bufferlist bl;
    store->read(ch, dstoid, 0, 3, bl);
    ASSERT_TRUE(bl_eq(b, bl));
  }
  {
    // Clean up.
    ObjectStore::Transaction t;
    t.remove(cid, dstoid);
    t.remove_collection(cid);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
}

// Builds a fully populated temp object (data, xattr, omap) and verifies
// collection_move_rename over an existing destination carries all three
// kinds of state across.
TEST_P(StoreTest, MoveRename) {
  coll_t cid(spg_t(pg_t(0, 212),shard_id_t::NO_SHARD));
  ghobject_t temp_oid(hobject_t("tmp_oid", "", CEPH_NOSNAP, 0, 0, ""));
  ghobject_t oid(hobject_t("dest_oid", "", CEPH_NOSNAP, 0, 0, ""));
  auto ch = store->create_new_collection(cid);
  int r;
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    t.touch(cid, oid);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  ASSERT_TRUE(store->exists(ch, oid));
  bufferlist data, attr;
  map<string, bufferlist> omap;
  data.append("data payload");
  attr.append("attr value");
  omap["omap_key"].append("omap value");
  {
    ObjectStore::Transaction t;
    t.touch(cid, temp_oid);
    t.write(cid, temp_oid, 0, data.length(), data);
    t.setattr(cid, temp_oid, "attr", attr);
    t.omap_setkeys(cid, temp_oid, omap);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  ASSERT_TRUE(store->exists(ch, temp_oid));
  {
    ObjectStore::Transaction t;
    t.remove(cid, oid);
t.collection_move_rename(cid, temp_oid, cid, oid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } ASSERT_TRUE(store->exists(ch, oid)); ASSERT_FALSE(store->exists(ch, temp_oid)); { bufferlist newdata; r = store->read(ch, oid, 0, 1000, newdata); ASSERT_GE(r, 0); ASSERT_TRUE(bl_eq(data, newdata)); bufferlist newattr; r = store->getattr(ch, oid, "attr", newattr); ASSERT_EQ(r, 0); ASSERT_TRUE(bl_eq(attr, newattr)); set<string> keys; keys.insert("omap_key"); map<string, bufferlist> newomap; r = store->omap_get_values(ch, oid, keys, &newomap); ASSERT_GE(r, 0); ASSERT_EQ(1u, newomap.size()); ASSERT_TRUE(newomap.count("omap_key")); ASSERT_TRUE(bl_eq(omap["omap_key"], newomap["omap_key"])); } { ObjectStore::Transaction t; t.remove(cid, oid); t.remove_collection(cid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTest, BigRGWObjectName) { coll_t cid(spg_t(pg_t(0,12),shard_id_t::NO_SHARD)); ghobject_t oid( hobject_t( "default.4106.50_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaa", "", CEPH_NOSNAP, 0x81920472, 12, ""), 15, shard_id_t::NO_SHARD); ghobject_t oid2(oid); oid2.generation = 17; ghobject_t oidhead(oid); oidhead.generation = ghobject_t::NO_GEN; auto ch = store->create_new_collection(cid); int r; { ObjectStore::Transaction t; t.create_collection(cid, 0); t.touch(cid, oidhead); t.collection_move_rename(cid, oidhead, cid, oid); t.touch(cid, oidhead); t.collection_move_rename(cid, oidhead, cid, oid2); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; t.remove(cid, oid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { vector<ghobject_t> objects; r = collection_list(store, ch, ghobject_t(), ghobject_t::get_max(), INT_MAX, &objects, 0); ASSERT_EQ(r, 0); ASSERT_EQ(objects.size(), 1u); ASSERT_EQ(objects[0], oid2); } ASSERT_FALSE(store->exists(ch, oid)); { ObjectStore::Transaction t; t.remove(cid, oid2); t.remove_collection(cid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTest, SetAllocHint) { coll_t cid; ghobject_t hoid(hobject_t("test_hint", "", CEPH_NOSNAP, 0, 0, "")); auto ch = store->create_new_collection(cid); int r; { ObjectStore::Transaction t; t.create_collection(cid, 0); t.touch(cid, hoid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; t.set_alloc_hint(cid, hoid, 4*1024*1024, 1024*4, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; t.remove(cid, hoid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; t.set_alloc_hint(cid, hoid, 4*1024*1024, 1024*4, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; t.remove_collection(cid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTest, TryMoveRename) { coll_t cid; ghobject_t hoid(hobject_t("test_hint", "", CEPH_NOSNAP, 0, -1, "")); 
ghobject_t hoid2(hobject_t("test_hint2", "", CEPH_NOSNAP, 0, -1, "")); auto ch = store->create_new_collection(cid); int r; { ObjectStore::Transaction t; t.create_collection(cid, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; t.try_rename(cid, hoid, hoid2); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; t.touch(cid, hoid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; t.try_rename(cid, hoid, hoid2); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } struct stat st; ASSERT_EQ(store->stat(ch, hoid, &st), -ENOENT); ASSERT_EQ(store->stat(ch, hoid2, &st), 0); } #if defined(WITH_BLUESTORE) TEST_P(StoreTest, BluestoreOnOffCSumTest) { if (string(GetParam()) != "bluestore") return; SetVal(g_conf(), "bluestore_csum_type", "crc32c"); g_conf().apply_changes(nullptr); int r; coll_t cid; ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP))); { auto ch = store->open_collection(cid); ASSERT_FALSE(ch); } auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); cerr << "Creating collection " << cid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { //write with csum enabled followed by read with csum disabled size_t block_size = 64*1024; ObjectStore::Transaction t; bufferlist bl, orig; bl.append(std::string(block_size, 'a')); orig = bl; t.remove(cid, hoid); t.set_alloc_hint(cid, hoid, 4*1024*1024, 1024*8, 0); t.write(cid, hoid, 0, bl.length(), bl); cerr << "Remove then create" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); SetVal(g_conf(), "bluestore_csum_type", "none"); g_conf().apply_changes(nullptr); bufferlist in; r = store->read(ch, hoid, 0, block_size, in); ASSERT_EQ((int)block_size, r); ASSERT_TRUE(bl_eq(orig, in)); } { //write with csum disabled followed by read with csum enabled size_t 
block_size = 64*1024; ObjectStore::Transaction t; bufferlist bl, orig; bl.append(std::string(block_size, 'a')); orig = bl; t.remove(cid, hoid); t.set_alloc_hint(cid, hoid, 4*1024*1024, 1024*8, 0); t.write(cid, hoid, 0, bl.length(), bl); cerr << "Remove then create" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); SetVal(g_conf(), "bluestore_csum_type", "crc32c"); g_conf().apply_changes(nullptr); bufferlist in; r = store->read(ch, hoid, 0, block_size, in); ASSERT_EQ((int)block_size, r); ASSERT_TRUE(bl_eq(orig, in)); } { //'mixed' non-overlapping writes to the same blob ObjectStore::Transaction t; bufferlist bl, orig; size_t block_size = 8000; bl.append(std::string(block_size, 'a')); orig = bl; t.remove(cid, hoid); t.write(cid, hoid, 0, bl.length(), bl); cerr << "Remove then create" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); SetVal(g_conf(), "bluestore_csum_type", "none"); g_conf().apply_changes(nullptr); ObjectStore::Transaction t2; t2.write(cid, hoid, block_size*2, bl.length(), bl); cerr << "Append 'unprotected'" << std::endl; r = queue_transaction(store, ch, std::move(t2)); ASSERT_EQ(r, 0); bufferlist in; r = store->read(ch, hoid, 0, block_size, in); ASSERT_EQ((int)block_size, r); ASSERT_TRUE(bl_eq(orig, in)); in.clear(); r = store->read(ch, hoid, block_size*2, block_size, in); ASSERT_EQ((int)block_size, r); ASSERT_TRUE(bl_eq(orig, in)); SetVal(g_conf(), "bluestore_csum_type", "crc32c"); g_conf().apply_changes(nullptr); in.clear(); r = store->read(ch, hoid, 0, block_size, in); ASSERT_EQ((int)block_size, r); ASSERT_TRUE(bl_eq(orig, in)); in.clear(); r = store->read(ch, hoid, block_size*2, block_size, in); ASSERT_EQ((int)block_size, r); ASSERT_TRUE(bl_eq(orig, in)); } { //partially blob overwrite under a different csum enablement mode ObjectStore::Transaction t; bufferlist bl, orig, orig2; size_t block_size0 = 0x10000; size_t block_size = 9000; size_t block_size2 = 5000; 
bl.append(std::string(block_size0, 'a')); t.remove(cid, hoid); t.set_alloc_hint(cid, hoid, 4*1024*1024, 1024*8, 0); t.write(cid, hoid, 0, bl.length(), bl); cerr << "Remove then create" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); SetVal(g_conf(), "bluestore_csum_type", "none"); g_conf().apply_changes(nullptr); ObjectStore::Transaction t2; bl.clear(); bl.append(std::string(block_size, 'b')); t2.write(cid, hoid, 0, bl.length(), bl); t2.write(cid, hoid, block_size0, bl.length(), bl); cerr << "Overwrite with unprotected data" << std::endl; r = queue_transaction(store, ch, std::move(t2)); ASSERT_EQ(r, 0); orig = bl; orig2 = bl; orig.append( std::string(block_size0 - block_size, 'a')); bufferlist in; r = store->read(ch, hoid, 0, block_size0, in); ASSERT_EQ((int)block_size0, r); ASSERT_TRUE(bl_eq(orig, in)); r = store->read(ch, hoid, block_size0, block_size, in); ASSERT_EQ((int)block_size, r); ASSERT_TRUE(bl_eq(orig2, in)); SetVal(g_conf(), "bluestore_csum_type", "crc32c"); g_conf().apply_changes(nullptr); ObjectStore::Transaction t3; bl.clear(); bl.append(std::string(block_size2, 'c')); t3.write(cid, hoid, block_size0, bl.length(), bl); cerr << "Overwrite with protected data" << std::endl; r = queue_transaction(store, ch, std::move(t3)); ASSERT_EQ(r, 0); in.clear(); orig = bl; orig.append( std::string(block_size - block_size2, 'b')); r = store->read(ch, hoid, block_size0, block_size, in); ASSERT_EQ((int)block_size, r); ASSERT_TRUE(bl_eq(orig, in)); } { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove_collection(cid); cerr << "Cleaning" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } #endif INSTANTIATE_TEST_SUITE_P( ObjectStore, StoreTest, ::testing::Values( "memstore", #if defined(WITH_BLUESTORE) "bluestore", #endif "kstore")); // Note: instantiate all stores to preserve store numbering order only INSTANTIATE_TEST_SUITE_P( ObjectStore, StoreTestSpecificAUSize, ::testing::Values( "memstore", #if 
defined(WITH_BLUESTORE) "bluestore", #endif "kstore")); // Note: instantiate all stores to preserve store numbering order only INSTANTIATE_TEST_SUITE_P( ObjectStore, StoreTestOmapUpgrade, ::testing::Values( "memstore", #if defined(WITH_BLUESTORE) "bluestore", #endif "kstore")); #if defined(WITH_BLUESTORE) INSTANTIATE_TEST_SUITE_P( ObjectStore, StoreTestDeferredSetup, ::testing::Values( "bluestore")); #endif struct deferred_test_t { uint32_t bdev_block_size; uint32_t min_alloc_size; uint32_t max_blob_size; uint32_t prefer_deferred_size; }; void PrintTo(const deferred_test_t& t, ::std::ostream* os) { *os << t.bdev_block_size << "/" << t.min_alloc_size << "/" << t.max_blob_size << "/" << t.prefer_deferred_size; } class DeferredWriteTest : public StoreTestFixture, public ::testing::WithParamInterface<deferred_test_t> { public: DeferredWriteTest() : StoreTestFixture("bluestore") {} void SetUp() override { //do nothing } protected: void DeferredSetup() { StoreTestFixture::SetUp(); } public: std::vector<uint32_t> offsets = {0, 3000, 4096, 20000, 32768, 65000, 65536, 80000, 128 * 1024}; std::vector<uint32_t> lengths = {1, 1000, 4096, 12000, 32768, 30000, 80000, 128 * 1024}; }; TEST_P(DeferredWriteTest, NewData) { const bool print = false; deferred_test_t t = GetParam(); SetVal(g_conf(), "bdev_block_size", stringify(t.bdev_block_size).c_str()); SetVal(g_conf(), "bluestore_min_alloc_size", stringify(t.min_alloc_size).c_str()); SetVal(g_conf(), "bluestore_max_blob_size", stringify(t.max_blob_size).c_str()); SetVal(g_conf(), "bluestore_prefer_deferred_size", stringify(t.prefer_deferred_size).c_str()); g_conf().apply_changes(nullptr); DeferredSetup(); int r; coll_t cid; const PerfCounters* logger = store->get_perf_counters(); ObjectStore::CollectionHandle ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { for (auto offset:offsets) { for (auto length:lengths) { 
std::string hname = fmt::format("test-{}-{}", offset, length); ghobject_t hoid(hobject_t(hname, "", CEPH_NOSNAP, 0, -1, "")); { ObjectStore::Transaction t; t.touch(cid, hoid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } if (print) std::cout << hname << std::endl; auto w_new = logger->get(l_bluestore_write_new); auto w_big_deferred = logger->get(l_bluestore_write_big_deferred); auto i_deferred_w = logger->get(l_bluestore_issued_deferred_writes); { ObjectStore::Transaction t; bufferlist bl; bl.append(std::string(length, 'x')); t.write(cid, hoid, offset, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } uint32_t first_db = offset / t.bdev_block_size; uint32_t last_db = (offset + length - 1) / t.bdev_block_size; uint32_t write_size = (last_db - first_db + 1) * t.bdev_block_size; if (write_size < t.prefer_deferred_size) { // expect no direct writes ASSERT_EQ(w_new , logger->get(l_bluestore_write_new)); } else { // expect no deferred ASSERT_EQ(w_big_deferred , logger->get(l_bluestore_write_big_deferred)); ASSERT_EQ(i_deferred_w , logger->get(l_bluestore_issued_deferred_writes)); } } } } } #if defined(WITH_BLUESTORE) INSTANTIATE_TEST_SUITE_P( BlueStore, DeferredWriteTest, ::testing::Values( // bdev alloc blob deferred deferred_test_t{4 * 1024, 4 * 1024, 16 * 1024, 32 * 1024}, deferred_test_t{4 * 1024, 16 * 1024, 64 * 1024, 64 * 1024}, deferred_test_t{4 * 1024, 64 * 1024, 64 * 1024, 4 * 1024}, deferred_test_t{4 * 1024, 4 * 1024, 64 * 1024, 0 * 1024}, deferred_test_t{4 * 1024, 16 * 1024, 32 * 1024, 32 * 1024}, deferred_test_t{4 * 1024, 16 * 1024, 64 * 1024, 128 * 1024} )); #endif void doMany4KWritesTest(ObjectStore* store, unsigned max_objects, unsigned max_ops, unsigned max_object_size, unsigned max_write_size, unsigned write_alignment) { MixedGenerator gen(555); gen_type rng(time(NULL)); coll_t cid(spg_t(pg_t(0,555), shard_id_t::NO_SHARD)); store_statfs_t res_stat; 
SyntheticWorkloadState test_obj(store, &gen, &rng, cid, max_object_size, max_write_size, write_alignment); test_obj.init(); for (unsigned i = 0; i < max_objects; ++i) { if (!(i % 500)) cerr << "seeding object " << i << std::endl; test_obj.touch(); } for (unsigned i = 0; i < max_ops; ++i) { if (!(i % 200)) { cerr << "Op " << i << std::endl; test_obj.print_internal_state(); } test_obj.write(); } test_obj.wait_for_done(); test_obj.statfs(res_stat); if (!(res_stat.data_stored <= max_object_size) || !(res_stat.allocated <= max_object_size)) { // this will provide more insight on the mismatch and // helps to avoid any races during stats collection test_obj.fsck(false); // retrieving stats once again and assert if still broken test_obj.statfs(res_stat); ASSERT_LE(res_stat.data_stored, max_object_size); ASSERT_LE(res_stat.allocated, max_object_size); } test_obj.shutdown(); } TEST_P(StoreTestSpecificAUSize, Many4KWritesTest) { if (string(GetParam()) != "bluestore") return; if (smr) { cout << "SKIP: no deferred; assertions around res_stat.allocated don't apply" << std::endl; return; } StartDeferred(0x10000); const unsigned max_object = 4*1024*1024; doMany4KWritesTest(store.get(), 1, 1000, max_object, 4*1024, 0); } TEST_P(StoreTestSpecificAUSize, Many4KWritesNoCSumTest) { if (string(GetParam()) != "bluestore") return; if (smr) { cout << "SKIP: no deferred; assertions around res_stat.allocated don't apply" << std::endl; return; } StartDeferred(0x10000); SetVal(g_conf(), "bluestore_csum_type", "none"); g_ceph_context->_conf.apply_changes(nullptr); const unsigned max_object = 4*1024*1024; doMany4KWritesTest(store.get(), 1, 1000, max_object, 4*1024, 0 ); } TEST_P(StoreTestSpecificAUSize, TooManyBlobsTest) { if (string(GetParam()) != "bluestore") return; if (smr) { cout << "SKIP: no deferred; assertions around res_stat.allocated don't apply" << std::endl; return; } StartDeferred(0x10000); const unsigned max_object = 4*1024*1024; doMany4KWritesTest(store.get(), 1, 1000, max_object, 
4*1024, 0); } #if defined(WITH_BLUESTORE) void get_mempool_stats(uint64_t* total_bytes, uint64_t* total_items) { uint64_t meta_allocated = mempool::bluestore_cache_meta::allocated_bytes(); uint64_t onode_allocated = mempool::bluestore_cache_onode::allocated_bytes(); uint64_t other_allocated = mempool::bluestore_cache_other::allocated_bytes(); uint64_t meta_items = mempool::bluestore_cache_meta::allocated_items(); uint64_t onode_items = mempool::bluestore_cache_onode::allocated_items(); uint64_t other_items = mempool::bluestore_cache_other::allocated_items(); cout << "meta(" << meta_allocated << "/" << meta_items << ") onode(" << onode_allocated << "/" << onode_items << ") other(" << other_allocated << "/" << other_items << ")" << std::endl; *total_bytes = meta_allocated + onode_allocated + other_allocated; *total_items = onode_items; } TEST_P(StoreTestSpecificAUSize, OnodeSizeTracking) { if (string(GetParam()) != "bluestore") return; size_t block_size = 4096; StartDeferred(block_size); SetVal(g_conf(), "bluestore_compression_mode", "none"); SetVal(g_conf(), "bluestore_csum_type", "none"); SetVal(g_conf(), "bluestore_cache_size_hdd", "400000000"); SetVal(g_conf(), "bluestore_cache_size_ssd", "400000000"); g_conf().apply_changes(nullptr); int r; coll_t cid; ghobject_t hoid(hobject_t("test_hint", "", CEPH_NOSNAP, 0, -1, "")); size_t obj_size = 4 * 1024 * 1024; uint64_t total_bytes_prev; uint64_t total_bytes, total_bytes2; uint64_t total_onodes; get_mempool_stats(&total_bytes, &total_onodes); total_bytes_prev = total_bytes; // 5u for onode_cache_shards vector ASSERT_EQ(total_onodes, 5u); ASSERT_EQ(total_bytes, 40u); auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; bufferlist bl, orig, orig2; bl.append(std::string(obj_size, 'a')); t.write(cid, hoid, 0, bl.length(), bl); r = queue_transaction(store, ch, std::move(t)); 
ASSERT_EQ(r, 0); } get_mempool_stats(&total_bytes, &total_onodes); ASSERT_GT(total_bytes - total_bytes_prev, 0u); ASSERT_EQ(total_onodes, 6u); { ObjectStore::Transaction t; t.truncate(cid, hoid, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } for(size_t i = 0; i < 1; ++i) { bufferlist bl; bl.append(std::string(block_size * (i+1), 'a')); for( size_t j = 0; j < obj_size; j+= bl.length()) { ObjectStore::Transaction t; t.write(cid, hoid, j, bl.length(), bl); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } get_mempool_stats(&total_bytes2, &total_onodes); ASSERT_NE(total_bytes2, 0u); ASSERT_EQ(total_onodes, 6u); } { cout <<" mempool dump:\n"; JSONFormatter f(true); f.open_object_section("transaction"); mempool::dump(&f); f.close_section(); f.flush(cout); cout << std::endl; } { bufferlist bl; for (size_t i = 0; i < obj_size; i += 0x1000) { store->read(ch, hoid, i, 0x1000, bl); } } get_mempool_stats(&total_bytes, &total_onodes); ASSERT_NE(total_bytes, 0u); ASSERT_EQ(total_onodes, 6u); { cout <<" mempool dump:\n"; JSONFormatter f(true); f.open_object_section("transaction"); mempool::dump(&f); f.close_section(); f.flush(cout); cout << std::endl; } { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove_collection(cid); cerr << "Cleaning" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTestSpecificAUSize, BlobReuseOnOverwrite) { if (string(GetParam()) != "bluestore") return; size_t block_size = 4096; StartDeferred(block_size); SetVal(g_conf(), "bluestore_max_blob_size", "65536"); g_conf().apply_changes(nullptr); int r; coll_t cid; ghobject_t hoid(hobject_t("test_hint", "", CEPH_NOSNAP, 0, -1, "")); const PerfCounters* logger = store->get_perf_counters(); auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; bufferlist bl; 
bl.append(std::string(block_size * 2, 'a')); t.write(cid, hoid, 0, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_WILLNEED); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { // overwrite at the beginning ObjectStore::Transaction t; bufferlist bl; bl.append(std::string(block_size, 'b')); t.write(cid, hoid, 0, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_WILLNEED); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { // append ObjectStore::Transaction t; bufferlist bl; bl.append(std::string(block_size * 2, 'c')); t.write(cid, hoid, block_size * 2, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_WILLNEED); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { // append with a gap ObjectStore::Transaction t; bufferlist bl; bl.append(std::string(block_size * 2, 'd')); t.write(cid, hoid, block_size * 5, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_WILLNEED); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { // We need to issue a read to trigger cache stat update that refresh // perf counters. additionally we need to wait some time for mempool // thread to update stats. sleep(1); bufferlist bl, expected; r = store->read(ch, hoid, 0, block_size, bl); ASSERT_EQ(r, (int)block_size); expected.append(string(block_size, 'b')); ASSERT_TRUE(bl_eq(expected, bl)); ASSERT_EQ(logger->get(l_bluestore_blobs), 1u); ASSERT_EQ(logger->get(l_bluestore_extents), 2u); } { // overwrite at end ObjectStore::Transaction t; bufferlist bl; bl.append(std::string(block_size * 2, 'e')); // Currently we are unable to reuse blob when overwriting in a single step t.write(cid, hoid, block_size * 6, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_WILLNEED); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { // We need to issue a read to trigger cache stat update that refresh // perf counters. additionally we need to wait some time for mempool // thread to update stats. 
sleep(1); bufferlist bl, expected; r = store->read(ch, hoid, 0, block_size, bl); ASSERT_EQ(r, (int)block_size); expected.append(string(block_size, 'b')); ASSERT_TRUE(bl_eq(expected, bl)); ASSERT_EQ(logger->get(l_bluestore_blobs), 1u); ASSERT_EQ(logger->get(l_bluestore_extents), 2u); } { // fill the gap ObjectStore::Transaction t; bufferlist bl; bl.append(std::string(block_size, 'f')); t.write(cid, hoid, block_size * 4, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_WILLNEED); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { // we need to wait some time for mempool // thread to update stats to be able to check blob/extent numbers from // perf counters. sleep(1); bufferlist bl, expected; r = store->read(ch, hoid, 0, block_size, bl); ASSERT_EQ(r, (int)block_size); expected.append(string(block_size, 'b')); ASSERT_TRUE(bl_eq(expected, bl)); bl.clear(); expected.clear(); r = store->read(ch, hoid, block_size, block_size, bl); ASSERT_EQ(r, (int)block_size); expected.append(string(block_size, 'a')); ASSERT_TRUE(bl_eq(expected, bl)); bl.clear(); expected.clear(); r = store->read(ch, hoid, block_size * 2, block_size * 2, bl); ASSERT_EQ(r, (int)block_size * 2); expected.append(string(block_size * 2, 'c')); ASSERT_TRUE(bl_eq(expected, bl)); bl.clear(); expected.clear(); r = store->read(ch, hoid, block_size * 4, block_size, bl); ASSERT_EQ(r, (int)block_size); expected.append(string(block_size, 'f')); ASSERT_TRUE(bl_eq(expected, bl)); bl.clear(); expected.clear(); r = store->read(ch, hoid, block_size * 5, block_size, bl); ASSERT_EQ(r, (int)block_size); expected.append(string(block_size, 'd')); ASSERT_TRUE(bl_eq(expected, bl)); bl.clear(); expected.clear(); r = store->read(ch, hoid, block_size * 5, block_size * 3, bl); ASSERT_EQ(r, (int)block_size * 3); expected.append(string(block_size, 'd')); expected.append(string(block_size * 2, 'e')); ASSERT_TRUE(bl_eq(expected, bl)); } ASSERT_EQ(logger->get(l_bluestore_blobs), 1u); ASSERT_EQ(logger->get(l_bluestore_extents), 
1u); { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove_collection(cid); cerr << "Cleaning" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTestSpecificAUSize, ZeroBlockDetectionSmallAppend) { CephContext *cct = (new CephContext(CEPH_ENTITY_TYPE_CLIENT))->get(); if (string(GetParam()) != "bluestore" || !cct->_conf->bluestore_zero_block_detection) { GTEST_SKIP() << "not bluestore or bluestore_zero_block_detection=false, skipping"; } size_t block_size = 65536; StartDeferred(block_size); int r; coll_t cid; ghobject_t hoid(hobject_t("test", "", CEPH_NOSNAP, 0, -1, "")); const PerfCounters* logger = store->get_perf_counters(); auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { // [1] append zeros ObjectStore::Transaction t; bufferlist bl; bl.append_zero(4096); t.write(cid, hoid, 0, bl.length(), bl); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); ASSERT_EQ(logger->get(l_bluestore_write_small), 1u); ASSERT_EQ(logger->get(l_bluestore_write_small_bytes), 4096u); ASSERT_EQ(logger->get(l_bluestore_write_small_skipped), 1u); ASSERT_EQ(logger->get(l_bluestore_write_small_skipped_bytes), 4096u); bufferlist in; r = store->read(ch, hoid, 0, 0x4000, in); ASSERT_EQ(4096, r); ASSERT_TRUE(in.is_zero()); } { // [2] append non-zeros ObjectStore::Transaction t; bufferlist bl; bl.append(std::string(4096, 'c')); t.write(cid, hoid, 4096, bl.length(), bl); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); ASSERT_EQ(logger->get(l_bluestore_write_small), 2u); ASSERT_EQ(logger->get(l_bluestore_write_small_bytes), 4096u*2); ASSERT_EQ(logger->get(l_bluestore_write_small_skipped), 1u); ASSERT_EQ(logger->get(l_bluestore_write_small_skipped_bytes), 4096u); bufferlist in, _exp; r = store->read(ch, hoid, 0, 0x4000, in); ASSERT_EQ(4096 * 2, r); _exp.append_zero(4096); _exp.append(bl); 
ASSERT_TRUE(bl_eq(_exp, in));
  }
  {
    ObjectStore::Transaction t;
    t.remove(cid, hoid);
    t.remove_collection(cid);
    cerr << "Cleaning" << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
}

TEST_P(StoreTestSpecificAUSize, ZeroBlockDetectionSmallOverwrite) {
  // NOTE(review): this CephContext reference is never put() and leaks;
  // the sibling zero-block tests share the pattern -- consider releasing
  // it once the config flag has been read.
  CephContext *cct = (new CephContext(CEPH_ENTITY_TYPE_CLIENT))->get();
  if (string(GetParam()) != "bluestore" ||
      !cct->_conf->bluestore_zero_block_detection) {
    GTEST_SKIP() << "not bluestore or bluestore_zero_block_detection=false, skipping";
  }
  if (smr) {
    GTEST_SKIP() << "smr, skipping";
  }

  size_t block_size = 65536;
  StartDeferred(block_size);
  int r;
  coll_t cid;
  ghobject_t hoid(hobject_t("test", "", CEPH_NOSNAP, 0, -1, ""));
  const PerfCounters* logger = store->get_perf_counters();
  auto ch = store->create_new_collection(cid);
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  { // {setting up the scenario} append non-zeros
    ObjectStore::Transaction t;
    bufferlist bl;
    bl.append(std::string(4096, 'c'));
    t.write(cid, hoid, 0, bl.length(), bl);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
    // First small write: counted, nothing skipped by zero detection.
    ASSERT_EQ(logger->get(l_bluestore_write_small), 1u);
    ASSERT_EQ(logger->get(l_bluestore_write_small_bytes), 4096u);
    ASSERT_EQ(logger->get(l_bluestore_write_small_skipped), 0u);
    ASSERT_EQ(logger->get(l_bluestore_write_small_skipped_bytes), 0u);

    bufferlist in, _exp;
    r = store->read(ch, hoid, 0, 0x4000, in);
    ASSERT_EQ(4096, r);
    _exp.append(bl);
    ASSERT_TRUE(bl_eq(_exp, in));
  }
  { // [1] overwrite non-zeros with zeros
    ObjectStore::Transaction t;
    bufferlist bl;
    bl.append_zero(4096);
    t.write(cid, hoid, 0, bl.length(), bl);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
    // Overwrites must not be elided even when the payload is all zeros,
    // so the skipped counters stay at zero.
    ASSERT_EQ(logger->get(l_bluestore_write_small), 2u);
    ASSERT_EQ(logger->get(l_bluestore_write_small_bytes), 4096u*2);
    ASSERT_EQ(logger->get(l_bluestore_write_small_skipped), 0u);
    ASSERT_EQ(logger->get(l_bluestore_write_small_skipped_bytes), 0u);
bufferlist in; r = store->read(ch, hoid, 0, 0x4000, in); ASSERT_EQ(4096, r); ASSERT_TRUE(in.is_zero()); } { // [2] overwrite zeros with non-zeros ObjectStore::Transaction t; bufferlist bl; bl.append(std::string(4096, 'c')); t.write(cid, hoid, 0, bl.length(), bl); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); ASSERT_EQ(logger->get(l_bluestore_write_small), 3u); ASSERT_EQ(logger->get(l_bluestore_write_small_bytes), 4096u*3); ASSERT_EQ(logger->get(l_bluestore_write_small_skipped), 0u); ASSERT_EQ(logger->get(l_bluestore_write_small_skipped_bytes), 0u); bufferlist in, _exp; r = store->read(ch, hoid, 0, 0x4000, in); ASSERT_EQ(4096, r); _exp.append(bl); ASSERT_TRUE(bl_eq(_exp, in)); } { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove_collection(cid); cerr << "Cleaning" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTestSpecificAUSize, ZeroBlockDetectionBigAppend) { CephContext *cct = (new CephContext(CEPH_ENTITY_TYPE_CLIENT))->get(); if (string(GetParam()) != "bluestore" || !cct->_conf->bluestore_zero_block_detection) { GTEST_SKIP() << "not bluestore or bluestore_zero_block_detection=false, skipping"; } size_t block_size = 4096; StartDeferred(block_size); int r; coll_t cid; ghobject_t hoid(hobject_t("test", "", CEPH_NOSNAP, 0, -1, "")); const PerfCounters* logger = store->get_perf_counters(); auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { // [1] append zeros ObjectStore::Transaction t; bufferlist bl; bl.append_zero(block_size * 2); t.write(cid, hoid, 0, bl.length(), bl); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); ASSERT_EQ(logger->get(l_bluestore_write_big), 1u); ASSERT_EQ(logger->get(l_bluestore_write_big_bytes), 4096u*2); ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 0u); ASSERT_EQ(logger->get(l_bluestore_write_big_skipped_blobs), 1u); 
ASSERT_EQ(logger->get(l_bluestore_write_big_skipped_bytes), 4096u*2); bufferlist in; r = store->read(ch, hoid, 0, block_size * 8, in); ASSERT_EQ(block_size * 2, r); ASSERT_TRUE(in.is_zero()); } { // [2] append non-zeros ObjectStore::Transaction t; bufferlist bl; bl.append(std::string(block_size * 2, 'c')); t.write(cid, hoid, block_size * 2, bl.length(), bl); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); ASSERT_EQ(logger->get(l_bluestore_write_big), 2u); ASSERT_EQ(logger->get(l_bluestore_write_big_bytes), 4096u*4); ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 1u); ASSERT_EQ(logger->get(l_bluestore_write_big_skipped_blobs), 1u); ASSERT_EQ(logger->get(l_bluestore_write_big_skipped_bytes), 4096u*2); bufferlist in, _exp; r = store->read(ch, hoid, 0, block_size * 8, in); ASSERT_EQ(block_size * 4, r); _exp.append_zero(block_size * 2); _exp.append(bl); ASSERT_TRUE(bl_eq(_exp, in)); } { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove_collection(cid); cerr << "Cleaning" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTestSpecificAUSize, ZeroBlockDetectionBigOverwrite) { CephContext *cct = (new CephContext(CEPH_ENTITY_TYPE_CLIENT))->get(); if (string(GetParam()) != "bluestore" || !cct->_conf->bluestore_zero_block_detection) { GTEST_SKIP() << "not bluestore or bluestore_zero_block_detection=false, skipping"; } if (smr) { GTEST_SKIP() << "smr, skipping"; } size_t block_size = 4096; StartDeferred(block_size); int r; coll_t cid; ghobject_t hoid(hobject_t("test", "", CEPH_NOSNAP, 0, -1, "")); const PerfCounters* logger = store->get_perf_counters(); auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { // {setting up the scenario} append non-zeros ObjectStore::Transaction t; bufferlist bl; bl.append(std::string(block_size * 2, 'c')); t.write(cid, hoid, 0, bl.length(), bl); r = 
queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); ASSERT_EQ(logger->get(l_bluestore_write_big), 1u); ASSERT_EQ(logger->get(l_bluestore_write_big_bytes), 4096u*2); ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 1u); ASSERT_EQ(logger->get(l_bluestore_write_big_skipped_blobs), 0u); ASSERT_EQ(logger->get(l_bluestore_write_big_skipped_bytes), 0u); bufferlist in, _exp; r = store->read(ch, hoid, 0, block_size * 8, in); ASSERT_EQ(block_size * 2, r); _exp.append(bl); ASSERT_TRUE(bl_eq(_exp, in)); } { // [1] overwrite non-zeros with zeros ObjectStore::Transaction t; bufferlist bl; bl.append_zero(block_size * 2); t.write(cid, hoid, 0, bl.length(), bl); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); ASSERT_EQ(logger->get(l_bluestore_write_big), 2u); ASSERT_EQ(logger->get(l_bluestore_write_big_bytes), 4096u*4); ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 1u); ASSERT_EQ(logger->get(l_bluestore_write_big_skipped_blobs), 1u); ASSERT_EQ(logger->get(l_bluestore_write_big_skipped_bytes), 4096u*2); bufferlist in; r = store->read(ch, hoid, 0, block_size * 8, in); ASSERT_EQ(block_size * 2, r); ASSERT_TRUE(in.is_zero()); } { // [2] overwrite zeros with non-zeros ObjectStore::Transaction t; bufferlist bl; bl.append(std::string(block_size * 2, 'c')); t.write(cid, hoid, 0, bl.length(), bl); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); ASSERT_EQ(logger->get(l_bluestore_write_big), 3u); ASSERT_EQ(logger->get(l_bluestore_write_big_bytes), 4096u*6); ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 2u); ASSERT_EQ(logger->get(l_bluestore_write_big_skipped_blobs), 1u); ASSERT_EQ(logger->get(l_bluestore_write_big_skipped_bytes), 4096u*2); bufferlist in, _exp; r = store->read(ch, hoid, 0, block_size * 8, in); ASSERT_EQ(block_size * 2, r); _exp.append(bl); ASSERT_TRUE(bl_eq(_exp, in)); } { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove_collection(cid); cerr << "Cleaning" << std::endl; r = queue_transaction(store, ch, 
std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTestSpecificAUSize, DeferredOnBigOverwrite) { if (string(GetParam()) != "bluestore") return; if (smr) { cout << "SKIP: no deferred" << std::endl; return; } size_t block_size = 4096; StartDeferred(block_size); SetVal(g_conf(), "bluestore_max_blob_size", "131072"); SetVal(g_conf(), "bluestore_prefer_deferred_size", "65536"); g_conf().apply_changes(nullptr); int r; coll_t cid; ghobject_t hoid(hobject_t("test", "", CEPH_NOSNAP, 0, -1, "")); ghobject_t hoid2(hobject_t("test2", "", CEPH_NOSNAP, 0, -1, "")); PerfCounters* logger = const_cast<PerfCounters*>(store->get_perf_counters()); auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; bufferlist bl, bl2; bl.append(std::string(block_size * 2, 'c')); bl2.append(std::string(block_size * 3, 'd')); t.write(cid, hoid, 0, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE); t.set_alloc_hint(cid, hoid2, block_size * 4, block_size * 4, CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_READ); t.write(cid, hoid2, 0, bl2.length(), bl2, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } ASSERT_EQ(logger->get(l_bluestore_write_big), 2u); ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 0u); { struct store_statfs_t statfs; int r = store->statfs(&statfs); ASSERT_EQ(r, 0); ASSERT_EQ(statfs.data_stored, (unsigned)block_size * 5); ASSERT_LE(statfs.allocated, (unsigned)block_size * 5); } // overwrite at the beginning, 4K alignment { ObjectStore::Transaction t; bufferlist bl; bl.append(std::string(block_size, 'b')); t.write(cid, hoid, 0, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } ASSERT_EQ(logger->get(l_bluestore_write_big), 3u); ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 1u); { bufferlist bl, expected; r = 
store->read(ch, hoid, 0, block_size, bl);
    ASSERT_EQ(r, (int)block_size);
    expected.append(string(block_size, 'b'));
    ASSERT_TRUE(bl_eq(expected, bl));
  }
  {
    bufferlist bl, expected;
    r = store->read(ch, hoid, block_size, block_size, bl);
    ASSERT_EQ(r, (int)block_size);
    expected.append(string(block_size, 'c'));
    ASSERT_TRUE(bl_eq(expected, bl));
  }
  // overwrite at the end, 4K alignment
  {
    ObjectStore::Transaction t;
    bufferlist bl;
    bl.append(std::string(block_size, 'g'));
    t.write(cid, hoid, block_size, bl.length(), bl,
            CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  // counters are cumulative within this test: the small aligned overwrite
  // above is expected to have taken the deferred path
  ASSERT_EQ(logger->get(l_bluestore_write_big), 4u);
  ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 2u);
  {
    bufferlist bl, expected;
    r = store->read(ch, hoid, 0, block_size, bl);
    ASSERT_EQ(r, (int)block_size);
    expected.append(string(block_size, 'b'));
    ASSERT_TRUE(bl_eq(expected, bl));
  }
  {
    bufferlist bl, expected;
    r = store->read(ch, hoid, block_size, block_size, bl);
    ASSERT_EQ(r, (int)block_size);
    expected.append(string(block_size, 'g'));
    ASSERT_TRUE(bl_eq(expected, bl));
  }
  // overwrite at 4K, 12K alignment
  {
    ObjectStore::Transaction t;
    bufferlist bl;
    bl.append(std::string(block_size, 'e'));
    t.write(cid, hoid2, block_size , bl.length(), bl,
            CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  ASSERT_EQ(logger->get(l_bluestore_write_big), 5u);
  ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 3u);

  // makes sure deferred has been submitted
  // and do all the checks again
  sleep(g_conf().get_val<double>("bluestore_max_defer_interval") + 2);

  ASSERT_EQ(logger->get(l_bluestore_write_big), 5u);
  ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 3u);

  {
    bufferlist bl, expected;
    r = store->read(ch, hoid, 0, block_size, bl);
    ASSERT_EQ(r, (int)block_size);
    expected.append(string(block_size, 'b'));
    ASSERT_TRUE(bl_eq(expected, bl));
  }
  {
    bufferlist bl, expected;
    r = store->read(ch, hoid, block_size, block_size, bl);
    ASSERT_EQ(r, (int)block_size);
    expected.append(string(block_size, 'g'));
    ASSERT_TRUE(bl_eq(expected, bl));
  }
  {
    bufferlist bl, expected;
    r = store->read(ch, hoid2, 0, block_size, bl);
    ASSERT_EQ(r, (int)block_size);
    expected.append(string(block_size, 'd'));
    ASSERT_TRUE(bl_eq(expected, bl));
  }
  {
    bufferlist bl, expected;
    r = store->read(ch, hoid2, block_size, block_size, bl);
    ASSERT_EQ(r, (int)block_size);
    expected.append(string(block_size, 'e'));
    ASSERT_TRUE(bl_eq(expected, bl));
  }
  {
    bufferlist bl, expected;
    r = store->read(ch, hoid2, block_size * 2, block_size, bl);
    ASSERT_EQ(r, (int)block_size);
    expected.append(string(block_size, 'd'));
    ASSERT_TRUE(bl_eq(expected, bl));
  }
  // both objects together: 5 blocks of live data, at most 5 blocks allocated
  {
    struct store_statfs_t statfs;
    int r = store->statfs(&statfs);
    ASSERT_EQ(r, 0);
    ASSERT_EQ(statfs.data_stored, (unsigned)block_size * 5);
    ASSERT_LE(statfs.allocated, (unsigned)block_size * 5);
  }
  ASSERT_EQ(logger->get(l_bluestore_blobs), 2u);
  ASSERT_EQ(logger->get(l_bluestore_extents), 2u);

  {
    ObjectStore::Transaction t;
    t.remove(cid, hoid);
    t.remove(cid, hoid2);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  // fresh object: a new big write allocates, hence no deferred increment
  {
    ObjectStore::Transaction t;
    bufferlist bl;
    bl.append(std::string(block_size * 2, 'f'));
    t.write(cid, hoid, 0, bl.length(), bl,
            CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  ASSERT_EQ(logger->get(l_bluestore_write_big), 6u);
  ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 3u);

  {
    ObjectStore::Transaction t;
    t.zero(cid, hoid, 0, 100);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  // zeroed head reads back as zeros, rest of the object keeps 'f'
  {
    bufferlist bl, expected;
    r = store->read(ch, hoid, 0, 100, bl);
    ASSERT_EQ(r, (int)100);
    expected.append(string(100, 0));
    ASSERT_TRUE(bl_eq(expected, bl));
  }
  {
    bufferlist bl, expected;
    r = store->read(ch, hoid, 100, block_size * 2 - 100, bl);
    ASSERT_EQ(r, (int)block_size * 2 - 100);
    expected.append(string(block_size * 2 - 100, 'f'));
    ASSERT_TRUE(bl_eq(expected, bl));
  }
  sleep(2);
  {
    struct store_statfs_t statfs;
    int r = store->statfs(&statfs);
    ASSERT_EQ(r, 0);
    ASSERT_EQ(statfs.data_stored, (unsigned)block_size * 2 - 100);
    ASSERT_LE(statfs.allocated, (unsigned)block_size * 2);
  }
  ASSERT_EQ(logger->get(l_bluestore_blobs), 1u);
  ASSERT_EQ(logger->get(l_bluestore_extents), 1u);

  {
    ObjectStore::Transaction t;
    bufferlist bl;
    bl.append(std::string(block_size, 'g'));
    t.write(cid, hoid, 0, bl.length(), bl,
            CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  ASSERT_EQ(logger->get(l_bluestore_write_big), 7u);
  ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 4u);
  {
    bufferlist bl, expected;
    r = store->read(ch, hoid, 0, block_size, bl);
    ASSERT_EQ(r, (int)block_size);
    expected.append(string(block_size, 'g'));
    ASSERT_TRUE(bl_eq(expected, bl));
  }
  {
    bufferlist bl, expected;
    r = store->read(ch, hoid, block_size, block_size, bl);
    ASSERT_EQ(r, (int)block_size);
    expected.append(string(block_size, 'f'));
    ASSERT_TRUE(bl_eq(expected, bl));
  }
  {
    struct store_statfs_t statfs;
    int r = store->statfs(&statfs);
    ASSERT_EQ(r, 0);
    ASSERT_EQ(statfs.data_stored, (unsigned)block_size * 2);
    ASSERT_LE(statfs.allocated, (unsigned)block_size * 2);
  }
  ASSERT_EQ(logger->get(l_bluestore_blobs), 1u);
  ASSERT_EQ(logger->get(l_bluestore_extents), 1u);

  // check whether full overwrite bypass deferred
  {
    ObjectStore::Transaction t;
    bufferlist bl;
    bl.append(std::string(block_size * 2, 'h'));
    t.write(cid, hoid, 0, bl.length(), bl,
            CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  // write_big bumped, deferred unchanged -> full overwrite was not deferred
  ASSERT_EQ(logger->get(l_bluestore_write_big), 8u);
  ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 4u);
  {
    bufferlist bl, expected;
    r = store->read(ch, hoid, 0, block_size * 2, bl);
    ASSERT_EQ(r, (int)block_size * 2);
    expected.append(string(block_size * 2, 'h'));
    ASSERT_TRUE(bl_eq(expected, bl));
  }
  {
    struct store_statfs_t statfs;
    int r = store->statfs(&statfs);
    ASSERT_EQ(r, 0);
    ASSERT_EQ(statfs.data_stored, (unsigned)block_size * 2);
    ASSERT_LE(statfs.allocated, (unsigned)block_size * 2);
  }
  {
    ObjectStore::Transaction t;
    t.remove(cid, hoid);
    t.remove(cid, hoid2);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }

  {
    ObjectStore::Transaction t;
    bufferlist bl;
    bl.append(std::string(block_size * 32, 'a'));

    // this will create two 128K aligned blobs
    t.write(cid, hoid, 0, bl.length(), bl,
            CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
    t.write(cid, hoid, bl.length(), bl.length(), bl,
            CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  ASSERT_EQ(logger->get(l_bluestore_write_big), 10u);
  ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 4u);

  // check whether overwrite (less than prefer_deferred_size) partially
  // overlapping two adjacent blobs goes deferred
  {
    ObjectStore::Transaction t;
    bufferlist bl;
    bl.append(std::string(block_size * 3, 'b'));
    t.write(cid, hoid, 0x20000 - block_size, bl.length(), bl,
            CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  // +2 deferred: one deferred sub-write per touched blob
  ASSERT_EQ(logger->get(l_bluestore_write_big), 11u);
  ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 6u);
  {
    bufferlist bl, expected;
    r = store->read(ch, hoid, 0, 0x20000 - block_size, bl);
    ASSERT_EQ(r, 0x20000 - block_size);
    expected.append(string(r, 'a'));
    ASSERT_TRUE(bl_eq(expected, bl));
    expected.clear();

    r = store->read(ch, hoid, 0x20000 - block_size, block_size * 3, bl);
    ASSERT_EQ(r, 3 * block_size);
    expected.append(string(r, 'b'));
    ASSERT_TRUE(bl_eq(expected, bl));
    expected.clear();

    r = store->read(ch, hoid, 0x20000 + 2 * block_size, block_size * 30, bl);
    ASSERT_EQ(r, 30 * block_size);
    expected.append(string(r, 'a'));
    ASSERT_TRUE(bl_eq(expected, bl));
    expected.clear();
  }
  {
    struct store_statfs_t statfs;
    int r = store->statfs(&statfs);
    ASSERT_EQ(r, 0);
    ASSERT_EQ(statfs.data_stored, (unsigned)block_size * 64);
    ASSERT_LE(statfs.allocated, (unsigned)block_size * 64);
  }

  // check whether overwrite (larger than prefer_deferred_size) partially
  // overlapping two adjacent blobs goes deferred
  {
    ObjectStore::Transaction t;
    bufferlist bl;
    bl.append(std::string(block_size * 30, 'c'));
    t.write(cid, hoid, 0x10000 + block_size, bl.length(), bl,
            CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  sleep(2);
  ASSERT_EQ(logger->get(l_bluestore_write_big), 12u);
  ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 8u);
  {
    bufferlist bl, expected;
    r = store->read(ch, hoid, 0, 0x11000, bl);
    ASSERT_EQ(r, 0x11000);
    expected.append(string(r, 'a'));
    ASSERT_TRUE(bl_eq(expected, bl));
    expected.clear();

    r = store->read(ch, hoid, 0x11000, block_size * 30, bl);
    ASSERT_EQ(r, block_size * 30);
    expected.append(string(r, 'c'));
    ASSERT_TRUE(bl_eq(expected, bl));
    expected.clear();

    r = store->read(ch, hoid, block_size * 47, 0x10000 + block_size, bl);
    ASSERT_EQ(r, 0x10000 + block_size);
    expected.append(string(r, 'a'));
    ASSERT_TRUE(bl_eq(expected, bl));
    expected.clear();
  }
  {
    struct store_statfs_t statfs;
    int r = store->statfs(&statfs);
    ASSERT_EQ(r, 0);
    ASSERT_EQ(statfs.data_stored, (unsigned)block_size * 64);
    ASSERT_LE(statfs.allocated, (unsigned)block_size * 64);
  }
  logger->reset();

  // check whether overwrite (prefer_deferred_size < 120K < 2 * prefer_deferred_size)
  // partially overlapping two adjacent blobs goes partly deferred
  {
    ObjectStore::Transaction t;
    bufferlist bl;
    bl.append(std::string(block_size * 30, 'e'));
    t.write(cid, hoid, 0x20000 - block_size, bl.length(), bl,
            CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  sleep(2);
  // only the single-block head overlap goes deferred; the rest is written directly
  ASSERT_EQ(logger->get(l_bluestore_write_big), 1u);
  ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 1u);
  ASSERT_EQ(logger->get(l_bluestore_issued_deferred_writes), 1u);
  ASSERT_EQ(logger->get(l_bluestore_issued_deferred_write_bytes), block_size);
  {
    struct store_statfs_t statfs;
    int r = store->statfs(&statfs);
    ASSERT_EQ(r, 0);
    ASSERT_EQ(statfs.data_stored, (unsigned)block_size * 64);
ASSERT_LE(statfs.allocated, (unsigned)block_size * 64);
  }
  {
    ObjectStore::Transaction t;
    t.remove(cid, hoid);
    t.remove(cid, hoid2);
    t.remove_collection(cid);
    cerr << "Cleaning" << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
}

// Verify (via perf counters) how much of a large unaligned overwrite goes
// through BlueStore's deferred-write path when blob size == prefer_deferred_size.
TEST_P(StoreTestSpecificAUSize, DeferredOnBigOverwrite2) {

  if (string(GetParam()) != "bluestore")
    return;
  if (smr) {
    cout << "SKIP: no deferred" << std::endl;
    return;
  }

  size_t block_size = 4096;
  StartDeferred(block_size);
  SetVal(g_conf(), "bluestore_max_blob_size", "65536");
  SetVal(g_conf(), "bluestore_prefer_deferred_size", "65536");
  g_conf().apply_changes(nullptr);

  int r;
  coll_t cid;
  ghobject_t hoid(hobject_t("test", "", CEPH_NOSNAP, 0, -1, ""));

  PerfCounters* logger = const_cast<PerfCounters*>(store->get_perf_counters());

  auto ch = store->create_new_collection(cid);
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  // initial unaligned 128K write: all newly allocated, nothing deferred
  {
    ObjectStore::Transaction t;
    bufferlist bl;

    bl.append(std::string(128 * 1024, 'c'));
    t.write(cid, hoid, 0x1000, bl.length(), bl,
            CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
    ASSERT_EQ(logger->get(l_bluestore_write_big), 1u);
    ASSERT_EQ(logger->get(l_bluestore_write_big_bytes), bl.length());
    ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 3u);
    ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 0u);
    ASSERT_EQ(logger->get(l_bluestore_issued_deferred_writes), 0u);
    ASSERT_EQ(logger->get(l_bluestore_issued_deferred_write_bytes), 0);
  }
  logger->reset();
  // overlapping 128K overwrite at 0x2000: one blob's worth goes deferred
  {
    ObjectStore::Transaction t;
    bufferlist bl;

    bl.append(std::string(128 * 1024, 'c'));
    t.write(cid, hoid, 0x2000, bl.length(), bl,
            CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
    ASSERT_EQ(logger->get(l_bluestore_write_big), 1u);
    ASSERT_EQ(logger->get(l_bluestore_write_big_bytes), bl.length());
    ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 3u);
    ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 1u);
    ASSERT_EQ(logger->get(l_bluestore_issued_deferred_writes), 1u);
    // 57344 = 0xE000, the deferred portion of the overwrite
    ASSERT_EQ(logger->get(l_bluestore_issued_deferred_write_bytes), 57344);
  }
  {
    ObjectStore::Transaction t;
    // NOTE(review): hoid is removed twice here; possibly one of these was
    // meant to be a different object — confirm against upstream intent.
    t.remove(cid, hoid);
    t.remove(cid, hoid);
    t.remove_collection(cid);
    cerr << "Cleaning" << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
}

// Same idea as DeferredOnBigOverwrite2 but with a 4M object: only the
// single partially-overwritten blob should take the deferred path.
TEST_P(StoreTestSpecificAUSize, DeferredOnBigOverwrite3) {

  if (string(GetParam()) != "bluestore")
    return;
  if (smr) {
    cout << "SKIP: no deferred" << std::endl;
    return;
  }

  size_t block_size = 4096;
  StartDeferred(block_size);
  SetVal(g_conf(), "bluestore_max_blob_size", "65536");
  SetVal(g_conf(), "bluestore_prefer_deferred_size", "65536");
  g_conf().apply_changes(nullptr);

  int r;
  coll_t cid;
  ghobject_t hoid(hobject_t("test", "", CEPH_NOSNAP, 0, -1, ""));

  PerfCounters* logger = const_cast<PerfCounters*>(store->get_perf_counters());

  auto ch = store->create_new_collection(cid);
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  logger->reset();
  // initial aligned 4M write: 64 x 64K blobs, nothing deferred
  {
    ObjectStore::Transaction t;
    bufferlist bl;

    bl.append(std::string(4096 * 1024, 'c'));
    t.write(cid, hoid, 0, bl.length(), bl,
            CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
    ASSERT_EQ(logger->get(l_bluestore_write_big), 1u);
    ASSERT_EQ(logger->get(l_bluestore_write_big_bytes), bl.length());
    ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 64u);
    ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 0u);
    ASSERT_EQ(logger->get(l_bluestore_issued_deferred_writes), 0u);
    ASSERT_EQ(logger->get(l_bluestore_issued_deferred_write_bytes), 0u);
  }
  logger->reset();
  // shifted 4M overwrite at 0x1000: 65 blobs touched, head overlap deferred
  {
    ObjectStore::Transaction t;
    bufferlist bl;

    bl.append(std::string(4096 * 1024, 'c'));
    t.write(cid, hoid, 0x1000, bl.length(), bl,
            CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
ASSERT_EQ(logger->get(l_bluestore_write_big), 1u);
    ASSERT_EQ(logger->get(l_bluestore_write_big_bytes), bl.length());
    ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 65u);
    ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 1u);
    ASSERT_EQ(logger->get(l_bluestore_issued_deferred_writes), 1u);
    // 61440 = 0xF000, the overlapping tail of the first 64K blob
    ASSERT_EQ(logger->get(l_bluestore_issued_deferred_write_bytes), 61440);
  }
  {
    ObjectStore::Transaction t;
    t.remove(cid, hoid);
    t.remove_collection(cid);
    cerr << "Cleaning" << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
}

// For a range of alloc-hint chunk sizes, verify that an overwrite straddling
// an allocation-unit boundary is deferred only while the hinted write size
// stays below bluestore_prefer_deferred_size; then reopen the store and
// verify data integrity.
TEST_P(StoreTestSpecificAUSize, DeferredDifferentChunks) {

  if (string(GetParam()) != "bluestore")
    return;
  if (smr) {
    cout << "SKIP: no deferred" << std::endl;
    return;
  }

  size_t alloc_size = 4096;
  size_t large_object_size = 1 * 1024 * 1024;
  size_t prefer_deferred_size = 65536;
  StartDeferred(alloc_size);
  SetVal(g_conf(), "bluestore_max_blob_size", "131072");
  SetVal(g_conf(), "bluestore_prefer_deferred_size",
    stringify(prefer_deferred_size).c_str());
  g_conf().apply_changes(nullptr);

  int r;
  coll_t cid;
  const PerfCounters* logger = store->get_perf_counters();
  size_t exp_bluestore_write_big = 0;
  size_t exp_bluestore_write_big_deferred = 0;

  ObjectStore::CollectionHandle ch = store->create_new_collection(cid);
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  for (size_t expected_write_size = 1024;
       expected_write_size <= prefer_deferred_size;
       expected_write_size *= 2) {
    //create object with hint
    ghobject_t hoid(hobject_t("test-"+to_string(expected_write_size), "",
                              CEPH_NOSNAP, 0, -1, ""));
    {
      ObjectStore::Transaction t;
      t.touch(cid, hoid);
      t.set_alloc_hint(cid, hoid, large_object_size, expected_write_size,
                       CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_READ |
                       CEPH_OSD_ALLOC_HINT_FLAG_APPEND_ONLY);
      r = queue_transaction(store, ch, std::move(t));
      ASSERT_EQ(r, 0);
    }

    //fill object
    {
      ObjectStore::Transaction t;
      bufferlist bl;
      bl.append(std::string(large_object_size, 'h'));
      t.write(cid, hoid, 0, bl.length(), bl,
              CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
      r = queue_transaction(store, ch, std::move(t));
      ++exp_bluestore_write_big;
      ASSERT_EQ(r, 0);
    }
    ASSERT_EQ(logger->get(l_bluestore_write_big), exp_bluestore_write_big);
    ASSERT_EQ(logger->get(l_bluestore_write_big_deferred),
              exp_bluestore_write_big_deferred);

    // check whether write will properly use deferred
    {
      ObjectStore::Transaction t;
      bufferlist bl;
      bl.append(std::string(alloc_size + 2, 'z'));
      // straddles an alloc-unit boundary near the object's tail
      t.write(cid, hoid, large_object_size - 2 * alloc_size - 1, bl.length(), bl,
              CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
      r = queue_transaction(store, ch, std::move(t));
      ++exp_bluestore_write_big;
      if (expected_write_size < prefer_deferred_size)
        ++exp_bluestore_write_big_deferred;
      ASSERT_EQ(r, 0);
    }
    ASSERT_EQ(logger->get(l_bluestore_write_big), exp_bluestore_write_big);
    ASSERT_EQ(logger->get(l_bluestore_write_big_deferred),
              exp_bluestore_write_big_deferred);
  }
  ch.reset(nullptr);
  CloseAndReopen();
  ch = store->open_collection(cid);
  // check values
  for (size_t expected_write_size = 1024;
       expected_write_size <= 65536;
       expected_write_size *= 2) {
    ghobject_t hoid(hobject_t("test-"+to_string(expected_write_size), "",
                              CEPH_NOSNAP, 0, -1, ""));
    {
      bufferlist bl, expected;
      r = store->read(ch, hoid, 0, large_object_size, bl);
      ASSERT_EQ(r, large_object_size);
      expected.append(string(large_object_size - 2 * alloc_size - 1, 'h'));
      expected.append(string(alloc_size + 2, 'z'));
      expected.append(string(alloc_size - 1, 'h'));
      ASSERT_TRUE(bl_eq(expected, bl));
    }
  }
  {
    ObjectStore::Transaction t;
    for (size_t expected_write_size = 1024;
         expected_write_size <= 65536;
         expected_write_size *= 2) {
      ghobject_t hoid(hobject_t("test-"+to_string(expected_write_size), "",
                                CEPH_NOSNAP, 0, -1, ""));
      t.remove(cid, hoid);
    }
    t.remove_collection(cid);
    cerr << "Cleaning" << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
}

// Exercise blob reuse when writes arrive in reverse/ scattered order around
// existing extents; validates data plus blob/extent counters after each step.
TEST_P(StoreTestSpecificAUSize, BlobReuseOnOverwriteReverse) {

  if (string(GetParam()) != "bluestore")
return;
  if (smr) {
    cout << "SKIP: no overwrite" << std::endl;
    return;
  }

  size_t block_size = 4096;
  StartDeferred(block_size);
  SetVal(g_conf(), "bluestore_max_blob_size", "65536");
  g_conf().apply_changes(nullptr);

  int r;
  coll_t cid;
  ghobject_t hoid(hobject_t("test_hint", "", CEPH_NOSNAP, 0, -1, ""));

  auto ch = store->create_new_collection(cid);

  const PerfCounters* logger = store->get_perf_counters();
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  // seed: 8K of 'a' at blocks 10..11
  {
    ObjectStore::Transaction t;
    bufferlist bl;

    bl.append(std::string(block_size * 2, 'a'));
    t.write(cid, hoid, block_size * 10, bl.length(), bl,
            CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  {
    // prepend existing
    ObjectStore::Transaction t;
    bufferlist bl;

    bl.append(std::string(block_size, 'b'));
    t.write(cid, hoid, block_size * 9, bl.length(), bl,
            CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  {
    // We need to issue a read to trigger cache stat update that refresh
    // perf counters. additionally we need to wait some time for mempool
    // thread to update stats.
    sleep(1);
    bufferlist bl, expected;
    r = store->read(ch, hoid, block_size * 9, block_size * 2, bl);
    ASSERT_EQ(r, (int)block_size * 2);
    expected.append(string(block_size, 'b'));
    expected.append(string(block_size, 'a'));
    ASSERT_TRUE(bl_eq(expected, bl));
    // prepend reused the existing blob: still one blob, one extent
    ASSERT_EQ(logger->get(l_bluestore_blobs), 1u);
    ASSERT_EQ(logger->get(l_bluestore_extents), 1u);
  }
  {
    // prepend existing with a gap
    ObjectStore::Transaction t;
    bufferlist bl;

    bl.append(std::string(block_size, 'c'));
    t.write(cid, hoid, block_size * 7, bl.length(), bl,
            CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  {
    // We need to issue a read to trigger cache stat update that refresh
    // perf counters. additionally we need to wait some time for mempool
    // thread to update stats.
    sleep(1);
    bufferlist bl, expected;
    r = store->read(ch, hoid, block_size * 7, block_size * 3, bl);
    ASSERT_EQ(r, (int)block_size * 3);
    expected.append(string(block_size, 'c'));
    expected.append(string(block_size, 0));
    expected.append(string(block_size, 'b'));
    ASSERT_TRUE(bl_eq(expected, bl));
    // gap splits the logical range: one blob, two extents
    ASSERT_EQ(logger->get(l_bluestore_blobs), 1u);
    ASSERT_EQ(logger->get(l_bluestore_extents), 2u);
  }
  {
    // append after existing with a gap
    ObjectStore::Transaction t;
    bufferlist bl;

    bl.append(std::string(block_size, 'd'));
    t.write(cid, hoid, block_size * 13, bl.length(), bl,
            CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  {
    // We need to issue a read to trigger cache stat update that refresh
    // perf counters. additionally we need to wait some time for mempool
    // thread to update stats.
    sleep(1);
    bufferlist bl, expected;
    r = store->read(ch, hoid, block_size * 11, block_size * 3, bl);
    ASSERT_EQ(r, (int)block_size * 3);
    expected.append(string(block_size, 'a'));
    expected.append(string(block_size, 0));
    expected.append(string(block_size, 'd'));
    ASSERT_TRUE(bl_eq(expected, bl));
    ASSERT_EQ(logger->get(l_bluestore_blobs), 1u);
    ASSERT_EQ(logger->get(l_bluestore_extents), 3u);
  }
  {
    // append twice to the next max_blob slot
    ObjectStore::Transaction t;
    bufferlist bl;

    bl.append(std::string(block_size, 'e'));
    t.write(cid, hoid, block_size * 17, bl.length(), bl,
            CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
    t.write(cid, hoid, block_size * 19, bl.length(), bl,
            CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  {
    // We need to issue a read to trigger cache stat update that refresh
    // perf counters. additionally we need to wait some time for mempool
    // thread to update stats.
    sleep(1);
    bufferlist bl, expected;
    r = store->read(ch, hoid, block_size * 17, block_size * 3, bl);
    ASSERT_EQ(r, (int)block_size * 3);
    expected.append(string(block_size, 'e'));
    expected.append(string(block_size, 0));
    expected.append(string(block_size, 'e'));
    ASSERT_TRUE(bl_eq(expected, bl));
    // crossing the 64K max_blob boundary created a second blob
    ASSERT_EQ(logger->get(l_bluestore_blobs), 2u);
    ASSERT_EQ(logger->get(l_bluestore_extents), 5u);
  }
  {
    // fill gaps at the second slot
    ObjectStore::Transaction t;
    bufferlist bl;

    bl.append(std::string(block_size, 'f'));
    t.write(cid, hoid, block_size * 16, bl.length(), bl,
            CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
    t.write(cid, hoid, block_size * 18, bl.length(), bl,
            CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  {
    // We need to issue a read to trigger cache stat update that refresh
    // perf counters. additionally we need to wait some time for mempool
    // thread to update stats.
    sleep(1);
    bufferlist bl, expected;
    r = store->read(ch, hoid, block_size * 16, block_size * 4, bl);
    ASSERT_EQ(r, (int)block_size * 4);
    expected.append(string(block_size, 'f'));
    expected.append(string(block_size, 'e'));
    expected.append(string(block_size, 'f'));
    expected.append(string(block_size, 'e'));
    ASSERT_TRUE(bl_eq(expected, bl));
    // filling the gaps merged adjacent extents: back to 4 extents
    ASSERT_EQ(logger->get(l_bluestore_blobs), 2u);
    ASSERT_EQ(logger->get(l_bluestore_extents), 4u);
  }
  {
    ObjectStore::Transaction t;
    t.remove(cid, hoid);
    t.remove_collection(cid);
    cerr << "Cleaning" << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
}

// Verify that a tiny (sub-block) write landing in a gap between two extents
// of the same blob reuses that blob instead of allocating a new one.
TEST_P(StoreTestSpecificAUSize, BlobReuseOnSmallOverwrite) {

  if (string(GetParam()) != "bluestore")
    return;
  if (smr) {
    cout << "SKIP: no overwrite" << std::endl;
    return;
  }

  size_t block_size = 4096;
  StartDeferred(block_size);
  SetVal(g_conf(), "bluestore_max_blob_size", "65536");
  g_conf().apply_changes(nullptr);

  int r;
  coll_t cid;
  ghobject_t hoid(hobject_t("test_hint", "", CEPH_NOSNAP, 0, -1, ""));

  const PerfCounters* logger = store->get_perf_counters();

  auto ch =
store->create_new_collection(cid);
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  // two 4K extents with a one-block gap between them
  {
    ObjectStore::Transaction t;
    bufferlist bl;

    bl.append(std::string(block_size, 'a'));
    t.write(cid, hoid, 0, bl.length(), bl,
            CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
    t.write(cid, hoid, block_size * 2, bl.length(), bl,
            CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  {
    // write small into the gap
    ObjectStore::Transaction t;
    bufferlist bl;

    bl.append(std::string(3, 'b'));
    t.write(cid, hoid, block_size + 1, bl.length(), bl,
            CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  {
    // We need to issue a read to trigger cache stat update that refresh
    // perf counters. additionally we need to wait some time for mempool
    // thread to update stats.
    sleep(1);
    bufferlist bl, expected;
    r = store->read(ch, hoid, 0, block_size * 3, bl);
    ASSERT_EQ(r, (int)block_size * 3);
    expected.append(string(block_size, 'a'));
    expected.append(string(1, 0));
    expected.append(string(3, 'b'));
    expected.append(string(block_size - 4, 0));
    expected.append(string(block_size, 'a'));
    ASSERT_TRUE(bl_eq(expected, bl));
    // small write reused the existing blob: one blob, now three extents
    ASSERT_EQ(logger->get(l_bluestore_blobs), 1u);
    ASSERT_EQ(logger->get(l_bluestore_extents), 3u);
  }
  {
    ObjectStore::Transaction t;
    t.remove(cid, hoid);
    t.remove_collection(cid);
    cerr << "Cleaning" << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
}

// The test case to reproduce an issue when write happens
// to a zero space between the extents sharing the same spanning blob
// with unloaded shard map.
// Second extent might be filled with zeros this way due to wrong result
// returned by has_any_extents() call in do_write_small. The latter is caused
// by incompletely loaded extent map.
TEST_P(StoreTestSpecificAUSize, SmallWriteOnShardedExtents) {
  if (string(GetParam()) != "bluestore")
    return;

  size_t block_size = 0x10000;
  StartDeferred(block_size);

  SetVal(g_conf(), "bluestore_csum_type", "xxhash64");
  SetVal(g_conf(), "bluestore_max_blob_size", "524288"); // for sure

  g_conf().apply_changes(nullptr);

  int r;
  coll_t cid;
  ghobject_t hoid1(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));

  auto ch = store->create_new_collection(cid);
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  {
    //doing some tricks to have sharded extents/spanning objects
    ObjectStore::Transaction t;
    bufferlist bl, bl2;

    bl.append(std::string(0x80000, 'a'));
    t.write(cid, hoid1, 0, bl.length(), bl, 0);
    t.zero(cid, hoid1, 0x719e0, 0x75b0 );
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);

    // NOTE(review): 't' is reused after being moved into queue_transaction
    // above — relies on Transaction being reusable post-move; confirm.
    bl2.append(std::string(0x70000, 'b'));
    t.write(cid, hoid1, 0, bl2.length(), bl2, 0);
    t.zero(cid, hoid1, 0, 0x50000);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  // force the extent map shards to be unloaded via a remount
  ch.reset();
  store->umount();
  store->mount();
  ch = store->open_collection(cid);

  {
    // do a write to zero space in between some extents sharing the same blob
    ObjectStore::Transaction t;
    bufferlist bl, bl2;

    bl.append(std::string(0x6520, 'c'));
    t.write(cid, hoid1, 0x71c00, bl.length(), bl, 0);

    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }

  {
    ObjectStore::Transaction t;
    bufferlist bl, expected;

    r = store->read(ch, hoid1, 0x70000, 0x9c00, bl);
    ASSERT_EQ(r, (int)0x9c00);
    expected.append(string(0x19e0, 'a'));
    expected.append(string(0x220, 0));
    expected.append(string(0x6520, 'c'));
    expected.append(string(0xe70, 0));
    expected.append(string(0xc70, 'a'));
    ASSERT_TRUE(bl_eq(expected, bl));
    bl.clear();
  }

  {
    ObjectStore::Transaction t;
    t.remove(cid, hoid1);
    t.remove_collection(cid);
    cerr << "Cleaning" << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
}

TEST_P(StoreTestSpecificAUSize,
ReproBug56488Test) {
  // Regression test for https://tracker.ceph.com/issues/56488:
  // verifies that small writes (below alloc_size, and straddling an
  // alloc-unit boundary) are accounted as deferred writes.

  if (string(GetParam()) != "bluestore")
    return;
  if (smr) {
    cout << "SKIP: no deferred" << std::endl;
    return;
  }

  size_t alloc_size = 65536;
  size_t write_size = 4096;
  SetVal(g_conf(), "bluestore_debug_enforce_settings", "hdd");
  SetVal(g_conf(), "bluestore_block_db_create", "true");
  SetVal(g_conf(), "bluestore_block_db_size", stringify(1 << 30).c_str());

  g_conf().apply_changes(nullptr);
  StartDeferred(alloc_size);

  int r;
  coll_t cid;
  const PerfCounters* logger = store->get_perf_counters();

  ObjectStore::CollectionHandle ch = store->create_new_collection(cid);
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  {
    ghobject_t hoid(hobject_t("test", "", CEPH_NOSNAP, 0, -1, ""));
    {
      ObjectStore::Transaction t;
      t.touch(cid, hoid);
      r = queue_transaction(store, ch, std::move(t));
      ASSERT_EQ(r, 0);
    }

    auto issued_dw = logger->get(l_bluestore_issued_deferred_writes);
    auto issued_dw_bytes = logger->get(l_bluestore_issued_deferred_write_bytes);
    // single 4K write at offset 0 -> exactly one deferred write
    {
      ObjectStore::Transaction t;
      bufferlist bl;
      bl.append(std::string(write_size, 'x'));
      t.write(cid, hoid, 0, bl.length(), bl,
              CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
      r = queue_transaction(store, ch, std::move(t));
      ASSERT_EQ(r, 0);
    }
    ASSERT_EQ(logger->get(l_bluestore_issued_deferred_writes),
              issued_dw + 1);
    ASSERT_EQ(logger->get(l_bluestore_issued_deferred_write_bytes),
              issued_dw_bytes + write_size);
  }
  {
    ghobject_t hoid(hobject_t("test-a", "", CEPH_NOSNAP, 0, -1, ""));
    {
      ObjectStore::Transaction t;
      t.touch(cid, hoid);
      r = queue_transaction(store, ch, std::move(t));
      ASSERT_EQ(r, 0);
    }

    auto issued_dw = logger->get(l_bluestore_issued_deferred_writes);
    auto issued_dw_bytes = logger->get(l_bluestore_issued_deferred_write_bytes);
    // 8K write straddling the 64K alloc-unit boundary -> two deferred writes
    {
      ObjectStore::Transaction t;
      bufferlist bl;
      bl.append(std::string(write_size * 2, 'x'));
      t.write(cid, hoid, alloc_size - write_size, bl.length(), bl,
              CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
      r = queue_transaction(store, ch, std::move(t));
      ASSERT_EQ(r, 0);
    }
    ASSERT_EQ(logger->get(l_bluestore_issued_deferred_writes),
              issued_dw + 2);
    ASSERT_EQ(logger->get(l_bluestore_issued_deferred_write_bytes),
              issued_dw_bytes + write_size * 2);
  }
  {
    ObjectStore::Transaction t;
    ghobject_t hoid(hobject_t("test", "", CEPH_NOSNAP, 0, -1, ""));
    t.remove(cid, hoid);
    ghobject_t hoid_a(hobject_t("test-a", "", CEPH_NOSNAP, 0, -1, ""));
    t.remove(cid, hoid_a);
    t.remove_collection(cid);
    cerr << "Cleaning" << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
}

#endif //#if defined(WITH_BLUESTORE)

// Write a batch of small objects, then dump the KV DB histogram; the test
// only checks that histogram generation runs to completion.
TEST_P(StoreTest, KVDBHistogramTest) {
  if (string(GetParam()) != "bluestore")
    return;

  int NUM_OBJS = 200;
  int r = 0;
  coll_t cid;
  string base("testobj.");
  bufferlist a;
  bufferptr ap(0x1000);
  memset(ap.c_str(), 'a', 0x1000);
  a.append(ap);
  auto ch = store->create_new_collection(cid);
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  for (int i = 0; i < NUM_OBJS; ++i) {
    ObjectStore::Transaction t;
    char buf[100];
    snprintf(buf, sizeof(buf), "%d", i);
    ghobject_t hoid(hobject_t(sobject_t(base + string(buf), CEPH_NOSNAP)));
    t.write(cid, hoid, 0, 0x1000, a);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }

  std::unique_ptr<Formatter> f(Formatter::create("store_test", "json-pretty", "json-pretty"));
  store->generate_db_histogram(f.get());
  f->flush(cout);
  cout << std::endl;
}

// Enable RocksDB statistics collection, write some objects, then dump DB
// statistics; only checks the stats path completes without error.
TEST_P(StoreTest, KVDBStatsTest) {
  if (string(GetParam()) != "bluestore")
    return;

  SetVal(g_conf(), "rocksdb_perf", "true");
  SetVal(g_conf(), "rocksdb_collect_compaction_stats", "true");
  SetVal(g_conf(), "rocksdb_collect_extended_stats","true");
  SetVal(g_conf(), "rocksdb_collect_memory_stats","true");
  g_ceph_context->_conf.apply_changes(nullptr);
  int r = store->umount();
  ASSERT_EQ(r, 0);
  r = store->mount(); //to force rocksdb stats
  ASSERT_EQ(r, 0);

  int NUM_OBJS = 200;
  coll_t cid;
  string base("testobj.");
  bufferlist a;
  bufferptr ap(0x1000);
  memset(ap.c_str(),
'a', 0x1000); a.append(ap); auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } for (int i = 0; i < NUM_OBJS; ++i) { ObjectStore::Transaction t; char buf[100]; snprintf(buf, sizeof(buf), "%d", i); ghobject_t hoid(hobject_t(sobject_t(base + string(buf), CEPH_NOSNAP))); t.write(cid, hoid, 0, 0x1000, a); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } std::unique_ptr<Formatter> f(Formatter::create("store_test", "json-pretty", "json-pretty")); store->get_db_statistics(f.get()); f->flush(cout); cout << std::endl; } #if defined(WITH_BLUESTORE) TEST_P(StoreTestSpecificAUSize, garbageCollection) { int r; coll_t cid; int buf_len = 256 * 1024; int overlap_offset = 64 * 1024; int write_offset = buf_len; if (string(GetParam()) != "bluestore") return; if (smr) { cout << "SKIP: assertions about allocations need to be adjusted" << std::endl; return; } #define WRITE_AT(offset, _length) {\ ObjectStore::Transaction t;\ if ((uint64_t)_length != bl.length()) { \ buffer::ptr p(bl.c_str(), _length);\ bufferlist bl_tmp;\ bl_tmp.push_back(p);\ t.write(cid, hoid, offset, bl_tmp.length(), bl_tmp);\ } else {\ t.write(cid, hoid, offset, bl.length(), bl);\ }\ r = queue_transaction(store, ch, std::move(t));\ ASSERT_EQ(r, 0);\ } StartDeferred(65536); SetVal(g_conf(), "bluestore_compression_max_blob_size", "524288"); SetVal(g_conf(), "bluestore_compression_min_blob_size", "262144"); SetVal(g_conf(), "bluestore_max_blob_size", "524288"); SetVal(g_conf(), "bluestore_compression_mode", "force"); g_conf().apply_changes(nullptr); auto ch = store->create_new_collection(cid); ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP))); { bufferlist in; r = store->read(ch, hoid, 0, 5, in); ASSERT_EQ(-ENOENT, r); } { ObjectStore::Transaction t; t.create_collection(cid, 0); cerr << "Creating collection " << cid << std::endl; r = queue_transaction(store, ch, 
std::move(t)); ASSERT_EQ(r, 0); } std::string data; data.resize(buf_len); { { bool exists = store->exists(ch, hoid); ASSERT_TRUE(!exists); ObjectStore::Transaction t; t.touch(cid, hoid); cerr << "Creating object " << hoid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); exists = store->exists(ch, hoid); ASSERT_EQ(true, exists); } bufferlist bl; for(size_t i = 0; i < data.size(); i++) data[i] = i % 256; bl.append(data); { struct store_statfs_t statfs; WRITE_AT(0, buf_len); int r = store->statfs(&statfs); ASSERT_EQ(r, 0); ASSERT_EQ(statfs.data_compressed_allocated, 0x10000); } { struct store_statfs_t statfs; WRITE_AT(write_offset - 2 * overlap_offset, buf_len); int r = store->statfs(&statfs); ASSERT_EQ(r, 0); ASSERT_EQ(statfs.data_compressed_allocated, 0x20000); const PerfCounters* counters = store->get_perf_counters(); ASSERT_EQ(counters->get(l_bluestore_gc_merged), 0u); } { struct store_statfs_t statfs; WRITE_AT(write_offset - overlap_offset, buf_len); int r = store->statfs(&statfs); ASSERT_EQ(r, 0); ASSERT_EQ(statfs.data_compressed_allocated, 0x20000); const PerfCounters* counters = store->get_perf_counters(); ASSERT_EQ(counters->get(l_bluestore_gc_merged), 0x10000u); } { struct store_statfs_t statfs; WRITE_AT(write_offset - 3 * overlap_offset, buf_len); int r = store->statfs(&statfs); ASSERT_EQ(r, 0); ASSERT_EQ(statfs.data_compressed_allocated, 0x20000); const PerfCounters* counters = store->get_perf_counters(); ASSERT_EQ(counters->get(l_bluestore_gc_merged), 0x20000u); } { struct store_statfs_t statfs; WRITE_AT(write_offset + 1, overlap_offset-1); int r = store->statfs(&statfs); ASSERT_EQ(r, 0); ASSERT_EQ(statfs.data_compressed_allocated, 0x20000); const PerfCounters* counters = store->get_perf_counters(); ASSERT_EQ(counters->get(l_bluestore_gc_merged), 0x20000u); } { struct store_statfs_t statfs; WRITE_AT(write_offset + 1, overlap_offset); int r = store->statfs(&statfs); ASSERT_EQ(r, 0); ASSERT_EQ(statfs.data_compressed_allocated, 
0x10000);
    const PerfCounters* counters = store->get_perf_counters();
    ASSERT_EQ(counters->get(l_bluestore_gc_merged), 0x3ffffu);
  }
  {
    struct store_statfs_t statfs;
    WRITE_AT(0, buf_len-1);
    int r = store->statfs(&statfs);
    ASSERT_EQ(r, 0);
    ASSERT_EQ(statfs.data_compressed_allocated, 0x10000);
    const PerfCounters* counters = store->get_perf_counters();
    ASSERT_EQ(counters->get(l_bluestore_gc_merged), 0x40001u);
  }
  // Raising the threshold to 1 forbids GC whenever the expected saving is 0.
  SetVal(g_conf(), "bluestore_gc_enable_total_threshold", "1"); //forbid GC when saving = 0
  {
    struct store_statfs_t statfs;
    WRITE_AT(1, overlap_offset-2);
    WRITE_AT(overlap_offset * 2 + 1, overlap_offset-2);
    int r = store->statfs(&statfs);
    ASSERT_EQ(r, 0);
    ASSERT_EQ(statfs.data_compressed_allocated, 0x10000);
    // GC merge counter must be unchanged: the threshold above disabled GC.
    const PerfCounters* counters = store->get_perf_counters();
    ASSERT_EQ(counters->get(l_bluestore_gc_merged), 0x40001u);
  }
  {
    struct store_statfs_t statfs;
    WRITE_AT(overlap_offset + 1, overlap_offset-2);
    int r = store->statfs(&statfs);
    ASSERT_EQ(r, 0);
    ASSERT_EQ(statfs.data_compressed_allocated, 0x0);
    const PerfCounters* counters = store->get_perf_counters();
    ASSERT_EQ(counters->get(l_bluestore_gc_merged), 0x40007u);
  }
  {
    // Cleanup: drop the test object.
    ObjectStore::Transaction t;
    t.remove(cid, hoid);
    cerr << "Cleaning" << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  }
}

// fsck must succeed on a device whose size is not a multiple of the
// (large, 0x4000) allocation unit.
TEST_P(StoreTestSpecificAUSize, fsckOnUnalignedDevice) {
  if (string(GetParam()) != "bluestore")
    return;

  SetVal(g_conf(), "bluestore_block_size",
    stringify(0x280005000).c_str()); //10 Gb + 20K (0x5000); NB: comment used to say "+ 4K"
  SetVal(g_conf(), "bluestore_fsck_on_mount", "false");
  SetVal(g_conf(), "bluestore_fsck_on_umount", "false");
  StartDeferred(0x4000);
  store->umount();
  ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly
  store->mount();
}

// Same as above but with a 0x1000 allocation unit.
TEST_P(StoreTestSpecificAUSize, fsckOnUnalignedDevice2) {
  if (string(GetParam()) != "bluestore")
    return;

  SetVal(g_conf(), "bluestore_block_size",
    stringify(0x280005000).c_str()); //10 Gb + 20K
  SetVal(g_conf(), "bluestore_fsck_on_mount", "false");
  SetVal(g_conf(), "bluestore_fsck_on_umount",
"false");
  StartDeferred(0x1000);
  store->umount();
  ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly
  store->mount();
}

namespace {
  // Builds a ghobject_t for the given name/pool with the canonical
  // sobject hash, mirroring what higher layers would produce.
  ghobject_t make_object(const char* name, int64_t pool) {
    sobject_t soid{name, CEPH_NOSNAP};
    uint32_t hash = std::hash<sobject_t>{}(soid);
    return ghobject_t{hobject_t{soid, "", hash, pool, ""}};
  }
}

// End-to-end repair scenarios: injects a series of on-disk corruptions
// (leaked/false-free pextents, broken shared blob keys, misreferences,
// zombie spanning blobs, bogus statfs) and verifies fsck detects each
// and repair() brings fsck back to a clean (0 errors) state.
TEST_P(StoreTestSpecificAUSize, BluestoreRepairTest) {
  if (string(GetParam()) != "bluestore")
    return;
  if (smr) {
    cout << "TODO: repair mismatched write pointer (+ dead bytes mismatch)" << std::endl;
    return;
  }

  const size_t offs_base = 65536 / 2;

  // Now we need standalone db to pass "false free fix" section below
  // Due to new BlueFS allocation model (single allocator for main device)
  // it might cause "false free" blob overwrite by BlueFS/DB stuff
  // and hence fail the test case and corrupt data.
  //
  SetVal(g_conf(), "bluestore_block_db_create", "true");
  SetVal(g_conf(), "bluestore_block_db_size", "4294967296");

  SetVal(g_conf(), "bluestore_fsck_on_mount", "false");
  SetVal(g_conf(), "bluestore_fsck_on_umount", "false");
  SetVal(g_conf(), "bluestore_max_blob_size",
    stringify(2 * offs_base).c_str());
  SetVal(g_conf(), "bluestore_extent_map_shard_max_size", "12000");
  StartDeferred(0x10000);

  BlueStore* bstore = dynamic_cast<BlueStore*> (store.get());

  // fill the store with some data
  const uint64_t pool = 555;
  coll_t cid(spg_t(pg_t(0, pool), shard_id_t::NO_SHARD));
  auto ch = store->create_new_collection(cid);

  ghobject_t hoid = make_object("Object 1", pool);
  ghobject_t hoid_dup = make_object("Object 1(dup)", pool);
  ghobject_t hoid2 = make_object("Object 2", pool);
  ghobject_t hoid_cloned = hoid2;
  hoid_cloned.hobj.snap = 1;
  ghobject_t hoid3 = make_object("Object 3", pool);
  ghobject_t hoid3_cloned = hoid3;
  hoid3_cloned.hobj.snap = 1;
  bufferlist bl;
  bl.append("1234512345");
  int r;
  const size_t repeats = 16;
  {
    auto ch = store->create_new_collection(cid);
    cerr << "create collection + write" << std::endl;
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    for( auto i = 0ul; i < repeats; ++i ) {
      t.write(cid, hoid, i * offs_base, bl.length(), bl);
      t.write(cid, hoid_dup, i * offs_base, bl.length(), bl);
    }
    for( auto i = 0ul; i < repeats; ++i ) {
      t.write(cid, hoid2, i * offs_base, bl.length(), bl);
    }
    t.clone(cid, hoid2, hoid_cloned);

    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }

  bstore->umount();
  bool err_was_injected = false;
  //////////// leaked pextent fix ////////////
  cerr << "fix leaked pextents" << std::endl;
  ASSERT_EQ(bstore->fsck(false), 0);
  ASSERT_EQ(bstore->repair(false), 0);
  bstore->mount();
  // NOTE(review): injection is skipped under the null freelist manager;
  // err_was_injected gates the fsck expectations below accordingly.
  if (!bstore->has_null_manager()) {
    bstore->inject_leaked(0x30000);
    err_was_injected = true;
  }

  bstore->umount();
  if (err_was_injected) {
    ASSERT_EQ(bstore->fsck(false), 1);
  }
  ASSERT_EQ(bstore->repair(false), 0);
  ASSERT_EQ(bstore->fsck(false), 0);

  //////////// false free fix ////////////
  cerr << "fix false free pextents" << std::endl;
  bstore->mount();
  if (!bstore->has_null_manager()) {
    bstore->inject_false_free(cid, hoid);
    err_was_injected = true;
  }
  bstore->umount();
  if (err_was_injected) {
    ASSERT_EQ(bstore->fsck(false), 2);
    ASSERT_EQ(bstore->repair(false), 0);
  }
  ASSERT_EQ(bstore->fsck(false), 0);

  ///////// undecodable shared blob key / stray shared blob records ///////
  bstore->mount();
  cerr << "undecodable shared blob key" << std::endl;
  bstore->inject_broken_shared_blob_key("undec1",
			    bufferlist());
  bstore->inject_broken_shared_blob_key("undecodable key 2",
			    bufferlist());
  bstore->inject_broken_shared_blob_key("undecodable key 3",
			    bufferlist());
  bstore->umount();
  ASSERT_EQ(bstore->fsck(false), 3);
  ASSERT_EQ(bstore->repair(false), 0);
  ASSERT_EQ(bstore->fsck(false), 0);

  cerr << "misreferencing" << std::endl;
  bstore->mount();
  bstore->inject_misreference(cid, hoid, cid, hoid_dup, 0);
  bstore->inject_misreference(cid, hoid, cid, hoid_dup, (offs_base * repeats) / 2);
  bstore->inject_misreference(cid, hoid, cid, hoid_dup, offs_base * (repeats -1) );
  int expected_errors =
    bstore->has_null_manager() ?
    3 :
    6;
  bstore->umount();
  ASSERT_EQ(bstore->fsck(false), expected_errors);
  ASSERT_EQ(bstore->repair(false), 0);

  ASSERT_EQ(bstore->fsck(true), 0);

  // reproducing issues #21040 & 20983
  SetVal(g_conf(), "bluestore_debug_inject_bug21040", "true");
  g_ceph_context->_conf.apply_changes(nullptr);
  bstore->mount();

  cerr << "repro bug #21040" << std::endl;
  {
    auto ch = store->open_collection(cid);
    {
      ObjectStore::Transaction t;
      bl.append("0123456789012345");
      t.write(cid, hoid3, offs_base, bl.length(), bl);
      bl.clear();
      bl.append('!');
      t.write(cid, hoid3, 0, bl.length(), bl);

      r = queue_transaction(store, ch, std::move(t));
      ASSERT_EQ(r, 0);
    }
    {
      ObjectStore::Transaction t;
      t.clone(cid, hoid3, hoid3_cloned);

      r = queue_transaction(store, ch, std::move(t));
      ASSERT_EQ(r, 0);
    }

    bstore->umount();
    // depending on statfs tracking we might meet or miss relevant error
    // hence error count >= 3
    ASSERT_GE(bstore->fsck(false), 3);
    ASSERT_LE(bstore->repair(false), 0);
    ASSERT_EQ(bstore->fsck(false), 0);
  }

  cerr << "Zombie spanning blob" << std::endl;
  {
    bstore->mount();
    ghobject_t hoid4 = make_object("Object 4", pool);
    auto ch = store->open_collection(cid);
    {
      bufferlist bl;
      string s(0x1000, 'a');
      bl.append(s);
      ObjectStore::Transaction t;
      for(size_t i = 0; i < 0x10; i++) {
        t.write(cid, hoid4, i * bl.length(), bl.length(), bl);
      }
      r = queue_transaction(store, ch, std::move(t));
      ASSERT_EQ(r, 0);
    }
    // NOTE(review): presumably lets deferred writes/kv work settle before
    // injecting directly into the DB — confirm if a shorter wait suffices.
    sleep(5);
    {
      bstore->inject_zombie_spanning_blob(cid, hoid4, 12345);
      bstore->inject_zombie_spanning_blob(cid, hoid4, 23456);
      bstore->inject_zombie_spanning_blob(cid, hoid4, 23457);
    }

    bstore->umount();
    ASSERT_EQ(bstore->fsck(false), 1);
    ASSERT_LE(bstore->repair(false), 0);
    ASSERT_EQ(bstore->fsck(false), 0);
  }

  //////////// verify invalid statfs ///////////
  cerr << "fix invalid statfs" << std::endl;
  SetVal(g_conf(), "bluestore_fsck_error_on_no_per_pool_stats", "true");
  SetVal(g_conf(),
    "bluestore_debug_inject_allocation_from_file_failure", "1");
  store_statfs_t statfs0;
  store_statfs_t
1024;
    ObjectStore::CollectionHandle ch[col_count];
    ghobject_t hoid[col_count][obj_count];
    unique_ptr<coll_t> cid[col_count];
    // Build col_count collections, each with obj_count objects.
    for (size_t i = 0; i < col_count; i++) {
      cid[i].reset(new coll_t(spg_t(pg_t(0, i), shard_id_t::NO_SHARD)));
      ch[i] = store->create_new_collection(*cid[i]);
      for (size_t j = 0; j < obj_count; j++) {
        hoid[i][j] = make_object(stringify(j).c_str(), i);
      }
    }
    for (size_t i = 0; i < col_count; i++) {
      ObjectStore::Transaction t;
      t.create_collection(*cid[i], 0);
      r = queue_transaction(store, ch[i], std::move(t));
      ASSERT_EQ(r, 0);
    }

    cerr << "onode preparing" << std::endl;
    bufferlist bl;
    string s(0x1000, 'a');
    bl.append(s);

    for (size_t i = 0; i < col_count; i++) {
      for (size_t j = 0; j < obj_count; j++) {
        ObjectStore::Transaction t;
        t.write(*cid[i], hoid[i][j], bl.length(), bl.length(), bl);
        r = queue_transaction(store, ch[i], std::move(t));
        ASSERT_EQ(r, 0);
      }
    }
    cerr << "Zombie spanning blob injection" << std::endl;

    sleep(5);

    // Inject exactly one zombie spanning blob per object; fsck below is
    // expected to report one error per injection.
    for (size_t i = 0; i < col_count; i++) {
      for (size_t j = 0; j < obj_count; j++) {
        bstore->inject_zombie_spanning_blob(*cid[i], hoid[i][j], 12345);
      }
    }

    cerr << "fscking/fixing" << std::endl;
    bstore->umount();
    ASSERT_EQ(bstore->fsck(false), col_count * obj_count);
    ASSERT_LE(bstore->quick_fix(), 0);
    ASSERT_EQ(bstore->fsck(false), 0);
  }

  cerr << "Completing" << std::endl;
  bstore->mount();
}

// Verifies repair of a shared blob whose ref_map references two
// non-overlapping extents belonging to different objects.
TEST_P(StoreTestSpecificAUSize, BluestoreRepairSharedBlobTest) {
  if (string(GetParam()) != "bluestore")
    return;
  if (smr) {
    cout << "TODO: repair mismatched write pointer (+ dead bytes mismatch)"
         << std::endl;
    return;
  }

  SetVal(g_conf(), "bluestore_fsck_on_mount", "false");
  SetVal(g_conf(), "bluestore_fsck_on_umount", "false");

  const size_t block_size = 0x1000;
  StartDeferred(block_size);

  BlueStore* bstore = dynamic_cast<BlueStore*> (store.get());

  // fill the store with some data
  const uint64_t pool = 555;
  coll_t cid(spg_t(pg_t(0, pool), shard_id_t::NO_SHARD));
  auto ch = store->create_new_collection(cid);

  ghobject_t hoid = make_object("Object 1", pool);
  ghobject_t hoid_cloned = hoid;
  hoid_cloned.hobj.snap = 1;
  ghobject_t hoid2 = make_object("Object 2", pool);

  string s(block_size, 1);
  bufferlist bl;
  bl.append(s);
  int r;
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }

  // check the scenario when shared blob contains
  // references to extents from two objects which don't overlap
  // o1 -> 0x2000~1K
  // o2 -> 0x4000~1k
  cerr << "introduce 2 non-overlapped extents in a shared blob"
       << std::endl;
  {
    ObjectStore::Transaction t;
    t.write(cid, hoid, 0, bl.length(), bl);
    t.write(cid, hoid2, 0, bl.length(), bl); // to make a gap in allocations
    t.write(cid, hoid, block_size * 2 , bl.length(), bl);
    t.clone(cid, hoid, hoid_cloned);
    t.zero(cid, hoid, 0, bl.length());
    t.zero(cid, hoid_cloned, block_size * 2, bl.length());
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  bstore->umount();
  bstore->mount();
  {
    // Overwrite shared blob record #1 with a ref_map that double-counts
    // the 0x4000 extent, producing a repairable inconsistency.
    string key;
    _key_encode_u64(1, &key);
    bluestore_shared_blob_t sb(1);
    sb.ref_map.get(0x2000, block_size);
    sb.ref_map.get(0x4000, block_size);
    sb.ref_map.get(0x4000, block_size);
    bufferlist bl;
    encode(sb, bl);
    bstore->inject_broken_shared_blob_key(key, bl);
  }
  bstore->umount();
  ASSERT_EQ(bstore->fsck(false), 2);
  ASSERT_EQ(bstore->repair(false), 0);
  ASSERT_EQ(bstore->fsck(false), 0);

  cerr << "Completing" << std::endl;
  bstore->mount();
}

// Injects a missing shared blob record plus a stray one and verifies
// fsck detection and repair.
TEST_P(StoreTestSpecificAUSize, BluestoreBrokenNoSharedBlobRepairTest) {
  if (string(GetParam()) != "bluestore")
    return;
  if (smr) {
    cout << "SKIP: smr repair is different" << std::endl;
    return;
  }

  SetVal(g_conf(), "bluestore_fsck_on_mount", "false");
  SetVal(g_conf(), "bluestore_fsck_on_umount", "false");

  StartDeferred(0x10000);

  BlueStore* bstore = dynamic_cast<BlueStore*> (store.get());

  int r;

  // initializing
  cerr << "initializing" << std::endl;
  {
    const uint64_t pool = 555;
    coll_t cid(spg_t(pg_t(0, pool), shard_id_t::NO_SHARD));
    auto ch = store->create_new_collection(cid);

    ghobject_t hoid = make_object("Object",
pool);
    ghobject_t hoid_cloned = hoid;
    hoid_cloned.hobj.snap = 1;

    {
      ObjectStore::Transaction t;
      t.create_collection(cid, 0);
      r = queue_transaction(store, ch, std::move(t));
      ASSERT_EQ(r, 0);
    }
    {
      ObjectStore::Transaction t;
      bufferlist bl;
      bl.append("0123456789012345");
      t.write(cid, hoid, 0, bl.length(), bl);

      r = queue_transaction(store, ch, std::move(t));
      ASSERT_EQ(r, 0);
    }
    {
      // Cloning makes the blob shared, which the injections below target.
      ObjectStore::Transaction t;
      t.clone(cid, hoid, hoid_cloned);
      r = queue_transaction(store, ch, std::move(t));
      ASSERT_EQ(r, 0);
    }
  }
  // injecting an error and checking
  cerr << "injecting" << std::endl;
  sleep(3); // need some time for the previous write to land
  bstore->inject_no_shared_blob_key();
  bstore->inject_stray_shared_blob_key(12345678);

  {
    cerr << "fscking/fixing" << std::endl;
    // we need to check for null-manager before umount()
    bool has_null_manager = bstore->has_null_manager();
    bstore->umount();
    // depending on the allocation map's source we can
    // either observe or don't observe an additional
    // extent leak detection. Hence adjusting the expected
    // value
    size_t expected_error_count =
      has_null_manager ?
      4: // 4 sb ref mismatch errors [+ 1 optional statfs, hence ASSERT_GE]
      7; // 4 sb ref mismatch errors + 1 statfs + 1 block leak + 1 non-free
    ASSERT_GE(bstore->fsck(false), expected_error_count);
    // repair might report less errors than fsck above showed
    // as some errors, e.g. statfs mismatch, are implicitly fixed
    // before the detection during the previous repair steps...
    ASSERT_LE(bstore->repair(false), expected_error_count);
    ASSERT_EQ(bstore->fsck(false), 0);
  }

  cerr << "Completing" << std::endl;
  bstore->mount();
}

// Forces legacy (global) statfs tracking, then enables the per-pool
// stats requirement so fsck reports exactly one error, and checks
// repair() converts the store to a clean state.
TEST_P(StoreTest, BluestoreRepairGlobalStats) {
  if (string(GetParam()) != "bluestore")
    return;
  const size_t offs_base = 65536 / 2;

  BlueStore* bstore = dynamic_cast<BlueStore*> (store.get());

  // start with global stats
  bstore->inject_global_statfs({});
  bstore->umount();
  SetVal(g_conf(), "bluestore_fsck_quick_fix_on_mount", "false");
  bstore->mount();

  // fill the store with some data
  const uint64_t pool = 555;
  coll_t cid(spg_t(pg_t(0, pool), shard_id_t::NO_SHARD));
  auto ch = store->create_new_collection(cid);

  ghobject_t hoid = make_object("Object 1", pool);
  ghobject_t hoid_dup = make_object("Object 1(dup)", pool);
  ghobject_t hoid2 = make_object("Object 2", pool);
  ghobject_t hoid_cloned = hoid2;
  hoid_cloned.hobj.snap = 1;
  ghobject_t hoid3 = make_object("Object 3", pool);
  ghobject_t hoid3_cloned = hoid3;
  hoid3_cloned.hobj.snap = 1;
  bufferlist bl;
  bl.append("1234512345");
  int r;
  const size_t repeats = 16;
  {
    auto ch = store->create_new_collection(cid);
    cerr << "create collection + write" << std::endl;
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    for( auto i = 0ul; i < repeats; ++i ) {
      t.write(cid, hoid, i * offs_base, bl.length(), bl);
      t.write(cid, hoid_dup, i * offs_base, bl.length(), bl);
    }
    for( auto i = 0ul; i < repeats; ++i ) {
      t.write(cid, hoid2, i * offs_base, bl.length(), bl);
    }
    t.clone(cid, hoid2, hoid_cloned);

    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }

  bstore->umount();

  // enable per-pool stats collection hence causing fsck to fail
  cerr << "per-pool statfs" << std::endl;
  SetVal(g_conf(), "bluestore_fsck_error_on_no_per_pool_stats", "true");
  g_ceph_context->_conf.apply_changes(nullptr);

  ASSERT_EQ(bstore->fsck(false), 1);
  ASSERT_EQ(bstore->repair(false), 0);
  ASSERT_EQ(bstore->fsck(false), 0);
  bstore->mount();
}

// Same scenario as BluestoreRepairGlobalStats, but the fix is applied
// implicitly by mount() via bluestore_fsck_quick_fix_on_mount.
TEST_P(StoreTest, BluestoreRepairGlobalStatsFixOnMount) {
  if
(string(GetParam()) != "bluestore")
    return;
  const size_t offs_base = 65536 / 2;

  BlueStore* bstore = dynamic_cast<BlueStore*> (store.get());

  // start with global stats
  bstore->inject_global_statfs({});
  bstore->umount();
  SetVal(g_conf(), "bluestore_fsck_quick_fix_on_mount", "false");
  bstore->mount();

  // fill the store with some data
  const uint64_t pool = 555;
  coll_t cid(spg_t(pg_t(0, pool), shard_id_t::NO_SHARD));
  auto ch = store->create_new_collection(cid);

  ghobject_t hoid = make_object("Object 1", pool);
  ghobject_t hoid_dup = make_object("Object 1(dup)", pool);
  ghobject_t hoid2 = make_object("Object 2", pool);
  ghobject_t hoid_cloned = hoid2;
  hoid_cloned.hobj.snap = 1;
  ghobject_t hoid3 = make_object("Object 3", pool);
  ghobject_t hoid3_cloned = hoid3;
  hoid3_cloned.hobj.snap = 1;
  bufferlist bl;
  bl.append("1234512345");
  int r;
  const size_t repeats = 16;
  {
    auto ch = store->create_new_collection(cid);
    cerr << "create collection + write" << std::endl;
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    for( auto i = 0ul; i < repeats; ++i ) {
      t.write(cid, hoid, i * offs_base, bl.length(), bl);
      t.write(cid, hoid_dup, i * offs_base, bl.length(), bl);
    }
    for( auto i = 0ul; i < repeats; ++i ) {
      t.write(cid, hoid2, i * offs_base, bl.length(), bl);
    }
    t.clone(cid, hoid2, hoid_cloned);

    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }

  bstore->umount();

  // enable per-pool stats collection hence causing fsck to fail
  cerr << "per-pool statfs" << std::endl;
  SetVal(g_conf(), "bluestore_fsck_error_on_no_per_pool_stats", "true");
  g_ceph_context->_conf.apply_changes(nullptr);

  ASSERT_EQ(bstore->fsck(false), 1);

  // let mount() itself perform the quick-fix this time
  SetVal(g_conf(), "bluestore_fsck_quick_fix_on_mount", "true");
  bstore->mount();
  bstore->umount();
  ASSERT_EQ(bstore->fsck(false), 0);
  bstore->mount();
}

// Smoke-tests RocksDB statistics collection: enables the rocksdb_*
// perf/stats knobs, disables the BlueStore cache, does one write/read
// round-trip, and dumps get_db_statistics() without throwing.
TEST_P(StoreTest, BluestoreStatistics) {
  if (string(GetParam()) != "bluestore")
    return;

  SetVal(g_conf(), "rocksdb_perf", "true");
  SetVal(g_conf(), "rocksdb_collect_compaction_stats", "true");
  SetVal(g_conf(), "rocksdb_collect_extended_stats","true");
  SetVal(g_conf(), "rocksdb_collect_memory_stats","true");

  // disable cache
  SetVal(g_conf(), "bluestore_cache_size_ssd", "0");
  SetVal(g_conf(), "bluestore_cache_size_hdd", "0");
  SetVal(g_conf(), "bluestore_cache_size", "0");
  g_ceph_context->_conf.apply_changes(nullptr);

  // remount so the config changes above take effect
  int r = store->umount();
  ASSERT_EQ(r, 0);
  r = store->mount();
  ASSERT_EQ(r, 0);

  BlueStore* bstore = NULL;
  EXPECT_NO_THROW(bstore = dynamic_cast<BlueStore*> (store.get()));

  coll_t cid;
  ghobject_t hoid(hobject_t("test_db_statistics", "", CEPH_NOSNAP, 0, 0, ""));
  auto ch = bstore->create_new_collection(cid);
  bufferlist bl;
  bl.append("0123456789abcdefghi");
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    t.touch(cid, hoid);
    t.write(cid, hoid, 0, bl.length(), bl);
    cerr << "Write object" << std::endl;
    r = queue_transaction(bstore, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  {
    bufferlist readback;
    r = store->read(ch, hoid, 0, bl.length(), readback);
    ASSERT_EQ(static_cast<int>(bl.length()), r);
    ASSERT_TRUE(bl_eq(bl, readback));
  }
  std::unique_ptr<Formatter> f(Formatter::create("store_test", "json-pretty", "json-pretty"));
  EXPECT_NO_THROW(store->get_db_statistics(f.get()));
  f->flush(cout);
  cout << std::endl;
}

// Injects an omap record that belongs to no onode and expects fsck to
// flag it as exactly one error.
TEST_P(StoreTest, BluestoreStrayOmapDetection) {
  if (string(GetParam()) != "bluestore")
    return;

  BlueStore* bstore = dynamic_cast<BlueStore*> (store.get());
  const uint64_t pool = 555;
  coll_t cid(spg_t(pg_t(0, pool), shard_id_t::NO_SHARD));
  ghobject_t oid = make_object("Object 1", pool);
  ghobject_t oid2 = make_object("Object 2", pool);
  // fill the store with some data
  auto ch = store->create_new_collection(cid);
  bufferlist h;
  h.append("header");
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    t.touch(cid, oid);
    t.omap_setheader(cid, oid, h);
    t.touch(cid, oid2);
    t.omap_setheader(cid, oid2, h);
    int r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }

  // inject stray omap
  bstore->inject_stray_omap(123456, "somename");

  bstore->umount();
  // check we detect injected stray omap..
  ASSERT_EQ(bstore->fsck(false), 1);
  SetVal(g_conf(), "bluestore_fsck_on_mount", "false");
  bstore->mount();
}

// Verifies the legacy->per-pool omap quick-fix on mount, including the
// regression from https://tracker.ceph.com/issues/43824.
TEST_P(StoreTest, BluestorePerPoolOmapFixOnMount) {
  if (string(GetParam()) != "bluestore")
    return;

  BlueStore* bstore = dynamic_cast<BlueStore*> (store.get());
  const uint64_t pool = 555;
  coll_t cid(spg_t(pg_t(0, pool), shard_id_t::NO_SHARD));
  ghobject_t oid = make_object("Object 1", pool);
  ghobject_t oid2 = make_object("Object 2", pool);
  // fill the store with some data
  auto ch = store->create_new_collection(cid);
  map<string, bufferlist> omap;
  bufferlist h;
  h.append("header");
  {
    omap["omap_key"].append("omap value");
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    t.touch(cid, oid);
    t.omap_setheader(cid, oid, h);
    t.touch(cid, oid2);
    t.omap_setheader(cid, oid2, h);
    int r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }

  // inject legacy omaps
  bstore->inject_legacy_omap();
  bstore->inject_legacy_omap(cid, oid);
  bstore->inject_legacy_omap(cid, oid2);

  bstore->umount();

  // check we injected an issue
  SetVal(g_conf(), "bluestore_fsck_quick_fix_on_mount", "false");
  SetVal(g_conf(), "bluestore_fsck_error_on_no_per_pool_omap", "true");
  g_ceph_context->_conf.apply_changes(nullptr);
  ASSERT_EQ(bstore->fsck(false), 3);

  // set autofix and mount
  SetVal(g_conf(), "bluestore_fsck_quick_fix_on_mount", "true");
  g_ceph_context->_conf.apply_changes(nullptr);
  bstore->mount();
  bstore->umount();

  // check we fixed it..
  ASSERT_EQ(bstore->fsck(false), 0);
  bstore->mount();

  //
  // Now repro https://tracker.ceph.com/issues/43824
  //
  // inject legacy omaps again
  bstore->inject_legacy_omap();
  bstore->inject_legacy_omap(cid, oid);
  bstore->inject_legacy_omap(cid, oid2);
  bstore->umount();
  // check we injected an issue
  SetVal(g_conf(), "bluestore_fsck_quick_fix_on_mount", "true");
  SetVal(g_conf(), "bluestore_fsck_error_on_no_per_pool_omap", "true");
  g_ceph_context->_conf.apply_changes(nullptr);
  bstore->mount();
  ch = store->open_collection(cid);

  {
    // write to onode which will partially revert per-pool
    // omap repair done on mount due to #43824.
    // And object removal will leave stray per-pool omap recs
    //
    ObjectStore::Transaction t;
    bufferlist bl;
    bl.append("data");
    //this triggers onode rec update and hence legacy omap
    t.write(cid, oid, 0, bl.length(), bl);
    t.remove(cid, oid2); // this will trigger stray per-pool omap
    int r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  bstore->umount();
  // check omap's been fixed.
  ASSERT_EQ(bstore->fsck(false), 0); // this will fail without fix for #43824

  bstore->mount();
}

class hugepaged_raw;

// True when the bufferptr's underlying raw buffer came from the
// huge-page preallocated pool (see BlockDevice::hugepaged_raw_marker_t).
static bool is_hugepaged(const bufferptr& bp)
{
  const auto& ibp =
    static_cast<const ceph::buffer_instrumentation::instrumented_bptr&>(bp);
  return ibp.is_raw_marked<BlockDevice::hugepaged_raw_marker_t>();
}

// disabled by default b/c of the dependency on huge pages some test
// environments might not offer without extra configuration.
// Exercises the preallocated huge-page read buffer pool: the pool holds
// two 2M buffers, so the first two reads come back huge-paged and the
// third falls back to a plain allocation.
TEST_P(StoreTestDeferredSetup, DISABLED_BluestoreHugeReads) {
  if (string(GetParam()) != "bluestore") {
    return;
  }

  constexpr static size_t HUGE_BUFFER_SIZE{2_M};
  cout << "Configuring huge page pools" << std::endl;
  {
    SetVal(g_conf(), "bdev_read_preallocated_huge_buffers",
           fmt::format("{}=2", HUGE_BUFFER_SIZE).c_str());
    SetVal(g_conf(), "bluestore_max_blob_size",
           std::to_string(HUGE_BUFFER_SIZE).c_str());
    // let's verify the per-IOContext no-cache override
    SetVal(g_conf(), "bluestore_default_buffered_read", "true");
    g_ceph_context->_conf.apply_changes(nullptr);
  }
  DeferredSetup();

  coll_t cid;
  ghobject_t hoid(hobject_t("test_huge_buffers", "", CEPH_NOSNAP, 0, 0, ""));
  auto ch = store->create_new_collection(cid);

  bufferlist bl;
  {
    bufferptr bp{HUGE_BUFFER_SIZE};
    // non-zeros! Otherwise the deduplication will take place.
    ::memset(bp.c_str(), 0x42, HUGE_BUFFER_SIZE);
    bl.push_back(std::move(bp));
    ASSERT_EQ(bl.get_num_buffers(), 1);
    ASSERT_EQ(bl.length(), HUGE_BUFFER_SIZE);
  }

  cout << "Write object" << std::endl;
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    t.touch(cid, hoid);
    t.write(cid, hoid, 0, bl.length(), bl);
    const auto r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }

  // force cache clear
  {
    EXPECT_EQ(store->umount(), 0);
    EXPECT_EQ(store->mount(), 0);
    ch = store->open_collection(cid);
  }

  // we want to extend the life-time of all huge paged-backed
  // bufferlists to validate the behaviour on pool exhaustion.
  bufferlist bl_1_huge, bl_2_huge, bl_3_plain;

  cout << "Read object 1st time" << std::endl;
  {
    const auto r = store->read(ch, hoid, 0, HUGE_BUFFER_SIZE, bl_1_huge);
    ASSERT_EQ(static_cast<int>(HUGE_BUFFER_SIZE), r);
    ASSERT_TRUE(bl_eq(bl, bl_1_huge));
    ASSERT_EQ(bl_1_huge.get_num_buffers(), 1);
    ASSERT_TRUE(is_hugepaged(bl_1_huge.front()));
  }

  cout << "Read object 2nd time" << std::endl;
  {
    const auto r = store->read(ch, hoid, 0, HUGE_BUFFER_SIZE, bl_2_huge);
    ASSERT_EQ(static_cast<int>(HUGE_BUFFER_SIZE), r);
    ASSERT_TRUE(bl_eq(bl, bl_2_huge));
    ASSERT_EQ(bl_2_huge.get_num_buffers(), 1);
    ASSERT_TRUE(is_hugepaged(bl_2_huge.front()));
  }

  cout << "Read object 3rd time" << std::endl;
  {
    // pool exhausted (both huge buffers still held above): plain buffer
    const auto r = store->read(ch, hoid, 0, HUGE_BUFFER_SIZE, bl_3_plain);
    ASSERT_EQ(static_cast<int>(HUGE_BUFFER_SIZE), r);
    ASSERT_TRUE(bl_eq(bl, bl_3_plain));
    ASSERT_EQ(bl_3_plain.get_num_buffers(), 1);
    ASSERT_FALSE(is_hugepaged(bl_3_plain.front()));
  }
}

// Injects checksum errors and checks that reads fail with EIO when
// retries are disabled and eventually succeed when retries are enabled.
TEST_P(StoreTest, SpuriousReadErrorTest) {
  if (string(GetParam()) != "bluestore")
    return;

  int r;
  auto logger = store->get_perf_counters();
  coll_t cid;
  auto ch = store->create_new_collection(cid);
  ghobject_t hoid(hobject_t(sobject_t("foo", CEPH_NOSNAP)));
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    cerr << "Creating collection " << cid << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  bufferlist test_data;
  bufferptr ap(0x2000);
  memset(ap.c_str(), 'a', 0x2000);
  test_data.append(ap);
  {
    ObjectStore::Transaction t;
    t.write(cid, hoid, 0, 0x2000, test_data);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
    // force cache clear
    EXPECT_EQ(store->umount(), 0);
    EXPECT_EQ(store->mount(), 0);
  }
  ch = store->open_collection(cid);

  cerr << "Injecting CRC error with no retry, expecting EIO" << std::endl;
  SetVal(g_conf(), "bluestore_retry_disk_reads", "0");
  SetVal(g_conf(), "bluestore_debug_inject_csum_err_probability", "1");
  g_ceph_context->_conf.apply_changes(nullptr);
  {
    bufferlist in;
    r = store->read(ch,
hoid, 0, 0x2000, in, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
    ASSERT_EQ(-EIO, r);
    ASSERT_EQ(logger->get(l_bluestore_read_eio), 1u);
    ASSERT_EQ(logger->get(l_bluestore_reads_with_retries), 0u);
  }

  cerr << "Injecting CRC error with retries, expecting success after several retries" << std::endl;
  SetVal(g_conf(), "bluestore_retry_disk_reads", "255");
  SetVal(g_conf(), "bluestore_debug_inject_csum_err_probability", "0.8");
  /**
   * Probabilistic test: 25 reads, each has a 80% chance of failing with 255 retries
   * Probability of at least one retried read: 1 - (0.2 ** 25) = 100% - 3e-18
   * Probability of a random test failure: 1 - ((1 - (0.8 ** 255)) ** 25) ~= 5e-24
   */
  g_ceph_context->_conf.apply_changes(nullptr);
  {
    for (int i = 0; i < 25; ++i) {
      bufferlist in;
      r = store->read(ch, hoid, 0, 0x2000, in, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
      ASSERT_EQ(0x2000, r);
      ASSERT_TRUE(bl_eq(test_data, in));
    }
    ASSERT_GE(logger->get(l_bluestore_reads_with_retries), 1u);
  }
}

// Drives the deferred-IO region merging logic with adjacent, front-merge,
// back-merge and overlapping writes; bdev_debug_inflight_ios verifies the
// merged IOs stay consistent.
TEST_P(StoreTest, mergeRegionTest) {
  if (string(GetParam()) != "bluestore")
    return;

  SetVal(g_conf(), "bluestore_fsck_on_mount", "true");
  SetVal(g_conf(), "bluestore_fsck_on_umount", "true");
  SetVal(g_conf(), "bdev_debug_inflight_ios", "true");
  g_ceph_context->_conf.apply_changes(nullptr);

  uint32_t chunk_size = g_ceph_context->_conf->bdev_block_size;
  int r = -1;
  coll_t cid;
  ghobject_t hoid(hobject_t(sobject_t("Object", CEPH_NOSNAP)));
  auto ch = store->create_new_collection(cid);
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  {
    ObjectStore::Transaction t;
    t.touch(cid, hoid);
    cerr << "Creating object " << hoid << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  bufferlist bl5;
  bl5.append("abcde");
  uint64_t offset = 0;
  { // 1. same region
    ObjectStore::Transaction t;
    t.write(cid, hoid, offset, 5, bl5);
    t.write(cid, hoid, 0xa + offset, 5, bl5);
    t.write(cid, hoid, 0x14 + offset, 5, bl5);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  { // 2. adjacent regions
    ObjectStore::Transaction t;
    offset = chunk_size;
    t.write(cid, hoid, offset, 5, bl5);
    t.write(cid, hoid, offset + chunk_size + 3, 5, bl5);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  { // 3. front merge
    ObjectStore::Transaction t;
    offset = chunk_size * 2;
    t.write(cid, hoid, offset, 5, bl5);
    t.write(cid, hoid, offset + chunk_size - 2, 5, bl5);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  { // 4. back merge
    ObjectStore::Transaction t;
    bufferlist blc2;
    blc2.append_zero(chunk_size + 2);
    offset = chunk_size * 3;
    t.write(cid, hoid, offset, chunk_size + 2, blc2);
    t.write(cid, hoid, offset + chunk_size + 3, 5, bl5);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  { // 5. overlapping
    ObjectStore::Transaction t;
    uint64_t final_len = 0;
    offset = chunk_size * 10;
    bufferlist bl2c2;
    bl2c2.append_zero(chunk_size * 2);
    t.write(cid, hoid, offset + chunk_size * 3 - 3, chunk_size * 2, bl2c2);
    bl2c2.append_zero(2);
    t.write(cid, hoid, offset + chunk_size - 2, chunk_size * 2 + 2, bl2c2);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);

    final_len = (offset + chunk_size * 3 - 3) + (chunk_size * 2);
    bufferlist bl;
    r = store->read(ch, hoid, 0, final_len, bl);
    ASSERT_EQ(final_len, static_cast<uint64_t>(r));
  }
}

// SMR only: writes past the recorded write pointer directly through the
// block device, then remounts so BlueStore must reconcile the pointer.
TEST_P(StoreTest, FixSMRWritePointer) {
  if(string(GetParam()) != "bluestore")
    return;
  if (!smr)
    return;
  int r = store->umount();
  ASSERT_EQ(0, r);

  // copied from StoreTestFixture
  std::string path = GetParam() + ".test_temp_dir"s;

  std::string p = path + "/block";
  BlockDevice* bdev = BlockDevice::create(g_ceph_context, p, nullptr, nullptr, nullptr, nullptr);
  r = bdev->open(p);
  ASSERT_EQ(0, r);
  ASSERT_EQ(true, bdev->is_smr());

  std::vector<uint64_t> wp =
bdev->get_zones();
  uint64_t first_seq_zone = bdev->get_conventional_region_size() / bdev->get_zone_size();

  IOContext ioc(g_ceph_context, NULL, true);
  bufferlist bl;
  bl.append(std::string(1024 * 1024, 'x'));
  // write behind BlueStore's back so the on-disk write pointer advances
  // without BlueStore knowing
  r = bdev->aio_write(wp[first_seq_zone], bl, &ioc, false);
  ASSERT_EQ(0, r);
  bdev->aio_submit(&ioc);
  ioc.aio_wait();
  bdev->close();
  delete bdev;

  // mount() must now reconcile the mismatched write pointer
  r = store->mount();
  ASSERT_EQ(0, r);
}

// With settings forced to the HDD profile, a max_blob_size_hdd write
// must produce exactly one big blob.
TEST_P(StoreTestSpecificAUSize, BluestoreEnforceHWSettingsHdd) {
  if (string(GetParam()) != "bluestore")
    return;

  SetVal(g_conf(), "bluestore_debug_enforce_settings", "hdd");
  StartDeferred(0x1000);

  int r;
  coll_t cid;
  ghobject_t hoid(hobject_t(sobject_t("Object", CEPH_NOSNAP)));
  auto ch = store->create_new_collection(cid);
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    cerr << "Creating collection " << cid << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  {
    ObjectStore::Transaction t;
    bufferlist bl, orig;
    string s(g_ceph_context->_conf->bluestore_max_blob_size_hdd, '0');
    bl.append(s);
    t.write(cid, hoid, 0, bl.length(), bl);
    cerr << "write" << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);

    const PerfCounters* logger = store->get_perf_counters();
    ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 1u);
  }
}

// Same as above with the SSD profile: 8x the SSD max blob size must be
// split into 8 big blobs.
TEST_P(StoreTestSpecificAUSize, BluestoreEnforceHWSettingsSsd) {
  if (string(GetParam()) != "bluestore")
    return;

  SetVal(g_conf(), "bluestore_debug_enforce_settings", "ssd");
  StartDeferred(0x1000);

  int r;
  coll_t cid;
  ghobject_t hoid(hobject_t(sobject_t("Object", CEPH_NOSNAP)));
  auto ch = store->create_new_collection(cid);
  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    cerr << "Creating collection " << cid << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  {
    ObjectStore::Transaction t;
    bufferlist bl, orig;
    string s(g_ceph_context->_conf->bluestore_max_blob_size_ssd * 8, '0');
    bl.append(s);
    t.write(cid, hoid, 0, bl.length(), bl);
    cerr << "write" << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);

    const PerfCounters* logger = store->get_perf_counters();
    ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 8u);
  }
}

// Regression test: repeated sparse overwrites + clones must trigger
// garbage collection (l_bluestore_gc_merged) rather than accumulating
// tiny blobs.
TEST_P(StoreTestSpecificAUSize, ReproNoBlobMultiTest) {
  if(string(GetParam()) != "bluestore")
    return;
  if (smr) {
    cout << "SKIP (FIXME): bluestore gc does not seem to do the trick here" << std::endl;
    return;
  }

  SetVal(g_conf(), "bluestore_block_db_create", "true");
  SetVal(g_conf(), "bluestore_block_db_size", "4294967296");
  SetVal(g_conf(), "bluestore_block_size", "12884901888");
  SetVal(g_conf(), "bluestore_max_blob_size", "524288");

  g_conf().apply_changes(nullptr);

  StartDeferred(65536);

  int r;
  coll_t cid;
  ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
  ghobject_t hoid2 = hoid;
  hoid2.hobj.snap = 1;

  auto ch = store->create_new_collection(cid);

  {
    ObjectStore::Transaction t;
    t.create_collection(cid, 0);
    cerr << "Creating collection " << cid << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
  {
    bool exists = store->exists(ch, hoid);
    ASSERT_TRUE(!exists);

    ObjectStore::Transaction t;
    t.touch(cid, hoid);
    cerr << "Creating object " << hoid << std::endl;
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);

    exists = store->exists(ch, hoid);
    ASSERT_EQ(true, exists);
  }
  {
    uint64_t offs = 0;
    bufferlist bl;
    const int size = 0x100;
    bufferptr ap(size);
    memset(ap.c_str(), 'a', size);
    bl.append(ap);
    int i = 0;
    uint64_t  blob_size = 524288;
    uint64_t total = 0;
    for (i = 0; i <= 512; i++) {
      offs = 0 + i * size;
      ObjectStore::Transaction t;
      ghobject_t hoid2 = hoid;
      hoid2.hobj.snap = i + 1;
      // sparse writes, one small chunk per max-size blob
      while (offs < 128 * 1024 * 1024) {
        t.write(cid, hoid, offs, ap.length(), bl);
        offs += blob_size;
        total += ap.length();
      }
      t.clone(cid, hoid, hoid2);
      r = queue_transaction(store, ch, std::move(t));
      ASSERT_EQ(r, 0);
    }
    cerr << "Total written = " << total << std::endl;
  }
  {
    cerr << "Finalizing" << std::endl;
    const PerfCounters* logger = store->get_perf_counters();
ASSERT_GE(logger->get(l_bluestore_gc_merged), 1024*1024*1024); } } void doManySetAttr(ObjectStore* store, std::function<void(ObjectStore*)> do_check_fn) { MixedGenerator gen(447); gen_type rng(time(NULL)); coll_t cid(spg_t(pg_t(0, 447), shard_id_t::NO_SHARD)); SyntheticWorkloadState test_obj(store, &gen, &rng, cid, 0, 0, 0); test_obj.init(); size_t object_count = 256; for (size_t i = 0; i < object_count; ++i) { if (!(i % 10)) cerr << "seeding object " << i << std::endl; test_obj.touch(); } for (size_t i = 0; i < object_count; ++i) { if (!(i % 100)) { cerr << "Op " << i << std::endl; test_obj.print_internal_state(); } test_obj.set_fixed_attrs(1024, 64, 4096); // 1024 attributes, 64 bytes name and 4K value } test_obj.wait_for_done(); std::cout << "done" << std::endl; do_check_fn(store); AdminSocket* admin_socket = g_ceph_context->get_admin_socket(); ceph_assert(admin_socket); ceph::bufferlist in, out; ostringstream err; auto r = admin_socket->execute_command( { "{\"prefix\": \"bluefs stats\"}" }, in, err, &out); if (r != 0) { cerr << "failure querying: " << cpp_strerror(r) << std::endl; } else { std::cout << std::string(out.c_str(), out.length()) << std::endl; } test_obj.shutdown(); } TEST_P(StoreTestSpecificAUSize, SpilloverTest) { if (string(GetParam()) != "bluestore") return; if (smr) { cout << "SKIP: (FIXME?) adjust me for smr at some point?" << std::endl; return; } SetVal(g_conf(), "bluestore_block_db_create", "true"); SetVal(g_conf(), "bluestore_block_db_size", "3221225472"); SetVal(g_conf(), "bluestore_volume_selection_policy", "rocksdb_original"); // original RocksDB settings used before https://github.com/ceph/ceph/pull/47221/ // which enable BlueFS spillover. 
SetVal(g_conf(), "bluestore_rocksdb_options", "compression=kNoCompression,max_write_buffer_number=4," "min_write_buffer_number_to_merge=1,recycle_log_file_num=4," "write_buffer_size=268435456,writable_file_max_buffer_size=0," "compaction_readahead_size=2097152,max_background_compactions=2," "max_total_wal_size=1073741824"); g_conf().apply_changes(nullptr); StartDeferred(65536); doManySetAttr(store.get(), [&](ObjectStore* _store) { BlueStore* bstore = dynamic_cast<BlueStore*> (_store); ceph_assert(bstore); bstore->compact(); const PerfCounters* logger = bstore->get_bluefs_perf_counters(); //experimentally it was discovered that this case results in 400+MB spillover //using lower 300MB threshold just to be safe enough std::cout << "DB used:" << logger->get(l_bluefs_db_used_bytes) << std::endl; std::cout << "SLOW used:" << logger->get(l_bluefs_slow_used_bytes) << std::endl; ASSERT_GE(logger->get(l_bluefs_slow_used_bytes), 16 * 1024 * 1024); struct store_statfs_t statfs; osd_alert_list_t alerts; int r = store->statfs(&statfs, &alerts); ASSERT_EQ(r, 0); ASSERT_EQ(alerts.count("BLUEFS_SPILLOVER"), 1); std::cout << "spillover_alert:" << alerts.find("BLUEFS_SPILLOVER")->second << std::endl; } ); } TEST_P(StoreTestSpecificAUSize, SpilloverFixedTest) { if (string(GetParam()) != "bluestore") return; if (smr) { cout << "SKIP: (FIXME?) adjust me for smr at some point?" 
<< std::endl; return; } SetVal(g_conf(), "bluestore_block_db_create", "true"); SetVal(g_conf(), "bluestore_block_db_size", "3221225472"); SetVal(g_conf(), "bluestore_volume_selection_policy", "use_some_extra"); SetVal(g_conf(), "bluestore_volume_selection_reserved", "1"); // just use non-zero to enable g_conf().apply_changes(nullptr); StartDeferred(65536); doManySetAttr(store.get(), [&](ObjectStore* _store) { BlueStore* bstore = dynamic_cast<BlueStore*> (_store); ceph_assert(bstore); bstore->compact(); const PerfCounters* logger = bstore->get_bluefs_perf_counters(); ASSERT_EQ(0, logger->get(l_bluefs_slow_used_bytes)); } ); } TEST_P(StoreTestSpecificAUSize, SpilloverFixed2Test) { if (string(GetParam()) != "bluestore") return; if (smr) { cout << "SKIP: (FIXME?) adjust me for smr at some point?" << std::endl; return; } SetVal(g_conf(), "bluestore_block_db_create", "true"); SetVal(g_conf(), "bluestore_block_db_size", "3221225472"); SetVal(g_conf(), "bluestore_volume_selection_policy", "use_some_extra"); //default 2.0 factor results in too high threshold, using less value // that results in less but still present spillover. SetVal(g_conf(), "bluestore_volume_selection_reserved_factor", "0.5"); g_conf().apply_changes(nullptr); StartDeferred(65536); doManySetAttr(store.get(), [&](ObjectStore* _store) { BlueStore* bstore = dynamic_cast<BlueStore*> (_store); ceph_assert(bstore); bstore->compact(); const PerfCounters* logger = bstore->get_bluefs_perf_counters(); ASSERT_LE(logger->get(l_bluefs_slow_used_bytes), 300 * 1024 * 1024); // see SpilloverTest for 300MB choice rationale } ); } TEST_P(StoreTestSpecificAUSize, SpilloverFixed3Test) { if (string(GetParam()) != "bluestore") return; if (smr) { cout << "SKIP: (FIXME?) adjust me for smr at some point?" 
<< std::endl; return; } SetVal(g_conf(), "bluestore_block_db_create", "true"); SetVal(g_conf(), "bluestore_block_db_size", "3221225472"); SetVal(g_conf(), "bluestore_volume_selection_policy", "fit_to_fast"); g_conf().apply_changes(nullptr); StartDeferred(65536); doManySetAttr(store.get(), [&](ObjectStore* _store) { BlueStore* bstore = dynamic_cast<BlueStore*> (_store); ceph_assert(bstore); bstore->compact(); const PerfCounters* logger = bstore->get_bluefs_perf_counters(); ASSERT_EQ(logger->get(l_bluefs_slow_used_bytes), 0); // reffering to SpilloverFixedTest } ); } TEST_P(StoreTestSpecificAUSize, Ticket45195Repro) { if (string(GetParam()) != "bluestore") return; if (smr) { return; } SetVal(g_conf(), "bluestore_default_buffered_write", "true"); SetVal(g_conf(), "bluestore_max_blob_size", "65536"); SetVal(g_conf(), "bluestore_debug_enforce_settings", "hdd"); SetVal(g_conf(), "bluestore_fsck_on_mount", "false"); g_conf().apply_changes(nullptr); StartDeferred(0x1000); int r; coll_t cid; ghobject_t hoid(hobject_t(sobject_t("Object", CEPH_NOSNAP))); auto ch = store->create_new_collection(cid); { ObjectStore::Transaction t; t.create_collection(cid, 0); cerr << "Creating collection " << cid << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { size_t large_object_size = 1 * 1024 * 1024; size_t expected_write_size = 0x8000; ObjectStore::Transaction t; t.touch(cid, hoid); t.set_alloc_hint(cid, hoid, large_object_size, expected_write_size, CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_READ | CEPH_OSD_ALLOC_HINT_FLAG_APPEND_ONLY); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; bufferlist bl, orig; string s(0xc000, '0'); bl.append(s); t.write(cid, hoid, 0xb000, bl.length(), bl); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; bufferlist bl, orig; string s(0x10000, '1'); bl.append(s); t.write(cid, hoid, 0x16000, bl.length(), bl); r = queue_transaction(store, ch, 
std::move(t)); ASSERT_EQ(r, 0); } { ObjectStore::Transaction t; bufferlist bl, orig; string s(0x4000, '1'); bl.append(s); t.write(cid, hoid, 0x1b000, bl.length(), bl); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } bufferlist bl; r = store->read(ch, hoid, 0xb000, 0xb000, bl); ASSERT_EQ(r, 0xb000); store->umount(); store->mount(); ch = store->open_collection(cid); { ObjectStore::Transaction t; bufferlist bl, orig; string s(0xf000, '3'); bl.append(s); t.write(cid, hoid, 0xf000, bl.length(), bl); cerr << "write4" << std::endl; r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } r = store->read(ch, hoid, 0xb000, 0x10000, bl); ASSERT_EQ(r, 0x10000); } TEST_P(StoreTestOmapUpgrade, WithOmapHeader) { if (string(GetParam()) != "bluestore") return; SetVal(g_conf(), "bluestore_debug_legacy_omap", "true"); g_conf().apply_changes(nullptr); StartDeferred(); int64_t poolid = 11; coll_t cid(spg_t(pg_t(1, poolid), shard_id_t::NO_SHARD)); ghobject_t hoid(hobject_t("tesomap", "", CEPH_NOSNAP, 0, poolid, "")); auto ch = store->create_new_collection(cid); int r; { ObjectStore::Transaction t; t.create_collection(cid, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } map<string, bufferlist> attrs; bufferlist expected_header; expected_header.append("this is a header"); { ObjectStore::Transaction t; t.touch(cid, hoid); bufferlist header; header.append(expected_header); t.omap_setheader(cid, hoid, header); map<string, bufferlist> start_set; bufferlist bl; bl.append(string("value")); start_set.emplace(string("key1"), bl); t.omap_setkeys(cid, hoid, start_set); r = queue_transaction(store, ch, std::move(t)); } { map<string,bufferlist> res; bufferlist h; r = store->omap_get(ch, hoid, &h, &res); ASSERT_EQ(r, 0); ASSERT_TRUE(bl_eq(h, expected_header)); ASSERT_EQ(res.size(), 1); ASSERT_EQ(res.begin()->first, "key1"); } store->umount(); ASSERT_EQ(store->fsck(false), 0); SetVal(g_conf(), "bluestore_debug_legacy_omap", "false"); SetVal(g_conf(), 
"bluestore_fsck_error_on_no_per_pool_omap", "true"); g_conf().apply_changes(nullptr); ASSERT_EQ(store->fsck(false), 2); ASSERT_EQ(store->quick_fix(), 0); store->mount(); ch = store->open_collection(cid); { map<string,bufferlist> res; bufferlist h; r = store->omap_get(ch, hoid, &h, &res); ASSERT_EQ(r, 0); ASSERT_EQ(res.size(), 1); ASSERT_EQ(res.begin()->first, "key1"); } { ObjectStore::Transaction t; t.remove(cid, hoid); t.remove_collection(cid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTestSpecificAUSize, BluefsWriteInSingleDiskEnvTest) { if (string(GetParam()) != "bluestore") return; g_conf().apply_changes(nullptr); StartDeferred(0x1000); BlueStore* bstore = dynamic_cast<BlueStore*> (store.get()); ceph_assert(bstore); bstore->inject_bluefs_file("db.slow", "store_test_injection_slow", 1 << 20ul); bstore->inject_bluefs_file("db.wal", "store_test_injection_wal", 1 << 20ul); bstore->inject_bluefs_file("db", "store_test_injection_wal", 1 << 20ul); AdminSocket* admin_socket = g_ceph_context->get_admin_socket(); ceph_assert(admin_socket); ceph::bufferlist in, out; ostringstream err; auto r = admin_socket->execute_command( { "{\"prefix\": \"bluefs stats\"}" }, in, err, &out); if (r != 0) { cerr << "failure querying: " << cpp_strerror(r) << std::endl; } else { std::cout << std::string(out.c_str(), out.length()) << std::endl; } } TEST_P(StoreTestSpecificAUSize, BluefsWriteInNoWalDiskEnvTest) { if (string(GetParam()) != "bluestore") return; SetVal(g_conf(), "bluestore_block_db_path", "db"); SetVal(g_conf(), "bluestore_block_db_size", stringify(1ull << 31).c_str()); SetVal(g_conf(), "bluestore_block_db_create", "true"); g_conf().apply_changes(nullptr); StartDeferred(0x1000); BlueStore* bstore = dynamic_cast<BlueStore*> (store.get()); ceph_assert(bstore); bstore->inject_bluefs_file("db.slow", "store_test_injection_slow", 1 << 20ul); bstore->inject_bluefs_file("db.wal", "store_test_injection_wal", 1 << 20ul); 
bstore->inject_bluefs_file("db", "store_test_injection_wal", 1 << 20ul); AdminSocket* admin_socket = g_ceph_context->get_admin_socket(); ceph_assert(admin_socket); ceph::bufferlist in, out; ostringstream err; auto r = admin_socket->execute_command( { "{\"prefix\": \"bluefs stats\"}" }, in, err, &out); if (r != 0) { cerr << "failure querying: " << cpp_strerror(r) << std::endl; } else { std::cout << std::string(out.c_str(), out.length()) << std::endl; } } TEST_P(StoreTestOmapUpgrade, NoOmapHeader) { if (string(GetParam()) != "bluestore") return; SetVal(g_conf(), "bluestore_debug_legacy_omap", "true"); g_conf().apply_changes(nullptr); StartDeferred(); int64_t poolid = 11; coll_t cid(spg_t(pg_t(1, poolid), shard_id_t::NO_SHARD)); ghobject_t hoid(hobject_t("tesomap", "", CEPH_NOSNAP, 0, poolid, "")); auto ch = store->create_new_collection(cid); int r; { ObjectStore::Transaction t; t.create_collection(cid, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } map<string, bufferlist> attrs; { ObjectStore::Transaction t; t.touch(cid, hoid); map<string, bufferlist> start_set; bufferlist bl; bl.append(string("value")); start_set.emplace(string("key1"), bl); t.omap_setkeys(cid, hoid, start_set); r = queue_transaction(store, ch, std::move(t)); } { map<string,bufferlist> res; bufferlist h; r = store->omap_get(ch, hoid, &h, &res); ASSERT_EQ(r, 0); ASSERT_EQ(h.length(), 0); ASSERT_EQ(res.size(), 1); ASSERT_EQ(res.begin()->first, "key1"); } store->umount(); ASSERT_EQ(store->fsck(false), 0); SetVal(g_conf(), "bluestore_debug_legacy_omap", "false"); SetVal(g_conf(), "bluestore_fsck_error_on_no_per_pool_omap", "true"); g_conf().apply_changes(nullptr); ASSERT_EQ(store->fsck(false), 2); ASSERT_EQ(store->quick_fix(), 0); store->mount(); ch = store->open_collection(cid); { map<string,bufferlist> res; bufferlist h; r = store->omap_get(ch, hoid, &h, &res); ASSERT_EQ(r, 0); ASSERT_EQ(res.size(), 1); ASSERT_EQ(res.begin()->first, "key1"); } { ObjectStore::Transaction t; 
t.remove(cid, hoid); t.remove_collection(cid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } TEST_P(StoreTestOmapUpgrade, LargeLegacyToPG) { if (string(GetParam()) != "bluestore") return; SetVal(g_conf(), "bluestore_debug_legacy_omap", "true"); g_conf().apply_changes(nullptr); int64_t poolid; coll_t cid; ghobject_t hoid; ObjectStore::CollectionHandle ch; StartDeferred(); poolid = 11; cid = coll_t(spg_t(pg_t(1, poolid), shard_id_t::NO_SHARD)); ch = store->create_new_collection(cid); int r; { ObjectStore::Transaction t; t.create_collection(cid, 0); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } //ASSERT_EQ(false, g_conf().get_val<bool>("bluestore_debug_inject_upgrade_bug53062")); map<string, bufferlist> attrs; bufferlist expected_header; expected_header.append("this is a header"); size_t object_count = 1000; make_omap_data(object_count, poolid, cid); //checking just written data check_omap_data(object_count, poolid, cid); store->umount(); ASSERT_EQ(store->fsck(false), 0); SetVal(g_conf(), "bluestore_debug_legacy_omap", "false"); SetVal(g_conf(), "bluestore_fsck_error_on_no_per_pool_omap", "true"); g_conf().apply_changes(nullptr); ASSERT_EQ(store->fsck(false), 1001); ASSERT_EQ(store->quick_fix(), 0); store->mount(); ch = store->open_collection(cid); //checking quick_fix() data check_omap_data(object_count, poolid, cid); { ObjectStore::Transaction t; for (size_t o = 0; o < object_count; o++) { std::string oid = generate_monotonic_name(object_count, o, 3.71, 0.5); ghobject_t hoid(hobject_t(oid, "", CEPH_NOSNAP, 0, poolid, "")); t.remove(cid, hoid); } t.remove_collection(cid); r = queue_transaction(store, ch, std::move(t)); ASSERT_EQ(r, 0); } } #endif // WITH_BLUESTORE int main(int argc, char **argv) { auto args = argv_to_vec(argc, argv); auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); common_init_finish(g_ceph_context); for (auto& i : args) { if (i == 
"--smr"s) { #if defined(HAVE_LIBZBD) derr << "Adjusting tests for smr mode." << dendl; smr = true; #else derr << "smr mode selected, but support not compiled in" << dendl; return 1; #endif } } // make sure we can adjust any config settings g_ceph_context->_conf._clear_safe_to_start_threads(); g_ceph_context->_conf.set_val_or_die("osd_journal_size", "400"); g_ceph_context->_conf.set_val_or_die("filestore_index_retry_probability", "0.5"); g_ceph_context->_conf.set_val_or_die("filestore_op_thread_timeout", "1000"); g_ceph_context->_conf.set_val_or_die("filestore_op_thread_suicide_timeout", "10000"); //g_ceph_context->_conf.set_val_or_die("filestore_fiemap", "true"); g_ceph_context->_conf.set_val_or_die("bluestore_fsck_on_mkfs", "false"); g_ceph_context->_conf.set_val_or_die("bluestore_fsck_on_mount", "false"); g_ceph_context->_conf.set_val_or_die("bluestore_fsck_on_umount", "false"); g_ceph_context->_conf.set_val_or_die("bluestore_debug_small_allocations", "4"); g_ceph_context->_conf.set_val_or_die("bluestore_debug_freelist", "true"); g_ceph_context->_conf.set_val_or_die("bluestore_clone_cow", "true"); g_ceph_context->_conf.set_val_or_die("bluestore_max_alloc_size", "196608"); // set small cache sizes so we see trimming during Synthetic tests g_ceph_context->_conf.set_val_or_die("bluestore_cache_size_hdd", "4000000"); g_ceph_context->_conf.set_val_or_die("bluestore_cache_size_ssd", "4000000"); g_ceph_context->_conf.set_val_or_die( "bluestore_debug_inject_allocation_from_file_failure", "0.66"); // very short *_max prealloc so that we fall back to async submits g_ceph_context->_conf.set_val_or_die("bluestore_blobid_prealloc", "10"); g_ceph_context->_conf.set_val_or_die("bluestore_nid_prealloc", "10"); g_ceph_context->_conf.set_val_or_die("bluestore_debug_randomize_serial_transaction", "10"); g_ceph_context->_conf.set_val_or_die("bdev_debug_aio", "true"); // specify device size g_ceph_context->_conf.set_val_or_die("bluestore_block_size", 
stringify(DEF_STORE_TEST_BLOCKDEV_SIZE)); g_ceph_context->_conf.set_val_or_die( "enable_experimental_unrecoverable_data_corrupting_features", "*"); g_ceph_context->_conf.apply_changes(nullptr); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } /* * Local Variables: * compile-command: "cd ../.. ; make ceph_test_objectstore && * ./ceph_test_objectstore \ * --gtest_filter=*.collect_metadata* --log-to-stderr=true --debug-filestore=20 * " * End: */
324,221
28.655355
1,049
cc
null
ceph-main/src/test/objectstore/store_test_fixture.cc
#include <stdlib.h> #include <string> #include <iostream> #include <assert.h> #include <gtest/gtest.h> #include "common/errno.h" #include "common/config.h" #include "os/ObjectStore.h" #if defined(WITH_BLUESTORE) #include "os/bluestore/BlueStore.h" #endif #include "store_test_fixture.h" using namespace std; static void rm_r(const string& path) { string cmd = string("rm -r ") + path; cout << "==> " << cmd << std::endl; int r = ::system(cmd.c_str()); if (r) { if (r == -1) { r = errno; cerr << "system() failed to fork() " << cpp_strerror(r) << ", continuing anyway" << std::endl; } else { cerr << "failed with exit code " << r << ", continuing anyway" << std::endl; } } } void StoreTestFixture::SetUp() { int r = ::mkdir(data_dir.c_str(), 0777); if (r < 0) { r = -errno; cerr << __func__ << ": unable to create " << data_dir << ": " << cpp_strerror(r) << std::endl; } ASSERT_EQ(0, r); store = ObjectStore::create(g_ceph_context, type, data_dir, "store_test_temp_journal"); if (!store) { cerr << __func__ << ": objectstore type " << type << " doesn't exist yet!" << std::endl; } ASSERT_TRUE(store); #if defined(WITH_BLUESTORE) if (type == "bluestore") { BlueStore *s = static_cast<BlueStore*>(store.get()); // better test coverage! s->set_cache_shards(5); } #endif ASSERT_EQ(0, store->mkfs()); ASSERT_EQ(0, store->mount()); // we keep this stuff 'unsafe' out of test case scope to be able to update ANY // config settings. Hence setting it to 'safe' here to proceed with the test // case g_conf().set_safe_to_start_threads(); } void StoreTestFixture::TearDown() { if (store) { int r = store->umount(); EXPECT_EQ(0, r); rm_r(data_dir); } // we keep this stuff 'unsafe' out of test case scope to be able to update ANY // config settings. Hence setting it to 'unsafe' here as test case is closing. 
g_conf()._clear_safe_to_start_threads(); PopSettings(0); if (!orig_death_test_style.empty()) { ::testing::FLAGS_gtest_death_test_style = orig_death_test_style; orig_death_test_style.clear(); } } void StoreTestFixture::SetVal(ConfigProxy& _conf, const char* key, const char* val) { ceph_assert(!conf || conf == &_conf); conf = &_conf; std::string skey(key); std::string prev_val; conf->get_val(skey, &prev_val); conf->set_val_or_die(key, val); saved_settings.emplace(skey, prev_val); } void StoreTestFixture::PopSettings(size_t pos) { if (conf) { ceph_assert(pos == 0 || pos <= saved_settings.size()); // for sanity while(pos < saved_settings.size()) { auto& e = saved_settings.top(); conf->set_val_or_die(e.first, e.second); saved_settings.pop(); } conf->apply_changes(NULL); } } void StoreTestFixture::CloseAndReopen() { ceph_assert(store != nullptr); g_conf()._clear_safe_to_start_threads(); int r = store->umount(); EXPECT_EQ(0, r); ch.reset(nullptr); store.reset(nullptr); store = ObjectStore::create(g_ceph_context, type, data_dir, "store_test_temp_journal"); if (!store) { cerr << __func__ << ": objectstore type " << type << " failed to reopen!" << std::endl; } ASSERT_TRUE(store); #if defined(WITH_BLUESTORE) if (type == "bluestore") { BlueStore *s = static_cast<BlueStore*>(store.get()); // better test coverage! s->set_cache_shards(5); } #endif ASSERT_EQ(0, store->mount()); g_conf().set_safe_to_start_threads(); }
3,667
25.970588
98
cc
null
ceph-main/src/test/objectstore/store_test_fixture.h
#include <string> #include <stack> #include <memory> #include <gtest/gtest.h> #include "common/config_fwd.h" class ObjectStore; class StoreTestFixture : virtual public ::testing::Test { const std::string type; const std::string data_dir; std::stack<std::pair<std::string, std::string>> saved_settings; ConfigProxy* conf = nullptr; std::string orig_death_test_style; public: std::unique_ptr<ObjectStore> store; ObjectStore::CollectionHandle ch; explicit StoreTestFixture(const std::string& type) : type(type), data_dir(type + ".test_temp_dir") {} void SetUp() override; void TearDown() override; void SetDeathTestStyle(const char* new_style) { if (orig_death_test_style.empty()) { orig_death_test_style = ::testing::FLAGS_gtest_death_test_style; } ::testing::FLAGS_gtest_death_test_style = new_style; } void SetVal(ConfigProxy& conf, const char* key, const char* val); struct SettingsBookmark { StoreTestFixture& s; size_t pos; SettingsBookmark(StoreTestFixture& _s, size_t p) : s(_s), pos(p) {} ~SettingsBookmark() { s.PopSettings(pos); } }; SettingsBookmark BookmarkSettings() { return SettingsBookmark(*this, saved_settings.size()); } void PopSettings(size_t); void CloseAndReopen(); };
1,295
23.45283
70
h
null
ceph-main/src/test/objectstore/test_bdev.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include <stdio.h> #include <string.h> #include <iostream> #include <gtest/gtest.h> #include "global/global_init.h" #include "global/global_context.h" #include "common/ceph_context.h" #include "common/ceph_argparse.h" #include "include/stringify.h" #include "common/errno.h" #include "blk/BlockDevice.h" using namespace std; class TempBdev { public: TempBdev(uint64_t size) : path{get_temp_bdev(size)} {} ~TempBdev() { rm_temp_bdev(path); } const std::string path; private: static string get_temp_bdev(uint64_t size) { static int n = 0; string fn = "ceph_test_bluefs.tmp.block." + stringify(getpid()) + "." + stringify(++n); int fd = ::open(fn.c_str(), O_CREAT|O_RDWR|O_TRUNC, 0644); ceph_assert(fd >= 0); int r = ::ftruncate(fd, size); ceph_assert(r >= 0); ::close(fd); return fn; } static void rm_temp_bdev(string f) { ::unlink(f.c_str()); } }; TEST(KernelDevice, Ticket45337) { // Large (>=2 GB) writes are incomplete when bluefs_buffered_io = true uint64_t size = 1048576ull * 8192; TempBdev bdev{ size }; const bool buffered = true; std::unique_ptr<BlockDevice> b( BlockDevice::create(g_ceph_context, bdev.path, NULL, NULL, [](void* handle, void* aio) {}, NULL)); bufferlist bl; // writing a bit less than 4GB for (auto i = 0; i < 4000; i++) { string s(1048576, 'a' + (i % 28)); bl.append(s); } uint64_t magic_offs = bl.length(); string s(4086, 'z'); s += "0123456789"; bl.append(s); { int r = b->open(bdev.path); if (r < 0) { std::cerr << "open " << bdev.path << " failed" << std::endl; return; } } std::unique_ptr<IOContext> ioc(new IOContext(g_ceph_context, NULL)); auto r = b->aio_write(0, bl, ioc.get(), buffered); ASSERT_EQ(r, 0); if (ioc->has_pending_aios()) { b->aio_submit(ioc.get()); ioc->aio_wait(); } char outbuf[0x1000]; r = b->read_random(magic_offs, sizeof(outbuf), outbuf, buffered); ASSERT_EQ(r, 0); ASSERT_EQ(memcmp(s.c_str(), outbuf, sizeof(outbuf)), 0); b->close(); } int 
main(int argc, char **argv) { auto args = argv_to_vec(argc, argv); map<string,string> defaults = { { "debug_bdev", "1/20" } }; auto cct = global_init(&defaults, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); common_init_finish(g_ceph_context); g_ceph_context->_conf.set_val( "enable_experimental_unrecoverable_data_corrupting_features", "*"); g_ceph_context->_conf.apply_changes(nullptr); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
2,725
23.339286
73
cc
null
ceph-main/src/test/objectstore/test_bluefs.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include <stdio.h>
#include <string.h>
#include <iostream>
#include <time.h>
#include <fcntl.h>
#include <unistd.h>
#include <random>
#include <thread>
#include <stack>
#include <gtest/gtest.h>

#include "global/global_init.h"
#include "common/ceph_argparse.h"
#include "include/stringify.h"
#include "include/scope_guard.h"
#include "common/errno.h"

#include "os/bluestore/Allocator.h"
#include "os/bluestore/BlueFS.h"

using namespace std;

// Return a heap buffer of `size` bytes filled with pseudo-random data
// (default-seeded engine, so the content is deterministic per process).
std::unique_ptr<char[]> gen_buffer(uint64_t size) {
  std::unique_ptr<char[]> buffer = std::make_unique<char[]>(size);
  std::independent_bits_engine<std::default_random_engine, CHAR_BIT, unsigned char> e;
  std::generate(buffer.get(), buffer.get()+size, std::ref(e));
  return buffer;
}

// RAII temporary file standing in for a block device: created sparse at the
// requested size on construction, unlinked on destruction.
class TempBdev {
public:
  TempBdev(uint64_t size)
    : path{get_temp_bdev(size)} {}
  ~TempBdev() {
    rm_temp_bdev(path);
  }
  const std::string path;
private:
  // Create a unique (pid + counter) sparse file of `size` bytes.
  static string get_temp_bdev(uint64_t size) {
    static int n = 0;
    string fn = "ceph_test_bluefs.tmp.block." + stringify(getpid()) + "."
      + stringify(++n);
    int fd = ::open(fn.c_str(), O_CREAT|O_RDWR|O_TRUNC, 0644);
    ceph_assert(fd >= 0);
    int r = ::ftruncate(fd, size);
    ceph_assert(r >= 0);
    ::close(fd);
    return fn;
  }
  static void rm_temp_bdev(string f) {
    ::unlink(f.c_str());
  }
};

// RAII config override helper: SetVal() records the previous value of each
// key, and the destructor restores all of them (LIFO) and re-applies.
class ConfSaver {
  std::stack<std::pair<std::string, std::string>> saved_settings;
  ConfigProxy& conf;
public:
  ConfSaver(ConfigProxy& conf) : conf(conf) {
    conf._clear_safe_to_start_threads();
  };
  ~ConfSaver() {
    conf._clear_safe_to_start_threads();
    while(saved_settings.size() > 0) {
      auto& e = saved_settings.top();
      conf.set_val_or_die(e.first, e.second);
      saved_settings.pop();
    }
    conf.set_safe_to_start_threads();
    conf.apply_changes(nullptr);
  }
  // Override `key` to `val`, remembering the old value for restore.
  void SetVal(const char* key, const char* val) {
    std::string skey(key);
    std::string prev_val;
    conf.get_val(skey, &prev_val);
    conf.set_val_or_die(skey, val);
    saved_settings.emplace(skey, prev_val);
  }
  void ApplyChanges() {
    conf.set_safe_to_start_threads();
    conf.apply_changes(nullptr);
  }
};

// mkfs on a fresh 128 MB device succeeds.
TEST(BlueFS, mkfs) {
  uint64_t size = 1048576 * 128;
  TempBdev bdev{size};
  uuid_d fsid;
  BlueFS fs(g_ceph_context);
  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
  ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
}

// mkfs + mount; totals exclude the reserved first 1 MB of the device.
TEST(BlueFS, mkfs_mount) {
  uint64_t size = 1048576 * 128;
  TempBdev bdev{size};
  BlueFS fs(g_ceph_context);
  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
  uuid_d fsid;
  ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
  ASSERT_EQ(0, fs.mount());
  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
  ASSERT_EQ(fs.get_total(BlueFS::BDEV_DB), size - 1048576);
  ASSERT_LT(fs.get_free(BlueFS::BDEV_DB), size - 1048576);
  fs.umount();
}

// Basic write/fsync/read round-trip on a single small file.
TEST(BlueFS, write_read) {
  uint64_t size = 1048576 * 128;
  TempBdev bdev{size};
  BlueFS fs(g_ceph_context);
  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
  uuid_d fsid;
  ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
  ASSERT_EQ(0, fs.mount());
  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
  {
    BlueFS::FileWriter *h;
    ASSERT_EQ(0, fs.mkdir("dir"));
    ASSERT_EQ(0, fs.open_for_write("dir", "file", &h, false));
    h->append("foo", 3);
    h->append("bar", 3);
    h->append("baz", 3);
    fs.fsync(h);
    fs.close_writer(h);
  }
  {
    BlueFS::FileReader *h;
    ASSERT_EQ(0, fs.open_for_read("dir", "file", &h));
    bufferlist bl;
    // Ask for 1024 bytes; only the 9 written bytes come back.
    ASSERT_EQ(9, fs.read(h, 0, 1024, &bl, NULL));
    ASSERT_EQ(0, strncmp("foobarbaz", bl.c_str(), 9));
    delete h;
  }
  fs.umount();
}

// Many tiny appends, with and without an fsync per append.
TEST(BlueFS, small_appends) {
  uint64_t size = 1048576 * 128;
  TempBdev bdev{size};
  BlueFS fs(g_ceph_context);
  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
  uuid_d fsid;
  ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
  ASSERT_EQ(0, fs.mount());
  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
  {
    BlueFS::FileWriter *h;
    ASSERT_EQ(0, fs.mkdir("dir"));
    ASSERT_EQ(0, fs.open_for_write("dir", "file", &h, false));
    for (unsigned i = 0; i < 10000; ++i) {
      // NOTE: only the first 23 bytes of the 28-char literal are appended.
      h->append("abcdeabcdeabcdeabcdeabcdeabc", 23);
    }
    fs.fsync(h);
    fs.close_writer(h);
  }
  {
    BlueFS::FileWriter *h;
    ASSERT_EQ(0, fs.open_for_write("dir", "file_sync", &h, false));
    for (unsigned i = 0; i < 1000; ++i) {
      h->append("abcdeabcdeabcdeabcdeabcdeabc", 23);
      ASSERT_EQ(0, fs.fsync(h));
    }
    fs.close_writer(h);
  }
  fs.umount();
}

// Write ~5 GB via repeated appends (unbuffered I/O) and verify readback,
// both via bufferlist reads and via a single huge flat-buffer read.
TEST(BlueFS, very_large_write) {
  // we'll write a ~5G file, so allocate more than that for the whole fs
  uint64_t size = 1048576 * 1024 * 6ull;
  TempBdev bdev{size};
  BlueFS fs(g_ceph_context);

  bool old = g_ceph_context->_conf.get_val<bool>("bluefs_buffered_io");
  g_ceph_context->_conf.set_val("bluefs_buffered_io", "false");
  uint64_t total_written = 0;

  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
  uuid_d fsid;
  ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
  ASSERT_EQ(0, fs.mount());
  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
  char buf[1048571]; // this is biggish, but intentionally not evenly aligned
  for (unsigned i = 0; i < sizeof(buf); ++i) {
    buf[i] = i;
  }
  {
    BlueFS::FileWriter *h;
    ASSERT_EQ(0, fs.mkdir("dir"));
    ASSERT_EQ(0, fs.open_for_write("dir", "bigfile", &h, false));
    // ~3 GB, fsync, then ~2 GB more.
    for (unsigned i = 0; i < 3*1024*1048576ull / sizeof(buf); ++i) {
      h->append(buf, sizeof(buf));
      total_written += sizeof(buf);
    }
    fs.fsync(h);
    for (unsigned i = 0; i < 2*1024*1048576ull / sizeof(buf); ++i) {
      h->append(buf, sizeof(buf));
      total_written += sizeof(buf);
    }
    fs.fsync(h);
    fs.close_writer(h);
  }
  {
    BlueFS::FileReader *h;
    ASSERT_EQ(0, fs.open_for_read("dir", "bigfile", &h));
    bufferlist bl;
    ASSERT_EQ(h->file->fnode.size, total_written);
    for (unsigned i = 0; i < 3*1024*1048576ull / sizeof(buf); ++i) {
      bl.clear();
      fs.read(h, i * sizeof(buf), sizeof(buf), &bl, NULL);
      int r = memcmp(buf, bl.c_str(), sizeof(buf));
      if (r) {
        cerr << "read got mismatch at offset " << i*sizeof(buf) << " r " << r
             << std::endl;
      }
      ASSERT_EQ(0, r);
    }
    // NOTE(review): this second loop restarts at offset 0 rather than
    // continuing past the first 3 GB — it re-verifies the file head, not
    // the tail; confirm whether that is intended.
    for (unsigned i = 0; i < 2*1024*1048576ull / sizeof(buf); ++i) {
      bl.clear();
      fs.read(h, i * sizeof(buf), sizeof(buf), &bl, NULL);
      int r = memcmp(buf, bl.c_str(), sizeof(buf));
      if (r) {
        cerr << "read got mismatch at offset " << i*sizeof(buf) << " r " << r
             << std::endl;
      }
      ASSERT_EQ(0, r);
    }
    delete h;
    // Re-open and read the whole file into one flat buffer.
    ASSERT_EQ(0, fs.open_for_read("dir", "bigfile", &h));
    ASSERT_EQ(h->file->fnode.size, total_written);
    auto huge_buf = std::make_unique<char[]>(h->file->fnode.size);
    auto l = h->file->fnode.size;
    int64_t r = fs.read(h, 0, l, NULL, huge_buf.get());
    ASSERT_EQ(r, l);
    delete h;
  }
  fs.umount();

  g_ceph_context->_conf.set_val("bluefs_buffered_io", stringify((int)old));
}

// Write ~5 GB in one append_try_flush() call and verify via flat read.
TEST(BlueFS, very_large_write2) {
  // we'll write a ~5G file, so allocate more than that for the whole fs
  uint64_t size_full = 1048576 * 1024 * 6ull;
  uint64_t size = 1048576 * 1024 * 5ull;
  TempBdev bdev{ size_full };
  BlueFS fs(g_ceph_context);

  bool old = g_ceph_context->_conf.get_val<bool>("bluefs_buffered_io");
  g_ceph_context->_conf.set_val("bluefs_buffered_io", "false");

  uint64_t total_written = 0;

  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
  uuid_d fsid;
  ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
  ASSERT_EQ(0, fs.mount());
  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));

  char fill_arr[1 << 20]; // 1M
  for (size_t i = 0; i < sizeof(fill_arr); ++i) {
    fill_arr[i] = (char)i;
  }
  std::unique_ptr<char[]> buf;
  buf.reset(new char[size]);
  // Tile the 5 GB buffer with the 1 MB pattern.
  for (size_t i = 0; i < size; i += sizeof(fill_arr)) {
    memcpy(buf.get() + i, fill_arr, sizeof(fill_arr));
  }
  {
    BlueFS::FileWriter* h;
    ASSERT_EQ(0, fs.mkdir("dir"));
    ASSERT_EQ(0, fs.open_for_write("dir", "bigfile", &h, false));
    fs.append_try_flush(h, buf.get(), size);
    total_written = size;
    fs.fsync(h);
    fs.close_writer(h);
  }
  memset(buf.get(), 0, size);
  {
    BlueFS::FileReader* h;
    ASSERT_EQ(0, fs.open_for_read("dir", "bigfile", &h));
    ASSERT_EQ(h->file->fnode.size, total_written);
    auto l = h->file->fnode.size;
    int64_t r = fs.read(h, 0, l, NULL, buf.get());
    ASSERT_EQ(r, l);
    for (size_t i = 0; i < size; i += sizeof(fill_arr)) {
      ceph_assert(memcmp(buf.get() + i, fill_arr, sizeof(fill_arr)) == 0);
    }
    delete h;
  }
  fs.umount();

  g_ceph_context->_conf.set_val("bluefs_buffered_io", stringify((int)old));
}

#define ALLOC_SIZE 4096

// Thread body: create a per-thread directory and keep writing+fsyncing
// ALLOC_SIZE files until `rationed_bytes` is (approximately) consumed or
// an fsync fails (e.g. out of space).
void write_data(BlueFS &fs, uint64_t rationed_bytes) {
  int j=0, r=0;
  uint64_t written_bytes = 0;
  rationed_bytes -= ALLOC_SIZE;
  stringstream ss;
  string dir = "dir.";
  ss << std::this_thread::get_id();
  dir.append(ss.str());
  dir.append(".");
  dir.append(to_string(j));
  ASSERT_EQ(0, fs.mkdir(dir));
  while (1) {
    string file = "file.";
    file.append(to_string(j));
    BlueFS::FileWriter *h;
    ASSERT_EQ(0, fs.open_for_write(dir, file, &h, false));
    ASSERT_NE(nullptr, h);
    auto sg = make_scope_guard([&fs, h] { fs.close_writer(h); });
    bufferlist bl;
    std::unique_ptr<char[]> buf = gen_buffer(ALLOC_SIZE);
    // NOTE(review): claim_char appears to take ownership of memory that
    // `buf` also owns — confirm buffer::claim_char semantics here.
    bufferptr bp = buffer::claim_char(ALLOC_SIZE, buf.get());
    bl.push_back(bp);
    h->append(bl.c_str(), bl.length());
    r = fs.fsync(h);
    if (r < 0) {
       break;
    }
    written_bytes += g_conf()->bluefs_alloc_size;
    j++;
    if ((rationed_bytes - written_bytes) <= g_conf()->bluefs_alloc_size) {
      break;
    }
  }
}

// Create the single shared file used by write_single_file().
void create_single_file(BlueFS &fs) {
  BlueFS::FileWriter *h;
  stringstream ss;
  string dir = "dir.test";
  ASSERT_EQ(0, fs.mkdir(dir));
  string file = "testfile";
  ASSERT_EQ(0, fs.open_for_write(dir, file, &h, false));
  bufferlist bl;
  std::unique_ptr<char[]> buf = gen_buffer(ALLOC_SIZE);
  bufferptr bp = buffer::claim_char(ALLOC_SIZE, buf.get());
  bl.push_back(bp);
  h->append(bl.c_str(), bl.length());
  fs.fsync(h);
  fs.close_writer(h);
}

// Thread body: repeatedly re-open and overwrite the single shared file
// until the byte ration is consumed or fsync fails.
void write_single_file(BlueFS &fs, uint64_t rationed_bytes) {
  stringstream ss;
  const string dir = "dir.test";
  const string file = "testfile";
  uint64_t written_bytes = 0;
  rationed_bytes -= ALLOC_SIZE;
  while (1) {
    BlueFS::FileWriter *h;
    ASSERT_EQ(0, fs.open_for_write(dir, file, &h, false));
    ASSERT_NE(nullptr, h);
    auto sg = make_scope_guard([&fs, h] { fs.close_writer(h); });
    bufferlist bl;
    std::unique_ptr<char[]> buf = gen_buffer(ALLOC_SIZE);
    bufferptr bp = buffer::claim_char(ALLOC_SIZE, buf.get());
    bl.push_back(bp);
    h->append(bl.c_str(), bl.length());
    int r = fs.fsync(h);
    if (r < 0) {
       break;
    }
    written_bytes += g_conf()->bluefs_alloc_size;
    if ((rationed_bytes - written_bytes) <= g_conf()->bluefs_alloc_size) {
      break;
    }
  }
}

// Signals the sync_fs() threads to stop; set by the test body after all
// writer threads have been joined.
bool writes_done = false;

// Thread body: sync metadata once a second until writes_done is set.
void sync_fs(BlueFS &fs) {
  while (1) {
    if (writes_done == true)
      break;
    fs.sync_metadata(false);
    sleep(1);
  }
}

void do_join(std::thread& t) {
  t.join();
}

void join_all(std::vector<std::thread>& v) {
  std::for_each(v.begin(),v.end(),do_join);
}

#define NUM_WRITERS 3
#define NUM_SYNC_THREADS 1

#define NUM_SINGLE_FILE_WRITERS 1
#define NUM_MULTIPLE_FILE_WRITERS 2

// Concurrent multi-file and single-file writers on a small allocation unit.
TEST(BlueFS, test_flush_1) {
  uint64_t size = 1048576 * 128;
  TempBdev bdev{size};
  g_ceph_context->_conf.set_val(
    "bluefs_alloc_size",
    "65536");
  g_ceph_context->_conf.apply_changes(nullptr);

  BlueFS fs(g_ceph_context);
  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
  uuid_d fsid;
  ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
  ASSERT_EQ(0, fs.mount());
  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
  {
    std::vector<std::thread> write_thread_multiple;
    uint64_t effective_size = size - (32 * 1048576); // leaving the last 32 MB for log compaction
    uint64_t per_thread_bytes = (effective_size/(NUM_MULTIPLE_FILE_WRITERS + NUM_SINGLE_FILE_WRITERS));
    for (int i=0; i<NUM_MULTIPLE_FILE_WRITERS ; i++) {
      write_thread_multiple.push_back(std::thread(write_data, std::ref(fs), per_thread_bytes));
    }

    create_single_file(fs);
    std::vector<std::thread> write_thread_single;
    for (int i=0; i<NUM_SINGLE_FILE_WRITERS; i++) {
      write_thread_single.push_back(std::thread(write_single_file, std::ref(fs), per_thread_bytes));
    }

    join_all(write_thread_single);
    join_all(write_thread_multiple);
  }
  fs.umount();
}

// Several multi-file writer threads on a larger device.
TEST(BlueFS, test_flush_2) {
  uint64_t size = 1048576 * 256;
  TempBdev bdev{size};
  g_ceph_context->_conf.set_val(
    "bluefs_alloc_size",
    "65536");
  g_ceph_context->_conf.apply_changes(nullptr);

  BlueFS fs(g_ceph_context);
  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
  uuid_d fsid;
  ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
  ASSERT_EQ(0, fs.mount());
  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
  {
    uint64_t effective_size = size - (128 * 1048576); // leaving the last 128 MB for log compaction
    uint64_t per_thread_bytes = (effective_size/(NUM_WRITERS));
    std::vector<std::thread> write_thread_multiple;
    for (int i=0; i<NUM_WRITERS; i++) {
      write_thread_multiple.push_back(std::thread(write_data, std::ref(fs), per_thread_bytes));
    }

    join_all(write_thread_multiple);
  }
  fs.umount();
}

// Writers racing with a concurrent sync_metadata() thread.
TEST(BlueFS, test_flush_3) {
  uint64_t size = 1048576 * 256;
  TempBdev bdev{size};
  g_ceph_context->_conf.set_val(
    "bluefs_alloc_size",
    "65536");
  g_ceph_context->_conf.apply_changes(nullptr);

  BlueFS fs(g_ceph_context);
  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
  uuid_d fsid;
  ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
  ASSERT_EQ(0, fs.mount());
  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
  {
    std::vector<std::thread> write_threads;
    uint64_t effective_size = size - (64 * 1048576); // leaving the last 64 MB for log compaction
    uint64_t per_thread_bytes = (effective_size/(NUM_WRITERS));
    for (int i=0; i<NUM_WRITERS; i++) {
      write_threads.push_back(std::thread(write_data, std::ref(fs), per_thread_bytes));
    }

    std::vector<std::thread> sync_threads;
    for (int i=0; i<NUM_SYNC_THREADS; i++) {
      sync_threads.push_back(std::thread(sync_fs, std::ref(fs)));
    }

    join_all(write_threads);
    writes_done = true;
    join_all(sync_threads);
  }
  fs.umount();
}

// Create and partially delete a file tree, then synchronously compact the log.
TEST(BlueFS, test_simple_compaction_sync) {
  g_ceph_context->_conf.set_val(
    "bluefs_compact_log_sync",
    "true");
  uint64_t size = 1048576 * 128;
  TempBdev bdev{size};

  BlueFS fs(g_ceph_context);
  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
  uuid_d fsid;
  ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
  ASSERT_EQ(0, fs.mount());
  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
  {
    for (int i=0; i<10; i++) {
      string dir = "dir.";
      dir.append(to_string(i));
      ASSERT_EQ(0, fs.mkdir(dir));
      for (int j=0; j<10; j++) {
        string file = "file.";
        file.append(to_string(j));
        BlueFS::FileWriter *h;
        ASSERT_EQ(0, fs.open_for_write(dir, file, &h, false));
        ASSERT_NE(nullptr, h);
        auto sg = make_scope_guard([&fs, h] { fs.close_writer(h); });
        bufferlist bl;
        std::unique_ptr<char[]> buf = gen_buffer(4096);
        bufferptr bp = buffer::claim_char(4096, buf.get());
        bl.push_back(bp);
        h->append(bl.c_str(), bl.length());
        fs.fsync(h);
      }
    }
  }
  {
    // Remove every other directory so the log has deletions to compact.
    for (int i=0; i<10; i+=2) {
      string dir = "dir.";
      dir.append(to_string(i));
      for (int j=0; j<10; j++) {
        string file = "file.";
        file.append(to_string(j));
        fs.unlink(dir, file);
        fs.sync_metadata(false);
      }
      ASSERT_EQ(0, fs.rmdir(dir));
      fs.sync_metadata(false);
    }
  }
  fs.compact_log();
  fs.umount();
}
// Same as test_simple_compaction_sync but with asynchronous log compaction.
TEST(BlueFS, test_simple_compaction_async) {
  g_ceph_context->_conf.set_val(
    "bluefs_compact_log_sync",
    "false");
  uint64_t size = 1048576 * 128;
  TempBdev bdev{size};

  BlueFS fs(g_ceph_context);
  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
  uuid_d fsid;
  ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
  ASSERT_EQ(0, fs.mount());
  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
  {
    for (int i=0; i<10; i++) {
      string dir = "dir.";
      dir.append(to_string(i));
      ASSERT_EQ(0, fs.mkdir(dir));
      for (int j=0; j<10; j++) {
        string file = "file.";
        file.append(to_string(j));
        BlueFS::FileWriter *h;
        ASSERT_EQ(0, fs.open_for_write(dir, file, &h, false));
        ASSERT_NE(nullptr, h);
        auto sg = make_scope_guard([&fs, h] { fs.close_writer(h); });
        bufferlist bl;
        std::unique_ptr<char[]> buf = gen_buffer(4096);
        bufferptr bp = buffer::claim_char(4096, buf.get());
        bl.push_back(bp);
        h->append(bl.c_str(), bl.length());
        fs.fsync(h);
      }
    }
  }
  {
    // Remove every other directory so the log has deletions to compact.
    for (int i=0; i<10; i+=2) {
      string dir = "dir.";
      dir.append(to_string(i));
      for (int j=0; j<10; j++) {
        string file = "file.";
        file.append(to_string(j));
        fs.unlink(dir, file);
        fs.sync_metadata(false);
      }
      ASSERT_EQ(0, fs.rmdir(dir));
      fs.sync_metadata(false);
    }
  }
  fs.compact_log();
  fs.umount();
}

// Heavy concurrent writes + metadata sync, then synchronous log compaction;
// verify a "canary" file written after compaction survives a remount.
TEST(BlueFS, test_compaction_sync) {
  uint64_t size = 1048576 * 128;
  TempBdev bdev{size};
  g_ceph_context->_conf.set_val(
    "bluefs_alloc_size",
    "65536");
  g_ceph_context->_conf.set_val(
    "bluefs_compact_log_sync",
    "true");
  const char* canary_dir = "dir.after_compact_test";
  const char* canary_file = "file.after_compact_test";
  const char* canary_data = "some random data";

  BlueFS fs(g_ceph_context);
  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
  uuid_d fsid;
  ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
  ASSERT_EQ(0, fs.mount());
  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
  {
    std::vector<std::thread> write_threads;
    uint64_t effective_size = size - (32 * 1048576); // leaving the last 32 MB for log compaction
    uint64_t per_thread_bytes = (effective_size/(NUM_WRITERS));
    for (int i=0; i<NUM_WRITERS; i++) {
      write_threads.push_back(std::thread(write_data, std::ref(fs), per_thread_bytes));
    }

    std::vector<std::thread> sync_threads;
    for (int i=0; i<NUM_SYNC_THREADS; i++) {
      sync_threads.push_back(std::thread(sync_fs, std::ref(fs)));
    }

    join_all(write_threads);
    writes_done = true;
    join_all(sync_threads);
    fs.compact_log();
    {
      ASSERT_EQ(0, fs.mkdir(canary_dir));
      BlueFS::FileWriter *h;
      ASSERT_EQ(0, fs.open_for_write(canary_dir, canary_file, &h, false));
      ASSERT_NE(nullptr, h);
      auto sg = make_scope_guard([&fs, h] { fs.close_writer(h); });
      h->append(canary_data, strlen(canary_data));
      int r = fs.fsync(h);
      ASSERT_EQ(r, 0);
    }
  }
  fs.umount();

  // Remount and make sure the post-compaction canary is readable.
  fs.mount();
  {
    BlueFS::FileReader *h;
    ASSERT_EQ(0, fs.open_for_read(canary_dir, canary_file, &h));
    ASSERT_NE(nullptr, h);
    bufferlist bl;
    ASSERT_EQ(strlen(canary_data), fs.read(h, 0, 1024, &bl, NULL));
    std::cout << bl.c_str() << std::endl;
    ASSERT_EQ(0, strncmp(canary_data, bl.c_str(), strlen(canary_data)));
    delete h;
  }
  fs.umount();
}

// Same as test_compaction_sync but with asynchronous log compaction.
TEST(BlueFS, test_compaction_async) {
  uint64_t size = 1048576 * 128;
  TempBdev bdev{size};
  g_ceph_context->_conf.set_val(
    "bluefs_alloc_size",
    "65536");
  g_ceph_context->_conf.set_val(
    "bluefs_compact_log_sync",
    "false");
  const char* canary_dir = "dir.after_compact_test";
  const char* canary_file = "file.after_compact_test";
  const char* canary_data = "some random data";

  BlueFS fs(g_ceph_context);
  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
  uuid_d fsid;
  ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
  ASSERT_EQ(0, fs.mount());
  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
  {
    std::vector<std::thread> write_threads;
    uint64_t effective_size = size - (32 * 1048576); // leaving the last 32 MB for log compaction
    uint64_t per_thread_bytes = (effective_size/(NUM_WRITERS));
    for (int i=0; i<NUM_WRITERS; i++) {
      write_threads.push_back(std::thread(write_data, std::ref(fs), per_thread_bytes));
    }

    std::vector<std::thread> sync_threads;
    for (int i=0; i<NUM_SYNC_THREADS; i++) {
      sync_threads.push_back(std::thread(sync_fs, std::ref(fs)));
    }

    join_all(write_threads);
    writes_done = true;
    join_all(sync_threads);
    fs.compact_log();
    {
      ASSERT_EQ(0, fs.mkdir(canary_dir));
      BlueFS::FileWriter *h;
      ASSERT_EQ(0, fs.open_for_write(canary_dir, canary_file, &h, false));
      ASSERT_NE(nullptr, h);
      auto sg = make_scope_guard([&fs, h] { fs.close_writer(h); });
      h->append(canary_data, strlen(canary_data));
      int r = fs.fsync(h);
      ASSERT_EQ(r, 0);
    }
  }
  fs.umount();

  // Remount and make sure the post-compaction canary is readable.
  fs.mount();
  {
    BlueFS::FileReader *h;
    ASSERT_EQ(0, fs.open_for_read(canary_dir, canary_file, &h));
    ASSERT_NE(nullptr, h);
    bufferlist bl;
    ASSERT_EQ(strlen(canary_data), fs.read(h, 0, 1024, &bl, NULL));
    std::cout << bl.c_str() << std::endl;
    ASSERT_EQ(0, strncmp(canary_data, bl.c_str(), strlen(canary_data)));
    delete h;
  }
  fs.umount();
}

// Heavy concurrent writes, async compaction, then verify the log replays
// cleanly on remount.
TEST(BlueFS, test_replay) {
  uint64_t size = 1048576 * 128;
  TempBdev bdev{size};
  g_ceph_context->_conf.set_val(
    "bluefs_alloc_size",
    "65536");
  g_ceph_context->_conf.set_val(
    "bluefs_compact_log_sync",
    "false");

  BlueFS fs(g_ceph_context);
  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
  uuid_d fsid;
  ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
  ASSERT_EQ(0, fs.mount());
  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
  {
    std::vector<std::thread> write_threads;
    uint64_t effective_size = size - (32 * 1048576); // leaving the last 32 MB for log compaction
    uint64_t per_thread_bytes = (effective_size/(NUM_WRITERS));
    for (int i=0; i<NUM_WRITERS; i++) {
      write_threads.push_back(std::thread(write_data, std::ref(fs), per_thread_bytes));
    }

    std::vector<std::thread> sync_threads;
    for (int i=0; i<NUM_SYNC_THREADS; i++) {
      sync_threads.push_back(std::thread(sync_fs, std::ref(fs)));
    }

    join_all(write_threads);
    writes_done = true;
    join_all(sync_threads);
    fs.compact_log();
  }
  fs.umount();

  // remount and check log can replay safe?
  ASSERT_EQ(0, fs.mount());
  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
  fs.umount();
}

// Grow the log aggressively (tiny runway, fsync per append) and verify the
// grown log still replays after an umount without compaction.
TEST(BlueFS, test_replay_growth) {
  uint64_t size = 1048576LL * (2 * 1024 + 128);
  TempBdev bdev{size};

  ConfSaver conf(g_ceph_context->_conf);
  conf.SetVal("bluefs_alloc_size", "4096");
  conf.SetVal("bluefs_shared_alloc_size", "4096");
  conf.SetVal("bluefs_compact_log_sync", "false");
  conf.SetVal("bluefs_min_log_runway", "32768");
  conf.SetVal("bluefs_max_log_runway", "65536");
  conf.SetVal("bluefs_allocator", "stupid");
  conf.SetVal("bluefs_sync_write", "true");
  conf.ApplyChanges();

  BlueFS fs(g_ceph_context);
  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
  uuid_d fsid;
  ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
  ASSERT_EQ(0, fs.mount());
  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
  ASSERT_EQ(0, fs.mkdir("dir"));

  char data[2000];
  BlueFS::FileWriter *h;
  ASSERT_EQ(0, fs.open_for_write("dir", "file", &h, false));
  for (size_t i = 0; i < 10000; i++) {
    h->append(data, 2000);
    fs.fsync(h);
  }
  fs.close_writer(h);
  fs.umount(true); //do not compact on exit!

  // remount and check log can replay safe?
  ASSERT_EQ(0, fs.mount());
  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
  fs.umount();
}

// https://tracker.ceph.com/issues/50965: allocations must not become stable
// in the log while the slow device's data is still unflushed.
TEST(BlueFS, test_tracker_50965) {
  uint64_t size_wal = 1048576 * 64;
  TempBdev bdev_wal{size_wal};
  uint64_t size_db = 1048576 * 128;
  TempBdev bdev_db{size_db};
  uint64_t size_slow = 1048576 * 256;
  TempBdev bdev_slow{size_slow};

  ConfSaver conf(g_ceph_context->_conf);
  conf.SetVal("bluefs_min_flush_size", "65536");
  conf.ApplyChanges();

  BlueFS fs(g_ceph_context);
  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_WAL, bdev_wal.path, false, 0));
  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev_db.path, false, 0));
  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_SLOW, bdev_slow.path, false, 0));
  uuid_d fsid;
  ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, true, true }));
  ASSERT_EQ(0, fs.mount());
  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, true, true }));

  string dir_slow = "dir.slow";
  ASSERT_EQ(0, fs.mkdir(dir_slow));
  string dir_db = "dir_db";
  ASSERT_EQ(0, fs.mkdir(dir_db));

  string file_slow = "file";
  BlueFS::FileWriter *h_slow;
  ASSERT_EQ(0, fs.open_for_write(dir_slow, file_slow, &h_slow, false));
  ASSERT_NE(nullptr, h_slow);

  string file_db = "file";
  BlueFS::FileWriter *h_db;
  ASSERT_EQ(0, fs.open_for_write(dir_db, file_db, &h_db, false));
  ASSERT_NE(nullptr, h_db);

  // Write 70000 bytes to the slow-device file and flush (not fsync).
  bufferlist bl1;
  std::unique_ptr<char[]> buf1 = gen_buffer(70000);
  bufferptr bp1 = buffer::claim_char(70000, buf1.get());
  bl1.push_back(bp1);
  h_slow->append(bl1.c_str(), bl1.length());
  fs.flush(h_slow);

  uint64_t h_slow_dirty_seq_1 = fs.debug_get_dirty_seq(h_slow);

  // An fsync on a different (DB) file forces the shared bluefs log down.
  bufferlist bl2;
  std::unique_ptr<char[]> buf2 = gen_buffer(1000);
  bufferptr bp2 = buffer::claim_char(1000, buf2.get());
  bl2.push_back(bp2);
  h_db->append(bl2.c_str(), bl2.length());
  fs.fsync(h_db);

  uint64_t h_slow_dirty_seq_2 = fs.debug_get_dirty_seq(h_slow);
  bool h_slow_dev_dirty = fs.debug_get_is_dev_dirty(h_slow, BlueFS::BDEV_SLOW);

  //problem if allocations are stable in log but slow device is not flushed yet
  ASSERT_FALSE(h_slow_dirty_seq_1 != 0 &&
	       h_slow_dirty_seq_2 == 0 &&
	       h_slow_dev_dirty == true);

  fs.close_writer(h_slow);
  fs.close_writer(h_db);
  fs.umount();
}

// https://tracker.ceph.com/issues/53129: after truncate + a log flush, the
// truncated size must be stable (either fully present or absent) across a
// simulated power loss.
TEST(BlueFS, test_truncate_stable_53129) {

  ConfSaver conf(g_ceph_context->_conf);
  conf.SetVal("bluefs_min_flush_size", "65536");
  conf.ApplyChanges();

  uint64_t size_wal = 1048576 * 64;
  TempBdev bdev_wal{size_wal};
  uint64_t size_db = 1048576 * 128;
  TempBdev bdev_db{size_db};
  uint64_t size_slow = 1048576 * 256;
  TempBdev bdev_slow{size_slow};

  BlueFS fs(g_ceph_context);
  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_WAL, bdev_wal.path, false, 0));
  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev_db.path, false, 0));
  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_SLOW, bdev_slow.path, false, 0));
  uuid_d fsid;
  ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, true, true }));
  ASSERT_EQ(0, fs.mount());
  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, true, true }));

  string dir_slow = "dir.slow";
  ASSERT_EQ(0, fs.mkdir(dir_slow));
  string dir_db = "dir_db";
  ASSERT_EQ(0, fs.mkdir(dir_db));

  string file_slow = "file";
  BlueFS::FileWriter *h_slow;
  ASSERT_EQ(0, fs.open_for_write(dir_slow, file_slow, &h_slow, false));
  ASSERT_NE(nullptr, h_slow);

  string file_db = "file";
  BlueFS::FileWriter *h_db;
  ASSERT_EQ(0, fs.open_for_write(dir_db, file_db, &h_db, false));
  ASSERT_NE(nullptr, h_db);

  bufferlist bl1;
  std::unique_ptr<char[]> buf1 = gen_buffer(70000);
  bufferptr bp1 = buffer::claim_char(70000, buf1.get());
  bl1.push_back(bp1);
  // add 70000 bytes
  h_slow->append(bl1.c_str(), bl1.length());
  fs.flush(h_slow);
  // and truncate to 60000 bytes
  fs.truncate(h_slow, 60000);

  // write something to file on DB device
  bufferlist bl2;
  std::unique_ptr<char[]> buf2 = gen_buffer(1000);
  bufferptr bp2 = buffer::claim_char(1000, buf2.get());
  bl2.push_back(bp2);
  h_db->append(bl2.c_str(), bl2.length());
  // and force bluefs log to flush
  fs.fsync(h_db);

  // This is the actual test point.
  // We completed truncate, and we expect
  // - size to be 60000
  // - data to be stable on slow device
  // OR
  // - size = 0 or file does not exist
  // - dev_dirty is irrelevant
  bool h_slow_dev_dirty = fs.debug_get_is_dev_dirty(h_slow, BlueFS::BDEV_SLOW);
  // Imagine power goes down here.

  fs.close_writer(h_slow);
  fs.close_writer(h_db);
  fs.umount();

  ASSERT_EQ(0, fs.mount());
  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, true, true }));

  uint64_t size;
  utime_t mtime;
  ASSERT_EQ(0, fs.stat("dir.slow", "file", &size, &mtime));
  // check file size 60000
  ASSERT_EQ(size, 60000);
  // check that dev_dirty was false (data stable on media)
  ASSERT_EQ(h_slow_dev_dirty, false);

  fs.umount();
}

// Write with tiny log runway, umount without compaction, remount and write
// more, then verify the ino-1 (log) delta replays cleanly once more.
TEST(BlueFS, test_update_ino1_delta_after_replay) {
  uint64_t size = 1048576LL * (2 * 1024 + 128);
  TempBdev bdev{size};

  ConfSaver conf(g_ceph_context->_conf);
  conf.SetVal("bluefs_alloc_size", "4096");
  conf.SetVal("bluefs_shared_alloc_size", "4096");
  conf.SetVal("bluefs_compact_log_sync", "false");
  conf.SetVal("bluefs_min_log_runway", "32768");
  conf.SetVal("bluefs_max_log_runway", "65536");
  conf.SetVal("bluefs_allocator", "stupid");
  conf.ApplyChanges();

  BlueFS fs(g_ceph_context);
  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
  uuid_d fsid;
  ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
  ASSERT_EQ(0, fs.mount());
  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
  ASSERT_EQ(0, fs.mkdir("dir"));

  char data[2000];
  BlueFS::FileWriter *h;
  ASSERT_EQ(0, fs.open_for_write("dir", "file", &h, false));
  for (size_t i = 0; i < 100; i++) {
    h->append(data, 2000);
    fs.fsync(h);
  }
  fs.close_writer(h);
  fs.umount(true); //do not compact on exit!

  ASSERT_EQ(0, fs.mount());
  ASSERT_EQ(0, fs.open_for_write("dir", "file2", &h, false));
  for (size_t i = 0; i < 100; i++) {
    h->append(data, 2000);
    fs.fsync(h);
  }
  fs.close_writer(h);
  fs.umount();

  // remount and check log can replay safe?
  ASSERT_EQ(0, fs.mount());
  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
  fs.umount();
}

TEST(BlueFS, broken_unlink_fsync_seq) {
  uint64_t size = 1048576 * 128;
  TempBdev bdev{size};
  BlueFS fs(g_ceph_context);
  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
  uuid_d fsid;
  ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
  ASSERT_EQ(0, fs.mount());
  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
  {
    /*
    * This reproduces a weird file op sequence (unlink+fsync) that Octopus
    * RocksDB might issue to BlueFS when recycle_log_file_num setting is 0
    * See https://tracker.ceph.com/issues/55636 for more details
    *
    */
    char buf[1048571]; // this is biggish, but intentionally not evenly aligned
    for (unsigned i = 0; i < sizeof(buf); ++i) {
      buf[i] = i;
    }
    BlueFS::FileWriter *h;
    ASSERT_EQ(0, fs.mkdir("dir"));
    ASSERT_EQ(0, fs.open_for_write("dir", "file", &h, false));
    h->append(buf, sizeof(buf));
    fs.flush(h);
    h->append(buf, sizeof(buf));
    fs.unlink("dir", "file");
    fs.fsync(h);
    fs.close_writer(h);
  }
  fs.umount();

  // remount and check log can replay safe?
ASSERT_EQ(0, fs.mount()); ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false })); fs.umount(); } TEST(BlueFS, truncate_fsync) { uint64_t bdev_size = 128 * 1048576; uint64_t block_size = 4096; uint64_t reserved = 1048576; TempBdev bdev{bdev_size}; uuid_d fsid; const char* DIR_NAME="dir"; const char* FILE_NAME="file1"; size_t sizes[] = {3, 1024, 4096, 1024 * 4096}; for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) { const size_t content_size= sizes[i]; const size_t read_size = p2roundup(content_size, size_t(block_size)); const std::string content(content_size, 'x'); { BlueFS fs(g_ceph_context); ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, reserved)); ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false })); ASSERT_EQ(0, fs.mount()); ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false })); { BlueFS::FileWriter *h; ASSERT_EQ(0, fs.mkdir("dir")); ASSERT_EQ(0, fs.open_for_write(DIR_NAME, FILE_NAME, &h, false)); h->append(content.c_str(), content.length()); fs.fsync(h); fs.close_writer(h); } { BlueFS::FileReader *h; ASSERT_EQ(0, fs.open_for_read(DIR_NAME, FILE_NAME, &h)); bufferlist bl; ASSERT_EQ(content.length(), fs.read(h, 0, read_size, &bl, NULL)); ASSERT_EQ(0, strncmp(content.c_str(), bl.c_str(), content.length())); delete h; } { BlueFS::FileWriter *h; ASSERT_EQ(0, fs.open_for_write(DIR_NAME, FILE_NAME, &h, true)); fs.truncate(h, 0); fs.fsync(h); fs.close_writer(h); } } { //this was broken due to https://tracker.ceph.com/issues/55307 BlueFS fs(g_ceph_context); ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, reserved)); ASSERT_EQ(0, fs.mount()); BlueFS::FileReader *h; ASSERT_EQ(0, fs.open_for_read(DIR_NAME, FILE_NAME, &h)); bufferlist bl; ASSERT_EQ(0, fs.read(h, 0, read_size, &bl, NULL)); delete h; fs.umount(); } } } TEST(BlueFS, test_shared_alloc) { uint64_t size = 1048576 * 128; TempBdev bdev_slow{size}; uint64_t size_db = 1048576 * 8; TempBdev bdev_db{size_db}; ConfSaver 
conf(g_ceph_context->_conf); conf.SetVal("bluefs_shared_alloc_size", "1048576"); bluefs_shared_alloc_context_t shared_alloc; uint64_t shared_alloc_unit = 4096; shared_alloc.set( Allocator::create(g_ceph_context, g_ceph_context->_conf->bluefs_allocator, size, shared_alloc_unit, 0, 0, "test shared allocator"), shared_alloc_unit); shared_alloc.a->init_add_free(0, size); BlueFS fs(g_ceph_context); // DB device is fully utilized ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev_db.path, false, size_db - 0x1000)); ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_SLOW, bdev_slow.path, false, 0, &shared_alloc)); uuid_d fsid; ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false })); ASSERT_EQ(0, fs.mount()); ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false })); { for (int i=0; i<10; i++) { string dir = "dir."; dir.append(to_string(i)); ASSERT_EQ(0, fs.mkdir(dir)); for (int j=0; j<10; j++) { string file = "file."; file.append(to_string(j)); BlueFS::FileWriter *h; ASSERT_EQ(0, fs.open_for_write(dir, file, &h, false)); ASSERT_NE(nullptr, h); auto sg = make_scope_guard([&fs, h] { fs.close_writer(h); }); bufferlist bl; std::unique_ptr<char[]> buf = gen_buffer(4096); bufferptr bp = buffer::claim_char(4096, buf.get()); bl.push_back(bp); h->append(bl.c_str(), bl.length()); fs.fsync(h); } } } { for (int i=0; i<10; i+=2) { string dir = "dir."; dir.append(to_string(i)); for (int j=0; j<10; j++) { string file = "file."; file.append(to_string(j)); fs.unlink(dir, file); fs.sync_metadata(false); } ASSERT_EQ(0, fs.rmdir(dir)); fs.sync_metadata(false); } } fs.compact_log(); auto *logger = fs.get_perf_counters(); ASSERT_NE(logger->get(l_bluefs_alloc_shared_dev_fallbacks), 0); auto num_files = logger->get(l_bluefs_num_files); fs.umount(); fs.mount(); ASSERT_EQ(num_files, logger->get(l_bluefs_num_files)); fs.umount(); } TEST(BlueFS, test_shared_alloc_sparse) { uint64_t size = 1048576 * 128 * 2; uint64_t main_unit = 4096; uint64_t bluefs_alloc_unit = 1048576; TempBdev 
bdev_slow{size}; ConfSaver conf(g_ceph_context->_conf); conf.SetVal("bluefs_shared_alloc_size", stringify(bluefs_alloc_unit).c_str()); bluefs_shared_alloc_context_t shared_alloc; shared_alloc.set( Allocator::create(g_ceph_context, g_ceph_context->_conf->bluefs_allocator, size, main_unit, 0, 0, "test shared allocator"), main_unit); // prepare sparse free space but let's have a continuous chunk at // the beginning to fit initial log's fnode into superblock, // we don't have any tricks to deal with sparse allocations // (and hence long fnode) at mkfs shared_alloc.a->init_add_free(bluefs_alloc_unit, 4 * bluefs_alloc_unit); for(uint64_t i = 5 * bluefs_alloc_unit; i < size; i += 2 * main_unit) { shared_alloc.a->init_add_free(i, main_unit); } BlueFS fs(g_ceph_context); ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev_slow.path, false, 0, &shared_alloc)); uuid_d fsid; ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false })); ASSERT_EQ(0, fs.mount()); ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false })); { for (int i=0; i<10; i++) { string dir = "dir."; dir.append(to_string(i)); ASSERT_EQ(0, fs.mkdir(dir)); for (int j=0; j<10; j++) { string file = "file."; file.append(to_string(j)); BlueFS::FileWriter *h; ASSERT_EQ(0, fs.open_for_write(dir, file, &h, false)); ASSERT_NE(nullptr, h); auto sg = make_scope_guard([&fs, h] { fs.close_writer(h); }); bufferlist bl; std::unique_ptr<char[]> buf = gen_buffer(4096); bufferptr bp = buffer::claim_char(4096, buf.get()); bl.push_back(bp); h->append(bl.c_str(), bl.length()); fs.fsync(h); } } } { for (int i=0; i<10; i+=2) { string dir = "dir."; dir.append(to_string(i)); for (int j=0; j<10; j++) { string file = "file."; file.append(to_string(j)); fs.unlink(dir, file); fs.sync_metadata(false); } ASSERT_EQ(0, fs.rmdir(dir)); fs.sync_metadata(false); } } fs.compact_log(); auto *logger = fs.get_perf_counters(); ASSERT_NE(logger->get(l_bluefs_alloc_shared_size_fallbacks), 0); auto num_files = 
logger->get(l_bluefs_num_files); fs.umount(); fs.mount(); ASSERT_EQ(num_files, logger->get(l_bluefs_num_files)); fs.umount(); } TEST(BlueFS, test_4k_shared_alloc) { uint64_t size = 1048576 * 128 * 2; uint64_t main_unit = 4096; uint64_t bluefs_alloc_unit = main_unit; TempBdev bdev_slow{size}; ConfSaver conf(g_ceph_context->_conf); conf.SetVal("bluefs_shared_alloc_size", stringify(bluefs_alloc_unit).c_str()); bluefs_shared_alloc_context_t shared_alloc; shared_alloc.set( Allocator::create(g_ceph_context, g_ceph_context->_conf->bluefs_allocator, size, main_unit, 0, 0, "test shared allocator"), main_unit); shared_alloc.a->init_add_free(bluefs_alloc_unit, size - bluefs_alloc_unit); BlueFS fs(g_ceph_context); ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev_slow.path, false, 0, &shared_alloc)); uuid_d fsid; ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false })); ASSERT_EQ(0, fs.mount()); ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false })); { for (int i=0; i<10; i++) { string dir = "dir."; dir.append(to_string(i)); ASSERT_EQ(0, fs.mkdir(dir)); for (int j=0; j<10; j++) { string file = "file."; file.append(to_string(j)); BlueFS::FileWriter *h; ASSERT_EQ(0, fs.open_for_write(dir, file, &h, false)); ASSERT_NE(nullptr, h); auto sg = make_scope_guard([&fs, h] { fs.close_writer(h); }); bufferlist bl; std::unique_ptr<char[]> buf = gen_buffer(4096); bufferptr bp = buffer::claim_char(4096, buf.get()); bl.push_back(bp); h->append(bl.c_str(), bl.length()); fs.fsync(h); } } } { for (int i=0; i<10; i+=2) { string dir = "dir."; dir.append(to_string(i)); for (int j=0; j<10; j++) { string file = "file."; file.append(to_string(j)); fs.unlink(dir, file); fs.sync_metadata(false); } ASSERT_EQ(0, fs.rmdir(dir)); fs.sync_metadata(false); } } fs.compact_log(); auto *logger = fs.get_perf_counters(); ASSERT_EQ(logger->get(l_bluefs_alloc_shared_dev_fallbacks), 0); ASSERT_EQ(logger->get(l_bluefs_alloc_shared_size_fallbacks), 0); auto num_files = 
logger->get(l_bluefs_num_files); fs.umount(); fs.mount(); ASSERT_EQ(num_files, logger->get(l_bluefs_num_files)); fs.umount(); } void create_files(BlueFS &fs, atomic_bool& stop_creating, atomic_bool& started_creating) { uint32_t i = 0; stringstream ss; string dir = "dir."; ss << std::this_thread::get_id(); dir.append(ss.str()); dir.append("."); dir.append(to_string(i)); ASSERT_EQ(0, fs.mkdir(dir)); while (!stop_creating.load()) { string file = "file."; file.append(to_string(i)); BlueFS::FileWriter *h; ASSERT_EQ(0, fs.open_for_write(dir, file, &h, false)); ASSERT_NE(nullptr, h); fs.close_writer(h); i++; started_creating = true; } } TEST(BlueFS, test_concurrent_dir_link_and_compact_log_56210) { uint64_t size = 1048576 * 128; TempBdev bdev{size}; ConfSaver conf(g_ceph_context->_conf); conf.SetVal("bluefs_alloc_size", "65536"); conf.SetVal("bluefs_compact_log_sync", "false"); // make sure fsync always trigger log compact conf.SetVal("bluefs_log_compact_min_ratio", "0"); conf.SetVal("bluefs_log_compact_min_size", "0"); conf.ApplyChanges(); for (int i=0; i<10; ++i) { BlueFS fs(g_ceph_context); ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576)); uuid_d fsid; ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false })); ASSERT_EQ(0, fs.mount()); ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false })); { atomic_bool stop_creating{false}; atomic_bool started_creating{false}; std::thread create_thread; create_thread = std::thread(create_files, std::ref(fs), std::ref(stop_creating), std::ref(started_creating)); while (!started_creating.load()) { } BlueFS::FileWriter *h; ASSERT_EQ(0, fs.mkdir("foo")); ASSERT_EQ(0, fs.open_for_write("foo", "bar", &h, false)); fs.fsync(h); fs.close_writer(h); stop_creating = true; do_join(create_thread); fs.umount(true); //do not compact on exit! 
ASSERT_EQ(0, fs.mount()); fs.umount(); } } } int main(int argc, char **argv) { auto args = argv_to_vec(argc, argv); map<string,string> defaults = { { "debug_bluefs", "1/20" }, { "debug_bdev", "1/20" } }; auto cct = global_init(&defaults, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); common_init_finish(g_ceph_context); g_ceph_context->_conf.set_val( "enable_experimental_unrecoverable_data_corrupting_features", "*"); g_ceph_context->_conf.apply_changes(nullptr); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
44,357
30.172171
103
cc
null
ceph-main/src/test/objectstore/test_bluestore_types.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "include/types.h" #include "os/bluestore/bluestore_types.h" #include "gtest/gtest.h" #include "include/stringify.h" #include "common/ceph_time.h" #include "os/bluestore/BlueStore.h" #include "os/bluestore/simple_bitmap.h" #include "os/bluestore/AvlAllocator.h" #include "common/ceph_argparse.h" #include "global/global_init.h" #include "global/global_context.h" #include "perfglue/heap_profiler.h" #include <sstream> #define _STR(x) #x #define STRINGIFY(x) _STR(x) using namespace std; TEST(bluestore, sizeof) { #define P(t) cout << STRINGIFY(t) << "\t" << sizeof(t) << std::endl P(BlueStore::Onode); P(BlueStore::Extent); P(BlueStore::Blob); P(BlueStore::SharedBlob); P(BlueStore::ExtentMap); P(BlueStore::extent_map_t); P(BlueStore::blob_map_t); P(BlueStore::BufferSpace); P(BlueStore::Buffer); P(bluestore_onode_t); P(bluestore_blob_t); P(PExtentVector); P(ghobject_t); P(bluestore_shared_blob_t); P(bluestore_extent_ref_map_t); P(bluestore_extent_ref_map_t::record_t); P(bluestore_blob_use_tracker_t); P(std::atomic_int); P(BlueStore::SharedBlobRef); P(boost::intrusive::set_base_hook<>); P(boost::intrusive::unordered_set_base_hook<>); P(bufferlist); P(bufferptr); P(range_seg_t); P(sb_info_t); P(SimpleBitmap); cout << "map<uint64_t,uint64_t>\t" << sizeof(map<uint64_t,uint64_t>) << std::endl; cout << "map<char,char>\t" << sizeof(map<char,char>) << std::endl; } void dump_mempools() { ostringstream ostr; auto f = Formatter::create_unique("json-pretty", "json-pretty", "json-pretty"); ostr << "Mempools: "; f->open_object_section("mempools"); mempool::dump(f.get()); f->close_section(); f->flush(ostr); cout << ostr.str() << std::endl; } /*void get_mempool_stats(uint64_t* total_bytes, uint64_t* total_items) { uint64_t meta_allocated = mempool::bluestore_cache_meta::allocated_bytes(); uint64_t onode_allocated = mempool::bluestore_cache_onode::allocated_bytes(); uint64_t 
other_allocated = mempool::bluestore_cache_other::allocated_bytes(); uint64_t meta_items = mempool::bluestore_cache_meta::allocated_items(); uint64_t onode_items = mempool::bluestore_cache_onode::allocated_items(); uint64_t other_items = mempool::bluestore_cache_other::allocated_items(); cout << "meta(" << meta_allocated << "/" << meta_items << ") onode(" << onode_allocated << "/" << onode_items << ") other(" << other_allocated << "/" << other_items << ")" << std::endl; *total_bytes = meta_allocated + onode_allocated + other_allocated; *total_items = onode_items; }*/ TEST(sb_info_space_efficient_map_t, basic) { sb_info_space_efficient_map_t sb_info; const size_t num_shared = 1000; for (size_t i = 0; i < num_shared; i += 2) { auto& sbi = sb_info.add_maybe_stray(i); sbi.pool_id = i; } ASSERT_TRUE(sb_info.find(0) != sb_info.end()); ASSERT_TRUE(sb_info.find(1) == sb_info.end()); ASSERT_TRUE(sb_info.find(2) != sb_info.end()); ASSERT_TRUE(sb_info.find(4)->pool_id == 4); ASSERT_TRUE(sb_info.find(num_shared) == sb_info.end()); // ordered insertion sb_info.add_or_adopt(num_shared).pool_id = num_shared; ASSERT_TRUE(sb_info.find(num_shared) != sb_info.end()); ASSERT_TRUE(sb_info.find(num_shared)->pool_id == num_shared); // out of order insertion sb_info.add_or_adopt(1).pool_id = 1; ASSERT_TRUE(sb_info.find(1) != sb_info.end()); ASSERT_TRUE(sb_info.find(1)->pool_id == 1); // ordered insertion sb_info.add_maybe_stray(num_shared + 1).pool_id = num_shared + 1; ASSERT_TRUE(sb_info.find(num_shared + 1) != sb_info.end()); ASSERT_TRUE(sb_info.find(num_shared + 1)->pool_id == num_shared + 1); // out of order insertion sb_info.add_maybe_stray(105).pool_id = 105; ASSERT_TRUE(sb_info.find(105) != sb_info.end()); ASSERT_TRUE(sb_info.find(105)->pool_id == 105); } TEST(sb_info_space_efficient_map_t, size) { const size_t num_shared = 10000000; sb_info_space_efficient_map_t sb_info; BlueStore store(g_ceph_context, "", 4096); BlueStore::OnodeCacheShard* oc = BlueStore::OnodeCacheShard::create( 
g_ceph_context, "lru", NULL); BlueStore::BufferCacheShard* bc = BlueStore::BufferCacheShard::create( g_ceph_context, "lru", NULL); auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t()); for (size_t i = 0; i < num_shared; i++) { auto& sbi = sb_info.add_or_adopt(i); // primarily to silent the 'unused' warning ceph_assert(sbi.pool_id == sb_info_t::INVALID_POOL_ID); } dump_mempools(); } TEST(bluestore_extent_ref_map_t, add) { bluestore_extent_ref_map_t m; m.get(10, 10); ASSERT_EQ(1u, m.ref_map.size()); cout << m << std::endl; m.get(20, 10); cout << m << std::endl; ASSERT_EQ(1u, m.ref_map.size()); ASSERT_EQ(20u, m.ref_map[10].length); ASSERT_EQ(1u, m.ref_map[10].refs); m.get(40, 10); cout << m << std::endl; ASSERT_EQ(2u, m.ref_map.size()); m.get(30, 10); cout << m << std::endl; ASSERT_EQ(1u, m.ref_map.size()); m.get(50, 10); cout << m << std::endl; ASSERT_EQ(1u, m.ref_map.size()); m.get(5, 5); cout << m << std::endl; ASSERT_EQ(1u, m.ref_map.size()); } TEST(bluestore_extent_ref_map_t, get) { bluestore_extent_ref_map_t m; m.get(00, 30); cout << m << std::endl; m.get(10, 10); cout << m << std::endl; ASSERT_EQ(3u, m.ref_map.size()); ASSERT_EQ(10u, m.ref_map[0].length); ASSERT_EQ(1u, m.ref_map[0].refs); ASSERT_EQ(10u, m.ref_map[10].length); ASSERT_EQ(2u, m.ref_map[10].refs); ASSERT_EQ(10u, m.ref_map[20].length); ASSERT_EQ(1u, m.ref_map[20].refs); m.get(20, 5); cout << m << std::endl; ASSERT_EQ(3u, m.ref_map.size()); ASSERT_EQ(15u, m.ref_map[10].length); ASSERT_EQ(2u, m.ref_map[10].refs); ASSERT_EQ(5u, m.ref_map[25].length); ASSERT_EQ(1u, m.ref_map[25].refs); m.get(5, 20); cout << m << std::endl; ASSERT_EQ(4u, m.ref_map.size()); ASSERT_EQ(5u, m.ref_map[0].length); ASSERT_EQ(1u, m.ref_map[0].refs); ASSERT_EQ(5u, m.ref_map[5].length); ASSERT_EQ(2u, m.ref_map[5].refs); ASSERT_EQ(15u, m.ref_map[10].length); ASSERT_EQ(3u, m.ref_map[10].refs); ASSERT_EQ(5u, m.ref_map[25].length); ASSERT_EQ(1u, m.ref_map[25].refs); m.get(25, 3); cout << m << std::endl; 
ASSERT_EQ(5u, m.ref_map.size()); ASSERT_EQ(5u, m.ref_map[0].length); ASSERT_EQ(1u, m.ref_map[0].refs); ASSERT_EQ(5u, m.ref_map[5].length); ASSERT_EQ(2u, m.ref_map[5].refs); ASSERT_EQ(15u, m.ref_map[10].length); ASSERT_EQ(3u, m.ref_map[10].refs); ASSERT_EQ(3u, m.ref_map[25].length); ASSERT_EQ(2u, m.ref_map[25].refs); ASSERT_EQ(2u, m.ref_map[28].length); ASSERT_EQ(1u, m.ref_map[28].refs); } TEST(bluestore_extent_ref_map_t, put) { bluestore_extent_ref_map_t m; PExtentVector r; bool maybe_unshared = false; m.get(10, 30); maybe_unshared = true; m.put(10, 30, &r, &maybe_unshared); cout << m << " " << r << " " << (int)maybe_unshared << std::endl; ASSERT_EQ(0u, m.ref_map.size()); ASSERT_EQ(1u, r.size()); ASSERT_EQ(10u, r[0].offset); ASSERT_EQ(30u, r[0].length); ASSERT_TRUE(maybe_unshared); r.clear(); m.get(10, 30); m.get(20, 10); maybe_unshared = true; m.put(10, 30, &r, &maybe_unshared); cout << m << " " << r << " " << (int)maybe_unshared << std::endl; ASSERT_EQ(1u, m.ref_map.size()); ASSERT_EQ(10u, m.ref_map[20].length); ASSERT_EQ(1u, m.ref_map[20].refs); ASSERT_EQ(2u, r.size()); ASSERT_EQ(10u, r[0].offset); ASSERT_EQ(10u, r[0].length); ASSERT_EQ(30u, r[1].offset); ASSERT_EQ(10u, r[1].length); ASSERT_TRUE(maybe_unshared); r.clear(); m.get(30, 10); m.get(30, 10); maybe_unshared = true; m.put(20, 15, &r, &maybe_unshared); cout << m << " " << r << " " << (int)maybe_unshared << std::endl; ASSERT_EQ(2u, m.ref_map.size()); ASSERT_EQ(5u, m.ref_map[30].length); ASSERT_EQ(1u, m.ref_map[30].refs); ASSERT_EQ(5u, m.ref_map[35].length); ASSERT_EQ(2u, m.ref_map[35].refs); ASSERT_EQ(1u, r.size()); ASSERT_EQ(20u, r[0].offset); ASSERT_EQ(10u, r[0].length); ASSERT_FALSE(maybe_unshared); r.clear(); maybe_unshared = true; m.put(33, 5, &r, &maybe_unshared); cout << m << " " << r << " " << (int)maybe_unshared << std::endl; ASSERT_EQ(3u, m.ref_map.size()); ASSERT_EQ(3u, m.ref_map[30].length); ASSERT_EQ(1u, m.ref_map[30].refs); ASSERT_EQ(3u, m.ref_map[35].length); ASSERT_EQ(1u, 
m.ref_map[35].refs); ASSERT_EQ(2u, m.ref_map[38].length); ASSERT_EQ(2u, m.ref_map[38].refs); ASSERT_EQ(1u, r.size()); ASSERT_EQ(33u, r[0].offset); ASSERT_EQ(2u, r[0].length); ASSERT_FALSE(maybe_unshared); r.clear(); maybe_unshared = true; m.put(38, 2, &r, &maybe_unshared); cout << m << " " << r << " " << (int)maybe_unshared << std::endl; ASSERT_TRUE(maybe_unshared); } TEST(bluestore_extent_ref_map_t, contains) { bluestore_extent_ref_map_t m; m.get(10, 30); ASSERT_TRUE(m.contains(10, 30)); ASSERT_TRUE(m.contains(10, 10)); ASSERT_TRUE(m.contains(30, 10)); ASSERT_FALSE(m.contains(0, 10)); ASSERT_FALSE(m.contains(0, 20)); ASSERT_FALSE(m.contains(0, 100)); ASSERT_FALSE(m.contains(40, 10)); ASSERT_FALSE(m.contains(30, 11)); m.get(40, 10); m.get(40, 10); ASSERT_TRUE(m.contains(30, 11)); ASSERT_TRUE(m.contains(30, 20)); ASSERT_TRUE(m.contains(10, 40)); ASSERT_FALSE(m.contains(0, 50)); ASSERT_FALSE(m.contains(40, 20)); m.get(60, 100); ASSERT_TRUE(m.contains(60, 10)); ASSERT_TRUE(m.contains(40, 10)); ASSERT_FALSE(m.contains(40, 11)); ASSERT_FALSE(m.contains(40, 20)); ASSERT_FALSE(m.contains(40, 30)); ASSERT_FALSE(m.contains(40, 3000)); ASSERT_FALSE(m.contains(4000, 30)); } TEST(bluestore_extent_ref_map_t, intersects) { bluestore_extent_ref_map_t m; m.get(10, 30); ASSERT_TRUE(m.intersects(10, 30)); ASSERT_TRUE(m.intersects(0, 11)); ASSERT_TRUE(m.intersects(10, 40)); ASSERT_TRUE(m.intersects(15, 40)); ASSERT_FALSE(m.intersects(0, 10)); ASSERT_FALSE(m.intersects(0, 5)); ASSERT_FALSE(m.intersects(40, 20)); ASSERT_FALSE(m.intersects(41, 20)); m.get(40, 10); m.get(40, 10); ASSERT_TRUE(m.intersects(0, 100)); ASSERT_TRUE(m.intersects(10, 35)); ASSERT_TRUE(m.intersects(45, 10)); ASSERT_FALSE(m.intersects(50, 5)); m.get(60, 100); ASSERT_TRUE(m.intersects(45, 10)); ASSERT_TRUE(m.intersects(55, 10)); ASSERT_TRUE(m.intersects(50, 11)); ASSERT_FALSE(m.intersects(50, 10)); ASSERT_FALSE(m.intersects(51, 9)); ASSERT_FALSE(m.intersects(55, 1)); } TEST(bluestore_blob_t, calc_csum) { bufferlist 
bl; bl.append("asdfghjkqwertyuizxcvbnm,"); bufferlist bl2; bl2.append("xxxxXXXXyyyyYYYYzzzzZZZZ"); bufferlist f; f.substr_of(bl, 0, 8); bufferlist m; m.substr_of(bl, 8, 8); bufferlist e; e.substr_of(bl, 16, 8); bufferlist n; n.append("12345678"); for (unsigned csum_type = Checksummer::CSUM_NONE + 1; csum_type < Checksummer::CSUM_MAX; ++csum_type) { cout << "csum_type " << Checksummer::get_csum_type_string(csum_type) << std::endl; bluestore_blob_t b; int bad_off; uint64_t bad_csum; ASSERT_EQ(0, b.verify_csum(0, bl, &bad_off, &bad_csum)); ASSERT_EQ(-1, bad_off); b.init_csum(csum_type, 3, 24); cout << " value size " << b.get_csum_value_size() << std::endl; b.calc_csum(0, bl); ASSERT_EQ(0, b.verify_csum(0, bl, &bad_off, &bad_csum)); ASSERT_EQ(-1, bad_off); ASSERT_EQ(-1, b.verify_csum(0, bl2, &bad_off, &bad_csum)); ASSERT_EQ(0, bad_off); ASSERT_EQ(0, b.verify_csum(0, f, &bad_off, &bad_csum)); ASSERT_EQ(-1, bad_off); ASSERT_EQ(-1, b.verify_csum(8, f, &bad_off, &bad_csum)); ASSERT_EQ(8, bad_off); ASSERT_EQ(-1, b.verify_csum(16, f, &bad_off, &bad_csum)); ASSERT_EQ(16, bad_off); ASSERT_EQ(-1, b.verify_csum(0, m, &bad_off, &bad_csum)); ASSERT_EQ(0, bad_off); ASSERT_EQ(0, b.verify_csum(8, m, &bad_off, &bad_csum)); ASSERT_EQ(-1, bad_off); ASSERT_EQ(-1, b.verify_csum(16, m, &bad_off, &bad_csum)); ASSERT_EQ(16, bad_off); ASSERT_EQ(-1, b.verify_csum(0, e, &bad_off, &bad_csum)); ASSERT_EQ(0, bad_off); ASSERT_EQ(-1, b.verify_csum(8, e, &bad_off, &bad_csum)); ASSERT_EQ(8, bad_off); ASSERT_EQ(0, b.verify_csum(16, e, &bad_off, &bad_csum)); ASSERT_EQ(-1, bad_off); b.calc_csum(8, n); ASSERT_EQ(0, b.verify_csum(0, f, &bad_off, &bad_csum)); ASSERT_EQ(-1, bad_off); ASSERT_EQ(0, b.verify_csum(8, n, &bad_off, &bad_csum)); ASSERT_EQ(-1, bad_off); ASSERT_EQ(0, b.verify_csum(16, e, &bad_off, &bad_csum)); ASSERT_EQ(-1, bad_off); ASSERT_EQ(-1, b.verify_csum(0, bl, &bad_off, &bad_csum)); ASSERT_EQ(8, bad_off); } } TEST(bluestore_blob_t, csum_bench) { bufferlist bl; bufferptr bp(10485760); for 
(char *a = bp.c_str(); a < bp.c_str() + bp.length(); ++a) *a = (unsigned long)a & 0xff; bl.append(bp); int count = 256; for (unsigned csum_type = 1; csum_type < Checksummer::CSUM_MAX; ++csum_type) { bluestore_blob_t b; b.init_csum(csum_type, 12, bl.length()); ceph::mono_clock::time_point start = ceph::mono_clock::now(); for (int i = 0; i<count; ++i) { b.calc_csum(0, bl); } ceph::mono_clock::time_point end = ceph::mono_clock::now(); auto dur = std::chrono::duration_cast<ceph::timespan>(end - start); double mbsec = (double)count * (double)bl.length() / 1000000.0 / (double)dur.count() * 1000000000.0; cout << "csum_type " << Checksummer::get_csum_type_string(csum_type) << ", " << dur << " seconds, " << mbsec << " MB/sec" << std::endl; } } TEST(Blob, put_ref) { { BlueStore store(g_ceph_context, "", 4096); BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create( g_ceph_context, "lru", NULL); BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create( g_ceph_context, "lru", NULL); auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t()); BlueStore::Blob b; b.shared_blob = new BlueStore::SharedBlob(coll.get()); b.dirty_blob().allocated_test(bluestore_pextent_t(0x40715000, 0x2000)); b.dirty_blob().allocated_test( bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET, 0x8000)); b.dirty_blob().allocated_test(bluestore_pextent_t(0x4071f000, 0x5000)); b.get_ref(coll.get(), 0, 0x1200); b.get_ref(coll.get(), 0xae00, 0x4200); ASSERT_EQ(0x5400u, b.get_referenced_bytes()); cout << b << std::endl; PExtentVector r; ASSERT_FALSE(b.put_ref(coll.get(), 0, 0x1200, &r)); ASSERT_EQ(0x4200u, b.get_referenced_bytes()); cout << " r " << r << std::endl; cout << b << std::endl; r.clear(); ASSERT_TRUE(b.put_ref(coll.get(), 0xae00, 0x4200, &r)); ASSERT_EQ(0u, b.get_referenced_bytes()); cout << " r " << r << std::endl; cout << b << std::endl; } unsigned mas = 4096; BlueStore store(g_ceph_context, "", 8192); BlueStore::OnodeCacheShard *oc = 
BlueStore::OnodeCacheShard::create( g_ceph_context, "lru", NULL); BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create( g_ceph_context, "lru", NULL); auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t()); { BlueStore::Blob B; B.shared_blob = new BlueStore::SharedBlob(coll.get()); bluestore_blob_t& b = B.dirty_blob(); PExtentVector r; b.allocated_test(bluestore_pextent_t(0, mas * 2)); B.get_ref(coll.get(), 0, mas*2); ASSERT_EQ(mas * 2, B.get_referenced_bytes()); ASSERT_TRUE(b.is_allocated(0, mas*2)); ASSERT_TRUE(B.put_ref(coll.get(), 0, mas*2, &r)); ASSERT_EQ(0u, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(1u, r.size()); ASSERT_EQ(0u, r[0].offset); ASSERT_EQ(mas*2, r[0].length); ASSERT_FALSE(b.is_allocated(0, mas*2)); ASSERT_FALSE(b.is_allocated(0, mas)); ASSERT_FALSE(b.is_allocated(mas, 0)); ASSERT_FALSE(b.get_extents()[0].is_valid()); ASSERT_EQ(mas*2, b.get_extents()[0].length); } { BlueStore::Blob B; B.shared_blob = new BlueStore::SharedBlob(coll.get()); bluestore_blob_t& b = B.dirty_blob(); PExtentVector r; b.allocated_test(bluestore_pextent_t(123, mas * 2)); B.get_ref(coll.get(), 0, mas*2); ASSERT_EQ(mas * 2, B.get_referenced_bytes()); ASSERT_FALSE(B.put_ref(coll.get(), 0, mas, &r)); ASSERT_EQ(mas, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(0u, r.size()); ASSERT_TRUE(b.is_allocated(0, mas*2)); ASSERT_TRUE(B.put_ref(coll.get(), mas, mas, &r)); ASSERT_EQ(0u, B.get_referenced_bytes()); ASSERT_EQ(0u, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(1u, r.size()); ASSERT_EQ(123u, r[0].offset); ASSERT_EQ(mas*2, r[0].length); ASSERT_FALSE(b.is_allocated(0, mas*2)); ASSERT_FALSE(b.get_extents()[0].is_valid()); ASSERT_EQ(mas*2, b.get_extents()[0].length); } { BlueStore::Blob B; B.shared_blob = new BlueStore::SharedBlob(coll.get()); bluestore_blob_t& b = B.dirty_blob(); PExtentVector r; b.allocated_test(bluestore_pextent_t(1, 
mas)); b.allocated_test(bluestore_pextent_t(2, mas)); b.allocated_test(bluestore_pextent_t(3, mas)); b.allocated_test(bluestore_pextent_t(4, mas)); B.get_ref(coll.get(), 0, mas*4); ASSERT_EQ(mas * 4, B.get_referenced_bytes()); ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r)); ASSERT_EQ(mas * 3, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(0u, r.size()); ASSERT_TRUE(b.is_allocated(0, mas*4)); ASSERT_TRUE(b.is_allocated(mas, mas)); ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas, &r)); ASSERT_EQ(mas * 2, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(0u, r.size()); ASSERT_TRUE(b.is_allocated(mas*2, mas)); ASSERT_TRUE(b.is_allocated(0, mas*4)); ASSERT_FALSE(B.put_ref(coll.get(), mas*3, mas, &r)); ASSERT_EQ(mas, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(2u, r.size()); ASSERT_EQ(3u, r[0].offset); ASSERT_EQ(mas, r[0].length); ASSERT_EQ(4u, r[1].offset); ASSERT_EQ(mas, r[1].length); ASSERT_TRUE(b.is_allocated(0, mas*2)); ASSERT_FALSE(b.is_allocated(mas*2, mas*2)); ASSERT_TRUE(b.get_extents()[0].is_valid()); ASSERT_TRUE(b.get_extents()[1].is_valid()); ASSERT_FALSE(b.get_extents()[2].is_valid()); ASSERT_EQ(3u, b.get_extents().size()); } { BlueStore::Blob B; B.shared_blob = new BlueStore::SharedBlob(coll.get()); bluestore_blob_t& b = B.dirty_blob(); PExtentVector r; b.allocated_test(bluestore_pextent_t(1, mas)); b.allocated_test(bluestore_pextent_t(2, mas)); b.allocated_test(bluestore_pextent_t(3, mas)); b.allocated_test(bluestore_pextent_t(4, mas)); b.allocated_test(bluestore_pextent_t(5, mas)); b.allocated_test(bluestore_pextent_t(6, mas)); B.get_ref(coll.get(), 0, mas*6); ASSERT_EQ(mas * 6, B.get_referenced_bytes()); ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r)); ASSERT_EQ(mas * 5, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(0u, r.size()); ASSERT_TRUE(b.is_allocated(0, mas*6)); ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas, 
&r)); ASSERT_EQ(mas * 4, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(0u, r.size()); ASSERT_TRUE(b.is_allocated(0, mas*6)); ASSERT_FALSE(B.put_ref(coll.get(), mas*3, mas, &r)); ASSERT_EQ(mas * 3, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(2u, r.size()); ASSERT_EQ(3u, r[0].offset); ASSERT_EQ(mas, r[0].length); ASSERT_EQ(4u, r[1].offset); ASSERT_EQ(mas, r[1].length); ASSERT_TRUE(b.is_allocated(0, mas*2)); ASSERT_FALSE(b.is_allocated(mas*2, mas*2)); ASSERT_TRUE(b.is_allocated(mas*4, mas*2)); ASSERT_EQ(5u, b.get_extents().size()); ASSERT_TRUE(b.get_extents()[0].is_valid()); ASSERT_TRUE(b.get_extents()[1].is_valid()); ASSERT_FALSE(b.get_extents()[2].is_valid()); ASSERT_TRUE(b.get_extents()[3].is_valid()); ASSERT_TRUE(b.get_extents()[4].is_valid()); } { BlueStore::Blob B; B.shared_blob = new BlueStore::SharedBlob(coll.get()); bluestore_blob_t& b = B.dirty_blob(); PExtentVector r; b.allocated_test(bluestore_pextent_t(1, mas * 6)); B.get_ref(coll.get(), 0, mas*6); ASSERT_EQ(mas * 6, B.get_referenced_bytes()); ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r)); ASSERT_EQ(mas * 5, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(0u, r.size()); ASSERT_TRUE(b.is_allocated(0, mas*6)); ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas, &r)); ASSERT_EQ(mas * 4, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(0u, r.size()); ASSERT_TRUE(b.is_allocated(0, mas*6)); ASSERT_FALSE(B.put_ref(coll.get(), mas*3, mas, &r)); ASSERT_EQ(mas * 3, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(1u, r.size()); ASSERT_EQ(0x2001u, r[0].offset); ASSERT_EQ(mas*2, r[0].length); ASSERT_TRUE(b.is_allocated(0, mas*2)); ASSERT_FALSE(b.is_allocated(mas*2, mas*2)); ASSERT_TRUE(b.is_allocated(mas*4, mas*2)); ASSERT_EQ(3u, b.get_extents().size()); ASSERT_TRUE(b.get_extents()[0].is_valid()); ASSERT_FALSE(b.get_extents()[1].is_valid()); 
ASSERT_TRUE(b.get_extents()[2].is_valid()); } { BlueStore::Blob B; B.shared_blob = new BlueStore::SharedBlob(coll.get()); bluestore_blob_t& b = B.dirty_blob(); PExtentVector r; b.allocated_test(bluestore_pextent_t(1, mas * 4)); b.allocated_test(bluestore_pextent_t(2, mas * 4)); b.allocated_test(bluestore_pextent_t(3, mas * 4)); B.get_ref(coll.get(), 0, mas*12); ASSERT_EQ(mas * 12, B.get_referenced_bytes()); ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r)); ASSERT_EQ(mas * 11, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(0u, r.size()); ASSERT_TRUE(b.is_allocated(0, mas*12)); ASSERT_FALSE(B.put_ref(coll.get(), mas*9, mas, &r)); ASSERT_EQ(mas * 10, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(0u, r.size()); ASSERT_TRUE(b.is_allocated(0, mas*12)); ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas*7, &r)); ASSERT_EQ(mas * 3, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(3u, r.size()); ASSERT_EQ(0x2001u, r[0].offset); ASSERT_EQ(mas*2, r[0].length); ASSERT_EQ(0x2u, r[1].offset); ASSERT_EQ(mas*4, r[1].length); ASSERT_EQ(0x3u, r[2].offset); ASSERT_EQ(mas*2, r[2].length); ASSERT_TRUE(b.is_allocated(0, mas*2)); ASSERT_FALSE(b.is_allocated(mas*2, mas*8)); ASSERT_TRUE(b.is_allocated(mas*10, mas*2)); ASSERT_EQ(3u, b.get_extents().size()); ASSERT_TRUE(b.get_extents()[0].is_valid()); ASSERT_FALSE(b.get_extents()[1].is_valid()); ASSERT_TRUE(b.get_extents()[2].is_valid()); } { BlueStore::Blob B; B.shared_blob = new BlueStore::SharedBlob(coll.get()); bluestore_blob_t& b = B.dirty_blob(); PExtentVector r; b.allocated_test(bluestore_pextent_t(1, mas * 4)); b.allocated_test(bluestore_pextent_t(2, mas * 4)); b.allocated_test(bluestore_pextent_t(3, mas * 4)); B.get_ref(coll.get(), 0, mas*12); ASSERT_EQ(mas * 12, B.get_referenced_bytes()); ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r)); ASSERT_EQ(mas * 11, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; 
ASSERT_EQ(0u, r.size()); ASSERT_TRUE(b.is_allocated(0, mas*12)); ASSERT_FALSE(B.put_ref(coll.get(), mas*9, mas, &r)); ASSERT_EQ(mas * 10, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(0u, r.size()); ASSERT_TRUE(b.is_allocated(0, mas*12)); ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas*7, &r)); ASSERT_EQ(mas * 3, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(3u, r.size()); ASSERT_EQ(0x2001u, r[0].offset); ASSERT_EQ(mas*2, r[0].length); ASSERT_EQ(0x2u, r[1].offset); ASSERT_EQ(mas*4, r[1].length); ASSERT_EQ(0x3u, r[2].offset); ASSERT_EQ(mas*2, r[2].length); ASSERT_TRUE(b.is_allocated(0, mas*2)); ASSERT_FALSE(b.is_allocated(mas*2, mas*8)); ASSERT_TRUE(b.is_allocated(mas*10, mas*2)); ASSERT_EQ(3u, b.get_extents().size()); ASSERT_TRUE(b.get_extents()[0].is_valid()); ASSERT_FALSE(b.get_extents()[1].is_valid()); ASSERT_TRUE(b.get_extents()[2].is_valid()); ASSERT_FALSE(B.put_ref(coll.get(), 0, mas, &r)); ASSERT_EQ(mas * 2, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(1u, r.size()); ASSERT_EQ(0x1u, r[0].offset); ASSERT_EQ(mas*2, r[0].length); ASSERT_EQ(2u, b.get_extents().size()); ASSERT_FALSE(b.get_extents()[0].is_valid()); ASSERT_TRUE(b.get_extents()[1].is_valid()); ASSERT_TRUE(B.put_ref(coll.get(), mas*10, mas*2, &r)); ASSERT_EQ(mas * 0, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(1u, r.size()); ASSERT_EQ(0x2003u, r[0].offset); ASSERT_EQ(mas*2, r[0].length); ASSERT_EQ(1u, b.get_extents().size()); ASSERT_FALSE(b.get_extents()[0].is_valid()); } { BlueStore::Blob B; B.shared_blob = new BlueStore::SharedBlob(coll.get()); bluestore_blob_t& b = B.dirty_blob(); PExtentVector r; b.allocated_test(bluestore_pextent_t(1, mas * 4)); b.allocated_test(bluestore_pextent_t(2, mas * 4)); b.allocated_test(bluestore_pextent_t(3, mas * 4)); B.get_ref(coll.get(), 0, mas*12); ASSERT_EQ(mas * 12, B.get_referenced_bytes()); 
ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r)); ASSERT_EQ(mas * 11, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(0u, r.size()); ASSERT_TRUE(b.is_allocated(0, mas*12)); ASSERT_FALSE(B.put_ref(coll.get(), mas*9, mas, &r)); ASSERT_EQ(mas * 10, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(0u, r.size()); ASSERT_TRUE(b.is_allocated(0, mas*12)); ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas*7, &r)); ASSERT_EQ(mas * 3, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(3u, r.size()); ASSERT_EQ(0x2001u, r[0].offset); ASSERT_EQ(mas*2, r[0].length); ASSERT_EQ(0x2u, r[1].offset); ASSERT_EQ(mas*4, r[1].length); ASSERT_EQ(0x3u, r[2].offset); ASSERT_EQ(mas*2, r[2].length); ASSERT_TRUE(b.is_allocated(0, mas*2)); ASSERT_FALSE(b.is_allocated(mas*2, mas*8)); ASSERT_TRUE(b.is_allocated(mas*10, mas*2)); ASSERT_EQ(3u, b.get_extents().size()); ASSERT_TRUE(b.get_extents()[0].is_valid()); ASSERT_FALSE(b.get_extents()[1].is_valid()); ASSERT_TRUE(b.get_extents()[2].is_valid()); ASSERT_FALSE(B.put_ref(coll.get(), mas*10, mas*2, &r)); ASSERT_EQ(mas * 1, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(1u, r.size()); ASSERT_EQ(0x2003u, r[0].offset); ASSERT_EQ(mas*2, r[0].length); ASSERT_EQ(2u, b.get_extents().size()); ASSERT_TRUE(b.get_extents()[0].is_valid()); ASSERT_FALSE(b.get_extents()[1].is_valid()); ASSERT_TRUE(B.put_ref(coll.get(), 0, mas, &r)); ASSERT_EQ(mas * 0, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(1u, r.size()); ASSERT_EQ(0x1u, r[0].offset); ASSERT_EQ(mas*2, r[0].length); ASSERT_EQ(1u, b.get_extents().size()); ASSERT_FALSE(b.get_extents()[0].is_valid()); } { BlueStore::Blob B; B.shared_blob = new BlueStore::SharedBlob(coll.get()); bluestore_blob_t& b = B.dirty_blob(); PExtentVector r; b.allocated_test(bluestore_pextent_t(1, mas * 8)); B.get_ref(coll.get(), 0, mas*8); ASSERT_EQ(mas * 8, 
B.get_referenced_bytes()); ASSERT_FALSE(B.put_ref(coll.get(), 0, mas, &r)); ASSERT_EQ(mas * 7, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(0u, r.size()); ASSERT_TRUE(b.is_allocated(0, mas*8)); ASSERT_FALSE(B.put_ref(coll.get(), mas*7, mas, &r)); ASSERT_EQ(mas * 6, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(0u, r.size()); ASSERT_TRUE(b.is_allocated(0, mas*8)); ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas, &r)); ASSERT_EQ(mas * 5, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(0u, r.size()); ASSERT_TRUE(b.is_allocated(0, 8)); ASSERT_FALSE(B.put_ref(coll.get(), mas*3, mas*4, &r)); ASSERT_EQ(mas * 1, B.get_referenced_bytes()); ASSERT_EQ(1u, r.size()); ASSERT_EQ(0x2001u, r[0].offset); ASSERT_EQ(mas*6, r[0].length); ASSERT_TRUE(b.is_allocated(0, mas*2)); ASSERT_FALSE(b.is_allocated(mas*2, mas*6)); ASSERT_EQ(2u, b.get_extents().size()); ASSERT_TRUE(b.get_extents()[0].is_valid()); ASSERT_FALSE(b.get_extents()[1].is_valid()); ASSERT_TRUE(B.put_ref(coll.get(), mas, mas, &r)); ASSERT_EQ(mas * 0, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(1u, r.size()); ASSERT_EQ(0x1u, r[0].offset); ASSERT_EQ(mas*2, r[0].length); ASSERT_EQ(1u, b.get_extents().size()); ASSERT_FALSE(b.get_extents()[0].is_valid()); } // verify csum chunk size if factored in properly { BlueStore::Blob B; B.shared_blob = new BlueStore::SharedBlob(coll.get()); bluestore_blob_t& b = B.dirty_blob(); PExtentVector r; b.allocated_test(bluestore_pextent_t(0, mas*4)); b.init_csum(Checksummer::CSUM_CRC32C, 14, mas * 4); B.get_ref(coll.get(), 0, mas*4); ASSERT_EQ(mas * 4, B.get_referenced_bytes()); ASSERT_TRUE(b.is_allocated(0, mas*4)); ASSERT_FALSE(B.put_ref(coll.get(), 0, mas*3, &r)); ASSERT_EQ(mas * 1, B.get_referenced_bytes()); cout << "r " << r << " " << b << std::endl; ASSERT_EQ(0u, r.size()); ASSERT_TRUE(b.is_allocated(0, mas*4)); 
ASSERT_TRUE(b.get_extents()[0].is_valid()); ASSERT_EQ(mas*4, b.get_extents()[0].length); } { BlueStore::Blob B; B.shared_blob = new BlueStore::SharedBlob(coll.get()); bluestore_blob_t& b = B.dirty_blob(); b.allocated_test(bluestore_pextent_t(0x40101000, 0x4000)); b.allocated_test(bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET, 0x13000)); b.allocated_test(bluestore_pextent_t(0x40118000, 0x7000)); B.get_ref(coll.get(), 0x0, 0x3800); B.get_ref(coll.get(), 0x17c00, 0x6400); ASSERT_EQ(0x3800u + 0x6400u, B.get_referenced_bytes()); b.set_flag(bluestore_blob_t::FLAG_SHARED); b.init_csum(Checksummer::CSUM_CRC32C, 12, 0x1e000); cout << "before: " << B << std::endl; PExtentVector r; ASSERT_FALSE(B.put_ref(coll.get(), 0x1800, 0x2000, &r)); ASSERT_EQ(0x3800u + 0x6400u - 0x2000u, B.get_referenced_bytes()); cout << "after: " << B << std::endl; cout << "r " << r << std::endl; } { BlueStore::Blob B; B.shared_blob = new BlueStore::SharedBlob(coll.get()); bluestore_blob_t& b = B.dirty_blob(); b.allocated_test(bluestore_pextent_t(1, 0x5000)); b.allocated_test(bluestore_pextent_t(2, 0x5000)); B.get_ref(coll.get(), 0x0, 0xa000); ASSERT_EQ(0xa000u, B.get_referenced_bytes()); cout << "before: " << B << std::endl; PExtentVector r; ASSERT_FALSE(B.put_ref(coll.get(), 0x8000, 0x2000, &r)); cout << "after: " << B << std::endl; cout << "r " << r << std::endl; ASSERT_EQ(0x8000u, B.get_referenced_bytes()); ASSERT_EQ(1u, r.size()); ASSERT_EQ(0x3002u, r[0].offset); ASSERT_EQ(0x2000u, r[0].length); } { BlueStore::Blob B; B.shared_blob = new BlueStore::SharedBlob(coll.get()); bluestore_blob_t& b = B.dirty_blob(); b.allocated_test(bluestore_pextent_t(1, 0x7000)); b.allocated_test(bluestore_pextent_t(2, 0x7000)); B.get_ref(coll.get(), 0x0, 0xe000); ASSERT_EQ(0xe000u, B.get_referenced_bytes()); cout << "before: " << B << std::endl; PExtentVector r; ASSERT_FALSE(B.put_ref(coll.get(), 0, 0xb000, &r)); ASSERT_EQ(0x3000u, B.get_referenced_bytes()); cout << "after: " << B << std::endl; cout << "r " 
<< r << std::endl; ASSERT_EQ(0x3000u, B.get_referenced_bytes()); ASSERT_EQ(2u, r.size()); ASSERT_EQ(1u, r[0].offset); ASSERT_EQ(0x7000u, r[0].length); ASSERT_EQ(2u, r[1].offset); ASSERT_EQ(0x3000u, r[1].length); // we have 0x1000 bytes less due to // alignment caused by min_alloc_size = 0x2000 } { BlueStore store(g_ceph_context, "", 0x4000); BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create( g_ceph_context, "lru", NULL); BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create( g_ceph_context, "lru", NULL); auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t()); BlueStore::Blob B; B.shared_blob = new BlueStore::SharedBlob(coll.get()); bluestore_blob_t& b = B.dirty_blob(); b.allocated_test(bluestore_pextent_t(1, 0x5000)); b.allocated_test(bluestore_pextent_t(2, 0x7000)); B.get_ref(coll.get(), 0x0, 0xc000); ASSERT_EQ(0xc000u, B.get_referenced_bytes()); cout << "before: " << B << std::endl; PExtentVector r; ASSERT_FALSE(B.put_ref(coll.get(), 0x2000, 0xa000, &r)); cout << "after: " << B << std::endl; cout << "r " << r << std::endl; ASSERT_EQ(0x2000u, B.get_referenced_bytes()); ASSERT_EQ(2u, r.size()); ASSERT_EQ(0x4001u, r[0].offset); ASSERT_EQ(0x1000u, r[0].length); ASSERT_EQ(2u, r[1].offset); ASSERT_EQ(0x7000u, r[1].length); ASSERT_EQ(1u, b.get_extents()[0].offset); ASSERT_EQ(0x4000u, b.get_extents()[0].length); } } TEST(bluestore_blob_t, can_split) { bluestore_blob_t a; ASSERT_TRUE(a.can_split()); a.flags = bluestore_blob_t::FLAG_SHARED; ASSERT_FALSE(a.can_split()); a.flags = bluestore_blob_t::FLAG_COMPRESSED; ASSERT_FALSE(a.can_split()); a.flags = bluestore_blob_t::FLAG_HAS_UNUSED; ASSERT_FALSE(a.can_split()); } TEST(bluestore_blob_t, can_split_at) { bluestore_blob_t a; a.allocated_test(bluestore_pextent_t(0x10000, 0x2000)); a.allocated_test(bluestore_pextent_t(0x20000, 0x2000)); ASSERT_TRUE(a.can_split_at(0x1000)); ASSERT_TRUE(a.can_split_at(0x1800)); a.init_csum(Checksummer::CSUM_CRC32C, 12, 0x4000); 
ASSERT_TRUE(a.can_split_at(0x1000)); ASSERT_TRUE(a.can_split_at(0x2000)); ASSERT_TRUE(a.can_split_at(0x3000)); ASSERT_FALSE(a.can_split_at(0x2800)); } TEST(bluestore_blob_t, prune_tail) { bluestore_blob_t a; a.allocated_test(bluestore_pextent_t(0x10000, 0x2000)); a.allocated_test(bluestore_pextent_t(0x20000, 0x2000)); ASSERT_FALSE(a.can_prune_tail()); a.allocated_test( bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET, 0x2000)); ASSERT_TRUE(a.can_prune_tail()); a.prune_tail(); ASSERT_FALSE(a.can_prune_tail()); ASSERT_EQ(2u, a.get_extents().size()); ASSERT_EQ(0x4000u, a.get_logical_length()); a.allocated_test( bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET, 0x2000)); a.init_csum(Checksummer::CSUM_CRC32C_8, 12, 0x6000); ASSERT_EQ(6u, a.csum_data.length()); ASSERT_TRUE(a.can_prune_tail()); a.prune_tail(); ASSERT_FALSE(a.can_prune_tail()); ASSERT_EQ(2u, a.get_extents().size()); ASSERT_EQ(0x4000u, a.get_logical_length()); ASSERT_EQ(4u, a.csum_data.length()); bluestore_blob_t b; b.allocated_test( bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET, 0x2000)); ASSERT_FALSE(a.can_prune_tail()); } TEST(Blob, split) { BlueStore store(g_ceph_context, "", 4096); BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create( g_ceph_context, "lru", NULL); BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create( g_ceph_context, "lru", NULL); auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t()); { BlueStore::Blob L, R; L.shared_blob = new BlueStore::SharedBlob(coll.get()); R.shared_blob = new BlueStore::SharedBlob(coll.get()); L.dirty_blob().allocated_test(bluestore_pextent_t(0x2000, 0x2000)); L.dirty_blob().init_csum(Checksummer::CSUM_CRC32C, 12, 0x2000); L.get_ref(coll.get(), 0, 0x2000); L.split(coll.get(), 0x1000, &R); ASSERT_EQ(0x1000u, L.get_blob().get_logical_length()); ASSERT_EQ(4u, L.get_blob().csum_data.length()); ASSERT_EQ(1u, L.get_blob().get_extents().size()); ASSERT_EQ(0x2000u, 
L.get_blob().get_extents().front().offset);
    ASSERT_EQ(0x1000u, L.get_blob().get_extents().front().length);
    ASSERT_EQ(0x1000u, L.get_referenced_bytes());
    // right-hand part gets the second half of the single pextent
    ASSERT_EQ(0x1000u, R.get_blob().get_logical_length());
    ASSERT_EQ(4u, R.get_blob().csum_data.length());
    ASSERT_EQ(1u, R.get_blob().get_extents().size());
    ASSERT_EQ(0x3000u, R.get_blob().get_extents().front().offset);
    ASSERT_EQ(0x1000u, R.get_blob().get_extents().front().length);
    ASSERT_EQ(0x1000u, R.get_referenced_bytes());
  }
  {
    // split exactly on the boundary between two physical extents:
    // each side keeps one whole pextent
    BlueStore::Blob L, R;
    L.shared_blob = new BlueStore::SharedBlob(coll.get());
    R.shared_blob = new BlueStore::SharedBlob(coll.get());
    L.dirty_blob().allocated_test(bluestore_pextent_t(0x2000, 0x1000));
    L.dirty_blob().allocated_test(bluestore_pextent_t(0x12000, 0x1000));
    L.dirty_blob().init_csum(Checksummer::CSUM_CRC32C, 12, 0x2000);
    L.get_ref(coll.get(), 0, 0x1000);
    L.get_ref(coll.get(), 0x1000, 0x1000);
    L.split(coll.get(), 0x1000, &R);
    ASSERT_EQ(0x1000u, L.get_blob().get_logical_length());
    ASSERT_EQ(4u, L.get_blob().csum_data.length());
    ASSERT_EQ(1u, L.get_blob().get_extents().size());
    ASSERT_EQ(0x2000u, L.get_blob().get_extents().front().offset);
    ASSERT_EQ(0x1000u, L.get_blob().get_extents().front().length);
    ASSERT_EQ(0x1000u, L.get_referenced_bytes());
    ASSERT_EQ(0x1000u, R.get_blob().get_logical_length());
    ASSERT_EQ(4u, R.get_blob().csum_data.length());
    ASSERT_EQ(1u, R.get_blob().get_extents().size());
    ASSERT_EQ(0x12000u, R.get_blob().get_extents().front().offset);
    ASSERT_EQ(0x1000u, R.get_blob().get_extents().front().length);
    ASSERT_EQ(0x1000u, R.get_referenced_bytes());
  }
}

// Encode a Blob in the legacy v1 format (ref map encoded separately) and
// in v2 (use tracker embedded), then decode both and verify the decoded
// use trackers agree and carry the expected referenced-byte count.
TEST(Blob, legacy_decode) {
  BlueStore store(g_ceph_context, "", 4096);
  BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
    g_ceph_context, "lru", NULL);
  BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
    g_ceph_context, "lru", NULL);
  auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
  bufferlist bl, bl2;
  {
    BlueStore::Blob B;
    B.shared_blob = new BlueStore::SharedBlob(coll.get());
    B.dirty_blob().allocated_test(bluestore_pextent_t(0x1, 0x2000));
    B.dirty_blob().init_csum(Checksummer::CSUM_CRC32C, 12, 0x2000);
    // two disjoint referenced ranges: 0~0xff0 and a single byte at 0x1fff
    B.get_ref(coll.get(), 0, 0xff0);
    B.get_ref(coll.get(), 0x1fff, 1);

    // v1 kept the ref map as a separate structure alongside the blob
    bluestore_extent_ref_map_t fake_ref_map;
    fake_ref_map.get(0, 0xff0);
    fake_ref_map.get(0x1fff, 1);

    size_t bound = 0, bound2 = 0;
    B.bound_encode(
      bound,
      1, /*struct_v*/
      0, /*sbid*/
      false);
    fake_ref_map.bound_encode(bound);
    B.bound_encode(
      bound2,
      2, /*struct_v*/
      0, /*sbid*/
      true);
    {
      auto app = bl.get_contiguous_appender(bound);
      auto app2 = bl2.get_contiguous_appender(bound2);
      B.encode(
        app,
        1, /*struct_v*/
        0, /*sbid*/
        false);
      fake_ref_map.encode(app);
      B.encode(
        app2,
        2, /*struct_v*/
        0, /*sbid*/
        true);
    }
    auto p = bl.front().begin_deep();
    auto p2 = bl2.front().begin_deep();
    BlueStore::Blob Bres, Bres2;
    Bres.shared_blob = new BlueStore::SharedBlob(coll.get());
    Bres2.shared_blob = new BlueStore::SharedBlob(coll.get());
    uint64_t sbid, sbid2;
    Bres.decode(
      p,
      1, /*struct_v*/
      &sbid,
      true,
      coll.get());
    Bres2.decode(
      p2,
      2, /*struct_v*/
      &sbid2,
      true,
      coll.get());
    // both decoded trackers must report 0xff0 + 1 referenced bytes and be equal
    ASSERT_EQ(0xff0u + 1u, Bres.get_blob_use_tracker().get_referenced_bytes());
    ASSERT_EQ(0xff0u + 1u, Bres2.get_blob_use_tracker().get_referenced_bytes());
    ASSERT_TRUE(Bres.get_blob_use_tracker().equal(Bres2.get_blob_use_tracker()));
  }
}

// seek_lextent(off) returns the first extent whose logical range ends
// after 'off' (the containing extent, or the next one), per the
// assertions below.
TEST(ExtentMap, seek_lextent) {
  BlueStore store(g_ceph_context, "", 4096);
  BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
    g_ceph_context, "lru", NULL);
  BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
    g_ceph_context, "lru", NULL);
  auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
  BlueStore::Onode onode(coll.get(), ghobject_t(), "");
  BlueStore::ExtentMap em(&onode,
    g_ceph_context->_conf->bluestore_extent_map_inline_shard_prealloc_size);
  BlueStore::BlobRef br(new BlueStore::Blob);
  br->shared_blob = new BlueStore::SharedBlob(coll.get());

  // empty map: always end()
  ASSERT_EQ(em.extent_map.end(), em.seek_lextent(0));
ASSERT_EQ(em.extent_map.end(), em.seek_lextent(100));

  // single extent [100, 200)
  em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, br));
  auto a = em.find(100);
  ASSERT_EQ(a, em.seek_lextent(0));
  ASSERT_EQ(a, em.seek_lextent(99));
  ASSERT_EQ(a, em.seek_lextent(100));
  ASSERT_EQ(a, em.seek_lextent(101));
  ASSERT_EQ(a, em.seek_lextent(199));
  ASSERT_EQ(em.extent_map.end(), em.seek_lextent(200));

  // adjacent extent [200, 300)
  em.extent_map.insert(*new BlueStore::Extent(200, 0, 100, br));
  auto b = em.find(200);
  ASSERT_EQ(a, em.seek_lextent(0));
  ASSERT_EQ(a, em.seek_lextent(99));
  ASSERT_EQ(a, em.seek_lextent(100));
  ASSERT_EQ(a, em.seek_lextent(101));
  ASSERT_EQ(a, em.seek_lextent(199));
  ASSERT_EQ(b, em.seek_lextent(200));
  ASSERT_EQ(b, em.seek_lextent(299));
  ASSERT_EQ(em.extent_map.end(), em.seek_lextent(300));

  // extent [400, 500) with a hole [300, 400): seeking into the hole
  // yields the next extent
  em.extent_map.insert(*new BlueStore::Extent(400, 0, 100, br));
  auto d = em.find(400);
  ASSERT_EQ(a, em.seek_lextent(0));
  ASSERT_EQ(a, em.seek_lextent(99));
  ASSERT_EQ(a, em.seek_lextent(100));
  ASSERT_EQ(a, em.seek_lextent(101));
  ASSERT_EQ(a, em.seek_lextent(199));
  ASSERT_EQ(b, em.seek_lextent(200));
  ASSERT_EQ(b, em.seek_lextent(299));
  ASSERT_EQ(d, em.seek_lextent(300));
  ASSERT_EQ(d, em.seek_lextent(399));
  ASSERT_EQ(d, em.seek_lextent(400));
  ASSERT_EQ(d, em.seek_lextent(499));
  ASSERT_EQ(em.extent_map.end(), em.seek_lextent(500));
}

// has_any_lextents(offset, length): true iff [offset, offset+length)
// intersects at least one mapped extent (see the 50/50 vs 50/51 pair).
TEST(ExtentMap, has_any_lextents) {
  BlueStore store(g_ceph_context, "", 4096);
  BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
    g_ceph_context, "lru", NULL);
  BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
    g_ceph_context, "lru", NULL);
  auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
  BlueStore::Onode onode(coll.get(), ghobject_t(), "");
  BlueStore::ExtentMap em(&onode,
    g_ceph_context->_conf->bluestore_extent_map_inline_shard_prealloc_size);
  BlueStore::BlobRef b(new BlueStore::Blob);
  b->shared_blob = new BlueStore::SharedBlob(coll.get());

  // empty map
  ASSERT_FALSE(em.has_any_lextents(0, 0));
  ASSERT_FALSE(em.has_any_lextents(0, 1000));
  ASSERT_FALSE(em.has_any_lextents(1000, 1000));

  // extent [100, 200)
  em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, b));
  ASSERT_FALSE(em.has_any_lextents(0, 50));
  ASSERT_FALSE(em.has_any_lextents(0, 100));
  ASSERT_FALSE(em.has_any_lextents(50, 50));
  ASSERT_TRUE(em.has_any_lextents(50, 51));
  ASSERT_TRUE(em.has_any_lextents(50, 100051));
  ASSERT_TRUE(em.has_any_lextents(100, 100));
  ASSERT_TRUE(em.has_any_lextents(100, 1));
  ASSERT_TRUE(em.has_any_lextents(199, 1));
  ASSERT_TRUE(em.has_any_lextents(199, 2));
  ASSERT_FALSE(em.has_any_lextents(200, 2));

  // extent [200, 300)
  em.extent_map.insert(*new BlueStore::Extent(200, 0, 100, b));
  ASSERT_TRUE(em.has_any_lextents(199, 1));
  ASSERT_TRUE(em.has_any_lextents(199, 2));
  ASSERT_TRUE(em.has_any_lextents(200, 2));
  ASSERT_TRUE(em.has_any_lextents(200, 200));
  ASSERT_TRUE(em.has_any_lextents(299, 1));
  ASSERT_FALSE(em.has_any_lextents(300, 1));

  // extent [400, 500) with a hole at [300, 400)
  em.extent_map.insert(*new BlueStore::Extent(400, 0, 100, b));
  ASSERT_TRUE(em.has_any_lextents(0, 10000));
  ASSERT_TRUE(em.has_any_lextents(199, 1));
  ASSERT_FALSE(em.has_any_lextents(300, 1));
  ASSERT_FALSE(em.has_any_lextents(300, 100));
  ASSERT_FALSE(em.has_any_lextents(399, 1));
  ASSERT_TRUE(em.has_any_lextents(400, 1));
  ASSERT_TRUE(em.has_any_lextents(400, 100));
  ASSERT_TRUE(em.has_any_lextents(400, 1000));
  ASSERT_TRUE(em.has_any_lextents(499, 1000));
  ASSERT_FALSE(em.has_any_lextents(500, 1000));
}

// Test helper: remove the extent at logical offset v from the map and
// free it (extent_map stores raw-new intrusive nodes in these tests).
void erase_and_delete(BlueStore::ExtentMap& em, size_t v)
{
  auto d = em.find(v);
  ASSERT_NE(d, em.extent_map.end());
  em.extent_map.erase(d);
  delete &*d;
}

// compress_extent_map(offset, length) merges adjacent extents that are
// contiguous in both logical and blob offsets within the same blob and
// returns the number of extents removed.
TEST(ExtentMap, compress_extent_map) {
  BlueStore store(g_ceph_context, "", 4096);
  BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
    g_ceph_context, "lru", NULL);
  BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
    g_ceph_context, "lru", NULL);
  auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
  BlueStore::Onode onode(coll.get(), ghobject_t(), "");
  BlueStore::ExtentMap em(&onode,
g_ceph_context->_conf->bluestore_extent_map_inline_shard_prealloc_size);
  BlueStore::BlobRef b1(new BlueStore::Blob);
  BlueStore::BlobRef b2(new BlueStore::Blob);
  BlueStore::BlobRef b3(new BlueStore::Blob);
  b1->shared_blob = new BlueStore::SharedBlob(coll.get());
  b2->shared_blob = new BlueStore::SharedBlob(coll.get());
  b3->shared_blob = new BlueStore::SharedBlob(coll.get());

  // different blobs never merge
  em.extent_map.insert(*new BlueStore::Extent(0, 0, 100, b1));
  em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, b2));
  ASSERT_EQ(0, em.compress_extent_map(0, 10000));
  ASSERT_EQ(2u, em.extent_map.size());

  // three contiguous b2 pieces collapse into one (two extents removed)
  em.extent_map.insert(*new BlueStore::Extent(200, 100, 100, b2));
  em.extent_map.insert(*new BlueStore::Extent(300, 200, 100, b2));
  ASSERT_EQ(0, em.compress_extent_map(0, 0));
  ASSERT_EQ(0, em.compress_extent_map(100000, 1000));
  ASSERT_EQ(2, em.compress_extent_map(0, 100000));
  ASSERT_EQ(2u, em.extent_map.size());

  // a different blob (b3) in the middle blocks merging
  erase_and_delete(em, 100);
  em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, b2));
  em.extent_map.insert(*new BlueStore::Extent(200, 100, 100, b3));
  em.extent_map.insert(*new BlueStore::Extent(300, 200, 100, b2));
  ASSERT_EQ(0, em.compress_extent_map(0, 1));
  ASSERT_EQ(0, em.compress_extent_map(0, 100000));
  ASSERT_EQ(4u, em.extent_map.size());

  // only the requested window [100, 600) is compressed
  em.extent_map.insert(*new BlueStore::Extent(400, 300, 100, b2));
  em.extent_map.insert(*new BlueStore::Extent(500, 500, 100, b2));
  em.extent_map.insert(*new BlueStore::Extent(600, 600, 100, b2));
  em.extent_map.insert(*new BlueStore::Extent(700, 0, 100, b1));
  em.extent_map.insert(*new BlueStore::Extent(800, 0, 100, b3));
  ASSERT_EQ(0, em.compress_extent_map(0, 99));
  ASSERT_EQ(0, em.compress_extent_map(800, 1000));
  ASSERT_EQ(2, em.compress_extent_map(100, 500));
  ASSERT_EQ(7u, em.extent_map.size());

  erase_and_delete(em, 300);
  erase_and_delete(em, 500);
  erase_and_delete(em, 700);
  em.extent_map.insert(*new BlueStore::Extent(400, 300, 100, b2));
  em.extent_map.insert(*new BlueStore::Extent(500, 400, 100, b2));
  em.extent_map.insert(*new BlueStore::Extent(700, 500, 100, b2));
  ASSERT_EQ(1, em.compress_extent_map(0, 1000));
  ASSERT_EQ(6u, em.extent_map.size());
}

// Test helper: drain an old_extent_map_t, deleting each intrusive node.
void clear_and_dispose(BlueStore::old_extent_map_t& old_em)
{
  auto oep = old_em.begin();
  while (oep != old_em.end()) {
    auto &lo = *oep;
    oep = old_em.erase(oep);
    delete &lo;
  }
}

// GarbageCollector::estimate() decides which logical ranges still backed
// by (partially dereferenced) compressed blobs are worth rewriting.
TEST(GarbageCollector, BasicTest) {
  BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
    g_ceph_context, "lru", NULL);
  BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
    g_ceph_context, "lru", NULL);

  BlueStore store(g_ceph_context, "", 4096);
  auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
  BlueStore::Onode onode(coll.get(), ghobject_t(), "");
  BlueStore::ExtentMap em(&onode,
    g_ceph_context->_conf->bluestore_extent_map_inline_shard_prealloc_size);

  BlueStore::old_extent_map_t old_extents;

  /*
   min_alloc_size = 4096
   original disposition
   extent1 <loffs = 100, boffs = 100, len  = 10>
     -> blob1<compressed, len_on_disk=4096, logical_len=8192>
   extent2 <loffs = 200, boffs = 200, len  = 10>
     -> blob2<raw, len_on_disk=4096, llen=4096>
   extent3 <loffs = 300, boffs = 300, len  = 10>
     -> blob1<compressed, len_on_disk=4096, llen=8192>
   extent4 <loffs = 4096, boffs = 0, len  = 10>
     -> blob3<raw, len_on_disk=4096, llen=4096>
   on write(300~100) resulted in
   extent1 <loffs = 100, boffs = 100, len  = 10>
     -> blob1<compressed, len_on_disk=4096, logical_len=8192>
   extent2 <loffs = 200, boffs = 200, len  = 10>
     -> blob2<raw, len_on_disk=4096, llen=4096>
   extent3 <loffs = 300, boffs = 300, len  = 100>
     -> blob4<raw, len_on_disk=4096, llen=4096>
   extent4 <loffs = 4096, boffs = 0, len  = 10>
     -> blob3<raw, len_on_disk=4096, llen=4096>
  */
  {
    BlueStore::GarbageCollector gc(g_ceph_context);
    int64_t saving;
    BlueStore::BlobRef b1(new BlueStore::Blob);
    BlueStore::BlobRef b2(new BlueStore::Blob);
    BlueStore::BlobRef b3(new BlueStore::Blob);
    BlueStore::BlobRef b4(new BlueStore::Blob);
    b1->shared_blob = new BlueStore::SharedBlob(coll.get());
    b2->shared_blob = new
BlueStore::SharedBlob(coll.get());
    b3->shared_blob = new BlueStore::SharedBlob(coll.get());
    b4->shared_blob = new BlueStore::SharedBlob(coll.get());
    b1->dirty_blob().set_compressed(0x2000, 0x1000);
    b1->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x1000));
    b2->dirty_blob().allocated_test(bluestore_pextent_t(1, 0x1000));
    b3->dirty_blob().allocated_test(bluestore_pextent_t(2, 0x1000));
    b4->dirty_blob().allocated_test(bluestore_pextent_t(3, 0x1000));

    em.extent_map.insert(*new BlueStore::Extent(100, 100, 10, b1));
    b1->get_ref(coll.get(), 100, 10);
    em.extent_map.insert(*new BlueStore::Extent(200, 200, 10, b2));
    b2->get_ref(coll.get(), 200, 10);
    em.extent_map.insert(*new BlueStore::Extent(300, 300, 100, b4));
    b4->get_ref(coll.get(), 300, 100);
    em.extent_map.insert(*new BlueStore::Extent(4096, 0, 10, b3));
    b3->get_ref(coll.get(), 0, 10);

    // the write displaced 300~10 which used to live in compressed blob1
    old_extents.push_back(*new BlueStore::OldExtent(300, 300, 10, b1));

    saving = gc.estimate(300, 100, em, old_extents, 4096);
    ASSERT_EQ(saving, 1);
    auto& to_collect = gc.get_extents_to_collect();
    ASSERT_EQ(to_collect.num_intervals(), 1u);
    {
      // the remaining reference into blob1 (100~10) is to be collected
      auto it = to_collect.begin();
      using p = decltype(*it);
      auto v = p{100ul, 10ul};
      ASSERT_EQ(*it, v);
    }
    em.clear();
    clear_and_dispose(old_extents);
  }
  /*
   original disposition
   min_alloc_size = 0x10000
   extent1 <loffs = 0, boffs = 0, len  = 0x40000>
     -> blob1<compressed, len_on_disk=0x20000, logical_len=0x40000>
   Write 0x8000~37000 resulted in the following extent map prior to GC
   for the last write_small(0x30000~0xf000):

   extent1 <loffs = 0, boffs = 0, len  = 0x8000>
     -> blob1<compressed, len_on_disk=0x20000, logical_len=0x40000>
   extent2 <loffs = 0x8000, boffs = 0x8000, len  = 0x8000>
     -> blob2<raw, len_on_disk=0x10000, llen=0x10000>
   extent3 <loffs = 0x10000, boffs = 0, len  = 0x20000>
     -> blob3<raw, len_on_disk=0x20000, llen=0x20000>
   extent4 <loffs = 0x30000, boffs = 0, len  = 0xf000>
     -> blob4<raw, len_on_disk=0x10000, llen=0x10000>
   extent5 <loffs = 0x3f000, boffs = 0x3f000, len  = 0x1000>
     -> blob1<compressed, len_on_disk=0x20000, llen=0x40000>
  */
  {
    BlueStore store(g_ceph_context, "", 0x10000);
    auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
    BlueStore::Onode onode(coll.get(), ghobject_t(), "");
    BlueStore::ExtentMap em(&onode,
      g_ceph_context->_conf->bluestore_extent_map_inline_shard_prealloc_size);

    BlueStore::old_extent_map_t old_extents;
    BlueStore::GarbageCollector gc(g_ceph_context);
    int64_t saving;
    BlueStore::BlobRef b1(new BlueStore::Blob);
    BlueStore::BlobRef b2(new BlueStore::Blob);
    BlueStore::BlobRef b3(new BlueStore::Blob);
    BlueStore::BlobRef b4(new BlueStore::Blob);
    b1->shared_blob = new BlueStore::SharedBlob(coll.get());
    b2->shared_blob = new BlueStore::SharedBlob(coll.get());
    b3->shared_blob = new BlueStore::SharedBlob(coll.get());
    b4->shared_blob = new BlueStore::SharedBlob(coll.get());
    b1->dirty_blob().set_compressed(0x40000, 0x20000);
    b1->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x20000));
    b2->dirty_blob().allocated_test(bluestore_pextent_t(1, 0x10000));
    b3->dirty_blob().allocated_test(bluestore_pextent_t(2, 0x20000));
    b4->dirty_blob().allocated_test(bluestore_pextent_t(3, 0x10000));

    em.extent_map.insert(*new BlueStore::Extent(0, 0, 0x8000, b1));
    b1->get_ref(coll.get(), 0, 0x8000);
    em.extent_map.insert(
      *new BlueStore::Extent(0x8000, 0x8000, 0x8000, b2)); // new extent
    b2->get_ref(coll.get(), 0x8000, 0x8000);
    em.extent_map.insert(
      *new BlueStore::Extent(0x10000, 0, 0x20000, b3)); // new extent
    b3->get_ref(coll.get(), 0, 0x20000);
    em.extent_map.insert(
      *new BlueStore::Extent(0x30000, 0, 0xf000, b4)); // new extent
    b4->get_ref(coll.get(), 0, 0xf000);
    em.extent_map.insert(*new BlueStore::Extent(0x3f000, 0x3f000, 0x1000, b1));
    b1->get_ref(coll.get(), 0x3f000, 0x1000);

    old_extents.push_back(*new BlueStore::OldExtent(0x8000, 0x8000, 0x8000, b1));
    old_extents.push_back(
      *new BlueStore::OldExtent(0x10000, 0x10000, 0x20000, b1));
    old_extents.push_back(*new BlueStore::OldExtent(0x30000, 0x30000, 0xf000, b1));

    saving = gc.estimate(0x30000, 0xf000, em, old_extents, 0x10000);
    ASSERT_EQ(saving, 2);
    auto& to_collect = gc.get_extents_to_collect();
    ASSERT_EQ(to_collect.num_intervals(), 2u);
    {
      // the two surviving references into compressed blob1
      // (0~0x8000 and 0x3f000~0x1000), in either order
      auto it1 = to_collect.begin();
      auto it2 = ++to_collect.begin();
      using p = decltype(*it1);
      {
        auto v1 = p{0x0ul, 0x8000ul};
        auto v2 = p{0x0ul, 0x8000ul};
        ASSERT_TRUE(*it1 == v1 || *it2 == v2);
      }
      {
        auto v1 = p{0x3f000ul, 0x1000ul};
        auto v2 = p{0x3f000ul, 0x1000ul};
        ASSERT_TRUE(*it1 == v1 || *it2 == v2);
      }
    }

    em.clear();
    clear_and_dispose(old_extents);
  }
  /*
   original disposition
   min_alloc_size = 0x1000
   extent1 <loffs = 0, boffs = 0, len  = 0x4000>
     -> blob1<compressed, len_on_disk=0x2000, logical_len=0x4000>
   write 0x3000~4000 resulted in the following extent map
   (future feature - suppose we can compress incoming write prior to
   GC invocation)

   extent1 <loffs = 0, boffs = 0, len  = 0x4000>
     -> blob1<compressed, len_on_disk=0x2000, logical_len=0x4000>
   extent2 <loffs = 0x3000, boffs = 0, len  = 0x4000>
     -> blob2<compressed, len_on_disk=0x2000, llen=0x4000>
  */
  {
    // NOTE: this case reuses 'em' and 'old_extents' from the top of the test
    BlueStore::GarbageCollector gc(g_ceph_context);
    int64_t saving;
    BlueStore::BlobRef b1(new BlueStore::Blob);
    BlueStore::BlobRef b2(new BlueStore::Blob);
    b1->shared_blob = new BlueStore::SharedBlob(coll.get());
    b2->shared_blob = new BlueStore::SharedBlob(coll.get());
    b1->dirty_blob().set_compressed(0x4000, 0x2000);
    b1->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x2000));
    b2->dirty_blob().set_compressed(0x4000, 0x2000);
    b2->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x2000));

    em.extent_map.insert(*new BlueStore::Extent(0, 0, 0x3000, b1));
    b1->get_ref(coll.get(), 0, 0x3000);
    em.extent_map.insert(
      *new BlueStore::Extent(0x3000, 0, 0x4000, b2)); // new extent
    b2->get_ref(coll.get(), 0, 0x4000);

    old_extents.push_back(*new BlueStore::OldExtent(0x3000, 0x3000, 0x1000, b1));

    // rewriting a compressed blob with another compressed blob saves nothing
    saving = gc.estimate(0x3000, 0x4000, em, old_extents, 0x1000);
    ASSERT_EQ(saving, 0);
    auto& to_collect = gc.get_extents_to_collect();
    ASSERT_EQ(to_collect.num_intervals(), 0u);
    em.clear();
    clear_and_dispose(old_extents);
  }
  /*
   original disposition
   min_alloc_size = 0x10000
   extent0 <loffs = 0, boffs = 0, len  = 0x20000>
     -> blob0<compressed, len_on_disk=0x10000, logical_len=0x20000>
   extent1 <loffs = 0x20000, boffs = 0, len  = 0x20000>
     -> blob1<compressed, len_on_disk=0x10000, logical_len=0x20000>
   write 0x8000~37000 resulted in the following extent map prior
   to GC for the last write_small(0x30000~0xf000)

   extent0 <loffs = 0, boffs = 0, len  = 0x8000>
     -> blob0<compressed, len_on_disk=0x10000, logical_len=0x20000>
   extent2 <loffs = 0x8000, boffs = 0x8000, len  = 0x8000>
     -> blob2<raw, len_on_disk=0x10000, llen=0x10000>
   extent3 <loffs = 0x10000, boffs = 0, len  = 0x20000>
     -> blob3<raw, len_on_disk=0x20000, llen=0x20000>
   extent4 <loffs = 0x30000, boffs = 0, len  = 0xf000>
     -> blob4<raw, len_on_disk=0x1000, llen=0x1000>
   extent5 <loffs = 0x3f000, boffs = 0x1f000, len  = 0x1000>
     -> blob1<compressed, len_on_disk=0x10000, llen=0x20000>
  */
  {
    BlueStore store(g_ceph_context, "", 0x10000);
    auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
    BlueStore::Onode onode(coll.get(), ghobject_t(), "");
    BlueStore::ExtentMap em(&onode,
      g_ceph_context->_conf->bluestore_extent_map_inline_shard_prealloc_size);

    BlueStore::old_extent_map_t old_extents;
    BlueStore::GarbageCollector gc(g_ceph_context);
    int64_t saving;
    BlueStore::BlobRef b0(new BlueStore::Blob);
    BlueStore::BlobRef b1(new BlueStore::Blob);
    BlueStore::BlobRef b2(new BlueStore::Blob);
    BlueStore::BlobRef b3(new BlueStore::Blob);
    BlueStore::BlobRef b4(new BlueStore::Blob);
    b0->shared_blob = new BlueStore::SharedBlob(coll.get());
    b1->shared_blob = new BlueStore::SharedBlob(coll.get());
    b2->shared_blob = new BlueStore::SharedBlob(coll.get());
    b3->shared_blob = new BlueStore::SharedBlob(coll.get());
    b4->shared_blob = new BlueStore::SharedBlob(coll.get());
    b0->dirty_blob().set_compressed(0x2000, 0x1000);
    b0->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x10000));
    b1->dirty_blob().set_compressed(0x20000, 0x10000);
    b1->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x10000));
    b2->dirty_blob().allocated_test(bluestore_pextent_t(1, 0x10000));
    b3->dirty_blob().allocated_test(bluestore_pextent_t(2, 0x20000));
    b4->dirty_blob().allocated_test(bluestore_pextent_t(3, 0x1000));

    em.extent_map.insert(*new BlueStore::Extent(0, 0, 0x8000, b0));
    b0->get_ref(coll.get(), 0, 0x8000);
    em.extent_map.insert(
      *new BlueStore::Extent(0x8000, 0x8000, 0x8000, b2)); // new extent
    b2->get_ref(coll.get(), 0x8000, 0x8000);
    em.extent_map.insert(
      *new BlueStore::Extent(0x10000, 0, 0x20000, b3)); // new extent
    b3->get_ref(coll.get(), 0, 0x20000);
    em.extent_map.insert(
      *new BlueStore::Extent(0x30000, 0, 0xf000, b4)); // new extent
    b4->get_ref(coll.get(), 0, 0xf000);
    em.extent_map.insert(*new BlueStore::Extent(0x3f000, 0x1f000, 0x1000, b1));
    b1->get_ref(coll.get(), 0x1f000, 0x1000);

    old_extents.push_back(*new BlueStore::OldExtent(0x8000, 0x8000, 0x8000, b0));
    old_extents.push_back(
      *new BlueStore::OldExtent(0x10000, 0x10000, 0x10000, b0));
    old_extents.push_back(
      *new BlueStore::OldExtent(0x20000, 0x00000, 0x1f000, b1));

    saving = gc.estimate(0x30000, 0xf000, em, old_extents, 0x10000);
    ASSERT_EQ(saving, 2);
    auto& to_collect = gc.get_extents_to_collect();
    ASSERT_EQ(to_collect.num_intervals(), 2u);
    {
      // leftovers of the two compressed blobs, in either order
      auto it1 = to_collect.begin();
      auto it2 = ++to_collect.begin();
      using p = decltype(*it1);
      {
        auto v1 = p{0x0ul, 0x8000ul};
        auto v2 = p{0x0ul, 0x8000ul};
        ASSERT_TRUE(*it1 == v1 || *it2 == v2);
      }
      {
        auto v1 = p{0x3f000ul, 0x1000ul};
        auto v2 = p{0x3f000ul, 0x1000ul};
        ASSERT_TRUE(*it1 == v1 || *it2 == v2);
      }
    }

    em.clear();
    clear_and_dispose(old_extents);
  }
}

// StoreSpaceTracker: granularity is derived from total size so the
// bloom-filter vectors stay within the requested entry budget.
TEST(BlueStoreRepairer, StoreSpaceTracker) {
  BlueStoreRepairer::StoreSpaceTracker bmap0;
  bmap0.init((uint64_t)4096 * 1024 * 1024 * 1024, 0x1000);
  ASSERT_EQ(bmap0.granularity, 2 * 1024 * 1024U);
  ASSERT_EQ(bmap0.collections_bfs.size(), 2048u * 1024u);
  ASSERT_EQ(bmap0.objects_bfs.size(), 2048u * 1024u);
BlueStoreRepairer::StoreSpaceTracker bmap; bmap.init(0x2000 * 0x1000 - 1, 0x1000, 512 * 1024); ASSERT_EQ(bmap.granularity, 0x1000u); ASSERT_EQ(bmap.collections_bfs.size(), 0x2000u); ASSERT_EQ(bmap.objects_bfs.size(), 0x2000u); coll_t cid; ghobject_t hoid; ASSERT_FALSE(bmap.is_used(cid, 0)); ASSERT_FALSE(bmap.is_used(hoid, 0)); bmap.set_used(0, 1, cid, hoid); ASSERT_TRUE(bmap.is_used(cid, 0)); ASSERT_TRUE(bmap.is_used(hoid, 0)); ASSERT_FALSE(bmap.is_used(cid, 0x1023)); ASSERT_FALSE(bmap.is_used(hoid, 0x1023)); ASSERT_FALSE(bmap.is_used(cid, 0x2023)); ASSERT_FALSE(bmap.is_used(hoid, 0x2023)); ASSERT_FALSE(bmap.is_used(cid, 0x3023)); ASSERT_FALSE(bmap.is_used(hoid, 0x3023)); bmap.set_used(0x1023, 0x3000, cid, hoid); ASSERT_TRUE(bmap.is_used(cid, 0x1023)); ASSERT_TRUE(bmap.is_used(hoid, 0x1023)); ASSERT_TRUE(bmap.is_used(cid, 0x2023)); ASSERT_TRUE(bmap.is_used(hoid, 0x2023)); ASSERT_TRUE(bmap.is_used(cid, 0x3023)); ASSERT_TRUE(bmap.is_used(hoid, 0x3023)); ASSERT_FALSE(bmap.is_used(cid, 0x9001)); ASSERT_FALSE(bmap.is_used(hoid, 0x9001)); ASSERT_FALSE(bmap.is_used(cid, 0xa001)); ASSERT_FALSE(bmap.is_used(hoid, 0xa001)); ASSERT_FALSE(bmap.is_used(cid, 0xb000)); ASSERT_FALSE(bmap.is_used(hoid, 0xb000)); ASSERT_FALSE(bmap.is_used(cid, 0xc000)); ASSERT_FALSE(bmap.is_used(hoid, 0xc000)); bmap.set_used(0x9001, 0x2fff, cid, hoid); ASSERT_TRUE(bmap.is_used(cid, 0x9001)); ASSERT_TRUE(bmap.is_used(hoid, 0x9001)); ASSERT_TRUE(bmap.is_used(cid, 0xa001)); ASSERT_TRUE(bmap.is_used(hoid, 0xa001)); ASSERT_TRUE(bmap.is_used(cid, 0xb001)); ASSERT_TRUE(bmap.is_used(hoid, 0xb001)); ASSERT_FALSE(bmap.is_used(cid, 0xc000)); ASSERT_FALSE(bmap.is_used(hoid, 0xc000)); bmap.set_used(0xa001, 0x2, cid, hoid); ASSERT_TRUE(bmap.is_used(cid, 0x9001)); ASSERT_TRUE(bmap.is_used(hoid, 0x9001)); ASSERT_TRUE(bmap.is_used(cid, 0xa001)); ASSERT_TRUE(bmap.is_used(hoid, 0xa001)); ASSERT_TRUE(bmap.is_used(cid, 0xb001)); ASSERT_TRUE(bmap.is_used(hoid, 0xb001)); ASSERT_FALSE(bmap.is_used(cid, 0xc000)); 
ASSERT_FALSE(bmap.is_used(hoid, 0xc000)); ASSERT_FALSE(bmap.is_used(cid, 0xc0000)); ASSERT_FALSE(bmap.is_used(hoid, 0xc0000)); ASSERT_FALSE(bmap.is_used(cid, 0xc1000)); ASSERT_FALSE(bmap.is_used(hoid, 0xc1000)); bmap.set_used(0xc0000, 0x2000, cid, hoid); ASSERT_TRUE(bmap.is_used(cid, 0xc0000)); ASSERT_TRUE(bmap.is_used(hoid, 0xc0000)); ASSERT_TRUE(bmap.is_used(cid, 0xc1000)); ASSERT_TRUE(bmap.is_used(hoid, 0xc1000)); interval_set<uint64_t> extents; extents.insert(0,0x500); extents.insert(0x800,0x100); extents.insert(0x1000,0x1000); extents.insert(0xa001,1); extents.insert(0xa0000,0xff8); ASSERT_EQ(3u, bmap.filter_out(extents)); ASSERT_TRUE(bmap.is_used(cid)); ASSERT_TRUE(bmap.is_used(hoid)); BlueStoreRepairer::StoreSpaceTracker bmap2; bmap2.init((uint64_t)0x3223b1d1000, 0x10000); ASSERT_EQ(0x1a0000u, bmap2.granularity); ASSERT_EQ(0x1edae4u, bmap2.collections_bfs.size()); ASSERT_EQ(0x1edae4u, bmap2.objects_bfs.size()); bmap2.set_used(0x3223b190000, 0x10000, cid, hoid); ASSERT_TRUE(bmap2.is_used(cid, 0x3223b190000)); ASSERT_TRUE(bmap2.is_used(hoid, 0x3223b190000)); ASSERT_TRUE(bmap2.is_used(cid, 0x3223b19f000)); ASSERT_TRUE(bmap2.is_used(hoid, 0x3223b19ffff)); } TEST(bluestore_blob_t, unused) { { bluestore_blob_t b; uint64_t min_alloc_size = 64 << 10; // 64 kB // _do_write_small 0x0~1000 uint64_t offset = 0x0; uint64_t length = 0x1000; // 4kB uint64_t suggested_boff = 0; PExtentVector extents; extents.emplace_back(0x1a560000, min_alloc_size); b.allocated(p2align(suggested_boff, min_alloc_size), 0 /*no matter*/, extents); b.mark_used(offset, length); ASSERT_FALSE(b.is_unused(offset, length)); // _do_write_small 0x2000~1000 offset = 0x2000; length = 0x1000; b.add_unused(0, 0x10000); ASSERT_TRUE(b.is_unused(offset, length)); b.mark_used(offset, length); ASSERT_FALSE(b.is_unused(offset, length)); // _do_write_small 0xc000~2000 offset = 0xc000; length = 0x2000; ASSERT_TRUE(b.is_unused(offset, length)); b.mark_used(offset, length); ASSERT_FALSE(b.is_unused(offset, 
length)); } { bluestore_blob_t b; uint64_t min_alloc_size = 64 << 10; // 64 kB // _do_write_small 0x11000~1000 uint64_t offset = 0x11000; uint64_t length = 0x1000; // 4kB uint64_t suggested_boff = 0x11000; PExtentVector extents; extents.emplace_back(0x1a560000, min_alloc_size); b.allocated(p2align(suggested_boff, min_alloc_size), 0 /*no matter*/, extents); b.add_unused(0, offset); b.add_unused(offset + length, min_alloc_size * 2 - offset - length); b.mark_used(offset, length); ASSERT_FALSE(b.is_unused(offset, length)); // _do_write_small 0x15000~3000 offset = 0x15000; length = 0x3000; ASSERT_TRUE(b.is_unused(offset, length)); b.mark_used(offset, length); ASSERT_FALSE(b.is_unused(offset, length)); } { // reuse blob bluestore_blob_t b; uint64_t min_alloc_size = 64 << 10; // 64 kB // _do_write_small 0x2a000~1000 // and 0x1d000~1000 uint64_t unused_granularity = 0x3000; // offsets and lenght below are selected to // be aligned with unused_granularity uint64_t offset0 = 0x2a000; uint64_t offset = 0x1d000; uint64_t length = 0x1000; // 4kB PExtentVector extents; extents.emplace_back(0x410000, min_alloc_size); b.allocated(p2align(offset0, min_alloc_size), min_alloc_size, extents); b.add_unused(0, min_alloc_size * 3); b.mark_used(offset0, length); ASSERT_FALSE(b.is_unused(offset0, length)); ASSERT_TRUE(b.is_unused(offset, length)); extents.clear(); extents.emplace_back(0x430000, min_alloc_size); b.allocated(p2align(offset, min_alloc_size), min_alloc_size, extents); b.mark_used(offset, length); ASSERT_FALSE(b.is_unused(offset0, length)); ASSERT_FALSE(b.is_unused(offset, length)); ASSERT_FALSE(b.is_unused(offset, unused_granularity)); ASSERT_TRUE(b.is_unused(0, offset / unused_granularity * unused_granularity)); ASSERT_TRUE(b.is_unused(offset + length, offset0 - offset - length)); auto end0_aligned = round_up_to(offset0 + length, unused_granularity); ASSERT_TRUE(b.is_unused(end0_aligned, min_alloc_size * 3 - end0_aligned)); } } // This UT is primarily intended to show how 
repair procedure // causes erroneous write to INVALID_OFFSET which is reported in // https://tracker.ceph.com/issues/51682 // Basic map_any functionality is tested as well though. // TEST(bluestore_blob_t, wrong_map_bl_in_51682) { { bluestore_blob_t b; uint64_t min_alloc_size = 4 << 10; // 64 kB b.allocated_test(bluestore_pextent_t(0x17ba000, 4 * min_alloc_size)); b.allocated_test(bluestore_pextent_t(0x17bf000, 4 * min_alloc_size)); b.allocated_test( bluestore_pextent_t( bluestore_pextent_t::INVALID_OFFSET, 1 * min_alloc_size)); b.allocated_test(bluestore_pextent_t(0x153c44d000, 7 * min_alloc_size)); b.mark_used(0, 0x8000); b.mark_used(0x9000, 0x7000); string s(0x7000, 'a'); bufferlist bl; bl.append(s); const size_t num_expected_entries = 5; uint64_t expected[num_expected_entries][2] = { {0x17ba000, 0x4000}, {0x17bf000, 0x3000}, {0x17c0000, 0x3000}, {0xffffffffffffffff, 0x1000}, {0x153c44d000, 0x3000}}; size_t expected_pos = 0; b.map_bl(0, bl, [&](uint64_t o, bufferlist& bl) { ASSERT_EQ(o, expected[expected_pos][0]); ASSERT_EQ(bl.length(), expected[expected_pos][1]); ++expected_pos; }); // 0x5000 is an improper offset presumably provided when doing a repair b.map_bl(0x5000, bl, [&](uint64_t o, bufferlist& bl) { ASSERT_EQ(o, expected[expected_pos][0]); ASSERT_EQ(bl.length(), expected[expected_pos][1]); ++expected_pos; }); ASSERT_EQ(expected_pos, num_expected_entries); } } //--------------------------------------------------------------------------------- static int verify_extent(const extent_t & ext, const extent_t *ext_arr, uint64_t ext_arr_size, uint64_t idx) { const extent_t & ext_ref = ext_arr[idx]; if (ext.offset == ext_ref.offset && ext.length == ext_ref.length) { return 0; } else { std::cerr << "mismatch was found at index " << idx << std::endl; if (ext.length == 0) { std::cerr << "Null extent was returned at idx = " << idx << std::endl; } unsigned start = std::max(((int32_t)(idx)-3), 0); unsigned end = std::min(idx+3, ext_arr_size); for (unsigned j = start; 
j < end; j++) { const extent_t & ext_ref = ext_arr[j]; std::cerr << j << ") ref_ext = [" << ext_ref.offset << ", " << ext_ref.length << "]" << std::endl; } std::cerr << idx << ") ext = [" << ext.offset << ", " << ext.length << "]" << std::endl; return -1; } } //--------------------------------------------------------------------------------- static int test_extents(uint64_t index, extent_t *ext_arr, uint64_t ext_arr_size, SimpleBitmap& sbmap, bool set) { const uint64_t MAX_JUMP_BIG = 1523; const uint64_t MAX_JUMP_SMALL = 19; const uint64_t MAX_LEN_BIG = 523; const uint64_t MAX_LEN_SMALL = 23; uint64_t n = sbmap.get_size(); uint64_t offset = 0; unsigned length, jump, i; for (i = 0; i < ext_arr_size; i++) { if (i & 3) { jump = std::rand() % MAX_JUMP_BIG; } else { jump = std::rand() % MAX_JUMP_SMALL; } offset += jump; if (i & 1) { length = std::rand() % MAX_LEN_BIG; } else { length = std::rand() % MAX_LEN_SMALL; } // make sure no zero length will be used length++; if (offset + length >= n) { break; } bool success; if (set) { success = sbmap.set(offset, length); } else { success = sbmap.clr(offset, length); } if (!success) { std::cerr << "Failed sbmap." << (set ? "set(" : "clr(") << offset << ", " << length << ")"<< std::endl; return -1; } // if this is not the first entry and no jump -> merge extents if ( (i==0) || (jump > 0) ) { ext_arr[i] = {offset, length}; } else { // merge 2 extents i --; ext_arr[i].length += length; } offset += length; } unsigned arr_size = std::min((uint64_t)i, ext_arr_size); std::cout << std::hex << std::right; std::cout << "[" << index << "] " << (set ? 
"Set::" : "Clr::") << " extents count = 0x" << arr_size; std::cout << std::dec << std::endl; offset = 0; extent_t ext; for(unsigned i = 0; i < arr_size; i++) { if (set) { ext = sbmap.get_next_set_extent(offset); } else { ext = sbmap.get_next_clr_extent(offset); } if (verify_extent(ext, ext_arr, ext_arr_size, i) != 0) { return -1; } offset = ext.offset + ext.length; } if (set) { ext = sbmap.get_next_set_extent(offset); } else { ext = sbmap.get_next_clr_extent(offset); } if (ext.length == 0) { return 0; } else { std::cerr << "sbmap.get_next_" << (set ? "set" : "clr") << "_extent(" << offset << ") return length = " << ext.length << std::endl; return -1; } } //--------------------------------------------------------------------------------- TEST(SimpleBitmap, basic) { const uint64_t MAX_EXTENTS_COUNT = 7131177; std::unique_ptr<extent_t[]> ext_arr = std::make_unique<extent_t[]>(MAX_EXTENTS_COUNT); ASSERT_TRUE(ext_arr != nullptr); const uint64_t BIT_COUNT = 4ULL << 30; // 4Gb = 512MB SimpleBitmap sbmap(g_ceph_context, BIT_COUNT); // use current time as seed for random generator std::srand(std::time(nullptr)); for (unsigned i = 0; i < 3; i++ ) { memset(ext_arr.get(), 0, sizeof(extent_t)*MAX_EXTENTS_COUNT); sbmap.clear_all(); ASSERT_TRUE(test_extents(i, ext_arr.get(), MAX_EXTENTS_COUNT, sbmap, true) == 0); memset(ext_arr.get(), 0, sizeof(extent_t)*MAX_EXTENTS_COUNT); sbmap.set_all(); ASSERT_TRUE(test_extents(i, ext_arr.get(), MAX_EXTENTS_COUNT, sbmap, false) == 0); } } //--------------------------------------------------------------------------------- static int test_intersections(unsigned test_idx, SimpleBitmap &sbmap, uint8_t map[], uint64_t map_size) { const uint64_t MAX_LEN_BIG = 523; const uint64_t MAX_LEN_SMALL = 23; bool success; uint64_t set_op_count = 0, clr_op_count = 0; unsigned length, i; for (i = 0; i < map_size / (MAX_LEN_BIG*2); i++) { uint64_t offset = (std::rand() % (map_size - 1)); if (i & 1) { length = std::rand() % MAX_LEN_BIG; } else { length = 
std::rand() % MAX_LEN_SMALL; } // make sure no zero length will be used length++; if (offset + length >= map_size) { continue; } // 2:1 set/clr bool set = (std::rand() % 3); if (set) { success = sbmap.set(offset, length); memset(map+offset, 0xFF, length); set_op_count++; } else { success = sbmap.clr(offset, length); memset(map+offset, 0x0, length); clr_op_count++; } if (!success) { std::cerr << "Failed sbmap." << (set ? "set(" : "clr(") << offset << ", " << length << ")"<< std::endl; return -1; } } uint64_t set_bit_count = 0; uint64_t clr_bit_count = 0; for(uint64_t idx = 0; idx < map_size; idx++) { if (map[idx]) { set_bit_count++; success = sbmap.bit_is_set(idx); } else { clr_bit_count++; success = sbmap.bit_is_clr(idx); } if (!success) { std::cerr << "expected: sbmap.bit_is_" << (map[idx] ? "set(" : "clr(") << idx << ")"<< std::endl; return -1; } } std::cout << std::hex << std::right << __func__ ; std::cout << " [" << test_idx << "] set_bit_count = 0x" << std::setfill('0') << std::setw(8) << set_bit_count << ", clr_bit_count = 0x" << std::setfill('0') << std::setw(8) << clr_bit_count << ", sum = 0x" << set_bit_count + clr_bit_count << std::endl; std::cout << std::dec; uint64_t offset = 0; for(uint64_t i = 0; i < (set_op_count + clr_op_count); i++) { extent_t ext = sbmap.get_next_set_extent(offset); //std::cout << "set_ext:: " << i << ") [" << ext.offset << ", " << ext.length << "]" << std::endl; for (uint64_t idx = ext.offset; idx < ext.offset + ext.length; idx++) { if (map[idx] != 0xFF) { std::cerr << "map[" << idx << "] is clear, but extent [" << ext.offset << ", " << ext.length << "] is set" << std::endl; return -1; } } offset = ext.offset + ext.length; } offset = 0; for(uint64_t i = 0; i < (set_op_count + clr_op_count); i++) { extent_t ext = sbmap.get_next_clr_extent(offset); //std::cout << "clr_ext:: " << i << ") [" << ext.offset << ", " << ext.length << "]" << std::endl; for (uint64_t idx = ext.offset; idx < ext.offset + ext.length; idx++) { if (map[idx] ) 
{ std::cerr << "map[" << idx << "] is set, but extent [" << ext.offset << ", " << ext.length << "] is free" << std::endl; return -1; } } offset = ext.offset + ext.length; } return 0; } //--------------------------------------------------------------------------------- TEST(SimpleBitmap, intersection) { const uint64_t MAP_SIZE = 1ULL << 30; // 1G SimpleBitmap sbmap(g_ceph_context, MAP_SIZE); // use current time as seed for random generator std::srand(std::time(nullptr)); std::unique_ptr<uint8_t[]> map = std::make_unique<uint8_t[]> (MAP_SIZE); ASSERT_TRUE(map != nullptr); for (unsigned i = 0; i < 1; i++ ) { sbmap.clear_all(); memset(map.get(), 0, MAP_SIZE); ASSERT_TRUE(test_intersections(i, sbmap, map.get(), MAP_SIZE) == 0); sbmap.set_all(); memset(map.get(), 0xFF, MAP_SIZE); ASSERT_TRUE(test_intersections(i, sbmap, map.get(), MAP_SIZE) == 0); } } //--------------------------------------------------------------------------------- static int test_extents_boundaries(uint64_t index, extent_t *ext_arr, uint64_t ext_arr_size, SimpleBitmap& sbmap, bool set) { uint64_t n = sbmap.get_size(); uint64_t offset = 0, k = 0; for(unsigned i = 0; i < 64; i++) { offset += i; if (offset >= n) { break; } for(unsigned length = 1; length <= 128; length++) { if (offset + length >= n) { break; } if (k >= ext_arr_size) { break; } bool success; if (set) { success = sbmap.set(offset, length); } else { success = sbmap.clr(offset, length); } if (!success) { std::cerr << "Failed sbmap." << (set ? "set(" : "clr(") << offset << ", " << length << ")"<< std::endl; return -1; } ext_arr[k++] = {offset, length}; if (length < 64) { offset += 64; } else { offset += 128; } } if (k >= ext_arr_size) { break; } } unsigned arr_size = std::min((uint64_t)k, ext_arr_size); std::cout << std::hex << std::right << __func__ ; std::cout << " [" << index << "] " << (set ? 
"Set::" : "Clr::") << " extents count = 0x" << arr_size; std::cout << std::dec << std::endl; offset = 0; extent_t ext; for(unsigned i = 0; i < arr_size; i++) { if (set) { ext = sbmap.get_next_set_extent(offset); } else { ext = sbmap.get_next_clr_extent(offset); } if (verify_extent(ext, ext_arr, ext_arr_size, i) != 0) { return -1; } offset = ext.offset + ext.length; } if (set) { ext = sbmap.get_next_set_extent(offset); } else { ext = sbmap.get_next_clr_extent(offset); } if (ext.length == 0) { return 0; } else { std::cerr << "sbmap.get_next_" << (set ? "set" : "clr") << "_extent(" << offset << ") return length = " << ext.length << std::endl; return -1; } } //--------------------------------------------------------------------------------- TEST(SimpleBitmap, boundaries) { const uint64_t MAX_EXTENTS_COUNT = 64 << 10; std::unique_ptr<extent_t[]> ext_arr = std::make_unique<extent_t[]>(MAX_EXTENTS_COUNT); ASSERT_TRUE(ext_arr != nullptr); // use current time as seed for random generator std::srand(std::time(nullptr)); uint64_t bit_count = 32 << 20; // 32Mb = 4MB unsigned count = 0; for (unsigned i = 0; i < 64; i++) { SimpleBitmap sbmap(g_ceph_context, bit_count+i); memset(ext_arr.get(), 0, sizeof(extent_t)*MAX_EXTENTS_COUNT); sbmap.clear_all(); ASSERT_TRUE(test_extents_boundaries(count, ext_arr.get(), MAX_EXTENTS_COUNT, sbmap, true) == 0); memset(ext_arr.get(), 0, sizeof(extent_t)*MAX_EXTENTS_COUNT); sbmap.set_all(); ASSERT_TRUE(test_extents_boundaries(count++, ext_arr.get(), MAX_EXTENTS_COUNT, sbmap, false) == 0); } } //--------------------------------------------------------------------------------- TEST(SimpleBitmap, boundaries2) { const uint64_t bit_count_base = 64 << 10; // 64Kb = 8MB const extent_t null_extent = {0, 0}; for (unsigned i = 0; i < 64; i++) { uint64_t bit_count = bit_count_base + i; extent_t full_extent = {0, bit_count}; SimpleBitmap sbmap(g_ceph_context, bit_count); sbmap.set(0, bit_count); ASSERT_TRUE(sbmap.get_next_set_extent(0) == full_extent); 
ASSERT_TRUE(sbmap.get_next_clr_extent(0) == null_extent); for (uint64_t bit = 0; bit < bit_count; bit++) { sbmap.clr(bit, 1); } ASSERT_TRUE(sbmap.get_next_set_extent(0) == null_extent); ASSERT_TRUE(sbmap.get_next_clr_extent(0) == full_extent); for (uint64_t bit = 0; bit < bit_count; bit++) { sbmap.set(bit, 1); } ASSERT_TRUE(sbmap.get_next_set_extent(0) == full_extent); ASSERT_TRUE(sbmap.get_next_clr_extent(0) == null_extent); sbmap.clr(0, bit_count); ASSERT_TRUE(sbmap.get_next_set_extent(0) == null_extent); ASSERT_TRUE(sbmap.get_next_clr_extent(0) == full_extent); } } TEST(shared_blob_2hash_tracker_t, basic_test) { shared_blob_2hash_tracker_t t1(1024 * 1024, 4096); ASSERT_TRUE(t1.count_non_zero() == 0); t1.inc(0, 0, 1); ASSERT_TRUE(t1.count_non_zero() != 0); t1.inc(0, 0, -1); ASSERT_TRUE(t1.count_non_zero() == 0); t1.inc(3, 0x1000, 2); ASSERT_TRUE(t1.count_non_zero() != 0); t1.inc(3, 0x1000, -1); ASSERT_TRUE(t1.count_non_zero() != 0); t1.inc(3, 0x1000, -1); ASSERT_TRUE(t1.count_non_zero() == 0); t1.inc(2, 0x2000, 5); ASSERT_TRUE(t1.count_non_zero() != 0); t1.inc(18, 0x2000, -5); ASSERT_TRUE(t1.count_non_zero() != 0); t1.inc(18, 0x2000, 1); ASSERT_TRUE(t1.count_non_zero() != 0); t1.inc(2, 0x2000, -1); ASSERT_TRUE(t1.count_non_zero() != 0); t1.inc(18, 0x2000, 4); ASSERT_TRUE(t1.count_non_zero() != 0); t1.inc(2, 0x2000, -4); ASSERT_TRUE(t1.count_non_zero() == 0); t1.inc(3, 0x3000, 2); ASSERT_TRUE(t1.count_non_zero() != 0); t1.inc(4, 0x3000, -1); ASSERT_TRUE(t1.count_non_zero() != 0); t1.inc(4, 0x3000, -1); ASSERT_TRUE(t1.count_non_zero() != 0); t1.inc(3, 0x3000, -2); ASSERT_TRUE(t1.count_non_zero() != 0); t1.inc(4, 0x3000, 1); ASSERT_TRUE(t1.count_non_zero() != 0); t1.inc(4, 0x3000, 1); ASSERT_TRUE(t1.count_non_zero() == 0); t1.inc(5, 0x1000, 1); t1.inc(5, 0x2000, 3); t1.inc(5, 0x3000, 2); t1.inc(5, 0x8000, 1); ASSERT_TRUE(t1.count_non_zero() != 0); ASSERT_TRUE(!t1.test_all_zero(5,0x1000)); ASSERT_TRUE(!t1.test_all_zero(5, 0x2000)); ASSERT_TRUE(!t1.test_all_zero(5, 
0x3000)); ASSERT_TRUE(t1.test_all_zero(5, 0x4000)); ASSERT_TRUE(!t1.test_all_zero(5, 0x8000)); ASSERT_TRUE(t1.test_all_zero_range(5, 0, 0x1000)); ASSERT_TRUE(t1.test_all_zero_range(5, 0x500, 0x500)); ASSERT_TRUE(!t1.test_all_zero_range(5, 0x500, 0x1500)); ASSERT_TRUE(!t1.test_all_zero_range(5, 0x1500, 0x3200)); ASSERT_TRUE(t1.test_all_zero_range(5, 0x4500, 0x1500)); ASSERT_TRUE(t1.test_all_zero_range(5, 0x4500, 0x3b00)); ASSERT_TRUE(!t1.test_all_zero_range(5, 0, 0x9000)); } TEST(bluestore_blob_use_tracker_t, mempool_stats_test) { using mempool::bluestore_cache_other::allocated_items; using mempool::bluestore_cache_other::allocated_bytes; uint64_t other_items0 = allocated_items(); uint64_t other_bytes0 = allocated_bytes(); { bluestore_blob_use_tracker_t* t1 = new bluestore_blob_use_tracker_t; t1->init(1024 * 1024, 4096); ASSERT_EQ(256, allocated_items() - other_items0); // = 1M / 4K ASSERT_EQ(1024, allocated_bytes() - other_bytes0); // = 1M / 4K * 4 delete t1; ASSERT_EQ(allocated_items(), other_items0); ASSERT_EQ(allocated_bytes(), other_bytes0); } { bluestore_blob_use_tracker_t* t1 = new bluestore_blob_use_tracker_t; t1->init(1024 * 1024, 4096); t1->add_tail(2048 * 1024, 4096); // proper stats update after tail add ASSERT_EQ(512, allocated_items() - other_items0); // = 2M / 4K ASSERT_EQ(2048, allocated_bytes() - other_bytes0); // = 2M / 4K * 4 delete t1; ASSERT_EQ(allocated_items(), other_items0); ASSERT_EQ(allocated_bytes(), other_bytes0); } { bluestore_blob_use_tracker_t* t1 = new bluestore_blob_use_tracker_t; t1->init(1024 * 1024, 4096); t1->prune_tail(512 * 1024); // no changes in stats after pruning ASSERT_EQ(256, allocated_items() - other_items0); // = 1M / 4K ASSERT_EQ(1024, allocated_bytes() - other_bytes0); // = 1M / 4K * 4 delete t1; ASSERT_EQ(allocated_items(), other_items0); ASSERT_EQ(allocated_bytes(), other_bytes0); } { bluestore_blob_use_tracker_t* t1 = new bluestore_blob_use_tracker_t; bluestore_blob_use_tracker_t* t2 = new 
bluestore_blob_use_tracker_t; t1->init(1024 * 1024, 4096); // t1 keeps the same amount of entries + t2 has got half of them t1->split(512 * 1024, t2); ASSERT_EQ(256 + 128, allocated_items() - other_items0); //= 1M / 4K*1.5 ASSERT_EQ(1024 + 512, allocated_bytes() - other_bytes0); //= 1M / 4K*4*1.5 // t1 & t2 release everything, then t2 get one less entry than t2 had had // before t1->split(4096, t2); ASSERT_EQ(127, allocated_items() - other_items0); // = 512K / 4K - 1 ASSERT_EQ(127 * 4, allocated_bytes() - other_bytes0); // = 512L / 4K * 4 - 4 delete t1; delete t2; ASSERT_EQ(allocated_items(), other_items0); ASSERT_EQ(allocated_bytes(), other_bytes0); } } int main(int argc, char **argv) { auto args = argv_to_vec(argc, argv); auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); common_init_finish(g_ceph_context); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
85,116
35.266297
135
cc
null
ceph-main/src/test/objectstore/test_deferred.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include <stdio.h> #include <string.h> #include <iostream> #include <memory> #include <time.h> #include "os/ObjectStore.h" #include "os/bluestore/BlueStore.h" #include "include/Context.h" #include "common/ceph_argparse.h" #include "global/global_init.h" #include "common/ceph_mutex.h" #include "common/Cond.h" #include "common/errno.h" #include "common/options.h" // for the size literals #include <semaphore.h> class C_do_action : public Context { public: std::function<void()> action; C_do_action(std::function<void()> action) : action(action) {} void finish(int r) override { action(); } }; void create_deferred_and_terminate() { std::unique_ptr<ObjectStore> store; g_ceph_context->_conf._clear_safe_to_start_threads(); g_ceph_context->_conf.set_val_or_die("bluestore_prefer_deferred_size", "4096"); g_ceph_context->_conf.set_val_or_die("bluestore_allocator", "bitmap"); g_ceph_context->_conf.set_val_or_die("bluestore_block_size", "10240000000"); g_ceph_context->_conf.apply_changes(nullptr); int64_t poolid; coll_t cid; ghobject_t hoid; ObjectStore::CollectionHandle ch; ceph_assert(::mkdir("bluestore.test_temp_dir", 0777) == 0); store = ObjectStore::create(g_ceph_context, "bluestore", "bluestore.test_temp_dir", "store_test_temp_journal"); ceph_assert(store->mkfs() == 0); ceph_assert(store->mount() == 0); poolid = 11; cid = coll_t(spg_t(pg_t(1, poolid), shard_id_t::NO_SHARD)); ch = store->create_new_collection(cid); int r; { ObjectStore::Transaction t; t.create_collection(cid, 0); r = store->queue_transaction(ch, std::move(t)); ceph_assert(r == 0); } { ObjectStore::Transaction t; std::string oid = "zapchajdziura"; ghobject_t hoid(hobject_t(oid, "", CEPH_NOSNAP, 1, poolid, "")); bufferlist bl; bl.append(std::string(0xe000, '-')); t.write(cid, hoid, 0, 0xe000, bl); r = store->queue_transaction(ch, std::move(t)); ceph_assert(r == 0); } size_t object_count = 10; // initial fill 
bufferlist bl_64K; bl_64K.append(std::string(64 * 1024, '-')); std::atomic<size_t> prefill_counter{0}; sem_t prefill_mutex; sem_init(&prefill_mutex, 0, 0); for (size_t o = 0; o < object_count; o++) { ObjectStore::Transaction t; std::string oid = "object-" + std::to_string(o); ghobject_t hoid(hobject_t(oid, "", CEPH_NOSNAP, 1, poolid, "")); t.write(cid, hoid, 0, bl_64K.length(), bl_64K); t.register_on_commit(new C_do_action([&] { if (++prefill_counter == object_count) { sem_post(&prefill_mutex); } })); r = store->queue_transaction(ch, std::move(t)); ceph_assert(r == 0); } sem_wait(&prefill_mutex); // small deferred writes over object // and complete overwrite of previous one bufferlist bl_8_bytes; bl_8_bytes.append("abcdefgh"); std::atomic<size_t> deferred_counter{0}; for (size_t o = 0; o < object_count - 1; o++) { ObjectStore::Transaction t; // sprinkle deferred writes std::string oid_d = "object-" + std::to_string(o + 1); ghobject_t hoid_d(hobject_t(oid_d, "", CEPH_NOSNAP, 1, poolid, "")); for(int i = 0; i < 16; i++) { t.write(cid, hoid_d, 4096 * i, bl_8_bytes.length(), bl_8_bytes); } // overwrite previous object std::string oid_m = "object-" + std::to_string(o); ghobject_t hoid_m(hobject_t(oid_m, "", CEPH_NOSNAP, 1, poolid, "")); t.write(cid, hoid_m, 0, bl_64K.length(), bl_64K); t.register_on_commit(new C_do_action([&] { if (++deferred_counter == object_count - 1) { exit(0); } })); r = store->queue_transaction(ch, std::move(t)); ceph_assert(r == 0); } sleep(10); ceph_assert(0 && "should not reach here"); } int main(int argc, char **argv) { auto args = argv_to_vec(argc, argv); auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); common_init_finish(g_ceph_context); create_deferred_and_terminate(); return 0; }
4,206
27.619048
81
cc
null
ceph-main/src/test/objectstore/test_kv.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <stdio.h> #include <string.h> #include <iostream> #include <time.h> #include <sys/mount.h> #include "kv/KeyValueDB.h" #include "kv/RocksDBStore.h" #include "include/Context.h" #include "common/ceph_argparse.h" #include "global/global_init.h" #include "common/Cond.h" #include "common/errno.h" #include "include/stringify.h" #include <gtest/gtest.h> using namespace std; class KVTest : public ::testing::TestWithParam<const char*> { public: boost::scoped_ptr<KeyValueDB> db; KVTest() : db(0) {} string _bl_to_str(bufferlist val) { string str(val.c_str(), val.length()); return str; } void rm_r(string path) { string cmd = string("rm -r ") + path; cout << "==> " << cmd << std::endl; int r = ::system(cmd.c_str()); if (r) { cerr << "failed with exit code " << r << ", continuing anyway" << std::endl; } } void init() { cout << "Creating " << string(GetParam()) << "\n"; db.reset(KeyValueDB::create(g_ceph_context, string(GetParam()), "kv_test_temp_dir")); } void fini() { db.reset(NULL); } void SetUp() override { int r = ::mkdir("kv_test_temp_dir", 0777); if (r < 0 && errno != EEXIST) { r = -errno; cerr << __func__ << ": unable to create kv_test_temp_dir: " << cpp_strerror(r) << std::endl; return; } init(); } void TearDown() override { fini(); rm_r("kv_test_temp_dir"); } }; TEST_P(KVTest, OpenClose) { ASSERT_EQ(0, db->create_and_open(cout)); db->close(); db->open(cout); fini(); } TEST_P(KVTest, OpenCloseReopenClose) { ASSERT_EQ(0, db->create_and_open(cout)); fini(); init(); ASSERT_EQ(0, db->open(cout)); fini(); } /* * Basic write and read test case in same database 
session. */ TEST_P(KVTest, OpenWriteRead) { ASSERT_EQ(0, db->create_and_open(cout)); { KeyValueDB::Transaction t = db->get_transaction(); bufferlist value; value.append("value"); t->set("prefix", "key", value); value.clear(); value.append("value2"); t->set("prefix", "key2", value); value.clear(); value.append("value3"); t->set("prefix", "key3", value); db->submit_transaction_sync(t); bufferlist v1, v2; ASSERT_EQ(0, db->get("prefix", "key", &v1)); ASSERT_EQ(v1.length(), 5u); (v1.c_str())[v1.length()] = 0x0; ASSERT_EQ(std::string(v1.c_str()), std::string("value")); ASSERT_EQ(0, db->get("prefix", "key2", &v2)); ASSERT_EQ(v2.length(), 6u); (v2.c_str())[v2.length()] = 0x0; ASSERT_EQ(std::string(v2.c_str()), std::string("value2")); } fini(); } TEST_P(KVTest, PutReopen) { ASSERT_EQ(0, db->create_and_open(cout)); { KeyValueDB::Transaction t = db->get_transaction(); bufferlist value; value.append("value"); t->set("prefix", "key", value); t->set("prefix", "key2", value); t->set("prefix", "key3", value); db->submit_transaction_sync(t); } fini(); init(); ASSERT_EQ(0, db->open(cout)); { bufferlist v1, v2; ASSERT_EQ(0, db->get("prefix", "key", &v1)); ASSERT_EQ(v1.length(), 5u); ASSERT_EQ(0, db->get("prefix", "key2", &v2)); ASSERT_EQ(v2.length(), 5u); } { KeyValueDB::Transaction t = db->get_transaction(); t->rmkey("prefix", "key"); t->rmkey("prefix", "key3"); db->submit_transaction_sync(t); } fini(); init(); ASSERT_EQ(0, db->open(cout)); { bufferlist v1, v2, v3; ASSERT_EQ(-ENOENT, db->get("prefix", "key", &v1)); ASSERT_EQ(0, db->get("prefix", "key2", &v2)); ASSERT_EQ(v2.length(), 5u); ASSERT_EQ(-ENOENT, db->get("prefix", "key3", &v3)); } fini(); } TEST_P(KVTest, BenchCommit) { int n = 1024; ASSERT_EQ(0, db->create_and_open(cout)); utime_t start = ceph_clock_now(); { cout << "priming" << std::endl; // prime bufferlist big; bufferptr bp(1048576); bp.zero(); big.append(bp); for (int i=0; i<30; ++i) { KeyValueDB::Transaction t = db->get_transaction(); t->set("prefix", "big" + 
stringify(i), big); db->submit_transaction_sync(t); } } cout << "now doing small writes" << std::endl; bufferlist data; bufferptr bp(1024); bp.zero(); data.append(bp); for (int i=0; i<n; ++i) { KeyValueDB::Transaction t = db->get_transaction(); t->set("prefix", "key" + stringify(i), data); db->submit_transaction_sync(t); } utime_t end = ceph_clock_now(); utime_t dur = end - start; cout << n << " commits in " << dur << ", avg latency " << (dur / (double)n) << std::endl; fini(); } struct AppendMOP : public KeyValueDB::MergeOperator { void merge_nonexistent( const char *rdata, size_t rlen, std::string *new_value) override { *new_value = "?" + std::string(rdata, rlen); } void merge( const char *ldata, size_t llen, const char *rdata, size_t rlen, std::string *new_value) override { *new_value = std::string(ldata, llen) + std::string(rdata, rlen); } // We use each operator name and each prefix to construct the // overall RocksDB operator name for consistency check at open time. const char *name() const override { return "Append"; } }; string tostr(bufferlist& b) { return string(b.c_str(),b.length()); } TEST_P(KVTest, Merge) { shared_ptr<KeyValueDB::MergeOperator> p(new AppendMOP); int r = db->set_merge_operator("A",p); if (r < 0) return; // No merge operators for this database type ASSERT_EQ(0, db->create_and_open(cout)); { KeyValueDB::Transaction t = db->get_transaction(); bufferlist v1, v2, v3; v1.append(string("1")); v2.append(string("2")); v3.append(string("3")); t->set("P", "K1", v1); t->set("A", "A1", v2); t->rmkey("A", "A2"); t->merge("A", "A2", v3); db->submit_transaction_sync(t); } { bufferlist v1, v2, v3; ASSERT_EQ(0, db->get("P", "K1", &v1)); ASSERT_EQ(tostr(v1), "1"); ASSERT_EQ(0, db->get("A", "A1", &v2)); ASSERT_EQ(tostr(v2), "2"); ASSERT_EQ(0, db->get("A", "A2", &v3)); ASSERT_EQ(tostr(v3), "?3"); } { KeyValueDB::Transaction t = db->get_transaction(); bufferlist v1; v1.append(string("1")); t->merge("A", "A2", v1); db->submit_transaction_sync(t); } { 
bufferlist v; ASSERT_EQ(0, db->get("A", "A2", &v)); ASSERT_EQ(tostr(v), "?31"); } fini(); } TEST_P(KVTest, RMRange) { ASSERT_EQ(0, db->create_and_open(cout)); bufferlist value; value.append("value"); { KeyValueDB::Transaction t = db->get_transaction(); t->set("prefix", "key1", value); t->set("prefix", "key2", value); t->set("prefix", "key3", value); t->set("prefix", "key4", value); t->set("prefix", "key45", value); t->set("prefix", "key5", value); t->set("prefix", "key6", value); db->submit_transaction_sync(t); } { KeyValueDB::Transaction t = db->get_transaction(); t->set("prefix", "key7", value); t->set("prefix", "key8", value); t->rm_range_keys("prefix", "key2", "key7"); db->submit_transaction_sync(t); bufferlist v1, v2; ASSERT_EQ(0, db->get("prefix", "key1", &v1)); v1.clear(); ASSERT_EQ(-ENOENT, db->get("prefix", "key45", &v1)); ASSERT_EQ(0, db->get("prefix", "key8", &v1)); v1.clear(); ASSERT_EQ(-ENOENT, db->get("prefix", "key2", &v1)); ASSERT_EQ(0, db->get("prefix", "key7", &v2)); } { KeyValueDB::Transaction t = db->get_transaction(); t->rm_range_keys("prefix", "key", "key"); db->submit_transaction_sync(t); bufferlist v1, v2; ASSERT_EQ(0, db->get("prefix", "key1", &v1)); ASSERT_EQ(0, db->get("prefix", "key8", &v2)); } { KeyValueDB::Transaction t = db->get_transaction(); t->rm_range_keys("prefix", "key-", "key~"); db->submit_transaction_sync(t); bufferlist v1, v2; ASSERT_EQ(-ENOENT, db->get("prefix", "key1", &v1)); ASSERT_EQ(-ENOENT, db->get("prefix", "key8", &v2)); } fini(); } TEST_P(KVTest, ShardingRMRange) { if(string(GetParam()) != "rocksdb") return; std::string cfs("O(7)="); ASSERT_EQ(0, db->create_and_open(cout, cfs)); { KeyValueDB::Transaction t = db->get_transaction(); for (size_t i = 0; i < 1000; i++) { bufferlist value; char* a; ASSERT_EQ(asprintf(&a, "key%3.3ld", i), 6); value.append(a); t->set("O", a, value); free(a); } db->submit_transaction_sync(t); } { KeyValueDB::Transaction t = db->get_transaction(); t->rm_range_keys("O", "key277", "key467"); 
db->submit_transaction_sync(t); } for (size_t i = 0; i < 1000; i++) { char* key; ASSERT_EQ(asprintf(&key, "key%3.3ld", i), 6); bufferlist value; int r = db->get("O", key, &value); ASSERT_EQ(r, (i >= 277 && i < 467 ? -ENOENT : 0)); free(key); } fini(); } TEST_P(KVTest, RocksDBColumnFamilyTest) { if(string(GetParam()) != "rocksdb") return; std::string cfs("cf1 cf2"); ASSERT_EQ(0, db->init(g_conf()->bluestore_rocksdb_options)); cout << "creating two column families and opening them" << std::endl; ASSERT_EQ(0, db->create_and_open(cout, cfs)); { KeyValueDB::Transaction t = db->get_transaction(); bufferlist value; value.append("value"); cout << "write a transaction includes three keys in different CFs" << std::endl; t->set("prefix", "key", value); t->set("cf1", "key", value); t->set("cf2", "key2", value); ASSERT_EQ(0, db->submit_transaction_sync(t)); } fini(); init(); ASSERT_EQ(0, db->open(cout, cfs)); { bufferlist v1, v2, v3; cout << "reopen db and read those keys" << std::endl; ASSERT_EQ(0, db->get("prefix", "key", &v1)); ASSERT_EQ(0, _bl_to_str(v1) != "value"); ASSERT_EQ(0, db->get("cf1", "key", &v2)); ASSERT_EQ(0, _bl_to_str(v2) != "value"); ASSERT_EQ(0, db->get("cf2", "key2", &v3)); ASSERT_EQ(0, _bl_to_str(v2) != "value"); } { cout << "delete two keys in CFs" << std::endl; KeyValueDB::Transaction t = db->get_transaction(); t->rmkey("prefix", "key"); t->rmkey("cf2", "key2"); ASSERT_EQ(0, db->submit_transaction_sync(t)); } fini(); init(); ASSERT_EQ(0, db->open(cout, cfs)); { cout << "reopen db and read keys again." 
<< std::endl; bufferlist v1, v2, v3; ASSERT_EQ(-ENOENT, db->get("prefix", "key", &v1)); ASSERT_EQ(0, db->get("cf1", "key", &v2)); ASSERT_EQ(0, _bl_to_str(v2) != "value"); ASSERT_EQ(-ENOENT, db->get("cf2", "key2", &v3)); } fini(); } TEST_P(KVTest, RocksDBIteratorTest) { if(string(GetParam()) != "rocksdb") return; std::string cfs("cf1"); ASSERT_EQ(0, db->init(g_conf()->bluestore_rocksdb_options)); cout << "creating one column family and opening it" << std::endl; ASSERT_EQ(0, db->create_and_open(cout, cfs)); { KeyValueDB::Transaction t = db->get_transaction(); bufferlist bl1; bl1.append("hello"); bufferlist bl2; bl2.append("world"); cout << "write some kv pairs into default and new CFs" << std::endl; t->set("prefix", "key1", bl1); t->set("prefix", "key2", bl2); t->set("cf1", "key1", bl1); t->set("cf1", "key2", bl2); ASSERT_EQ(0, db->submit_transaction_sync(t)); } { cout << "iterating the default CF" << std::endl; KeyValueDB::Iterator iter = db->get_iterator("prefix"); iter->seek_to_first(); ASSERT_EQ(1, iter->valid()); ASSERT_EQ("key1", iter->key()); ASSERT_EQ("hello", _bl_to_str(iter->value())); ASSERT_EQ(0, iter->next()); ASSERT_EQ(1, iter->valid()); ASSERT_EQ("key2", iter->key()); ASSERT_EQ("world", _bl_to_str(iter->value())); } { cout << "iterating the new CF" << std::endl; KeyValueDB::Iterator iter = db->get_iterator("cf1"); iter->seek_to_first(); ASSERT_EQ(1, iter->valid()); ASSERT_EQ("key1", iter->key()); ASSERT_EQ("hello", _bl_to_str(iter->value())); ASSERT_EQ(0, iter->next()); ASSERT_EQ(1, iter->valid()); ASSERT_EQ("key2", iter->key()); ASSERT_EQ("world", _bl_to_str(iter->value())); } fini(); } TEST_P(KVTest, RocksDBShardingIteratorTest) { if(string(GetParam()) != "rocksdb") return; std::string cfs("A(6)"); ASSERT_EQ(0, db->init(g_conf()->bluestore_rocksdb_options)); cout << "creating one column family and opening it" << std::endl; ASSERT_EQ(0, db->create_and_open(cout, cfs)); { KeyValueDB::Transaction t = db->get_transaction(); for (int v = 100; v <= 999; 
v++) { std::string str = to_string(v); bufferlist val; val.append(str); t->set("A", str, val); } ASSERT_EQ(0, db->submit_transaction_sync(t)); } { KeyValueDB::Iterator it = db->get_iterator("A"); int pos = 0; ASSERT_EQ(it->lower_bound(to_string(pos)), 0); for (pos = 100; pos <= 999; pos++) { ASSERT_EQ(it->valid(), true); ASSERT_EQ(it->key(), to_string(pos)); ASSERT_EQ(it->value().to_str(), to_string(pos)); it->next(); } ASSERT_EQ(it->valid(), false); pos = 999; ASSERT_EQ(it->lower_bound(to_string(pos)), 0); for (pos = 999; pos >= 100; pos--) { ASSERT_EQ(it->valid(), true); ASSERT_EQ(it->key(), to_string(pos)); ASSERT_EQ(it->value().to_str(), to_string(pos)); it->prev(); } ASSERT_EQ(it->valid(), false); } fini(); } TEST_P(KVTest, RocksDBCFMerge) { if(string(GetParam()) != "rocksdb") return; shared_ptr<KeyValueDB::MergeOperator> p(new AppendMOP); int r = db->set_merge_operator("cf1",p); if (r < 0) return; // No merge operators for this database type std::string cfs("cf1"); ASSERT_EQ(0, db->init(g_conf()->bluestore_rocksdb_options)); cout << "creating one column family and opening it" << std::endl; ASSERT_EQ(0, db->create_and_open(cout, cfs)); { KeyValueDB::Transaction t = db->get_transaction(); bufferlist v1, v2, v3; v1.append(string("1")); v2.append(string("2")); v3.append(string("3")); t->set("P", "K1", v1); t->set("cf1", "A1", v2); t->rmkey("cf1", "A2"); t->merge("cf1", "A2", v3); db->submit_transaction_sync(t); } { bufferlist v1, v2, v3; ASSERT_EQ(0, db->get("P", "K1", &v1)); ASSERT_EQ(tostr(v1), "1"); ASSERT_EQ(0, db->get("cf1", "A1", &v2)); ASSERT_EQ(tostr(v2), "2"); ASSERT_EQ(0, db->get("cf1", "A2", &v3)); ASSERT_EQ(tostr(v3), "?3"); } { KeyValueDB::Transaction t = db->get_transaction(); bufferlist v1; v1.append(string("1")); t->merge("cf1", "A2", v1); db->submit_transaction_sync(t); } { bufferlist v; ASSERT_EQ(0, db->get("cf1", "A2", &v)); ASSERT_EQ(tostr(v), "?31"); } fini(); } TEST_P(KVTest, RocksDB_estimate_size) { if(string(GetParam()) != "rocksdb") 
GTEST_SKIP(); std::string cfs("cf1"); ASSERT_EQ(0, db->init(g_conf()->bluestore_rocksdb_options)); cout << "creating one column family and opening it" << std::endl; ASSERT_EQ(0, db->create_and_open(cout)); for(int test = 0; test < 20; test++) { KeyValueDB::Transaction t = db->get_transaction(); bufferlist v1; v1.append(string(1000, '1')); for (int i = 0; i < 100; i++) t->set("A", to_string(rand()%100000), v1); db->submit_transaction_sync(t); db->compact(); int64_t size_a = db->estimate_prefix_size("A",""); ASSERT_GT(size_a, (test + 1) * 1000 * 100 * 0.5); ASSERT_LT(size_a, (test + 1) * 1000 * 100 * 1.5); int64_t size_a1 = db->estimate_prefix_size("A","1"); ASSERT_GT(size_a1, (test + 1) * 1000 * 100 * 0.1 * 0.5); ASSERT_LT(size_a1, (test + 1) * 1000 * 100 * 0.1 * 1.5); int64_t size_b = db->estimate_prefix_size("B",""); ASSERT_EQ(size_b, 0); } fini(); } TEST_P(KVTest, RocksDB_estimate_size_column_family) { if(string(GetParam()) != "rocksdb") GTEST_SKIP(); std::string cfs("cf1"); ASSERT_EQ(0, db->init(g_conf()->bluestore_rocksdb_options)); cout << "creating one column family and opening it" << std::endl; ASSERT_EQ(0, db->create_and_open(cout, cfs)); for(int test = 0; test < 20; test++) { KeyValueDB::Transaction t = db->get_transaction(); bufferlist v1; v1.append(string(1000, '1')); for (int i = 0; i < 100; i++) t->set("cf1", to_string(rand()%100000), v1); db->submit_transaction_sync(t); db->compact(); int64_t size_a = db->estimate_prefix_size("cf1",""); ASSERT_GT(size_a, (test + 1) * 1000 * 100 * 0.5); ASSERT_LT(size_a, (test + 1) * 1000 * 100 * 1.5); int64_t size_a1 = db->estimate_prefix_size("cf1","1"); ASSERT_GT(size_a1, (test + 1) * 1000 * 100 * 0.1 * 0.5); ASSERT_LT(size_a1, (test + 1) * 1000 * 100 * 0.1 * 1.5); int64_t size_b = db->estimate_prefix_size("B",""); ASSERT_EQ(size_b, 0); } fini(); } TEST_P(KVTest, RocksDB_parse_sharding_def) { if(string(GetParam()) != "rocksdb") GTEST_SKIP(); bool result; std::vector<RocksDBStore::ColumnFamily> sharding_def; char 
const* error_position = nullptr; std::string error_msg; std::string_view text_def = "A(10,0-30) B(6)=option1,option2=aaaa C"; result = RocksDBStore::parse_sharding_def(text_def, sharding_def, &error_position, &error_msg); ASSERT_EQ(result, true); ASSERT_EQ(error_position, nullptr); ASSERT_EQ(error_msg, ""); std::cout << text_def << std::endl; if (error_position) std::cout << std::string(error_position - text_def.begin(), ' ') << "^" << error_msg << std::endl; ASSERT_EQ(sharding_def.size(), 3); ASSERT_EQ(sharding_def[0].name, "A"); ASSERT_EQ(sharding_def[0].shard_cnt, 10); ASSERT_EQ(sharding_def[0].hash_l, 0); ASSERT_EQ(sharding_def[0].hash_h, 30); ASSERT_EQ(sharding_def[1].name, "B"); ASSERT_EQ(sharding_def[1].shard_cnt, 6); ASSERT_EQ(sharding_def[1].options, "option1,option2=aaaa"); ASSERT_EQ(sharding_def[2].name, "C"); ASSERT_EQ(sharding_def[2].shard_cnt, 1); text_def = "A(10 B(6)=option C"; result = RocksDBStore::parse_sharding_def(text_def, sharding_def, &error_position, &error_msg); std::cout << text_def << std::endl; if (error_position) std::cout << std::string(error_position - text_def.begin(), ' ') << "^" << error_msg << std::endl; ASSERT_EQ(result, false); ASSERT_NE(error_position, nullptr); ASSERT_NE(error_msg, ""); text_def = "A(10,1) B(6)=option C"; result = RocksDBStore::parse_sharding_def(text_def, sharding_def, &error_position, &error_msg); std::cout << text_def << std::endl; std::cout << std::string(error_position - text_def.begin(), ' ') << "^" << error_msg << std::endl; ASSERT_EQ(result, false); ASSERT_NE(error_position, nullptr); ASSERT_NE(error_msg, ""); } class RocksDBShardingTest : public ::testing::TestWithParam<const char*> { public: boost::scoped_ptr<KeyValueDB> db; RocksDBShardingTest() : db(0) {} string _bl_to_str(bufferlist val) { string str(val.c_str(), val.length()); return str; } void rm_r(string path) { string cmd = string("rm -r ") + path; if (verbose) cout << "==> " << cmd << std::endl; int r = ::system(cmd.c_str()); if (r) { cerr 
<< "failed with exit code " << r << ", continuing anyway" << std::endl; } } void SetUp() override { verbose = getenv("VERBOSE") && strcmp(getenv("VERBOSE"), "1") == 0; int r = ::mkdir("kv_test_temp_dir", 0777); if (r < 0 && errno != EEXIST) { r = -errno; cerr << __func__ << ": unable to create kv_test_temp_dir: " << cpp_strerror(r) << std::endl; return; } db.reset(KeyValueDB::create(g_ceph_context, "rocksdb", "kv_test_temp_dir")); ASSERT_EQ(0, db->init(g_conf()->bluestore_rocksdb_options)); if (verbose) cout << "Creating database with sharding: " << GetParam() << std::endl; ASSERT_EQ(0, db->create_and_open(cout, GetParam())); } void TearDown() override { db.reset(nullptr); rm_r("kv_test_temp_dir"); } /* A - main 0/1/20 B - shard 1/3 x 0/1/20 C - main 0/1/20 D - shard 1/3 x 0/1/20 E - main 0/1/20 */ bool verbose; std::vector<std::string> sharding_defs = { "Betelgeuse D", "Betelgeuse(3) D", "Betelgeuse D(3)", "Betelgeuse(3) D(3)"}; std::vector<std::string> prefixes = {"Ad", "Betelgeuse", "C", "D", "Evade"}; std::vector<std::string> randoms = {"0", "1", "2", "3", "4", "5", "found", "brain", "fully", "pen", "worth", "race", "stand", "nodded", "whenever", "surrounded", "industrial", "skin", "this", "direction", "family", "beginning", "whenever", "held", "metal", "year", "like", "valuable", "softly", "whistle", "perfectly", "broken", "idea", "also", "coffee", "branch", "tongue", "immediately", "bent", "partly", "burn", "include", "certain", "burst", "final", "smoke", "positive", "perfectly" }; int R = randoms.size(); typedef int test_id[6]; void zero(test_id& x) { k = 0; v = 0; for (auto& i:x) i = 0; } bool end(const test_id& x) { return x[5] != 0; } void next(test_id& x) { x[0]++; for (int i = 0; i < 5; i++) { if (x[i] == 3) { x[i] = 0; ++x[i + 1]; } } } std::map<std::string, std::string> data; int k = 0; int v = 0; void generate_data(const test_id& x) { data.clear(); for (int i = 0; i < 5; i++) { if (verbose) std::cout << x[i] << "-"; switch (x[i]) { case 0: break; 
case 1: data[RocksDBStore::combine_strings(prefixes[i], randoms[k++ % R])] = randoms[v++ % R]; break; case 2: std::string base = randoms[k++ % R]; for (int j = 0; j < 10; j++) { data[RocksDBStore::combine_strings(prefixes[i], base + "." + randoms[k++ % R])] = randoms[v++ % R]; } break; } } } void data_to_db() { KeyValueDB::Transaction t = db->get_transaction(); for (auto &d : data) { bufferlist v1; v1.append(d.second); string prefix; string key; RocksDBStore::split_key(d.first, &prefix, &key); t->set(prefix, key, v1); if (verbose) std::cout << "SET " << prefix << " " << key << std::endl; } ASSERT_EQ(db->submit_transaction_sync(t), 0); } void clear_db() { KeyValueDB::Transaction t = db->get_transaction(); for (auto &d : data) { string prefix; string key; RocksDBStore::split_key(d.first, &prefix, &key); t->rmkey(prefix, key); } ASSERT_EQ(db->submit_transaction_sync(t), 0); //paranoid, check if db empty KeyValueDB::WholeSpaceIterator it = db->get_wholespace_iterator(); ASSERT_EQ(it->seek_to_first(), 0); ASSERT_EQ(it->valid(), false); } }; TEST_P(RocksDBShardingTest, wholespace_next) { test_id X; zero(X); do { generate_data(X); data_to_db(); KeyValueDB::WholeSpaceIterator it = db->get_wholespace_iterator(); //move forward auto dit = data.begin(); int r = it->seek_to_first(); ASSERT_EQ(r, 0); ASSERT_EQ(it->valid(), (dit != data.end())); while (dit != data.end()) { ASSERT_EQ(it->valid(), true); string prefix; string key; RocksDBStore::split_key(dit->first, &prefix, &key); auto raw_key = it->raw_key(); ASSERT_EQ(raw_key.first, prefix); ASSERT_EQ(raw_key.second, key); ASSERT_EQ(it->value().to_str(), dit->second); if (verbose) std::cout << "next " << prefix << " " << key << std::endl; ASSERT_EQ(it->next(), 0); ++dit; } ASSERT_EQ(it->valid(), false); clear_db(); next(X); } while (!end(X)); } TEST_P(RocksDBShardingTest, wholespace_prev) { test_id X; zero(X); do { generate_data(X); data_to_db(); KeyValueDB::WholeSpaceIterator it = db->get_wholespace_iterator(); auto dit = 
data.rbegin(); int r = it->seek_to_last(); ASSERT_EQ(r, 0); ASSERT_EQ(it->valid(), (dit != data.rend())); while (dit != data.rend()) { ASSERT_EQ(it->valid(), true); string prefix; string key; RocksDBStore::split_key(dit->first, &prefix, &key); auto raw_key = it->raw_key(); ASSERT_EQ(raw_key.first, prefix); ASSERT_EQ(raw_key.second, key); ASSERT_EQ(it->value().to_str(), dit->second); if (verbose) std::cout << "prev " << prefix << " " << key << std::endl; ASSERT_EQ(it->prev(), 0); ++dit; } ASSERT_EQ(it->valid(), false); clear_db(); next(X); } while (!end(X)); } TEST_P(RocksDBShardingTest, wholespace_lower_bound) { test_id X; zero(X); do { generate_data(X); data_to_db(); KeyValueDB::WholeSpaceIterator it = db->get_wholespace_iterator(); auto dit = data.begin(); int r = it->seek_to_first(); ASSERT_EQ(r, 0); ASSERT_EQ(it->valid(), (dit != data.end())); while (dit != data.end()) { ASSERT_EQ(it->valid(), true); string prefix; string key; RocksDBStore::split_key(dit->first, &prefix, &key); KeyValueDB::WholeSpaceIterator it1 = db->get_wholespace_iterator(); ASSERT_EQ(it1->lower_bound(prefix, key), 0); ASSERT_EQ(it1->valid(), true); auto raw_key = it1->raw_key(); ASSERT_EQ(raw_key.first, prefix); ASSERT_EQ(raw_key.second, key); if (verbose) std::cout << "lower_bound " << prefix << " " << key << std::endl; ASSERT_EQ(it->next(), 0); ++dit; } ASSERT_EQ(it->valid(), false); clear_db(); next(X); } while (!end(X)); } TEST_P(RocksDBShardingTest, wholespace_upper_bound) { test_id X; zero(X); do { generate_data(X); data_to_db(); KeyValueDB::WholeSpaceIterator it = db->get_wholespace_iterator(); auto dit = data.begin(); int r = it->seek_to_first(); ASSERT_EQ(r, 0); ASSERT_EQ(it->valid(), (dit != data.end())); while (dit != data.end()) { ASSERT_EQ(it->valid(), true); string prefix; string key; string key_minus_1; RocksDBStore::split_key(dit->first, &prefix, &key); //decrement key minimally key_minus_1 = key.substr(0, key.length() - 1) + std::string(1, key[key.length() - 1] - 1); 
KeyValueDB::WholeSpaceIterator it1 = db->get_wholespace_iterator(); ASSERT_EQ(it1->upper_bound(prefix, key_minus_1), 0); ASSERT_EQ(it1->valid(), true); auto raw_key = it1->raw_key(); ASSERT_EQ(raw_key.first, prefix); ASSERT_EQ(raw_key.second, key); if (verbose) std::cout << "upper_bound " << prefix << " " << key_minus_1 << std::endl; ASSERT_EQ(it->next(), 0); ++dit; } ASSERT_EQ(it->valid(), false); clear_db(); next(X); } while (!end(X)); } TEST_P(RocksDBShardingTest, wholespace_lookup_limits) { test_id X; zero(X); do { generate_data(X); data_to_db(); //lookup before first if (data.size() > 0) { auto dit = data.begin(); string prefix; string key; RocksDBStore::split_key(dit->first, &prefix, &key); KeyValueDB::WholeSpaceIterator it1 = db->get_wholespace_iterator(); ASSERT_EQ(it1->lower_bound(" ", " "), 0); ASSERT_EQ(it1->valid(), true); auto raw_key = it1->raw_key(); ASSERT_EQ(raw_key.first, prefix); ASSERT_EQ(raw_key.second, key); } //lookup after last KeyValueDB::WholeSpaceIterator it1 = db->get_wholespace_iterator(); ASSERT_EQ(it1->lower_bound("~", "~"), 0); ASSERT_EQ(it1->valid(), false); clear_db(); next(X); } while (!end(X)); } class RocksDBResharding : public ::testing::Test { public: boost::scoped_ptr<RocksDBStore> db; RocksDBResharding() : db(0) {} string _bl_to_str(bufferlist val) { string str(val.c_str(), val.length()); return str; } void rm_r(string path) { string cmd = string("rm -r ") + path; if (verbose) cout << "==> " << cmd << std::endl; int r = ::system(cmd.c_str()); if (r) { cerr << "failed with exit code " << r << ", continuing anyway" << std::endl; } } void SetUp() override { verbose = getenv("VERBOSE") && strcmp(getenv("VERBOSE"), "1") == 0; int r = ::mkdir("kv_test_temp_dir", 0777); if (r < 0 && errno != EEXIST) { r = -errno; cerr << __func__ << ": unable to create kv_test_temp_dir: " << cpp_strerror(r) << std::endl; return; } KeyValueDB* db_kv = KeyValueDB::create(g_ceph_context, "rocksdb", "kv_test_temp_dir"); RocksDBStore* db_rocks = 
dynamic_cast<RocksDBStore*>(db_kv); ceph_assert(db_rocks); db.reset(db_rocks); ASSERT_EQ(0, db->init(g_conf()->bluestore_rocksdb_options)); } void TearDown() override { db.reset(nullptr); rm_r("kv_test_temp_dir"); } bool verbose; std::vector<std::string> prefixes = {"Ad", "Betelgeuse", "C", "D", "Evade"}; std::vector<std::string> randoms = {"0", "1", "2", "3", "4", "5", "found", "brain", "fully", "pen", "worth", "race", "stand", "nodded", "whenever", "surrounded", "industrial", "skin", "this", "direction", "family", "beginning", "whenever", "held", "metal", "year", "like", "valuable", "softly", "whistle", "perfectly", "broken", "idea", "also", "coffee", "branch", "tongue", "immediately", "bent", "partly", "burn", "include", "certain", "burst", "final", "smoke", "positive", "perfectly" }; int R = randoms.size(); int k = 0; std::map<std::string, std::string> data; void generate_data() { data.clear(); for (size_t p = 0; p < prefixes.size(); p++) { size_t elem_count = 1 << (( p * 3 ) + 3); for (size_t i = 0; i < elem_count; i++) { std::string key; for (int x = 0; x < 5; x++) { key = key + randoms[rand() % R]; } std::string value; for (int x = 0; x < 3; x++) { value = value + randoms[rand() % R]; } data[RocksDBStore::combine_strings(prefixes[p], key)] = value; } } } void data_to_db() { KeyValueDB::Transaction t = db->get_transaction(); size_t i = 0; for (auto& d: data) { bufferlist v1; v1.append(d.second); string prefix; string key; RocksDBStore::split_key(d.first, &prefix, &key); t->set(prefix, key, v1); if (verbose) std::cout << "SET " << prefix << " " << key << std::endl; i++; if ((i % 1000) == 0) { ASSERT_EQ(db->submit_transaction_sync(t), 0); t.reset(); if (verbose) std::cout << "writing key to DB" << std::endl; t = db->get_transaction(); } } if (verbose) std::cout << "writing keys to DB" << std::endl; ASSERT_EQ(db->submit_transaction_sync(t), 0); } void clear_db() { KeyValueDB::Transaction t = db->get_transaction(); for (auto &d : data) { string prefix; string 
key; RocksDBStore::split_key(d.first, &prefix, &key); t->rmkey(prefix, key); } ASSERT_EQ(db->submit_transaction_sync(t), 0); //paranoid, check if db empty KeyValueDB::WholeSpaceIterator it = db->get_wholespace_iterator(); ASSERT_EQ(it->seek_to_first(), 0); ASSERT_EQ(it->valid(), false); } void check_db() { KeyValueDB::WholeSpaceIterator it = db->get_wholespace_iterator(); //move forward auto dit = data.begin(); int r = it->seek_to_first(); ASSERT_EQ(r, 0); ASSERT_EQ(it->valid(), (dit != data.end())); while (dit != data.end()) { ASSERT_EQ(it->valid(), true); string prefix; string key; RocksDBStore::split_key(dit->first, &prefix, &key); auto raw_key = it->raw_key(); ASSERT_EQ(raw_key.first, prefix); ASSERT_EQ(raw_key.second, key); ASSERT_EQ(it->value().to_str(), dit->second); if (verbose) std::cout << "next " << prefix << " " << key << std::endl; ASSERT_EQ(it->next(), 0); ++dit; } ASSERT_EQ(it->valid(), false); } }; TEST_F(RocksDBResharding, basic) { ASSERT_EQ(0, db->create_and_open(cout, "")); generate_data(); data_to_db(); check_db(); db->close(); ASSERT_EQ(db->reshard("Evade(4)"), 0); ASSERT_EQ(db->open(cout), 0); check_db(); db->close(); } TEST_F(RocksDBResharding, all_to_shards) { ASSERT_EQ(0, db->create_and_open(cout, "")); generate_data(); data_to_db(); check_db(); db->close(); ASSERT_EQ(db->reshard("Ad(1) Betelgeuse(1) C(1) D(1) Evade(1)"), 0); ASSERT_EQ(db->open(cout), 0); check_db(); db->close(); } TEST_F(RocksDBResharding, all_to_shards_and_back_again) { ASSERT_EQ(0, db->create_and_open(cout, "")); generate_data(); data_to_db(); check_db(); db->close(); ASSERT_EQ(db->reshard("Ad(1) Betelgeuse(1) C(1) D(1) Evade(1)"), 0); ASSERT_EQ(db->open(cout), 0); check_db(); db->close(); ASSERT_EQ(db->reshard(""), 0); ASSERT_EQ(db->open(cout), 0); check_db(); db->close(); } TEST_F(RocksDBResharding, resume_interrupted_at_batch) { ASSERT_EQ(0, db->create_and_open(cout, "")); generate_data(); data_to_db(); check_db(); db->close(); RocksDBStore::resharding_ctrl ctrl; 
ctrl.unittest_fail_after_first_batch = true; ASSERT_EQ(db->reshard("Evade(4)", &ctrl), -1000); ASSERT_NE(db->open(cout), 0); ASSERT_EQ(db->reshard("Evade(4)"), 0); ASSERT_EQ(db->open(cout), 0); check_db(); db->close(); } TEST_F(RocksDBResharding, resume_interrupted_at_column) { ASSERT_EQ(0, db->create_and_open(cout, "")); generate_data(); data_to_db(); check_db(); db->close(); RocksDBStore::resharding_ctrl ctrl; ctrl.unittest_fail_after_processing_column = true; ASSERT_EQ(db->reshard("Evade(4)", &ctrl), -1001); ASSERT_NE(db->open(cout), 0); ASSERT_EQ(db->reshard("Evade(4)"), 0); ASSERT_EQ(db->open(cout), 0); check_db(); db->close(); } TEST_F(RocksDBResharding, resume_interrupted_before_commit) { ASSERT_EQ(0, db->create_and_open(cout, "")); generate_data(); data_to_db(); check_db(); db->close(); RocksDBStore::resharding_ctrl ctrl; ctrl.unittest_fail_after_successful_processing = true; ASSERT_EQ(db->reshard("Evade(4)", &ctrl), -1002); ASSERT_NE(db->open(cout), 0); ASSERT_EQ(db->reshard("Evade(4)"), 0); ASSERT_EQ(db->open(cout), 0); check_db(); db->close(); } TEST_F(RocksDBResharding, prevent_incomplete_hash_change) { ASSERT_EQ(0, db->create_and_open(cout, "Evade(4,0-3)")); generate_data(); data_to_db(); check_db(); db->close(); RocksDBStore::resharding_ctrl ctrl; ctrl.unittest_fail_after_successful_processing = true; ASSERT_EQ(db->reshard("Evade(4,0-8)", &ctrl), -1002); ASSERT_NE(db->open(cout), 0); ASSERT_EQ(db->reshard("Evade(4,0-8)"), 0); ASSERT_EQ(db->open(cout), 0); check_db(); db->close(); } TEST_F(RocksDBResharding, change_reshard) { ASSERT_EQ(0, db->create_and_open(cout, "Ad(4)")); generate_data(); data_to_db(); check_db(); db->close(); { RocksDBStore::resharding_ctrl ctrl; ctrl.unittest_fail_after_first_batch = true; ASSERT_EQ(db->reshard("C(5) D(3)", &ctrl), -1000); } { RocksDBStore::resharding_ctrl ctrl; ASSERT_NE(db->open(cout), 0); ctrl.unittest_fail_after_first_batch = false; ctrl.unittest_fail_after_processing_column = true; ASSERT_EQ(db->reshard("C(5) 
Evade(2)", &ctrl), -1001); } { RocksDBStore::resharding_ctrl ctrl; ASSERT_NE(db->open(cout), 0); ctrl.unittest_fail_after_processing_column = false; ctrl.unittest_fail_after_successful_processing = true; ASSERT_EQ(db->reshard("Evade(2) D(3)", &ctrl), -1002); } { ASSERT_NE(db->open(cout), 0); ASSERT_EQ(db->reshard("Ad(1) Evade(5)"), 0); } { ASSERT_EQ(db->open(cout), 0); check_db(); db->close(); } } INSTANTIATE_TEST_SUITE_P( KeyValueDB, KVTest, ::testing::Values("rocksdb")); INSTANTIATE_TEST_SUITE_P( KeyValueDB, RocksDBShardingTest, ::testing::Values("Betelgeuse D", "Betelgeuse(3) D", "Betelgeuse D(3)", "Betelgeuse(3) D(3)")); int main(int argc, char **argv) { auto args = argv_to_vec(argc, argv); auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); common_init_finish(g_ceph_context); g_ceph_context->_conf.set_val( "enable_experimental_unrecoverable_data_corrupting_features", "rocksdb"); g_ceph_context->_conf.apply_changes(nullptr); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
36,536
26.997701
120
cc
null
ceph-main/src/test/objectstore/test_memstore_clone.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2015 Red Hat * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <boost/intrusive_ptr.hpp> #include "global/global_init.h" #include "common/ceph_argparse.h" #include "os/ObjectStore.h" #include <gtest/gtest.h> #include "include/ceph_assert.h" #include "common/errno.h" #include "store_test_fixture.h" #define dout_context g_ceph_context using namespace std; namespace { const coll_t cid; ghobject_t make_ghobject(const char *oid) { return ghobject_t{hobject_t{oid, "", CEPH_NOSNAP, 0, 0, ""}}; } } // anonymous namespace class MemStoreClone : public StoreTestFixture { public: MemStoreClone() : StoreTestFixture("memstore") {} void SetUp() override { StoreTestFixture::SetUp(); if (HasFailure()) { return; } ObjectStore::Transaction t; ch = store->create_new_collection(cid); t.create_collection(cid, 4); unsigned r = store->queue_transaction(ch, std::move(t)); if (r != 0) { derr << "failed to create collection with " << cpp_strerror(r) << dendl; } ASSERT_EQ(0U, r); } void TearDown() override { ch.reset(); StoreTestFixture::TearDown(); } }; // src 11[11 11 11 11]11 // dst 22 22 22 22 22 22 // res 22 11 11 11 11 22 TEST_F(MemStoreClone, CloneRangeAllocated) { ASSERT_TRUE(store); const auto src = make_ghobject("src1"); const auto dst = make_ghobject("dst1"); bufferlist srcbl, dstbl, result, expected; srcbl.append("111111111111"); dstbl.append("222222222222"); expected.append("221111111122"); ObjectStore::Transaction t; t.write(cid, src, 0, 12, srcbl); t.write(cid, dst, 0, 12, dstbl); t.clone_range(cid, src, dst, 2, 8, 2); ASSERT_EQ(0, store->queue_transaction(ch, std::move(t))); ASSERT_EQ(12, store->read(ch, dst, 0, 12, result)); ASSERT_EQ(expected, 
result); } // src __[__ __ __ __]__ 11 11 // dst 22 22 22 22 22 22 // res 22 00 00 00 00 22 TEST_F(MemStoreClone, CloneRangeHole) { ASSERT_TRUE(store); const auto src = make_ghobject("src2"); const auto dst = make_ghobject("dst2"); bufferlist srcbl, dstbl, result, expected; srcbl.append("1111"); dstbl.append("222222222222"); expected.append("22\000\000\000\000\000\000\000\00022", 12); ObjectStore::Transaction t; t.write(cid, src, 12, 4, srcbl); t.write(cid, dst, 0, 12, dstbl); t.clone_range(cid, src, dst, 2, 8, 2); ASSERT_EQ(0, store->queue_transaction(ch, std::move(t))); ASSERT_EQ(12, store->read(ch, dst, 0, 12, result)); ASSERT_EQ(expected, result); } // src __[__ __ __ 11]11 // dst 22 22 22 22 22 22 // res 22 00 00 00 11 22 TEST_F(MemStoreClone, CloneRangeHoleStart) { ASSERT_TRUE(store); const auto src = make_ghobject("src3"); const auto dst = make_ghobject("dst3"); bufferlist srcbl, dstbl, result, expected; srcbl.append("1111"); dstbl.append("222222222222"); expected.append("22\000\000\000\000\000\0001122", 12); ObjectStore::Transaction t; t.write(cid, src, 8, 4, srcbl); t.write(cid, dst, 0, 12, dstbl); t.clone_range(cid, src, dst, 2, 8, 2); ASSERT_EQ(0, store->queue_transaction(ch, std::move(t))); ASSERT_EQ(12, store->read(ch, dst, 0, 12, result)); ASSERT_EQ(expected, result); } // src 11[11 __ __ 11]11 // dst 22 22 22 22 22 22 // res 22 11 00 00 11 22 TEST_F(MemStoreClone, CloneRangeHoleMiddle) { ASSERT_TRUE(store); const auto src = make_ghobject("src4"); const auto dst = make_ghobject("dst4"); bufferlist srcbl, dstbl, result, expected; srcbl.append("1111"); dstbl.append("222222222222"); expected.append("2211\000\000\000\0001122", 12); ObjectStore::Transaction t; t.write(cid, src, 0, 4, srcbl); t.write(cid, src, 8, 4, srcbl); t.write(cid, dst, 0, 12, dstbl); t.clone_range(cid, src, dst, 2, 8, 2); ASSERT_EQ(0, store->queue_transaction(ch, std::move(t))); ASSERT_EQ(12, store->read(ch, dst, 0, 12, result)); ASSERT_EQ(expected, result); } // src 11[11 __ __ __]__ 
11 11 // dst 22 22 22 22 22 22 // res 22 11 00 00 00 22 TEST_F(MemStoreClone, CloneRangeHoleEnd) { ASSERT_TRUE(store); const auto src = make_ghobject("src5"); const auto dst = make_ghobject("dst5"); bufferlist srcbl, dstbl, result, expected; srcbl.append("1111"); dstbl.append("222222222222"); expected.append("2211\000\000\000\000\000\00022", 12); ObjectStore::Transaction t; t.write(cid, src, 0, 4, srcbl); t.write(cid, src, 12, 4, srcbl); t.write(cid, dst, 0, 12, dstbl); t.clone_range(cid, src, dst, 2, 8, 2); ASSERT_EQ(0, store->queue_transaction(ch, std::move(t))); ASSERT_EQ(12, store->read(ch, dst, 0, 12, result)); ASSERT_EQ(expected, result); } int main(int argc, char** argv) { // default to memstore map<string,string> defaults = { { "osd_objectstore", "memstore" }, { "osd_data", "msc.test_temp_dir" }, { "memstore_page_size", "4" } }; auto args = argv_to_vec(argc, argv); auto cct = global_init(&defaults, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); common_init_finish(g_ceph_context); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
5,408
25.64532
78
cc
null
ceph-main/src/test/objectstore/test_transaction.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2016 Casey Bodley <cbodley@redhat.com> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include "os/ObjectStore.h" #include <gtest/gtest.h> #include "common/Clock.h" #include "include/utime.h" #include <boost/tuple/tuple.hpp> using namespace std; TEST(Transaction, MoveConstruct) { auto a = ObjectStore::Transaction{}; a.nop(); ASSERT_FALSE(a.empty()); // move-construct in b auto b = std::move(a); ASSERT_TRUE(a.empty()); ASSERT_FALSE(b.empty()); } TEST(Transaction, MoveAssign) { auto a = ObjectStore::Transaction{}; a.nop(); ASSERT_FALSE(a.empty()); auto b = ObjectStore::Transaction{}; b = std::move(a); // move-assign to b ASSERT_TRUE(a.empty()); ASSERT_FALSE(b.empty()); } TEST(Transaction, CopyConstruct) { auto a = ObjectStore::Transaction{}; a.nop(); ASSERT_FALSE(a.empty()); auto b = a; // copy-construct in b ASSERT_FALSE(a.empty()); ASSERT_FALSE(b.empty()); } TEST(Transaction, CopyAssign) { auto a = ObjectStore::Transaction{}; a.nop(); ASSERT_FALSE(a.empty()); auto b = ObjectStore::Transaction{}; b = a; // copy-assign to b ASSERT_FALSE(a.empty()); ASSERT_FALSE(b.empty()); } TEST(Transaction, Swap) { auto a = ObjectStore::Transaction{}; a.nop(); ASSERT_FALSE(a.empty()); auto b = ObjectStore::Transaction{}; std::swap(a, b); // swap a and b ASSERT_TRUE(a.empty()); ASSERT_FALSE(b.empty()); } ObjectStore::Transaction generate_transaction() { auto a = ObjectStore::Transaction{}; a.nop(); coll_t cid; object_t obj("test_name"); snapid_t snap(0); hobject_t hoid(obj, "key", snap, 0, 0, "nspace"); ghobject_t oid(hoid); coll_t acid; object_t aobj("another_test_name"); snapid_t asnap(0); hobject_t ahoid(obj, "another_key", snap, 0, 0, "another_nspace"); 
ghobject_t aoid(hoid); std::set<string> keys; keys.insert("any_1"); keys.insert("any_2"); keys.insert("any_3"); bufferlist bl; bl.append_zero(4096); a.write(cid, oid, 1, 4096, bl, 0); a.omap_setkeys(acid, aoid, bl); a.omap_rmkeys(cid, aoid, keys); a.touch(acid, oid); return a; } TEST(Transaction, MoveRangesDelSrcObj) { auto t = ObjectStore::Transaction{}; t.nop(); coll_t c(spg_t(pg_t(1,2), shard_id_t::NO_SHARD)); ghobject_t o1(hobject_t("obj", "", 123, 456, -1, "")); ghobject_t o2(hobject_t("obj2", "", 123, 456, -1, "")); vector<std::pair<uint64_t, uint64_t>> move_info = { make_pair(1, 5), make_pair(10, 5) }; t.touch(c, o1); bufferlist bl; bl.append("some data"); t.write(c, o1, 1, bl.length(), bl); t.write(c, o1, 10, bl.length(), bl); t.clone(c, o1, o2); bl.append("some other data"); t.write(c, o2, 1, bl.length(), bl); } TEST(Transaction, GetNumBytes) { auto a = ObjectStore::Transaction{}; a.nop(); ASSERT_TRUE(a.get_encoded_bytes() == a.get_encoded_bytes_test()); coll_t cid; object_t obj("test_name"); snapid_t snap(0); hobject_t hoid(obj, "key", snap, 0, 0, "nspace"); ghobject_t oid(hoid); coll_t acid; object_t aobj("another_test_name"); snapid_t asnap(0); hobject_t ahoid(obj, "another_key", snap, 0, 0, "another_nspace"); ghobject_t aoid(hoid); std::set<string> keys; keys.insert("any_1"); keys.insert("any_2"); keys.insert("any_3"); bufferlist bl; bl.append_zero(4096); a.write(cid, oid, 1, 4096, bl, 0); ASSERT_TRUE(a.get_encoded_bytes() == a.get_encoded_bytes_test()); a.omap_setkeys(acid, aoid, bl); ASSERT_TRUE(a.get_encoded_bytes() == a.get_encoded_bytes_test()); a.omap_rmkeys(cid, aoid, keys); ASSERT_TRUE(a.get_encoded_bytes() == a.get_encoded_bytes_test()); a.touch(acid, oid); ASSERT_TRUE(a.get_encoded_bytes() == a.get_encoded_bytes_test()); } void bench_num_bytes(bool legacy) { const int max = 2500000; auto a = generate_transaction(); if (legacy) { cout << "get_encoded_bytes_test: "; } else { cout << "get_encoded_bytes: "; } utime_t start = ceph_clock_now(); if 
(legacy) { for (int i = 0; i < max; ++i) { a.get_encoded_bytes_test(); } } else { for (int i = 0; i < max; ++i) { a.get_encoded_bytes(); } } utime_t end = ceph_clock_now(); cout << max << " encodes in " << (end - start) << std::endl; } TEST(Transaction, GetNumBytesBenchLegacy) { bench_num_bytes(true); } TEST(Transaction, GetNumBytesBenchCurrent) { bench_num_bytes(false); }
4,692
20.726852
70
cc
null
ceph-main/src/test/old/test_disk_bw.cc
#include <sys/time.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <string.h> #include <stdlib.h> #include <stdio.h> #include <unistd.h> #include <errno.h> #include <sys/uio.h> #include "common/Clock.h" #include "common/safe_io.h" #include <iostream> using namespace std; int main(int argc, char **argv) { void *buf; int fd, count, loop = 0; if (argc != 4) { fprintf(stderr, "Usage: %s device bsize count\n", argv[0]); exit (0); } int bsize = atoi(argv[2]); count = atoi(argv[3]); posix_memalign(&buf, sysconf(_SC_PAGESIZE), bsize); //if ((fd = open(argv[1], O_SYNC|O_RDWR)) < 0) { if ((fd = open(argv[1], O_DIRECT|O_RDWR)) < 0) { fprintf(stderr, "Can't open device %s\n", argv[1]); exit (4); } utime_t start = ceph_clock_now(); while (loop++ < count) { int ret = safe_write(fd, buf, bsize); if (ret) ceph_abort(); //if ((loop % 100) == 0) //fprintf(stderr, "."); } ::fsync(fd); ::close(fd); utime_t end = ceph_clock_now(); end -= start; char hostname[80]; gethostname(hostname, 80); double mb = bsize*count/1024/1024; cout << hostname << "\t" << mb << " MB\t" << end << " seconds\t" << (mb / (double)end) << " MB/sec" << std::endl; }
1,280
19.333333
115
cc
null
ceph-main/src/test/old/test_setlayout.c
#define __USE_GNU 1 #include <fcntl.h> #include <netinet/in.h> #include <linux/types.h> #include "include/ceph_fs.h" #include <stdlib.h> #include <stdio.h> #include <string.h> #include "kernel/ioctl.h" main() { struct ceph_file_layout l; int fd = open("foo.txt", O_RDONLY); int r = ioctl(fd, CEPH_IOC_GET_LAYOUT, &l, sizeof(l)); printf("get = %d\n", r); l.fl_stripe_unit = 65536; l.fl_object_size = 65536; r = ioctl(fd, CEPH_IOC_SET_LAYOUT, &l, sizeof(l)); printf("set = %d\n", r); }
498
18.96
55
c
null
ceph-main/src/test/old/testfilepath.cc
#include "include/filepath.h" #include <iostream> using namespace std; int print(const string &s) { filepath fp = s; cout << "s = " << s << " filepath = " << fp << endl; cout << " depth " << fp.depth() << endl; for (int i=0; i<fp.depth(); i++) { cout << "\t" << i << " " << fp[i] << endl; } } int main() { filepath p; print("/home/sage"); print("a/b/c"); print("/a/b/c"); print("/a/b/c/"); print("/a/b/../d"); }
444
18.347826
56
cc
null
ceph-main/src/test/opensuse-13.2/install-deps.sh
../../../install-deps.sh
24
24
24
sh
null
ceph-main/src/test/osd/Object.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- #include "include/interval_set.h" #include "include/buffer.h" #include <list> #include <map> #include <set> #include <iostream> #include "Object.h" void ContDesc::encode(bufferlist &bl) const { ENCODE_START(1, 1, bl); encode(objnum, bl); encode(cursnap, bl); encode(seqnum, bl); encode(prefix, bl); encode(oid, bl); ENCODE_FINISH(bl); } void ContDesc::decode(bufferlist::const_iterator &bl) { DECODE_START(1, bl); decode(objnum, bl); decode(cursnap, bl); decode(seqnum, bl); decode(prefix, bl); decode(oid, bl); DECODE_FINISH(bl); } std::ostream &operator<<(std::ostream &out, const ContDesc &rhs) { return out << "(ObjNum " << rhs.objnum << " snap " << rhs.cursnap << " seq_num " << rhs.seqnum << ")"; } void AppendGenerator::get_ranges_map( const ContDesc &cont, std::map<uint64_t, uint64_t> &out) { RandWrap rand(cont.seqnum); uint64_t pos = off; uint64_t limit = off + get_append_size(cont); while (pos < limit) { uint64_t segment_length = round_up( rand() % (max_append_size - min_append_size), alignment) + min_append_size; ceph_assert(segment_length >= min_append_size); if (segment_length + pos > limit) { segment_length = limit - pos; } if (alignment) ceph_assert(segment_length % alignment == 0); out.insert(std::pair<uint64_t, uint64_t>(pos, segment_length)); pos += segment_length; } } void VarLenGenerator::get_ranges_map( const ContDesc &cont, std::map<uint64_t, uint64_t> &out) { RandWrap rand(cont.seqnum); uint64_t pos = 0; uint64_t limit = get_length(cont); bool include = false; while (pos < limit) { uint64_t segment_length = (rand() % (max_stride_size - min_stride_size)) + min_stride_size; ceph_assert(segment_length < max_stride_size); ceph_assert(segment_length >= min_stride_size); if (segment_length + pos > limit) { segment_length = limit - pos; } if (include) { out.insert(std::pair<uint64_t, uint64_t>(pos, segment_length)); include = false; } else { include = true; } pos += segment_length; } } 
void ObjectDesc::iterator::adjust_stack() { while (!stack.empty() && pos >= stack.top().second.next) { ceph_assert(pos == stack.top().second.next); size = stack.top().second.size; current = stack.top().first; stack.pop(); } if (stack.empty()) { cur_valid_till = std::numeric_limits<uint64_t>::max(); } else { cur_valid_till = stack.top().second.next; } while (current != layers.end() && !current->covers(pos)) { uint64_t next = current->next(pos); if (next < cur_valid_till) { stack.emplace(current, StackState{next, size}); cur_valid_till = next; } ++current; } if (current == layers.end()) { size = 0; } else { current->iter.seek(pos); size = std::min(size, current->get_size()); cur_valid_till = std::min( current->valid_till(pos), cur_valid_till); } } const ContDesc &ObjectDesc::most_recent() { return layers.begin()->second; } void ObjectDesc::update(ContentsGenerator *gen, const ContDesc &next) { layers.push_front(std::pair<std::shared_ptr<ContentsGenerator>, ContDesc>(std::shared_ptr<ContentsGenerator>(gen), next)); return; } bool ObjectDesc::check(bufferlist &to_check) { iterator objiter = begin(); uint64_t error_at = 0; if (!objiter.check_bl_advance(to_check, &error_at)) { std::cout << "incorrect buffer at pos " << error_at << std::endl; return false; } uint64_t size = layers.begin()->first->get_length(layers.begin()->second); if (to_check.length() < size) { std::cout << "only read " << to_check.length() << " out of size " << size << std::endl; return false; } return true; } bool ObjectDesc::check_sparse(const std::map<uint64_t, uint64_t>& extents, bufferlist &to_check) { uint64_t off = 0; uint64_t pos = 0; auto objiter = begin(); for (auto &&extiter : extents) { // verify hole { bufferlist bl; bl.append_zero(extiter.first - pos); uint64_t error_at = 0; if (!objiter.check_bl_advance(bl, &error_at)) { std::cout << "sparse read omitted non-zero data at " << error_at << std::endl; return false; } } ceph_assert(off <= to_check.length()); pos = extiter.first; 
objiter.seek(pos); { bufferlist bl; bl.substr_of( to_check, off, std::min(to_check.length() - off, extiter.second)); uint64_t error_at = 0; if (!objiter.check_bl_advance(bl, &error_at)) { std::cout << "incorrect buffer at pos " << error_at << std::endl; return false; } off += extiter.second; pos += extiter.second; } if (pos < extiter.first + extiter.second) { std::cout << "reached end of iterator first" << std::endl; return false; } } // final hole bufferlist bl; uint64_t size = layers.begin()->first->get_length(layers.begin()->second); bl.append_zero(size - pos); uint64_t error_at; if (!objiter.check_bl_advance(bl, &error_at)) { std::cout << "sparse read omitted non-zero data at " << error_at << std::endl; return false; } return true; }
5,268
25.21393
124
cc
null
ceph-main/src/test/osd/Object.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- #include "include/interval_set.h" #include "include/buffer.h" #include "include/encoding.h" #include <list> #include <map> #include <set> #include <stack> #include <random> #ifndef OBJECT_H #define OBJECT_H /// describes an object class ContDesc { public: int objnum; int cursnap; unsigned seqnum; std::string prefix; std::string oid; ContDesc() : objnum(0), cursnap(0), seqnum(0), prefix("") {} ContDesc(int objnum, int cursnap, unsigned seqnum, const std::string &prefix) : objnum(objnum), cursnap(cursnap), seqnum(seqnum), prefix(prefix) {} bool operator==(const ContDesc &rhs) { return (rhs.objnum == objnum && rhs.cursnap == cursnap && rhs.seqnum == seqnum && rhs.prefix == prefix && rhs.oid == oid); } bool operator<(const ContDesc &rhs) const { return seqnum < rhs.seqnum; } bool operator!=(const ContDesc &rhs) { return !((*this) == rhs); } void encode(bufferlist &bl) const; void decode(bufferlist::const_iterator &bp); }; WRITE_CLASS_ENCODER(ContDesc) std::ostream &operator<<(std::ostream &out, const ContDesc &rhs); class ChunkDesc { public: uint32_t offset; uint32_t length; std::string oid; }; class ContentsGenerator { public: class iterator_impl { public: virtual char operator*() = 0; virtual iterator_impl &operator++() = 0; virtual void seek(uint64_t pos) = 0; virtual bool end() = 0; virtual ContDesc get_cont() const = 0; virtual uint64_t get_pos() const = 0; virtual bufferlist gen_bl_advance(uint64_t s) { bufferptr ret = buffer::create(s); for (uint64_t i = 0; i < s; ++i, ++(*this)) { ret[i] = **this; } bufferlist _ret; _ret.push_back(ret); return _ret; } /// walk through given @c bl /// /// @param[out] off the offset of the first byte which does not match /// @returns true if @c bl matches with the content, false otherwise virtual bool check_bl_advance(bufferlist &bl, uint64_t *off = nullptr) { uint64_t _off = 0; for (bufferlist::iterator i = bl.begin(); !i.end(); ++i, ++_off, ++(*this)) { if 
(*i != **this) { if (off) *off = _off; return false; } } return true; } virtual ~iterator_impl() {}; }; class iterator { public: ContentsGenerator *parent; iterator_impl *impl; char operator *() { return **impl; } iterator &operator++() { ++(*impl); return *this; }; void seek(uint64_t pos) { impl->seek(pos); } bool end() { return impl->end(); } ~iterator() { parent->put_iterator_impl(impl); } iterator(const iterator &rhs) : parent(rhs.parent) { impl = parent->dup_iterator_impl(rhs.impl); } iterator &operator=(const iterator &rhs) { iterator new_iter(rhs); swap(new_iter); return *this; } void swap(iterator &other) { ContentsGenerator *otherparent = other.parent; other.parent = parent; parent = otherparent; iterator_impl *otherimpl = other.impl; other.impl = impl; impl = otherimpl; } bufferlist gen_bl_advance(uint64_t s) { return impl->gen_bl_advance(s); } bool check_bl_advance(bufferlist &bl, uint64_t *off = nullptr) { return impl->check_bl_advance(bl, off); } iterator(ContentsGenerator *parent, iterator_impl *impl) : parent(parent), impl(impl) {} }; virtual uint64_t get_length(const ContDesc &in) = 0; virtual void get_ranges_map( const ContDesc &cont, std::map<uint64_t, uint64_t> &out) = 0; void get_ranges(const ContDesc &cont, interval_set<uint64_t> &out) { std::map<uint64_t, uint64_t> ranges; get_ranges_map(cont, ranges); for (std::map<uint64_t, uint64_t>::iterator i = ranges.begin(); i != ranges.end(); ++i) { out.insert(i->first, i->second); } } virtual iterator_impl *get_iterator_impl(const ContDesc &in) = 0; virtual iterator_impl *dup_iterator_impl(const iterator_impl *in) = 0; virtual void put_iterator_impl(iterator_impl *in) = 0; virtual ~ContentsGenerator() {}; iterator get_iterator(const ContDesc &in) { return iterator(this, get_iterator_impl(in)); } }; class RandGenerator : public ContentsGenerator { public: typedef std::minstd_rand0 RandWrap; class iterator_impl : public ContentsGenerator::iterator_impl { public: uint64_t pos; ContDesc cont; RandWrap 
rand; RandGenerator *cont_gen; char current; iterator_impl(const ContDesc &cont, RandGenerator *cont_gen) : pos(0), cont(cont), rand(cont.seqnum), cont_gen(cont_gen) { current = rand(); } ContDesc get_cont() const override { return cont; } uint64_t get_pos() const override { return pos; } iterator_impl &operator++() override { pos++; current = rand(); return *this; } char operator*() override { return current; } void seek(uint64_t _pos) override { if (_pos < pos) { iterator_impl begin = iterator_impl(cont, cont_gen); begin.seek(_pos); *this = begin; } while (pos < _pos) { ++(*this); } } bool end() override { return pos >= cont_gen->get_length(cont); } }; ContentsGenerator::iterator_impl *get_iterator_impl(const ContDesc &in) override { RandGenerator::iterator_impl *i = new iterator_impl(in, this); return i; } void put_iterator_impl(ContentsGenerator::iterator_impl *in) override { delete in; } ContentsGenerator::iterator_impl *dup_iterator_impl( const ContentsGenerator::iterator_impl *in) override { ContentsGenerator::iterator_impl *retval = get_iterator_impl(in->get_cont()); retval->seek(in->get_pos()); return retval; } }; class VarLenGenerator : public RandGenerator { uint64_t max_length; uint64_t min_stride_size; uint64_t max_stride_size; public: VarLenGenerator( uint64_t length, uint64_t min_stride_size, uint64_t max_stride_size) : max_length(length), min_stride_size(min_stride_size), max_stride_size(max_stride_size) {} void get_ranges_map( const ContDesc &cont, std::map<uint64_t, uint64_t> &out) override; uint64_t get_length(const ContDesc &in) override { RandWrap rand(in.seqnum); if (max_length == 0) return 0; return (rand() % (max_length/2)) + ((max_length - 1)/2) + 1; } }; class AttrGenerator : public RandGenerator { uint64_t max_len; uint64_t big_max_len; public: AttrGenerator(uint64_t max_len, uint64_t big_max_len) : max_len(max_len), big_max_len(big_max_len) {} void get_ranges_map( const ContDesc &cont, std::map<uint64_t, uint64_t> &out) override { 
out.insert(std::pair<uint64_t, uint64_t>(0, get_length(cont))); } uint64_t get_length(const ContDesc &in) override { RandWrap rand(in.seqnum); // make some attrs big if (in.seqnum & 3) return (rand() % max_len); else return (rand() % big_max_len); } bufferlist gen_bl(const ContDesc &in) { bufferlist bl; for (iterator i = get_iterator(in); !i.end(); ++i) { bl.append(*i); } ceph_assert(bl.length() < big_max_len); return bl; } }; class AppendGenerator : public RandGenerator { uint64_t off; uint64_t alignment; uint64_t min_append_size; uint64_t max_append_size; uint64_t max_append_total; uint64_t round_up(uint64_t in, uint64_t by) { if (by) in += (by - (in % by)); return in; } public: AppendGenerator( uint64_t off, uint64_t alignment, uint64_t min_append_size, uint64_t _max_append_size, uint64_t max_append_multiple) : off(off), alignment(alignment), min_append_size(round_up(min_append_size, alignment)), max_append_size(round_up(_max_append_size, alignment)) { if (_max_append_size == min_append_size) max_append_size += alignment; max_append_total = max_append_multiple * max_append_size; } uint64_t get_append_size(const ContDesc &in) { RandWrap rand(in.seqnum); return round_up(rand() % max_append_total, alignment); } uint64_t get_length(const ContDesc &in) override { return off + get_append_size(in); } void get_ranges_map( const ContDesc &cont, std::map<uint64_t, uint64_t> &out) override; }; class ObjectDesc { public: ObjectDesc() : exists(false), dirty(false), version(0), flushed(false) {} ObjectDesc(const ContDesc &init, ContentsGenerator *cont_gen) : exists(false), dirty(false), version(0), flushed(false) { layers.push_front(std::pair<std::shared_ptr<ContentsGenerator>, ContDesc>(std::shared_ptr<ContentsGenerator>(cont_gen), init)); } class iterator { public: uint64_t pos; uint64_t size; uint64_t cur_valid_till; class ContState { interval_set<uint64_t> ranges; const uint64_t size; public: ContDesc cont; std::shared_ptr<ContentsGenerator> gen; 
ContentsGenerator::iterator iter; ContState( const ContDesc &_cont, std::shared_ptr<ContentsGenerator> _gen, ContentsGenerator::iterator _iter) : size(_gen->get_length(_cont)), cont(_cont), gen(_gen), iter(_iter) { gen->get_ranges(cont, ranges); } const interval_set<uint64_t> &get_ranges() { return ranges; } uint64_t get_size() { return gen->get_length(cont); } bool covers(uint64_t pos) { return ranges.contains(pos) || (!ranges.starts_after(pos) && pos >= size); } uint64_t next(uint64_t pos) { ceph_assert(!covers(pos)); return ranges.starts_after(pos) ? ranges.start_after(pos) : size; } uint64_t valid_till(uint64_t pos) { ceph_assert(covers(pos)); return ranges.contains(pos) ? ranges.end_after(pos) : std::numeric_limits<uint64_t>::max(); } }; // from latest to earliest using layers_t = std::vector<ContState>; layers_t layers; struct StackState { const uint64_t next; const uint64_t size; }; std::stack<std::pair<layers_t::iterator, StackState> > stack; layers_t::iterator current; explicit iterator(ObjectDesc &obj) : pos(0), size(obj.layers.begin()->first->get_length(obj.layers.begin()->second)), cur_valid_till(0) { for (auto &&i : obj.layers) { layers.push_back({i.second, i.first, i.first->get_iterator(i.second)}); } current = layers.begin(); adjust_stack(); } void adjust_stack(); iterator &operator++() { ceph_assert(cur_valid_till >= pos); ++pos; if (pos >= cur_valid_till) { adjust_stack(); } return *this; } char operator*() { if (current == layers.end()) { return '\0'; } else { return pos >= size ? 
'\0' : *(current->iter); } } bool end() { return pos >= size; } // advance @c pos to given position void seek(uint64_t _pos) { if (_pos < pos) { ceph_abort(); } while (pos < _pos) { ceph_assert(cur_valid_till >= pos); uint64_t next = std::min(_pos - pos, cur_valid_till - pos); pos += next; if (pos >= cur_valid_till) { ceph_assert(pos == cur_valid_till); adjust_stack(); } } ceph_assert(pos == _pos); } // grab the bytes in the range of [pos, pos+s), and advance @c pos // // @returns the bytes in the specified range bufferlist gen_bl_advance(uint64_t s) { bufferlist ret; while (s > 0) { ceph_assert(cur_valid_till >= pos); uint64_t next = std::min(s, cur_valid_till - pos); if (current != layers.end() && pos < size) { ret.append(current->iter.gen_bl_advance(next)); } else { ret.append_zero(next); } pos += next; ceph_assert(next <= s); s -= next; if (pos >= cur_valid_till) { ceph_assert(cur_valid_till == pos); adjust_stack(); } } return ret; } // compare the range of [pos, pos+bl.length()) with given @c bl, and // advance @pos if all bytes in the range match // // @param error_at the offset of the first byte which does not match // @returns true if all bytes match, false otherwise bool check_bl_advance(bufferlist &bl, uint64_t *error_at = nullptr) { uint64_t off = 0; while (off < bl.length()) { ceph_assert(cur_valid_till >= pos); uint64_t next = std::min(bl.length() - off, cur_valid_till - pos); bufferlist to_check; to_check.substr_of(bl, off, next); if (current != layers.end() && pos < size) { if (!current->iter.check_bl_advance(to_check, error_at)) { if (error_at) *error_at += off; return false; } } else { uint64_t at = pos; for (auto i = to_check.begin(); !i.end(); ++i, ++at) { if (*i) { if (error_at) *error_at = at; return false; } } } pos += next; off += next; ceph_assert(off <= bl.length()); if (pos >= cur_valid_till) { ceph_assert(cur_valid_till == pos); adjust_stack(); } } ceph_assert(off == bl.length()); return true; } }; iterator begin() { return 
iterator(*this); } bool deleted() { return !exists; } bool has_contents() { return layers.size(); } // takes ownership of gen void update(ContentsGenerator *gen, const ContDesc &next); bool check(bufferlist &to_check); bool check_sparse(const std::map<uint64_t, uint64_t>& extends, bufferlist &to_check); const ContDesc &most_recent(); ContentsGenerator *most_recent_gen() { return layers.begin()->first.get(); } std::map<std::string, ContDesc> attrs; // Both omap and xattrs bufferlist header; bool exists; bool dirty; uint64_t version; std::string redirect_target; std::map<uint64_t, ChunkDesc> chunk_info; bool flushed; private: std::list<std::pair<std::shared_ptr<ContentsGenerator>, ContDesc> > layers; }; #endif
13,817
24.54159
131
h
null
ceph-main/src/test/osd/RadosModel.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "include/interval_set.h" #include "include/buffer.h" #include <list> #include <map> #include <set> #include "include/rados/librados.h" #include "RadosModel.h" #include "TestOpStat.h" void TestOp::begin() { _begin(); } void TestOp::finish(TestOp::CallbackInfo *info) { _finish(info); } void read_callback(librados::completion_t comp, void *arg) { TestOp* op = static_cast<TestOp*>(arg); op->finish(NULL); } void write_callback(librados::completion_t comp, void *arg) { std::pair<TestOp*, TestOp::CallbackInfo*> *args = static_cast<std::pair<TestOp*, TestOp::CallbackInfo*> *>(arg); TestOp* op = args->first; TestOp::CallbackInfo *info = args->second; op->finish(info); delete args; delete info; }
833
21.540541
71
cc
null
ceph-main/src/test/osd/RadosModel.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "include/int_types.h" #include "common/ceph_mutex.h" #include "include/rados/librados.hpp" #include <iostream> #include <iterator> #include <sstream> #include <map> #include <set> #include <list> #include <string> #include <string.h> #include <stdlib.h> #include <errno.h> #include <time.h> #include "Object.h" #include "TestOpStat.h" #include "test/librados/test.h" #include "common/sharedptr_registry.hpp" #include "common/errno.h" #include "osd/HitSet.h" #include "common/ceph_crypto.h" #include "cls/cas/cls_cas_client.h" #include "cls/cas/cls_cas_internal.h" #ifndef RADOSMODEL_H #define RADOSMODEL_H class RadosTestContext; class TestOpStat; template <typename T> typename T::iterator rand_choose(T &cont) { if (std::empty(cont)) { return std::end(cont); } return std::next(std::begin(cont), rand() % cont.size()); } enum TestOpType { TEST_OP_READ, TEST_OP_WRITE, TEST_OP_WRITE_EXCL, TEST_OP_WRITESAME, TEST_OP_DELETE, TEST_OP_SNAP_CREATE, TEST_OP_SNAP_REMOVE, TEST_OP_ROLLBACK, TEST_OP_SETATTR, TEST_OP_RMATTR, TEST_OP_WATCH, TEST_OP_COPY_FROM, TEST_OP_HIT_SET_LIST, TEST_OP_UNDIRTY, TEST_OP_IS_DIRTY, TEST_OP_CACHE_FLUSH, TEST_OP_CACHE_TRY_FLUSH, TEST_OP_CACHE_EVICT, TEST_OP_APPEND, TEST_OP_APPEND_EXCL, TEST_OP_SET_REDIRECT, TEST_OP_UNSET_REDIRECT, TEST_OP_CHUNK_READ, TEST_OP_TIER_PROMOTE, TEST_OP_TIER_FLUSH, TEST_OP_SET_CHUNK, TEST_OP_TIER_EVICT }; class TestWatchContext : public librados::WatchCtx2 { TestWatchContext(const TestWatchContext&); public: ceph::condition_variable cond; uint64_t handle = 0; bool waiting = false; ceph::mutex lock = ceph::make_mutex("watch lock"); TestWatchContext() = default; void handle_notify(uint64_t notify_id, uint64_t cookie, uint64_t notifier_id, bufferlist &bl) override { std::lock_guard l{lock}; waiting = false; cond.notify_all(); } void handle_error(uint64_t cookie, int err) override { std::lock_guard l{lock}; std::cout << "watch 
handle_error " << err << std::endl; } void start() { std::lock_guard l{lock}; waiting = true; } void wait() { std::unique_lock l{lock}; cond.wait(l, [this] { return !waiting; }); } uint64_t &get_handle() { return handle; } }; class TestOp { public: const int num; RadosTestContext *context; TestOpStat *stat; bool done = false; TestOp(int n, RadosTestContext *context, TestOpStat *stat = 0) : num(n), context(context), stat(stat) {} virtual ~TestOp() {}; /** * This struct holds data to be passed by a callback * to a TestOp::finish method. */ struct CallbackInfo { uint64_t id; explicit CallbackInfo(uint64_t id) : id(id) {} virtual ~CallbackInfo() {}; }; virtual void _begin() = 0; /** * Called when the operation completes. * This should be overridden by asynchronous operations. * * @param info information stored by a callback, or NULL - * useful for multi-operation TestOps */ virtual void _finish(CallbackInfo *info) { return; } virtual std::string getType() = 0; virtual bool finished() { return true; } void begin(); void finish(CallbackInfo *info); virtual bool must_quiesce_other_ops() { return false; } }; class TestOpGenerator { public: virtual ~TestOpGenerator() {}; virtual TestOp *next(RadosTestContext &context) = 0; }; class RadosTestContext { public: ceph::mutex state_lock = ceph::make_mutex("Context Lock"); ceph::condition_variable wait_cond; // snap => {oid => desc} std::map<int, std::map<std::string,ObjectDesc> > pool_obj_cont; std::set<std::string> oid_in_use; std::set<std::string> oid_not_in_use; std::set<std::string> oid_flushing; std::set<std::string> oid_not_flushing; std::set<std::string> oid_redirect_not_in_use; std::set<std::string> oid_redirect_in_use; std::set<std::string> oid_set_chunk_tgt_pool; SharedPtrRegistry<int, int> snaps_in_use; int current_snap; std::string pool_name; librados::IoCtx io_ctx; librados::Rados rados; int next_oid; std::string prefix; int errors; int max_in_flight; int seq_num; std::map<int,uint64_t> snaps; uint64_t seq; const 
char *rados_id; bool initialized; std::map<std::string, TestWatchContext*> watches; const uint64_t max_size; const uint64_t min_stride_size; const uint64_t max_stride_size; AttrGenerator attr_gen; const bool no_omap; const bool no_sparse; bool pool_snaps; bool write_fadvise_dontneed; std::string low_tier_pool_name; librados::IoCtx low_tier_io_ctx; int snapname_num; std::map<std::string, std::string> redirect_objs; bool enable_dedup; std::string chunk_algo; std::string chunk_size; RadosTestContext(const std::string &pool_name, int max_in_flight, uint64_t max_size, uint64_t min_stride_size, uint64_t max_stride_size, bool no_omap, bool no_sparse, bool pool_snaps, bool write_fadvise_dontneed, const std::string &low_tier_pool_name, bool enable_dedup, std::string chunk_algo, std::string chunk_size, const char *id = 0) : pool_obj_cont(), current_snap(0), pool_name(pool_name), next_oid(0), errors(0), max_in_flight(max_in_flight), seq_num(0), seq(0), rados_id(id), initialized(false), max_size(max_size), min_stride_size(min_stride_size), max_stride_size(max_stride_size), attr_gen(2000, 20000), no_omap(no_omap), no_sparse(no_sparse), pool_snaps(pool_snaps), write_fadvise_dontneed(write_fadvise_dontneed), low_tier_pool_name(low_tier_pool_name), snapname_num(0), enable_dedup(enable_dedup), chunk_algo(chunk_algo), chunk_size(chunk_size) { } int init() { int r = rados.init(rados_id); if (r < 0) return r; r = rados.conf_read_file(NULL); if (r < 0) return r; r = rados.conf_parse_env(NULL); if (r < 0) return r; r = rados.connect(); if (r < 0) return r; r = rados.ioctx_create(pool_name.c_str(), io_ctx); if (r < 0) { rados.shutdown(); return r; } if (!low_tier_pool_name.empty()) { r = rados.ioctx_create(low_tier_pool_name.c_str(), low_tier_io_ctx); if (r < 0) { rados.shutdown(); return r; } } bufferlist inbl; r = rados.mon_command( "{\"prefix\": \"osd pool set\", \"pool\": \"" + pool_name + "\", \"var\": \"write_fadvise_dontneed\", \"val\": \"" + (write_fadvise_dontneed ? 
"true" : "false") + "\"}", inbl, NULL, NULL); if (r < 0) { rados.shutdown(); return r; } if (enable_dedup) { r = rados.mon_command( "{\"prefix\": \"osd pool set\", \"pool\": \"" + pool_name + "\", \"var\": \"fingerprint_algorithm\", \"val\": \"" + "sha256" + "\"}", inbl, NULL, NULL); if (r < 0) { rados.shutdown(); return r; } r = rados.mon_command( "{\"prefix\": \"osd pool set\", \"pool\": \"" + pool_name + "\", \"var\": \"dedup_tier\", \"val\": \"" + low_tier_pool_name + "\"}", inbl, NULL, NULL); if (r < 0) { rados.shutdown(); return r; } r = rados.mon_command( "{\"prefix\": \"osd pool set\", \"pool\": \"" + pool_name + "\", \"var\": \"dedup_chunk_algorithm\", \"val\": \"" + chunk_algo + "\"}", inbl, NULL, NULL); if (r < 0) { rados.shutdown(); return r; } r = rados.mon_command( "{\"prefix\": \"osd pool set\", \"pool\": \"" + pool_name + "\", \"var\": \"dedup_cdc_chunk_size\", \"val\": \"" + chunk_size + "\"}", inbl, NULL, NULL); if (r < 0) { rados.shutdown(); return r; } } char hostname_cstr[100]; gethostname(hostname_cstr, 100); std::stringstream hostpid; hostpid << hostname_cstr << getpid() << "-"; prefix = hostpid.str(); ceph_assert(!initialized); initialized = true; return 0; } void shutdown() { if (initialized) { rados.shutdown(); } } void loop(TestOpGenerator *gen) { ceph_assert(initialized); std::list<TestOp*> inflight; std::unique_lock state_locker{state_lock}; TestOp *next = gen->next(*this); TestOp *waiting = NULL; while (next || !inflight.empty()) { if (next && next->must_quiesce_other_ops() && !inflight.empty()) { waiting = next; next = NULL; // Force to wait for inflight to drain } if (next) { inflight.push_back(next); } state_lock.unlock(); if (next) { (*inflight.rbegin())->begin(); } state_lock.lock(); while (1) { for (auto i = inflight.begin(); i != inflight.end();) { if ((*i)->finished()) { std::cout << (*i)->num << ": done (" << (inflight.size()-1) << " left)" << std::endl; delete *i; inflight.erase(i++); } else { ++i; } } if (inflight.size() >= 
(unsigned) max_in_flight || (!next && !inflight.empty())) { std::cout << " waiting on " << inflight.size() << std::endl; wait_cond.wait(state_locker); } else { break; } } if (waiting) { next = waiting; waiting = NULL; } else { next = gen->next(*this); } } } void kick() { wait_cond.notify_all(); } TestWatchContext *get_watch_context(const std::string &oid) { return watches.count(oid) ? watches[oid] : 0; } TestWatchContext *watch(const std::string &oid) { ceph_assert(!watches.count(oid)); return (watches[oid] = new TestWatchContext); } void unwatch(const std::string &oid) { ceph_assert(watches.count(oid)); delete watches[oid]; watches.erase(oid); } ObjectDesc get_most_recent(const std::string &oid) { ObjectDesc new_obj; for (auto i = pool_obj_cont.rbegin(); i != pool_obj_cont.rend(); ++i) { std::map<std::string,ObjectDesc>::iterator j = i->second.find(oid); if (j != i->second.end()) { new_obj = j->second; break; } } return new_obj; } void rm_object_attrs(const std::string &oid, const std::set<std::string> &attrs) { ObjectDesc new_obj = get_most_recent(oid); for (std::set<std::string>::const_iterator i = attrs.begin(); i != attrs.end(); ++i) { new_obj.attrs.erase(*i); } new_obj.dirty = true; new_obj.flushed = false; pool_obj_cont[current_snap].insert_or_assign(oid, new_obj); } void remove_object_header(const std::string &oid) { ObjectDesc new_obj = get_most_recent(oid); new_obj.header = bufferlist(); new_obj.dirty = true; new_obj.flushed = false; pool_obj_cont[current_snap].insert_or_assign(oid, new_obj); } void update_object_header(const std::string &oid, const bufferlist &bl) { ObjectDesc new_obj = get_most_recent(oid); new_obj.header = bl; new_obj.exists = true; new_obj.dirty = true; new_obj.flushed = false; pool_obj_cont[current_snap].insert_or_assign(oid, new_obj); } void update_object_attrs(const std::string &oid, const std::map<std::string, ContDesc> &attrs) { ObjectDesc new_obj = get_most_recent(oid); for (auto i = attrs.cbegin(); i != attrs.cend(); ++i) { 
      new_obj.attrs[i->first] = i->second;
    }
    new_obj.exists = true;
    new_obj.dirty = true;
    new_obj.flushed = false;
    pool_obj_cont[current_snap].insert_or_assign(oid, new_obj);
  }

  // Record in the model that `oid` was (over)written with `contents`
  // produced by `cont_gen`; marks the object existing, dirty, unflushed.
  void update_object(ContentsGenerator *cont_gen,
		     const std::string &oid, const ContDesc &contents) {
    ObjectDesc new_obj = get_most_recent(oid);
    new_obj.exists = true;
    new_obj.dirty = true;
    new_obj.flushed = false;
    new_obj.update(cont_gen,
		   contents);
    pool_obj_cont[current_snap].insert_or_assign(oid, new_obj);
  }

  // Replace the model entry for `oid` wholesale (used after copy-like ops)
  // and mark it dirty.
  void update_object_full(const std::string &oid,
			  const ObjectDesc &contents) {
    pool_obj_cont[current_snap].insert_or_assign(oid, contents);
    pool_obj_cont[current_snap][oid].dirty = true;
  }

  // Model the effect of an UNDIRTY op: most recent state, dirty flag cleared.
  void update_object_undirty(const std::string &oid) {
    ObjectDesc new_obj = get_most_recent(oid);
    new_obj.dirty = false;
    pool_obj_cont[current_snap].insert_or_assign(oid, new_obj);
  }

  // Store the cluster-reported version for the newest model entry of `oid`
  // at or before `snap` (-1 = newest overall).  version == 0 leaves the
  // recorded version untouched but still logs the entry.
  void update_object_version(const std::string &oid,
			     uint64_t version,
			     int snap = -1) {
    for (auto i = pool_obj_cont.rbegin();
	 i != pool_obj_cont.rend();
	 ++i) {
      if (snap != -1 && snap < i->first)
	continue;
      std::map<std::string,ObjectDesc>::iterator j = i->second.find(oid);
      if (j != i->second.end()) {
	if (version)
	  j->second.version = version;
	std::cout << __func__ << " oid " << oid
		  << " v " << version << " " << j->second.most_recent()
		  << " " << (j->second.dirty ? "dirty" : "clean")
		  << " " << (j->second.exists ? "exists" : "dne")
		  << std::endl;
	break;
      }
    }
  }

  // Model a delete: insert a default (non-existent) ObjectDesc at the
  // current snap.  Caller must not hold a watch on the object.
  void remove_object(const std::string &oid) {
    ceph_assert(!get_watch_context(oid));
    ObjectDesc new_obj;
    pool_obj_cont[current_snap].insert_or_assign(oid, new_obj);
  }

  // Find the newest model entry for `oid` at or before `snap`
  // (-1 = newest overall).  Returns false if the oid was never recorded.
  // NOTE: an entry may describe a deleted object; check contents->exists.
  bool find_object(const std::string &oid,
		   ObjectDesc *contents,
		   int snap = -1) const {
    for (auto i = pool_obj_cont.crbegin();
	 i != pool_obj_cont.crend();
	 ++i) {
      if (snap != -1 && snap < i->first)
	continue;
      if (i->second.count(oid) != 0) {
	*contents = i->second.find(oid)->second;
	return true;
      }
    }
    return false;
  }

  void update_object_redirect_target(const std::string &oid, const std::string &target) {
    redirect_objs[oid] = target;
  }

  // Record that the chunk at `offset` of `oid` now points at `info`
  // (manifest/dedup bookkeeping); no-op if the oid has no model entry.
  void update_object_chunk_target(const std::string &oid, uint64_t offset, const ChunkDesc &info) {
    for (auto i = pool_obj_cont.crbegin();
	 i != pool_obj_cont.crend();
	 ++i) {
      if (i->second.count(oid) != 0) {
	ObjectDesc obj_desc = i->second.find(oid)->second;
	obj_desc.chunk_info[offset] = info;
	update_object_full(oid, obj_desc);
	return ;
      }
    }
    return;
  }

  bool object_existed_at(const std::string &oid, int snap = -1) const {
    ObjectDesc contents;
    bool found = find_object(oid, &contents, snap);
    return found && contents.exists;
  }

  // Drop snapshot `snap` from the model: objects only recorded at that
  // snap are folded forward into the next snap's map so history is kept.
  void remove_snap(int snap) {
    std::map<int, std::map<std::string,ObjectDesc> >::iterator next_iter =
      pool_obj_cont.find(snap);
    ceph_assert(next_iter != pool_obj_cont.end());
    std::map<int, std::map<std::string,ObjectDesc> >::iterator current_iter = next_iter++;
    ceph_assert(current_iter != pool_obj_cont.end());
    std::map<std::string,ObjectDesc> &current = current_iter->second;
    std::map<std::string,ObjectDesc> &next = next_iter->second;
    for (auto i = current.begin();
	 i != current.end();
	 ++i) {
      if (next.count(i->first) == 0) {
	next.insert(std::pair<std::string,ObjectDesc>(i->first, i->second));
      }
    }
    pool_obj_cont.erase(current_iter);
    snaps.erase(snap);
  }

  // Record a new snapshot: maps the current model snap id to the rados
  // snap id and opens a fresh (empty) layer for subsequent updates.
  void add_snap(uint64_t snap) {
    snaps[current_snap] = snap;
    current_snap++;
    pool_obj_cont[current_snap];
    seq = snap;
  }

  void roll_back(const std::string &oid, int snap) {
ceph_assert(!get_watch_context(oid)); ObjectDesc contents; find_object(oid, &contents, snap); contents.dirty = true; contents.flushed = false; pool_obj_cont.rbegin()->second.insert_or_assign(oid, contents); } void update_object_tier_flushed(const std::string &oid, int snap) { for (auto i = pool_obj_cont.rbegin(); i != pool_obj_cont.rend(); ++i) { if (snap != -1 && snap < i->first) continue; std::map<std::string,ObjectDesc>::iterator j = i->second.find(oid); if (j != i->second.end()) { j->second.flushed = true; break; } } } bool check_oldest_snap_flushed(const std::string &oid, int snap) { for (auto i = pool_obj_cont.rbegin(); i != pool_obj_cont.rend(); ++i) { if (snap != -1 && snap < i->first) continue; std::map<std::string,ObjectDesc>::iterator j = i->second.find(oid); if (j != i->second.end() && !j->second.flushed) { std::cout << __func__ << " oid " << oid << " v " << j->second.version << " " << j->second.most_recent() << " " << (j->second.flushed ? "flushed" : "unflushed") << " " << i->first << std::endl; return false; } } return true; } bool check_chunks_refcount(librados::IoCtx &chunk_pool_ctx, librados::IoCtx &manifest_pool_ctx) { librados::ObjectCursor shard_start; librados::ObjectCursor shard_end; librados::ObjectCursor begin; librados::ObjectCursor end; begin = chunk_pool_ctx.object_list_begin(); end = chunk_pool_ctx.object_list_end(); chunk_pool_ctx.object_list_slice( begin, end, 1, 1, &shard_start, &shard_end); librados::ObjectCursor c(shard_start); while(c < shard_end) { std::vector<librados::ObjectItem> result; int r = chunk_pool_ctx.object_list(c, shard_end, 12, {}, &result, &c); if (r < 0) { std::cerr << "error object_list : " << cpp_strerror(r) << std::endl; return false; } for (const auto & i : result) { auto oid = i.oid; chunk_refs_t refs; { bufferlist t; r = chunk_pool_ctx.getxattr(oid, CHUNK_REFCOUNT_ATTR, t); if (r < 0) { continue; } auto p = t.cbegin(); decode(refs, p); } ceph_assert(refs.get_type() == chunk_refs_t::TYPE_BY_OBJECT); 
chunk_refs_by_object_t *byo = static_cast<chunk_refs_by_object_t*>(refs.r.get()); for (auto& pp : byo->by_object) { int src_refcount = 0; int dst_refcount = byo->by_object.count(pp); for (int tries = 0; tries < 10; tries++) { r = cls_cas_references_chunk(manifest_pool_ctx, pp.oid.name, oid); if (r == -ENOENT || r == -ENOLINK) { src_refcount = 0; } else if (r == -EBUSY) { sleep(10); continue; } else { src_refcount = r; } break; } if (src_refcount > dst_refcount) { std::cerr << " src_object " << pp << ": src_refcount " << src_refcount << ", dst_object " << oid << ": dst_refcount " << dst_refcount << std::endl; return false; } } } } return true; } }; void read_callback(librados::completion_t comp, void *arg); void write_callback(librados::completion_t comp, void *arg); /// remove random xattrs from given object, and optionally remove omap /// entries if @c no_omap is not specified in context class RemoveAttrsOp : public TestOp { public: std::string oid; librados::ObjectWriteOperation op; librados::AioCompletion *comp; RemoveAttrsOp(int n, RadosTestContext *context, const std::string &oid, TestOpStat *stat) : TestOp(n, context, stat), oid(oid), comp(NULL) {} void _begin() override { ContDesc cont; std::set<std::string> to_remove; { std::lock_guard l{context->state_lock}; ObjectDesc obj; if (!context->find_object(oid, &obj)) { context->kick(); done = true; return; } cont = ContDesc(context->seq_num, context->current_snap, context->seq_num, ""); context->oid_in_use.insert(oid); context->oid_not_in_use.erase(oid); if (rand() % 30) { ContentsGenerator::iterator iter = context->attr_gen.get_iterator(cont); for (auto i = obj.attrs.begin(); i != obj.attrs.end(); ++i, ++iter) { if (!(*iter % 3)) { to_remove.insert(i->first); op.rmxattr(i->first.c_str()); } } if (to_remove.empty()) { context->kick(); context->oid_in_use.erase(oid); context->oid_not_in_use.insert(oid); done = true; return; } if (!context->no_omap) { op.omap_rm_keys(to_remove); } } else { if (!context->no_omap) { 
op.omap_clear(); } for (auto i = obj.attrs.begin(); i != obj.attrs.end(); ++i) { op.rmxattr(i->first.c_str()); to_remove.insert(i->first); } context->remove_object_header(oid); } context->rm_object_attrs(oid, to_remove); } std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg = new std::pair<TestOp*, TestOp::CallbackInfo*>(this, new TestOp::CallbackInfo(0)); comp = context->rados.aio_create_completion((void*) cb_arg, &write_callback); context->io_ctx.aio_operate(context->prefix+oid, comp, &op); } void _finish(CallbackInfo *info) override { std::lock_guard l{context->state_lock}; done = true; context->update_object_version(oid, comp->get_version64()); context->oid_in_use.erase(oid); context->oid_not_in_use.insert(oid); context->kick(); } bool finished() override { return done; } std::string getType() override { return "RemoveAttrsOp"; } }; /// add random xattrs to given object, and optionally add omap /// entries if @c no_omap is not specified in context class SetAttrsOp : public TestOp { public: std::string oid; librados::ObjectWriteOperation op; librados::AioCompletion *comp; SetAttrsOp(int n, RadosTestContext *context, const std::string &oid, TestOpStat *stat) : TestOp(n, context, stat), oid(oid), comp(NULL) {} void _begin() override { ContDesc cont; { std::lock_guard l{context->state_lock}; cont = ContDesc(context->seq_num, context->current_snap, context->seq_num, ""); context->oid_in_use.insert(oid); context->oid_not_in_use.erase(oid); } std::map<std::string, bufferlist> omap_contents; std::map<std::string, ContDesc> omap; bufferlist header; ContentsGenerator::iterator keygen = context->attr_gen.get_iterator(cont); op.create(false); while (!*keygen) ++keygen; while (*keygen) { if (*keygen != '_') header.append(*keygen); ++keygen; } for (int i = 0; i < 20; ++i) { std::string key; while (!*keygen) ++keygen; while (*keygen && key.size() < 40) { key.push_back((*keygen % 20) + 'a'); ++keygen; } ContDesc val(cont); val.seqnum += (unsigned)(*keygen); val.prefix = ("oid: " 
+ oid); omap[key] = val; bufferlist val_buffer = context->attr_gen.gen_bl(val); omap_contents[key] = val_buffer; op.setxattr(key.c_str(), val_buffer); } if (!context->no_omap) { op.omap_set_header(header); op.omap_set(omap_contents); } { std::lock_guard l{context->state_lock}; context->update_object_header(oid, header); context->update_object_attrs(oid, omap); } std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg = new std::pair<TestOp*, TestOp::CallbackInfo*>(this, new TestOp::CallbackInfo(0)); comp = context->rados.aio_create_completion((void*) cb_arg, &write_callback); context->io_ctx.aio_operate(context->prefix+oid, comp, &op); } void _finish(CallbackInfo *info) override { std::lock_guard l{context->state_lock}; int r; if ((r = comp->get_return_value())) { std::cerr << "err " << r << std::endl; ceph_abort(); } done = true; context->update_object_version(oid, comp->get_version64()); context->oid_in_use.erase(oid); context->oid_not_in_use.insert(oid); context->kick(); } bool finished() override { return done; } std::string getType() override { return "SetAttrsOp"; } }; class WriteOp : public TestOp { public: const std::string oid; ContDesc cont; std::set<librados::AioCompletion *> waiting; librados::AioCompletion *rcompletion = nullptr; // numbers of async ops submitted uint64_t waiting_on = 0; uint64_t last_acked_tid = 0; librados::ObjectReadOperation read_op; librados::ObjectWriteOperation write_op; bufferlist rbuffer; const bool do_append; const bool do_excl; WriteOp(int n, RadosTestContext *context, const std::string &oid, bool do_append, bool do_excl, TestOpStat *stat = 0) : TestOp(n, context, stat), oid(oid), do_append(do_append), do_excl(do_excl) {} void _begin() override { assert(!done); std::stringstream acc; std::lock_guard state_locker{context->state_lock}; acc << context->prefix << "OID: " << oid << " snap " << context->current_snap << std::endl; std::string prefix = acc.str(); cont = ContDesc(context->seq_num, context->current_snap, context->seq_num, 
prefix); ContentsGenerator *cont_gen; if (do_append) { ObjectDesc old_value; bool found = context->find_object(oid, &old_value); uint64_t prev_length = found && old_value.has_contents() ? old_value.most_recent_gen()->get_length(old_value.most_recent()) : 0; bool requires_alignment; int r = context->io_ctx.pool_requires_alignment2(&requires_alignment); ceph_assert(r == 0); uint64_t alignment = 0; if (requires_alignment) { r = context->io_ctx.pool_required_alignment2(&alignment); ceph_assert(r == 0); ceph_assert(alignment != 0); } cont_gen = new AppendGenerator( prev_length, alignment, context->min_stride_size, context->max_stride_size, 3); } else { cont_gen = new VarLenGenerator( context->max_size, context->min_stride_size, context->max_stride_size); } context->update_object(cont_gen, oid, cont); context->oid_in_use.insert(oid); context->oid_not_in_use.erase(oid); std::map<uint64_t, uint64_t> ranges; cont_gen->get_ranges_map(cont, ranges); std::cout << num << ": seq_num " << context->seq_num << " ranges " << ranges << std::endl; context->seq_num++; waiting_on = ranges.size(); ContentsGenerator::iterator gen_pos = cont_gen->get_iterator(cont); // assure that tid is greater than last_acked_tid uint64_t tid = last_acked_tid + 1; for (auto [offset, len] : ranges) { gen_pos.seek(offset); bufferlist to_write = gen_pos.gen_bl_advance(len); ceph_assert(to_write.length() == len); ceph_assert(to_write.length() > 0); std::cout << num << ": writing " << context->prefix+oid << " from " << offset << " to " << len + offset << " tid " << tid << std::endl; auto cb_arg = new std::pair<TestOp*, TestOp::CallbackInfo*>(this, new TestOp::CallbackInfo(tid++)); librados::AioCompletion *completion = context->rados.aio_create_completion((void*) cb_arg, &write_callback); waiting.insert(completion); librados::ObjectWriteOperation op; if (do_append) { op.append(to_write); } else { op.write(offset, to_write); } if (do_excl && cb_arg->second->id == last_acked_tid + 1) op.assert_exists(); 
context->io_ctx.aio_operate( context->prefix+oid, completion, &op); } bufferlist contbl; encode(cont, contbl); std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg = new std::pair<TestOp*, TestOp::CallbackInfo*>( this, new TestOp::CallbackInfo(tid++)); librados::AioCompletion *completion = context->rados.aio_create_completion( (void*) cb_arg, &write_callback); waiting.insert(completion); waiting_on++; write_op.setxattr("_header", contbl); if (!do_append) { write_op.truncate(cont_gen->get_length(cont)); } context->io_ctx.aio_operate( context->prefix+oid, completion, &write_op); cb_arg = new std::pair<TestOp*, TestOp::CallbackInfo*>( this, new TestOp::CallbackInfo(tid++)); rcompletion = context->rados.aio_create_completion( (void*) cb_arg, &write_callback); waiting_on++; read_op.read(0, 1, &rbuffer, 0); context->io_ctx.aio_operate( context->prefix+oid, rcompletion, &read_op, librados::OPERATION_ORDER_READS_WRITES, // order wrt previous write/update 0); } void _finish(CallbackInfo *info) override { ceph_assert(info); std::lock_guard state_locker{context->state_lock}; uint64_t tid = info->id; std::cout << num << ": finishing write tid " << tid << " to " << context->prefix + oid << std::endl; if (tid <= last_acked_tid) { std::cerr << "Error: finished tid " << tid << " when last_acked_tid was " << last_acked_tid << std::endl; ceph_abort(); } last_acked_tid = tid; ceph_assert(!done); waiting_on--; if (waiting_on == 0) { uint64_t version = 0; for (auto i = waiting.begin(); i != waiting.end();) { ceph_assert((*i)->is_complete()); if (int err = (*i)->get_return_value()) { std::cerr << "Error: oid " << oid << " write returned error code " << err << std::endl; ceph_abort(); } if ((*i)->get_version64() > version) { std::cout << num << ": oid " << oid << " updating version " << version << " to " << (*i)->get_version64() << std::endl; version = (*i)->get_version64(); } else { std::cout << num << ": oid " << oid << " version " << version << " is already newer than " << 
(*i)->get_version64() << std::endl; } (*i)->release(); waiting.erase(i++); } context->update_object_version(oid, version); ceph_assert(rcompletion->is_complete()); int r = rcompletion->get_return_value(); assertf(r >= 0, "r = %d", r); if (rcompletion->get_version64() != version) { std::cerr << "Error: racing read on " << oid << " returned version " << rcompletion->get_version64() << " rather than version " << version << std::endl; ceph_abort_msg("racing read got wrong version"); } rcompletion->release(); { ObjectDesc old_value; ceph_assert(context->find_object(oid, &old_value, -1)); if (old_value.deleted()) std::cout << num << ": left oid " << oid << " deleted" << std::endl; else std::cout << num << ": left oid " << oid << " " << old_value.most_recent() << std::endl; } context->oid_in_use.erase(oid); context->oid_not_in_use.insert(oid); context->kick(); done = true; } } bool finished() override { return done; } std::string getType() override { return "WriteOp"; } }; class WriteSameOp : public TestOp { public: std::string oid; ContDesc cont; std::set<librados::AioCompletion *> waiting; librados::AioCompletion *rcompletion; uint64_t waiting_on; uint64_t last_acked_tid; librados::ObjectReadOperation read_op; librados::ObjectWriteOperation write_op; bufferlist rbuffer; WriteSameOp(int n, RadosTestContext *context, const std::string &oid, TestOpStat *stat = 0) : TestOp(n, context, stat), oid(oid), rcompletion(NULL), waiting_on(0), last_acked_tid(0) {} void _begin() override { std::lock_guard state_locker{context->state_lock}; done = 0; std::stringstream acc; acc << context->prefix << "OID: " << oid << " snap " << context->current_snap << std::endl; std::string prefix = acc.str(); cont = ContDesc(context->seq_num, context->current_snap, context->seq_num, prefix); ContentsGenerator *cont_gen; cont_gen = new VarLenGenerator( context->max_size, context->min_stride_size, context->max_stride_size); context->update_object(cont_gen, oid, cont); context->oid_in_use.insert(oid); 
context->oid_not_in_use.erase(oid); std::map<uint64_t, uint64_t> ranges; cont_gen->get_ranges_map(cont, ranges); std::cout << num << ": seq_num " << context->seq_num << " ranges " << ranges << std::endl; context->seq_num++; waiting_on = ranges.size(); ContentsGenerator::iterator gen_pos = cont_gen->get_iterator(cont); // assure that tid is greater than last_acked_tid uint64_t tid = last_acked_tid + 1; for (auto [offset, len] : ranges) { gen_pos.seek(offset); bufferlist to_write = gen_pos.gen_bl_advance(len); ceph_assert(to_write.length() == len); ceph_assert(to_write.length() > 0); std::cout << num << ": writing " << context->prefix+oid << " from " << offset << " to " << offset + len << " tid " << tid << std::endl; auto cb_arg = new std::pair<TestOp*, TestOp::CallbackInfo*>(this, new TestOp::CallbackInfo(tid++)); librados::AioCompletion *completion = context->rados.aio_create_completion((void*) cb_arg, &write_callback); waiting.insert(completion); librados::ObjectWriteOperation op; /* no writesame multiplication factor for now */ op.writesame(offset, to_write.length(), to_write); context->io_ctx.aio_operate( context->prefix+oid, completion, &op); } bufferlist contbl; encode(cont, contbl); std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg = new std::pair<TestOp*, TestOp::CallbackInfo*>( this, new TestOp::CallbackInfo(tid++)); librados::AioCompletion *completion = context->rados.aio_create_completion( (void*) cb_arg, &write_callback); waiting.insert(completion); waiting_on++; write_op.setxattr("_header", contbl); write_op.truncate(cont_gen->get_length(cont)); context->io_ctx.aio_operate( context->prefix+oid, completion, &write_op); cb_arg = new std::pair<TestOp*, TestOp::CallbackInfo*>( this, new TestOp::CallbackInfo(tid++)); rcompletion = context->rados.aio_create_completion( (void*) cb_arg, &write_callback); waiting_on++; read_op.read(0, 1, &rbuffer, 0); context->io_ctx.aio_operate( context->prefix+oid, rcompletion, &read_op, librados::OPERATION_ORDER_READS_WRITES, 
// order wrt previous write/update 0); } void _finish(CallbackInfo *info) override { ceph_assert(info); std::lock_guard state_locker{context->state_lock}; uint64_t tid = info->id; std::cout << num << ": finishing writesame tid " << tid << " to " << context->prefix + oid << std::endl; if (tid <= last_acked_tid) { std::cerr << "Error: finished tid " << tid << " when last_acked_tid was " << last_acked_tid << std::endl; ceph_abort(); } last_acked_tid = tid; ceph_assert(!done); waiting_on--; if (waiting_on == 0) { uint64_t version = 0; for (auto i = waiting.begin(); i != waiting.end();) { ceph_assert((*i)->is_complete()); if (int err = (*i)->get_return_value()) { std::cerr << "Error: oid " << oid << " writesame returned error code " << err << std::endl; ceph_abort(); } if ((*i)->get_version64() > version) { std::cout << "oid " << oid << "updating version " << version << "to " << (*i)->get_version64() << std::endl; version = (*i)->get_version64(); } else { std::cout << "oid " << oid << "version " << version << "is already newer than " << (*i)->get_version64() << std::endl; } (*i)->release(); waiting.erase(i++); } context->update_object_version(oid, version); ceph_assert(rcompletion->is_complete()); int r = rcompletion->get_return_value(); assertf(r >= 0, "r = %d", r); if (rcompletion->get_version64() != version) { std::cerr << "Error: racing read on " << oid << " returned version " << rcompletion->get_version64() << " rather than version " << version << std::endl; ceph_abort_msg("racing read got wrong version"); } rcompletion->release(); { ObjectDesc old_value; ceph_assert(context->find_object(oid, &old_value, -1)); if (old_value.deleted()) std::cout << num << ": left oid " << oid << " deleted" << std::endl; else std::cout << num << ": left oid " << oid << " " << old_value.most_recent() << std::endl; } context->oid_in_use.erase(oid); context->oid_not_in_use.insert(oid); context->kick(); done = true; } } bool finished() override { return done; } std::string getType() 
override { return "WriteSameOp"; } };

// DeleteOp: removes an object from the cluster and from the in-memory model,
// then verifies the return code against whether the model said it existed.
class DeleteOp : public TestOp {
public:
  std::string oid;  // object name (without the per-run test prefix)

  DeleteOp(int n,
	   RadosTestContext *context,
	   const std::string &oid,
	   TestOpStat *stat = 0)
    : TestOp(n, context, stat), oid(oid)
  {}

  void _begin() override {
    std::unique_lock state_locker{context->state_lock};
    // don't delete under an active watch; retry later
    if (context->get_watch_context(oid)) {
      context->kick();
      return;
    }
    ObjectDesc contents;
    context->find_object(oid, &contents);
    bool present = !contents.deleted();
    context->oid_in_use.insert(oid);
    context->oid_not_in_use.erase(oid);
    context->seq_num++;
    // update the model first; the actual remove happens outside the lock
    context->remove_object(oid);
    interval_set<uint64_t> ranges;
    state_locker.unlock();
    int r = 0;
    // randomly exercise both removal paths: compound op with assert_exists
    // vs the plain remove() call
    if (rand() % 2) {
      librados::ObjectWriteOperation op;
      op.assert_exists();
      op.remove();
      r = context->io_ctx.operate(context->prefix+oid, &op);
    } else {
      r = context->io_ctx.remove(context->prefix+oid);
    }
    // -ENOENT is only acceptable when the model says the object was absent
    if (r && !(r == -ENOENT && !present)) {
      std::cerr << "r is " << r << " while deleting " << oid << " and present is " << present << std::endl;
      ceph_abort();
    }
    state_locker.lock();
    context->oid_in_use.erase(oid);
    context->oid_not_in_use.insert(oid);
    context->kick();
  }

  std::string getType() override {
    return "DeleteOp";
  }
};

// ReadOp: reads an object (optionally at a random snap) via three parallel
// aio reads and cross-checks data, checksums, omap and xattrs against the
// model's expected state.
class ReadOp : public TestOp {
public:
  std::vector<librados::AioCompletion *> completions;  // one per parallel read
  librados::ObjectReadOperation op;
  std::string oid;
  ObjectDesc old_value;          // model state captured when the read starts
  int snap;                      // snap index to read from, or -1 for head
  bool balance_reads;
  bool localize_reads;
  std::shared_ptr<int> in_use;   // pins the chosen snap while reads are in flight

  std::vector<bufferlist> results;
  std::vector<int> retvals;
  std::vector<std::map<uint64_t, uint64_t>> extent_results;  // sparse-read maps
  std::vector<bool> is_sparse_read;
  uint64_t waiting_on;           // outstanding completions

  std::vector<bufferlist> checksums;
  std::vector<int> checksum_retvals;

  std::map<std::string, bufferlist> attrs;
  int attrretval;

  std::set<std::string> omap_requested_keys;
  std::map<std::string, bufferlist> omap_returned_values;
  std::set<std::string> omap_keys;
  std::map<std::string, bufferlist> omap;
  bufferlist header;

  std::map<std::string, bufferlist> xattrs;
  ReadOp(int n,
	 RadosTestContext *context,
	 const
	 std::string &oid,
	 bool balance_reads,
	 bool localize_reads,
	 TestOpStat *stat = 0)
    : TestOp(n, context, stat),
      completions(3),
      oid(oid),
      snap(0),
      balance_reads(balance_reads),
      localize_reads(localize_reads),
      results(3),
      retvals(3),
      extent_results(3),
      is_sparse_read(3, false),
      waiting_on(0),
      checksums(3),
      checksum_retvals(3),
      attrretval(0)
  {}

  // Queue slot `index` of the three parallel reads: either a flat read plus
  // a crc32c checksum op, or (randomly, unless disabled) a sparse read.
  void _do_read(librados::ObjectReadOperation& read_op, int index) {
    uint64_t len = 0;
    if (old_value.has_contents())
      len = old_value.most_recent_gen()->get_length(old_value.most_recent());
    if (context->no_sparse || rand() % 2) {
      is_sparse_read[index] = false;
      read_op.read(0, len, &results[index], &retvals[index]);
      // -1 is the crc32c init value used for the later comparison
      bufferlist init_value_bl;
      encode(static_cast<uint32_t>(-1), init_value_bl);
      read_op.checksum(LIBRADOS_CHECKSUM_TYPE_CRC32C, init_value_bl, 0, len,
		       0, &checksums[index], &checksum_retvals[index]);
    } else {
      is_sparse_read[index] = true;
      read_op.sparse_read(0, len, &extent_results[index],
			  &results[index], &retvals[index]);
    }
  }

  void _begin() override {
    std::unique_lock state_locker{context->state_lock};
    // ~1 in 4 reads target a random existing snap; pin it while in use
    if (!(rand() % 4) && !context->snaps.empty()) {
      snap = rand_choose(context->snaps)->first;
      in_use = context->snaps_in_use.lookup_or_create(snap, snap);
    } else {
      snap = -1;
    }
    std::cout << num << ": read oid " << oid << " snap " << snap << std::endl;
    done = 0;
    for (uint32_t i = 0; i < 3; i++) {
      completions[i] = context->rados.aio_create_completion((void *) this, &read_callback);
    }
    context->oid_in_use.insert(oid);
    context->oid_not_in_use.erase(oid);
    // snapshot the expected object state before issuing the reads
    ceph_assert(context->find_object(oid, &old_value, snap));
    if (old_value.deleted())
      std::cout << num << ": expect deleted" << std::endl;
    else
      std::cout << num << ": expect " << old_value.most_recent() << std::endl;
    TestWatchContext *ctx = context->get_watch_context(oid);
    state_locker.unlock();
    if (ctx) {
      // object has an active watch: fire a notify and wait for the watcher,
      // outside the state lock
      ceph_assert(old_value.exists);
      TestAlarm alarm;
      std::cerr << num << ": about to start" << std::endl;
      ctx->start();
      std::cerr << num << ": started" << std::endl;
      bufferlist bl;
      context->io_ctx.set_notify_timeout(600);
      int r = context->io_ctx.notify2(context->prefix+oid, bl, 0, NULL);
      if (r < 0) {
	std::cerr << "r is " << r << std::endl;
	ceph_abort();
      }
      std::cerr << num << ": notified, waiting" << std::endl;
      ctx->wait();
    }
    state_locker.lock();
    if (snap >= 0) {
      context->io_ctx.snap_set_read(context->snaps[snap]);
    }
    _do_read(op, 0);
    // request a random subset of known attr keys, sometimes with a bogus
    // suffix appended so missing-key handling is exercised too
    for (auto i = old_value.attrs.begin(); i != old_value.attrs.end(); ++i) {
      if (rand() % 2) {
	std::string key = i->first;
	if (rand() % 2)
	  key.push_back((rand() % 26) + 'a');
	omap_requested_keys.insert(key);
      }
    }
    if (!context->no_omap) {
      op.omap_get_vals_by_keys(omap_requested_keys, &omap_returned_values, 0);
      // NOTE: we're ignoring pmore here, which assumes the OSD limit is high
      // enough for us.
      op.omap_get_keys2("", -1, &omap_keys, nullptr, nullptr);
      op.omap_get_vals2("", -1, &omap, nullptr, nullptr);
      op.omap_get_header(&header, 0);
    }
    op.getxattrs(&xattrs, 0);
    unsigned flags = 0;
    if (balance_reads)
      flags |= librados::OPERATION_BALANCE_READS;
    if (localize_reads)
      flags |= librados::OPERATION_LOCALIZE_READS;
    ceph_assert(!context->io_ctx.aio_operate(context->prefix+oid, completions[0], &op,
					     flags, NULL));
    waiting_on++;
    // send 2 pipelined reads on the same object/snap. This can help testing
    // OSD's read behavior in some scenarios
    for (uint32_t i = 1; i < 3; ++i) {
      librados::ObjectReadOperation pipeline_op;
      _do_read(pipeline_op, i);
      ceph_assert(!context->io_ctx.aio_operate(context->prefix+oid, completions[i], &pipeline_op, 0));
      waiting_on++;
    }
    if (snap >= 0) {
      context->io_ctx.snap_set_read(0);
    }
  }

  void _finish(CallbackInfo *info) override {
    std::unique_lock state_locker{context->state_lock};
    ceph_assert(!done);
    ceph_assert(waiting_on > 0);
    // wait until all three reads have completed before verifying
    if (--waiting_on) {
      return;
    }
    context->oid_in_use.erase(oid);
    context->oid_not_in_use.insert(oid);
    int retval = completions[0]->get_return_value();
    // all reads must agree on return code and object version
    for (auto it = completions.begin();
	 it != completions.end(); ++it) {
      ceph_assert((*it)->is_complete());
      uint64_t version = (*it)->get_version64();
      int err = (*it)->get_return_value();
      if (err != retval) {
	std::cerr << num << ": Error: oid " << oid << " read returned different error codes: "
		  << retval << " and " << err << std::endl;
	ceph_abort();
      }
      if (err) {
	// -ENOENT is only acceptable when the model says the object is gone
	if (!(err == -ENOENT && old_value.deleted())) {
	  std::cerr << num << ": Error: oid " << oid << " read returned error code "
		    << err << std::endl;
	  ceph_abort();
	}
      } else if (version != old_value.version) {
	std::cerr << num << ": oid " << oid << " version is " << version
		  << " and expected " << old_value.version << std::endl;
	ceph_assert(version == old_value.version);
      }
    }
    if (!retval) {
      // the "_header" xattr carries the encoded ContDesc for the contents
      std::map<std::string, bufferlist>::iterator iter = xattrs.find("_header");
      bufferlist headerbl;
      if (iter == xattrs.end()) {
	if (old_value.has_contents()) {
	  std::cerr << num << ": Error: did not find header attr, has_contents: "
		    << old_value.has_contents() << std::endl;
	  ceph_assert(!old_value.has_contents());
	}
      } else {
	headerbl = iter->second;
	xattrs.erase(iter);
      }
      if (old_value.deleted()) {
	std::cout << num << ": expect deleted" << std::endl;
	ceph_abort_msg("expected deleted");
      } else {
	std::cout << num << ": expect " << old_value.most_recent() << std::endl;
      }
      if (old_value.has_contents()) {
	ContDesc to_check;
	auto p = headerbl.cbegin();
	decode(to_check, p);
	if (to_check != old_value.most_recent()) {
	  std::cerr << num << ": oid " << oid << " found incorrect object contents "
		    << to_check << ", expected " << old_value.most_recent() << std::endl;
	  context->errors++;
	}
	// verify every read result (and checksum, for flat reads)
	for (unsigned i = 0; i < results.size(); i++) {
	  if (is_sparse_read[i]) {
	    if (!old_value.check_sparse(extent_results[i], results[i])) {
	      std::cerr << num << ": oid " << oid << " contents " << to_check
			<< " corrupt" << std::endl;
	      context->errors++;
	    }
	  } else {
	    if (!old_value.check(results[i])) {
	      std::cerr << num << ": oid " << oid << " contents " << to_check
			<< " corrupt" << std::endl;
	      context->errors++;
	    }
	    uint32_t checksum = 0;
	    if (checksum_retvals[i] == 0) {
	      try {
		auto bl_it = checksums[i].cbegin();
		uint32_t csum_count;
		decode(csum_count, bl_it);
		decode(checksum, bl_it);
	      } catch (const buffer::error &err) {
		checksum_retvals[i] = -EBADMSG;
	      }
	    }
	    if (checksum_retvals[i] != 0 || checksum != results[i].crc32c(-1)) {
	      std::cerr << num << ": oid " << oid << " checksum " << checksums[i]
			<< " incorrect, expecting " << results[i].crc32c(-1)
			<< std::endl;
	      context->errors++;
	    }
	  }
	}
	if (context->errors) ceph_abort();
      }
      // Attributes
      if (!context->no_omap) {
	if (!(old_value.header == header)) {
	  std::cerr << num << ": oid " << oid << " header does not match, old size: "
		    << old_value.header.length() << " new size " << header.length()
		    << std::endl;
	  ceph_assert(old_value.header == header);
	}
	if (omap.size() != old_value.attrs.size()) {
	  std::cerr << num << ": oid " << oid << " omap.size() is " << omap.size()
		    << " and old is " << old_value.attrs.size() << std::endl;
	  ceph_assert(omap.size() == old_value.attrs.size());
	}
	if (omap_keys.size() != old_value.attrs.size()) {
	  std::cerr << num << ": oid " << oid << " omap.size() is " << omap_keys.size()
		    << " and old is " << old_value.attrs.size() << std::endl;
	  ceph_assert(omap_keys.size() == old_value.attrs.size());
	}
      }
      if (xattrs.size() != old_value.attrs.size()) {
	std::cerr << num << ": oid " << oid
		  << " xattrs.size() is " << xattrs.size()
		  << " and old is " << old_value.attrs.size() << std::endl;
	ceph_assert(xattrs.size() == old_value.attrs.size());
      }
      // every model attribute must appear byte-identically in both the omap
      // (unless omap is disabled) and the xattrs
      for (auto iter = old_value.attrs.begin();
	   iter != old_value.attrs.end();
	   ++iter) {
	bufferlist bl = context->attr_gen.gen_bl(
	  iter->second);
	if (!context->no_omap) {
	  std::map<std::string, bufferlist>::iterator omap_iter = omap.find(iter->first);
	  ceph_assert(omap_iter != omap.end());
	  ceph_assert(bl.length() == omap_iter->second.length());
	  bufferlist::iterator k = bl.begin();
	  for(bufferlist::iterator l = omap_iter->second.begin();
	      !k.end() && !l.end();
	      ++k, ++l) {
	    ceph_assert(*l == *k);
	  }
	}
	auto xattr_iter = xattrs.find(iter->first);
	ceph_assert(xattr_iter != xattrs.end());
	ceph_assert(bl.length() == xattr_iter->second.length());
	bufferlist::iterator k = bl.begin();
	for (bufferlist::iterator j = xattr_iter->second.begin();
	     !k.end() && !j.end();
	     ++j, ++k) {
	  ceph_assert(*j == *k);
	}
      }
      if (!context->no_omap) {
	// a requested key must come back iff the model has it (requested
	// keys include deliberately-bogus suffixed ones)
	for (std::set<std::string>::iterator i = omap_requested_keys.begin();
	     i != omap_requested_keys.end();
	     ++i) {
	  if (!omap_returned_values.count(*i))
	    ceph_assert(!old_value.attrs.count(*i));
	  if (!old_value.attrs.count(*i))
	    ceph_assert(!omap_returned_values.count(*i));
	}
	for (auto i = omap_returned_values.begin();
	     i != omap_returned_values.end();
	     ++i) {
	  ceph_assert(omap_requested_keys.count(i->first));
	  ceph_assert(omap.count(i->first));
	  ceph_assert(old_value.attrs.count(i->first));
	  ceph_assert(i->second == omap[i->first]);
	}
      }
    }
    for (auto it = completions.begin(); it != completions.end(); ++it) {
      (*it)->release();
    }
    context->kick();
    done = true;
  }

  bool finished() override {
    return done;
  }

  std::string getType() override {
    return "ReadOp";
  }
};

// SnapCreateOp: creates a pool snapshot or a self-managed snapshot, records
// it in the model, and (self-managed only) reinstalls the write snap context.
class SnapCreateOp : public TestOp {
public:
  SnapCreateOp(int n,
	       RadosTestContext *context,
	       TestOpStat *stat = 0)
    : TestOp(n, context, stat)
  {}

  void _begin() override {
    uint64_t snap;
    std::string snapname;
    if (context->pool_snaps) {
      std::stringstream ss;
      ss << context->prefix << "snap" << ++context->snapname_num;
      snapname = ss.str();
      int ret = context->io_ctx.snap_create(snapname.c_str());
      if (ret) {
	std::cerr << "snap_create returned " << ret << std::endl;
	ceph_abort();
      }
      ceph_assert(!context->io_ctx.snap_lookup(snapname.c_str(), &snap));
    } else {
      ceph_assert(!context->io_ctx.selfmanaged_snap_create(&snap));
    }
    std::unique_lock state_locker{context->state_lock};
    context->add_snap(snap);
    if (!context->pool_snaps) {
      // rebuild the snap context newest-first and install it for writes
      std::vector<uint64_t> snapset(context->snaps.size());
      int j = 0;
      for (auto i = context->snaps.rbegin();
	   i != context->snaps.rend();
	   ++i, ++j) {
	snapset[j] = i->second;
      }
      state_locker.unlock();
      int r = context->io_ctx.selfmanaged_snap_set_write_ctx(context->seq, snapset);
      if (r) {
	std::cerr << "r is " << r << " snapset is " << snapset << " seq is "
		  << context->seq << std::endl;
	ceph_abort();
      }
    }
  }

  std::string getType() override {
    return "SnapCreateOp";
  }

  // pool snap creation cannot race with other in-flight ops
  bool must_quiesce_other_ops() override {
    return context->pool_snaps;
  }
};

// SnapRemoveOp: deletes snapshot `to_remove` from the model and the cluster;
// for self-managed snaps also reinstalls the updated write snap context.
class SnapRemoveOp : public TestOp {
public:
  int to_remove;  // model index of the snap to delete
  SnapRemoveOp(int n,
	       RadosTestContext *context,
	       int snap,
	       TestOpStat *stat = 0)
    : TestOp(n, context, stat),
      to_remove(snap)
  {}

  void _begin() override {
    std::unique_lock state_locker{context->state_lock};
    uint64_t snap = context->snaps[to_remove];
    context->remove_snap(to_remove);
    if (context->pool_snaps) {
      std::string snapname;
      ceph_assert(!context->io_ctx.snap_get_name(snap, &snapname));
      ceph_assert(!context->io_ctx.snap_remove(snapname.c_str()));
    } else {
      ceph_assert(!context->io_ctx.selfmanaged_snap_remove(snap));
      // rebuild the snap context (newest first) without the removed snap
      std::vector<uint64_t> snapset(context->snaps.size());
      int j = 0;
      for (auto i = context->snaps.rbegin();
	   i != context->snaps.rend();
	   ++i, ++j) {
	snapset[j] = i->second;
      }
      int r = context->io_ctx.selfmanaged_snap_set_write_ctx(context->seq, snapset);
      if (r) {
	std::cerr << "r is " << r << " snapset is " << snapset << " seq is "
		  << context->seq << std::endl;
	ceph_abort();
      }
    }
  }

  std::string getType() override {
    return "SnapRemoveOp";
  }
};
// WatchOp: toggles a watch on the object — establishes a watch if none is
// registered in the model, otherwise tears the existing one down.
class WatchOp : public TestOp {
  std::string oid;
public:
  WatchOp(int n,
	  RadosTestContext *context,
	  const std::string &_oid,
	  TestOpStat *stat = 0)
    : TestOp(n, context, stat),
      oid(_oid)
  {}

  void _begin() override {
    std::unique_lock state_locker{context->state_lock};
    ObjectDesc contents;
    context->find_object(oid, &contents);
    if (contents.deleted()) {
      context->kick();
      return;
    }
    context->oid_in_use.insert(oid);
    context->oid_not_in_use.erase(oid);
    TestWatchContext *ctx = context->get_watch_context(oid);
    state_locker.unlock();
    int r;
    if (!ctx) {
      // register in the model first, then establish the watch
      {
	std::lock_guard l{context->state_lock};
	ctx = context->watch(oid);
      }
      r = context->io_ctx.watch2(context->prefix+oid,
				 &ctx->get_handle(),
				 ctx);
    } else {
      // unwatch on the cluster, then drop the model registration
      r = context->io_ctx.unwatch2(ctx->get_handle());
      {
	std::lock_guard l{context->state_lock};
	context->unwatch(oid);
      }
    }
    if (r) {
      std::cerr << "r is " << r << std::endl;
      ceph_abort();
    }
    {
      std::lock_guard l{context->state_lock};
      context->oid_in_use.erase(oid);
      context->oid_not_in_use.insert(oid);
    }
  }

  std::string getType() override {
    return "WatchOp";
  }
};

// RollbackOp: rolls an object back to a random snapshot, bracketed by two
// zero-length writes so ordering relative to the rollback can be checked.
class RollbackOp : public TestOp {
public:
  std::string oid;
  int roll_back_to;  // model snap index chosen as the rollback target
  librados::ObjectWriteOperation zero_write_op1;
  librados::ObjectWriteOperation zero_write_op2;
  librados::ObjectWriteOperation op;
  std::vector<librados::AioCompletion *> comps;  // [pre-write, rollback, post-write]
  std::shared_ptr<int> in_use;   // pins the target snap while in flight
  int last_finished;             // completions must finish in id order
  int outstanding;               // number of aio ops actually issued

  RollbackOp(int n,
	     RadosTestContext *context,
	     const std::string &_oid,
	     TestOpStat *stat = 0)
    : TestOp(n, context, stat),
      oid(_oid),
      roll_back_to(-1),
      comps(3, NULL),
      last_finished(-1),
      outstanding(3)
  {}

  void _begin() override {
    context->state_lock.lock();
    if (context->get_watch_context(oid)) {
      context->kick();
      context->state_lock.unlock();
      return;
    }
    if (context->snaps.empty()) {
      context->kick();
      context->state_lock.unlock();
      done = true;
      return;
    }
    context->oid_in_use.insert(oid);
    context->oid_not_in_use.erase(oid);
    roll_back_to = rand_choose(context->snaps)->first;
    in_use = context->snaps_in_use.lookup_or_create(
      roll_back_to,
      roll_back_to);
    std::cout << "rollback oid " << oid << " to " << roll_back_to << std::endl;
    bool existed_before = context->object_existed_at(oid);
    bool existed_after = context->object_existed_at(oid, roll_back_to);
    context->roll_back(oid, roll_back_to);
    uint64_t snap = context->snaps[roll_back_to];
    // skip the bracketing writes that would target a non-existent object
    outstanding -= (!existed_before) + (!existed_after);
    context->state_lock.unlock();
    bufferlist bl, bl2;
    zero_write_op1.append(bl);
    zero_write_op2.append(bl2);
    if (context->pool_snaps) {
      op.snap_rollback(snap);
    } else {
      op.selfmanaged_snap_rollback(snap);
    }
    if (existed_before) {
      // zero-length write before the rollback (callback id 0)
      std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg =
	new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
						      new TestOp::CallbackInfo(0));
      comps[0] =
	context->rados.aio_create_completion((void*) cb_arg,
					     &write_callback);
      context->io_ctx.aio_operate(
	context->prefix+oid, comps[0], &zero_write_op1);
    }
    {
      // the rollback itself (callback id 1)
      std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg =
	new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
						      new TestOp::CallbackInfo(1));
      comps[1] =
	context->rados.aio_create_completion((void*) cb_arg,
					     &write_callback);
      context->io_ctx.aio_operate(
	context->prefix+oid, comps[1], &op);
    }
    if (existed_after) {
      // zero-length write after the rollback (callback id 2)
      std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg =
	new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
						      new TestOp::CallbackInfo(2));
      comps[2] =
	context->rados.aio_create_completion((void*) cb_arg,
					     &write_callback);
      context->io_ctx.aio_operate(
	context->prefix+oid, comps[2], &zero_write_op2);
    }
  }

  void _finish(CallbackInfo *info) override {
    std::lock_guard l{context->state_lock};
    uint64_t tid = info->id;
    std::cout << num << ": finishing rollback tid " << tid
	      << " to " << context->prefix + oid << std::endl;
    // completions must arrive in issue order
    ceph_assert((int)(info->id) > last_finished);
    last_finished = info->id;
    int r;
    if ((r = comps[last_finished]->get_return_value()) != 0) {
      std::cerr << "err " << r << std::endl;
      ceph_abort();
    }
    if (--outstanding == 0) {
      done = true;
      context->update_object_version(oid,
				     comps[tid]->get_version64());
      context->oid_in_use.erase(oid);
      context->oid_not_in_use.insert(oid);
      in_use = std::shared_ptr<int>();  // release the snap pin
      context->kick();
    }
  }

  bool finished() override {
    return done;
  }

  std::string getType() override {
    return "RollBackOp";
  }
};

// CopyFromOp: copies oid_src onto oid via copy_from while racing an ordered
// stat read against it, then checks both completions report one consistent
// version.
class CopyFromOp : public TestOp {
public:
  std::string oid, oid_src;
  ObjectDesc src_value;    // model state of the source when the op started
  librados::ObjectWriteOperation op;
  librados::ObjectReadOperation rd_op;
  librados::AioCompletion *comp;                      // copy_from completion
  librados::AioCompletion *comp_racing_read = nullptr;
  std::shared_ptr<int> in_use;
  int snap;
  int done;                // counts finished callbacks; op done at 2
  uint64_t version;        // version both completions must agree on
  int r;
  CopyFromOp(int n,
	     RadosTestContext *context,
	     const std::string &oid,
	     const std::string &oid_src,
	     TestOpStat *stat)
    : TestOp(n, context, stat),
      oid(oid), oid_src(oid_src),
      comp(NULL), snap(-1), done(0),
      version(0), r(0)
  {}

  void _begin() override {
    ContDesc cont;
    {
      std::lock_guard l{context->state_lock};
      cont = ContDesc(context->seq_num, context->current_snap,
		      context->seq_num, "");
      context->oid_in_use.insert(oid);
      context->oid_not_in_use.erase(oid);
      context->oid_in_use.insert(oid_src);
      context->oid_not_in_use.erase(oid_src);

      // choose source snap
      // (snap selection currently disabled by the `0 &&`)
      if (0 && !(rand() % 4) && !context->snaps.empty()) {
	snap = rand_choose(context->snaps)->first;
	in_use = context->snaps_in_use.lookup_or_create(snap, snap);
      } else {
	snap = -1;
      }
      context->find_object(oid_src, &src_value, snap);
      // model the copy up front; the racing read will observe it
      if (!src_value.deleted())
	context->update_object_full(oid, src_value);
    }

    std::string src = context->prefix+oid_src;
    op.copy_from(src.c_str(), context->io_ctx, src_value.version, 0);

    std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg =
      new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
						    new TestOp::CallbackInfo(0));
    comp = context->rados.aio_create_completion((void*) cb_arg,
						&write_callback);
    context->io_ctx.aio_operate(context->prefix+oid, comp, &op);

    // queue up a racing read, too.
    std::pair<TestOp*, TestOp::CallbackInfo*> *read_cb_arg =
      new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
						    new TestOp::CallbackInfo(1));
    comp_racing_read = context->rados.aio_create_completion((void*) read_cb_arg,
							    &write_callback);
    rd_op.stat(NULL, NULL, NULL);
    context->io_ctx.aio_operate(context->prefix+oid, comp_racing_read, &rd_op,
				librados::OPERATION_ORDER_READS_WRITES,  // order wrt previous write/update
				NULL);
  }

  void _finish(CallbackInfo *info) override {
    std::lock_guard l{context->state_lock};

    // note that the read can (and atm will) come back before the
    // write reply, but will reflect the update and the versions will
    // match.

    if (info->id == 0) {
      // copy_from
      ceph_assert(comp->is_complete());
      std::cout << num << ": finishing copy_from to " << context->prefix + oid << std::endl;
      if ((r = comp->get_return_value())) {
	if (r == -ENOENT && src_value.deleted()) {
	  std::cout << num << ": got expected ENOENT (src dne)" << std::endl;
	} else {
	  std::cerr << "Error: oid " << oid << " copy_from " << oid_src
		    << " returned error code " << r << std::endl;
	  ceph_abort();
	}
      } else {
	ceph_assert(!version || comp->get_version64() == version);
	version = comp->get_version64();
	context->update_object_version(oid, comp->get_version64());
      }
    } else if (info->id == 1) {
      // racing read
      ceph_assert(comp_racing_read->is_complete());
      std::cout << num << ": finishing copy_from racing read to " << context->prefix + oid << std::endl;
      if ((r = comp_racing_read->get_return_value())) {
	if (!(r == -ENOENT && src_value.deleted())) {
	  std::cerr << "Error: oid " << oid << " copy_from " << oid_src
		    << " returned error code " << r << std::endl;
	}
      } else {
	ceph_assert(comp_racing_read->get_return_value() == 0);
	ceph_assert(!version || comp_racing_read->get_version64() == version);
	version = comp_racing_read->get_version64();
      }
    }
    if (++done == 2) {
      context->oid_in_use.erase(oid);
      context->oid_not_in_use.insert(oid);
      context->oid_in_use.erase(oid_src);
      context->oid_not_in_use.insert(oid_src);
      context->kick();
    }
  }

  bool finished() override {
    return done == 2;
  }

  std::string getType() override {
    return "CopyFromOp";
  }
};

// ChunkReadOp: for a dedup/tiered object, reads a random sub-range of one
// chunk from the source object and the corresponding range from the chunk's
// target object, then compares their crc32c checksums.
class ChunkReadOp : public TestOp {
public:
  std::vector<librados::AioCompletion *> completions;  // [source, target]
  librados::ObjectReadOperation op;
  std::string oid;
  ObjectDesc old_value;  // model state of the source object
  ObjectDesc tgt_value;  // model state of the chunk target object
  int snap;
  bool balance_reads;
  bool localize_reads;
  std::shared_ptr<int> in_use;

  std::vector<bufferlist> results;
  std::vector<int> retvals;
  std::vector<bool> is_sparse_read;
  uint64_t waiting_on;

  std::vector<bufferlist> checksums;
  std::vector<int> checksum_retvals;

  uint32_t offset = 0;       // read offset within the source object
  uint32_t length = 0;       // read length
  std::string tgt_oid;       // chunk target object
  std::string tgt_pool_name;
  uint32_t tgt_offset = 0;   // corresponding offset within the target

  ChunkReadOp(int n,
	      RadosTestContext *context,
	      const std::string &oid,
	      const std::string &tgt_pool_name,
	      bool balance_reads,
	      bool localize_reads,
	      TestOpStat *stat = 0)
    : TestOp(n, context, stat),
      completions(2),
      oid(oid),
      snap(0),
      balance_reads(balance_reads),
      localize_reads(localize_reads),
      results(2),
      retvals(2),
      waiting_on(0),
      checksums(2),
      checksum_retvals(2),
      tgt_pool_name(tgt_pool_name)
  {}

  // Queue a read of [offset, offset+length) into slot `index`; the target
  // read (index != 0) also gets a crc32c checksum op for the comparison.
  void _do_read(librados::ObjectReadOperation& read_op, uint32_t offset, uint32_t length, int index) {
    read_op.read(offset, length, &results[index], &retvals[index]);
    if (index != 0) {
      bufferlist init_value_bl;
      encode(static_cast<uint32_t>(-1), init_value_bl);
      read_op.checksum(LIBRADOS_CHECKSUM_TYPE_CRC32C, init_value_bl, offset, length,
		       0, &checksums[index], &checksum_retvals[index]);
    }
  }

  void _begin() override {
    context->state_lock.lock();
    std::cout << num << ": chunk read oid " << oid << " snap " << snap << std::endl;
    done = 0;
    for (uint32_t i = 0; i < 2; i++) {
      completions[i] = context->rados.aio_create_completion((void *) this, &read_callback);
    }
    context->find_object(oid, &old_value);
    // nothing to verify if the model has no chunk mapping for this object
    if (old_value.chunk_info.size() == 0) {
      std::cout << ": no chunks" << std::endl;
      context->kick();
      context->state_lock.unlock();
      done = true;
      return;
    }
    context->oid_in_use.insert(oid);
    context->oid_not_in_use.erase(oid);
    if (old_value.deleted()) {
      std::cout << num << ": expect deleted" << std::endl;
    } else {
      std::cout << num << ": expect " << old_value.most_recent() << std::endl;
    }
    // pick a random chunk, then a random sub-range that stays inside it
    int rand_index = rand() % old_value.chunk_info.size();
    auto iter = old_value.chunk_info.begin();
    for (int i = 0; i < rand_index; i++) {
      iter++;
    }
    offset = iter->first;
    offset += (rand() % iter->second.length)/2;
    uint32_t t_length = rand() % iter->second.length;
    while (t_length + offset > iter->first + iter->second.length) {
      t_length = rand() % iter->second.length;
    }
    length = t_length;
    // translate the source range into the chunk target's coordinates
    tgt_offset = iter->second.offset + offset - iter->first;
    tgt_oid = iter->second.oid;

    std::cout << num << ": ori offset " << iter->first << " req offset " << offset
	      << " ori length " << iter->second.length << " req length " << length
	      << " ori tgt_offset " << iter->second.offset << " req tgt_offset " << tgt_offset
	      << " tgt_oid " << tgt_oid << std::endl;

    TestWatchContext *ctx = context->get_watch_context(oid);
    context->state_lock.unlock();
    if (ctx) {
      // active watch: notify and wait outside the state lock
      ceph_assert(old_value.exists);
      TestAlarm alarm;
      std::cerr << num << ": about to start" << std::endl;
      ctx->start();
      std::cerr << num << ": started" << std::endl;
      bufferlist bl;
      context->io_ctx.set_notify_timeout(600);
      int r = context->io_ctx.notify2(context->prefix+oid, bl, 0, NULL);
      if (r < 0) {
	std::cerr << "r is " << r << std::endl;
	ceph_abort();
      }
      std::cerr << num << ": notified, waiting" << std::endl;
      ctx->wait();
    }
    std::lock_guard state_locker{context->state_lock};

    _do_read(op, offset, length, 0);

    unsigned flags = 0;
    if (balance_reads)
      flags |= librados::OPERATION_BALANCE_READS;
    if (localize_reads)
      flags |= librados::OPERATION_LOCALIZE_READS;

    ceph_assert(!context->io_ctx.aio_operate(context->prefix+oid, completions[0], &op,
					     flags, NULL));
    waiting_on++;

    _do_read(op, tgt_offset, length, 1);
    ceph_assert(!context->io_ctx.aio_operate(context->prefix+tgt_oid, completions[1], &op,
					     flags, NULL));
    waiting_on++;
  }

  void _finish(CallbackInfo *info) override {
    std::lock_guard l{context->state_lock};
    ceph_assert(!done);
    ceph_assert(waiting_on > 0);
    // wait for both the source and the target read
    if (--waiting_on) {
      return;
    }

    context->oid_in_use.erase(oid);
    context->oid_not_in_use.insert(oid);
    int retval = completions[0]->get_return_value();
    std::cout << ": finish!! ret: " << retval << std::endl;
    context->find_object(tgt_oid, &tgt_value);

    // both reads must agree on the return code
    for (int i = 0; i < 2; i++) {
      ceph_assert(completions[i]->is_complete());
      int err = completions[i]->get_return_value();
      if (err != retval) {
	std::cerr << num << ": Error: oid " << oid << " read returned different error codes: "
		  << retval << " and " << err << std::endl;
	ceph_abort();
      }
      if (err) {
	if (!(err == -ENOENT && old_value.deleted())) {
	  std::cerr << num << ": Error: oid " << oid << " read returned error code "
		    << err << std::endl;
	  ceph_abort();
	}
      }
    }

    if (!retval) {
      if (old_value.deleted()) {
	std::cout << num << ": expect deleted" << std::endl;
	ceph_abort_msg("expected deleted");
      } else {
	std::cout << num << ": expect " << old_value.most_recent() << std::endl;
      }
      if (tgt_value.has_contents()) {
	// checksum[0]: recomputed from source data; checksum[1]: decoded
	// from the target-side checksum op. They must match.
	uint32_t checksum[2] = {0};
	if (checksum_retvals[1] == 0) {
	  try {
	    auto bl_it = checksums[1].cbegin();
	    uint32_t csum_count;
	    decode(csum_count, bl_it);
	    decode(checksum[1], bl_it);
	  } catch (const buffer::error &err) {
	    checksum_retvals[1] = -EBADMSG;
	  }
	}
	if (checksum_retvals[1] != 0) {
	  std::cerr << num << ": oid " << oid << " checksum retvals " << checksums[0]
		    << " error " << std::endl;
	  context->errors++;
	}

	checksum[0] = results[0].crc32c(-1);

	if (checksum[0] != checksum[1]) {
	  std::cerr << num << ": oid " << oid << " checksum src " << checksum[0]
		    << " chunksum tgt " << checksum[1] << " incorrect, expecting "
		    << results[0].crc32c(-1)
		    << std::endl;
	  context->errors++;
	}
	if (context->errors) ceph_abort();
      }
    }
    for (auto it = completions.begin(); it != completions.end(); ++it) {
      (*it)->release();
    }

    context->kick();
    done = true;
  }

  bool finished() override {
    return done;
  }

  std::string getType() override {
    return "ChunkReadOp";
  }
};

class
// CopyOp: copy_from oid_src into oid, directed at either the base pool or
// the low-tier pool depending on tgt_pool_name.
CopyOp : public TestOp {
public:
  std::string oid, oid_src, tgt_pool_name;
  librados::ObjectWriteOperation op;
  librados::ObjectReadOperation rd_op;
  librados::AioCompletion *comp;
  ObjectDesc src_value, tgt_value;
  int done;
  int r;
  CopyOp(int n,
	 RadosTestContext *context,
	 const std::string &oid_src,
	 const std::string &oid,
	 const std::string &tgt_pool_name,
	 TestOpStat *stat = 0)
    : TestOp(n, context, stat),
      oid(oid), oid_src(oid_src), tgt_pool_name(tgt_pool_name),
      comp(NULL), done(0), r(0)
  {}

  void _begin() override {
    std::lock_guard l{context->state_lock};
    context->oid_in_use.insert(oid_src);
    context->oid_not_in_use.erase(oid_src);

    std::string src = context->prefix+oid_src;
    context->find_object(oid_src, &src_value);
    op.copy_from(src.c_str(), context->io_ctx, src_value.version, 0);

    std::cout << "copy op oid " << oid_src << " to " << oid
	      << " tgt_pool_name " << tgt_pool_name << std::endl;

    std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg =
      new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
						    new TestOp::CallbackInfo(0));
    comp = context->rados.aio_create_completion((void*) cb_arg, &write_callback);
    // route the copy to the pool the target object lives in
    if (tgt_pool_name == context->low_tier_pool_name) {
      context->low_tier_io_ctx.aio_operate(context->prefix+oid, comp, &op);
    } else {
      context->io_ctx.aio_operate(context->prefix+oid, comp, &op);
    }
  }

  void _finish(CallbackInfo *info) override {
    std::lock_guard l{context->state_lock};
    if (info->id == 0) {
      ceph_assert(comp->is_complete());
      std::cout << num << ": finishing copy op to oid " << oid << std::endl;
      if ((r = comp->get_return_value())) {
	std::cerr << "Error: oid " << oid << " write returned error code "
		  << r << std::endl;
	ceph_abort();
      }
    }
    if (++done == 1) {
      context->oid_in_use.erase(oid_src);
      context->oid_not_in_use.insert(oid_src);
      context->kick();
    }
  }

  bool finished() override {
    return done == 1;
  }

  std::string getType() override {
    return "CopyOp";
  }
};

// SetChunkOp: maps a random 512-aligned range of oid to a chunk target —
// either an explicit oid_tgt, or (when oid_tgt is empty) a SHA256
// fingerprint object written to the low-tier pool (dedup path).
class SetChunkOp : public TestOp {
public:
  std::string oid, oid_tgt;
  ObjectDesc src_value, tgt_value;
  librados::ObjectReadOperation op;
  librados::AioCompletion *comp;
  int done;
  int r;
  uint64_t offset;     // chunked range start within oid
  uint32_t length;     // chunked range length
  uint32_t tgt_offset; // corresponding offset within the target
  int snap;
  std::shared_ptr<int> in_use;
  SetChunkOp(int n,
	     RadosTestContext *context,
	     const std::string &oid,
	     const std::string &oid_tgt,
	     TestOpStat *stat = 0)
    : TestOp(n, context, stat),
      oid(oid), oid_tgt(oid_tgt),
      comp(NULL), done(0),
      r(0), offset(0), length(0),
      tgt_offset(0),
      snap(0)
  {}

  // Pick a random 512-aligned (offset, length) pair with a nonzero length
  // that fits entirely within max_len.
  std::pair<uint64_t, uint64_t> get_rand_off_len(uint32_t max_len) {
    std::pair<uint64_t, uint64_t> r (0, 0);
    r.first = rand() % max_len;
    r.second = rand() % max_len;
    r.first = r.first - (r.first % 512);
    r.second = r.second - (r.second % 512);

    while (r.first + r.second > max_len || r.second == 0) {
      r.first = rand() % max_len;
      r.second = rand() % max_len;
      r.first = r.first - (r.first % 512);
      r.second = r.second - (r.second % 512);
    }
    return r;
  }

  void _begin() override {
    std::lock_guard l{context->state_lock};
    if (!(rand() % 4) && !context->snaps.empty()) {
      snap = rand_choose(context->snaps)->first;
      in_use = context->snaps_in_use.lookup_or_create(snap, snap);
    } else {
      snap = -1;
    }
    context->oid_in_use.insert(oid);
    context->oid_not_in_use.erase(oid);

    context->find_object(oid, &src_value, snap);
    context->find_object(oid_tgt, &tgt_value);

    uint32_t max_len = 0;
    if (src_value.deleted()) {
      /* just random length to check ENOENT */
      max_len = context->max_size;
    } else {
      max_len = src_value.most_recent_gen()->get_length(src_value.most_recent());
    }
    std::pair<uint64_t, uint64_t> off_len; // first: offset, second: length
    if (snap >= 0) {
      context->io_ctx.snap_set_read(context->snaps[snap]);
      off_len = get_rand_off_len(max_len);
    } else if (src_value.version != 0 && !src_value.deleted()) {
      // guard against concurrent modification of the source
      op.assert_version(src_value.version);
      off_len = get_rand_off_len(max_len);
    } else if (src_value.deleted()) {
      off_len.first = 0;
      off_len.second = max_len;
    }
    offset = off_len.first;
    length = off_len.second;
    tgt_offset = offset;

    std::string target_oid;
    if (!src_value.deleted() &&
oid_tgt.empty()) { bufferlist bl; int r = context->io_ctx.read(context->prefix+oid, bl, length, offset); ceph_assert(r > 0); std::string fp_oid = ceph::crypto::digest<ceph::crypto::SHA256>(bl).to_str(); r = context->low_tier_io_ctx.write(fp_oid, bl, bl.length(), 0); ceph_assert(r == 0); target_oid = fp_oid; tgt_offset = 0; } else { target_oid = context->prefix+oid_tgt; } std::cout << num << ": " << "set_chunk oid " << oid << " offset: " << offset << " length: " << length << " target oid " << target_oid << " offset: " << tgt_offset << " snap " << snap << std::endl; op.set_chunk(offset, length, context->low_tier_io_ctx, target_oid, tgt_offset, CEPH_OSD_OP_FLAG_WITH_REFERENCE); std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg = new std::pair<TestOp*, TestOp::CallbackInfo*>(this, new TestOp::CallbackInfo(0)); comp = context->rados.aio_create_completion((void*) cb_arg, &write_callback); context->io_ctx.aio_operate(context->prefix+oid, comp, &op, librados::OPERATION_ORDER_READS_WRITES, NULL); if (snap >= 0) { context->io_ctx.snap_set_read(0); } } void _finish(CallbackInfo *info) override { std::lock_guard l{context->state_lock}; if (info->id == 0) { ceph_assert(comp->is_complete()); std::cout << num << ": finishing set_chunk to oid " << oid << std::endl; if ((r = comp->get_return_value())) { if (r == -ENOENT && src_value.deleted()) { std::cout << num << ": got expected ENOENT (src dne)" << std::endl; } else if (r == -ENOENT && context->oid_set_chunk_tgt_pool.find(oid_tgt) != context->oid_set_chunk_tgt_pool.end()) { std::cout << num << ": get expected ENOENT tgt oid " << oid_tgt << std::endl; } else if (r == -ERANGE && src_value.deleted()) { std::cout << num << ": got expected ERANGE (src dne)" << std::endl; } else if (r == -EOPNOTSUPP) { std::cout << "Range is overlapped: oid " << oid << " set_chunk " << oid_tgt << " returned error code " << r << " offset: " << offset << " length: " << length << std::endl; context->update_object_version(oid, comp->get_version64()); } 
else { std::cerr << "Error: oid " << oid << " set_chunk " << oid_tgt << " returned error code " << r << std::endl; ceph_abort(); } } else { if (snap == -1) { ChunkDesc info {tgt_offset, length, oid_tgt}; context->update_object_chunk_target(oid, offset, info); context->update_object_version(oid, comp->get_version64()); } } } if (++done == 1) { context->oid_set_chunk_tgt_pool.insert(oid_tgt); context->oid_in_use.erase(oid); context->oid_not_in_use.insert(oid); context->kick(); } } bool finished() override { return done == 1; } std::string getType() override { return "SetChunkOp"; } }; class SetRedirectOp : public TestOp { public: std::string oid, oid_tgt, tgt_pool_name; ObjectDesc src_value, tgt_value; librados::ObjectWriteOperation op; librados::ObjectReadOperation rd_op; librados::AioCompletion *comp; std::shared_ptr<int> in_use; int done; int r; SetRedirectOp(int n, RadosTestContext *context, const std::string &oid, const std::string &oid_tgt, const std::string &tgt_pool_name, TestOpStat *stat = 0) : TestOp(n, context, stat), oid(oid), oid_tgt(oid_tgt), tgt_pool_name(tgt_pool_name), comp(NULL), done(0), r(0) {} void _begin() override { std::lock_guard l{context->state_lock}; context->oid_in_use.insert(oid); context->oid_not_in_use.erase(oid); context->oid_redirect_in_use.insert(oid_tgt); context->oid_redirect_not_in_use.erase(oid_tgt); if (tgt_pool_name.empty()) ceph_abort(); context->find_object(oid, &src_value); if(!context->redirect_objs[oid].empty()) { /* copy_from oid --> oid_tgt */ comp = context->rados.aio_create_completion(); std::string src = context->prefix+oid; op.copy_from(src.c_str(), context->io_ctx, src_value.version, 0); context->low_tier_io_ctx.aio_operate(context->prefix+oid_tgt, comp, &op, librados::OPERATION_ORDER_READS_WRITES); comp->wait_for_complete(); if ((r = comp->get_return_value())) { std::cerr << "Error: oid " << oid << " copy_from " << oid_tgt << " returned error code " << r << std::endl; ceph_abort(); } comp->release(); /* unset 
redirect target */ comp = context->rados.aio_create_completion(); bool present = !src_value.deleted(); op.unset_manifest(); context->io_ctx.aio_operate(context->prefix+oid, comp, &op, librados::OPERATION_ORDER_READS_WRITES | librados::OPERATION_IGNORE_REDIRECT); comp->wait_for_complete(); if ((r = comp->get_return_value())) { if (!(r == -ENOENT && !present) && r != -EOPNOTSUPP) { std::cerr << "r is " << r << " while deleting " << oid << " and present is " << present << std::endl; ceph_abort(); } } comp->release(); context->oid_redirect_not_in_use.insert(context->redirect_objs[oid]); context->oid_redirect_in_use.erase(context->redirect_objs[oid]); } comp = context->rados.aio_create_completion(); rd_op.stat(NULL, NULL, NULL); context->io_ctx.aio_operate(context->prefix+oid, comp, &rd_op, librados::OPERATION_ORDER_READS_WRITES | librados::OPERATION_IGNORE_REDIRECT, NULL); comp->wait_for_complete(); if ((r = comp->get_return_value()) && !src_value.deleted()) { std::cerr << "Error: oid " << oid << " stat returned error code " << r << std::endl; ceph_abort(); } context->update_object_version(oid, comp->get_version64()); comp->release(); comp = context->rados.aio_create_completion(); rd_op.stat(NULL, NULL, NULL); context->low_tier_io_ctx.aio_operate(context->prefix+oid_tgt, comp, &rd_op, librados::OPERATION_ORDER_READS_WRITES | librados::OPERATION_IGNORE_REDIRECT, NULL); comp->wait_for_complete(); if ((r = comp->get_return_value())) { std::cerr << "Error: oid " << oid_tgt << " stat returned error code " << r << std::endl; ceph_abort(); } uint64_t tgt_version = comp->get_version64(); comp->release(); context->find_object(oid, &src_value); if (src_value.version != 0 && !src_value.deleted()) op.assert_version(src_value.version); op.set_redirect(context->prefix+oid_tgt, context->low_tier_io_ctx, tgt_version); std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg = new std::pair<TestOp*, TestOp::CallbackInfo*>(this, new TestOp::CallbackInfo(0)); comp = 
context->rados.aio_create_completion((void*) cb_arg, &write_callback); context->io_ctx.aio_operate(context->prefix+oid, comp, &op, librados::OPERATION_ORDER_READS_WRITES); } void _finish(CallbackInfo *info) override { std::lock_guard l{context->state_lock}; if (info->id == 0) { ceph_assert(comp->is_complete()); std::cout << num << ": finishing set_redirect to oid " << oid << std::endl; if ((r = comp->get_return_value())) { if (r == -ENOENT && src_value.deleted()) { std::cout << num << ": got expected ENOENT (src dne)" << std::endl; } else { std::cerr << "Error: oid " << oid << " set_redirect " << oid_tgt << " returned error code " << r << std::endl; ceph_abort(); } } else { context->update_object_redirect_target(oid, oid_tgt); context->update_object_version(oid, comp->get_version64()); } } if (++done == 1) { context->oid_in_use.erase(oid); context->oid_not_in_use.insert(oid); context->kick(); } } bool finished() override { return done == 1; } std::string getType() override { return "SetRedirectOp"; } }; class UnsetRedirectOp : public TestOp { public: std::string oid; librados::ObjectWriteOperation op; librados::AioCompletion *comp = nullptr; UnsetRedirectOp(int n, RadosTestContext *context, const std::string &oid, TestOpStat *stat = 0) : TestOp(n, context, stat), oid(oid) {} void _begin() override { std::unique_lock state_locker{context->state_lock}; if (context->get_watch_context(oid)) { context->kick(); return; } ObjectDesc contents; context->find_object(oid, &contents); bool present = !contents.deleted(); context->oid_in_use.insert(oid); context->oid_not_in_use.erase(oid); context->seq_num++; context->remove_object(oid); state_locker.unlock(); comp = context->rados.aio_create_completion(); op.remove(); context->io_ctx.aio_operate(context->prefix+oid, comp, &op, librados::OPERATION_ORDER_READS_WRITES | librados::OPERATION_IGNORE_REDIRECT); comp->wait_for_complete(); int r = comp->get_return_value(); if (r && !(r == -ENOENT && !present)) { std::cerr << "r is " << 
r << " while deleting " << oid << " and present is " << present << std::endl; ceph_abort(); } state_locker.lock(); context->oid_in_use.erase(oid); context->oid_not_in_use.insert(oid); if(!context->redirect_objs[oid].empty()) { context->oid_redirect_not_in_use.insert(context->redirect_objs[oid]); context->oid_redirect_in_use.erase(context->redirect_objs[oid]); context->update_object_redirect_target(oid, {}); } context->kick(); } std::string getType() override { return "UnsetRedirectOp"; } }; class TierPromoteOp : public TestOp { public: librados::AioCompletion *completion; librados::ObjectWriteOperation op; std::string oid; std::shared_ptr<int> in_use; ObjectDesc src_value; TierPromoteOp(int n, RadosTestContext *context, const std::string &oid, TestOpStat *stat) : TestOp(n, context, stat), completion(NULL), oid(oid) {} void _begin() override { context->state_lock.lock(); context->oid_in_use.insert(oid); context->oid_not_in_use.erase(oid); context->find_object(oid, &src_value); std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg = new std::pair<TestOp*, TestOp::CallbackInfo*>(this, new TestOp::CallbackInfo(0)); completion = context->rados.aio_create_completion((void *) cb_arg, &write_callback); context->state_lock.unlock(); op.tier_promote(); int r = context->io_ctx.aio_operate(context->prefix+oid, completion, &op); ceph_assert(!r); } void _finish(CallbackInfo *info) override { std::lock_guard l{context->state_lock}; ceph_assert(!done); ceph_assert(completion->is_complete()); ObjectDesc oid_value; context->find_object(oid, &oid_value); int r = completion->get_return_value(); std::cout << num << ": got " << cpp_strerror(r) << std::endl; if (r == 0) { // sucess } else if (r == -ENOENT && src_value.deleted()) { std::cout << num << ": got expected ENOENT (src dne)" << std::endl; } else { ceph_abort_msg("shouldn't happen"); } context->update_object_version(oid, completion->get_version64()); context->find_object(oid, &oid_value); context->oid_in_use.erase(oid); 
context->oid_not_in_use.insert(oid); context->kick(); done = true; } bool finished() override { return done; } std::string getType() override { return "TierPromoteOp"; } }; class TierFlushOp : public TestOp { public: librados::AioCompletion *completion; librados::ObjectReadOperation op; std::string oid; std::shared_ptr<int> in_use; int snap; ObjectDesc src_value; TierFlushOp(int n, RadosTestContext *context, const std::string &oid, TestOpStat *stat) : TestOp(n, context, stat), completion(NULL), oid(oid), snap(-1) {} void _begin() override { context->state_lock.lock(); context->oid_in_use.insert(oid); context->oid_not_in_use.erase(oid); if (0 && !(rand() % 4) && !context->snaps.empty()) { snap = rand_choose(context->snaps)->first; in_use = context->snaps_in_use.lookup_or_create(snap, snap); } else { snap = -1; } std::cout << num << ": tier_flush oid " << oid << " snap " << snap << std::endl; if (snap >= 0) { context->io_ctx.snap_set_read(context->snaps[snap]); } context->find_object(oid, &src_value, snap); std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg = new std::pair<TestOp*, TestOp::CallbackInfo*>(this, new TestOp::CallbackInfo(0)); completion = context->rados.aio_create_completion((void *) cb_arg, &write_callback); context->state_lock.unlock(); op.tier_flush(); unsigned flags = librados::OPERATION_IGNORE_CACHE; int r = context->io_ctx.aio_operate(context->prefix+oid, completion, &op, flags, NULL); ceph_assert(!r); if (snap >= 0) { context->io_ctx.snap_set_read(0); } } void _finish(CallbackInfo *info) override { context->state_lock.lock(); ceph_assert(!done); ceph_assert(completion->is_complete()); int r = completion->get_return_value(); std::cout << num << ": got " << cpp_strerror(r) << std::endl; if (r == 0) { // sucess context->update_object_tier_flushed(oid, snap); context->update_object_version(oid, completion->get_version64(), snap); } else if (r == -EBUSY) { // could fail if snap is not oldest ceph_assert(!context->check_oldest_snap_flushed(oid, snap)); 
} else if (r == -ENOENT) { // could fail if object is removed if (src_value.deleted()) { std::cout << num << ": got expected ENOENT (src dne)" << std::endl; } else { std::cerr << num << ": got unexpected ENOENT" << std::endl; ceph_abort(); } } else { if (r != -ENOENT && src_value.deleted()) { std::cerr << num << ": src dne, but r is not ENOENT" << std::endl; } ceph_abort_msg("shouldn't happen"); } context->oid_in_use.erase(oid); context->oid_not_in_use.insert(oid); context->kick(); done = true; context->state_lock.unlock(); } bool finished() override { return done; } std::string getType() override { return "TierFlushOp"; } }; class TierEvictOp : public TestOp { public: librados::AioCompletion *completion; librados::ObjectReadOperation op; std::string oid; std::shared_ptr<int> in_use; int snap; ObjectDesc src_value; TierEvictOp(int n, RadosTestContext *context, const std::string &oid, TestOpStat *stat) : TestOp(n, context, stat), completion(NULL), oid(oid), snap(-1) {} void _begin() override { context->state_lock.lock(); context->oid_in_use.insert(oid); context->oid_not_in_use.erase(oid); if (0 && !(rand() % 4) && !context->snaps.empty()) { snap = rand_choose(context->snaps)->first; in_use = context->snaps_in_use.lookup_or_create(snap, snap); } else { snap = -1; } std::cout << num << ": tier_evict oid " << oid << " snap " << snap << std::endl; if (snap >= 0) { context->io_ctx.snap_set_read(context->snaps[snap]); } context->find_object(oid, &src_value, snap); std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg = new std::pair<TestOp*, TestOp::CallbackInfo*>(this, new TestOp::CallbackInfo(0)); completion = context->rados.aio_create_completion((void *) cb_arg, &write_callback); context->state_lock.unlock(); op.cache_evict(); int r = context->io_ctx.aio_operate(context->prefix+oid, completion, &op, librados::OPERATION_IGNORE_CACHE, NULL); ceph_assert(!r); if (snap >= 0) { context->io_ctx.snap_set_read(0); } } void _finish(CallbackInfo *info) override { std::lock_guard 
state_locker{context->state_lock}; ceph_assert(!done); ceph_assert(completion->is_complete()); int r = completion->get_return_value(); std::cout << num << ": got " << cpp_strerror(r) << std::endl; if (r == 0) { // ok } else if (r == -EINVAL) { // modifying manifest object makes existing chunk_map clear // as a result, the modified object is no longer manifest object // this casues to return -EINVAL } else if (r == -ENOENT) { // could fail if object is removed if (src_value.deleted()) { std::cout << num << ": got expected ENOENT (src dne)" << std::endl; } else { std::cerr << num << ": got unexpected ENOENT" << std::endl; ceph_abort(); } } else { if (r != -ENOENT && src_value.deleted()) { std::cerr << num << ": src dne, but r is not ENOENT" << std::endl; } ceph_abort_msg("shouldn't happen"); } context->oid_in_use.erase(oid); context->oid_not_in_use.insert(oid); context->kick(); done = true; } bool finished() override { return done; } std::string getType() override { return "TierEvictOp"; } }; class HitSetListOp : public TestOp { librados::AioCompletion *comp1, *comp2; uint32_t hash; std::list< std::pair<time_t, time_t> > ls; bufferlist bl; public: HitSetListOp(int n, RadosTestContext *context, uint32_t hash, TestOpStat *stat = 0) : TestOp(n, context, stat), comp1(NULL), comp2(NULL), hash(hash) {} void _begin() override { std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg = new std::pair<TestOp*, TestOp::CallbackInfo*>(this, new TestOp::CallbackInfo(0)); comp1 = context->rados.aio_create_completion((void*) cb_arg, &write_callback); int r = context->io_ctx.hit_set_list(hash, comp1, &ls); ceph_assert(r == 0); } void _finish(CallbackInfo *info) override { std::lock_guard l{context->state_lock}; if (!comp2) { if (ls.empty()) { std::cerr << num << ": no hitsets" << std::endl; done = true; } else { std::cerr << num << ": hitsets are " << ls << std::endl; int r = rand() % ls.size(); auto p = ls.begin(); while (r--) ++p; auto cb_arg = new std::pair<TestOp*, 
TestOp::CallbackInfo*>( this, new TestOp::CallbackInfo(0)); comp2 = context->rados.aio_create_completion((void*) cb_arg, &write_callback); r = context->io_ctx.hit_set_get(hash, comp2, p->second, &bl); ceph_assert(r == 0); } } else { int r = comp2->get_return_value(); if (r == 0) { HitSet hitset; auto p = bl.cbegin(); decode(hitset, p); std::cout << num << ": got hitset of type " << hitset.get_type_name() << " size " << bl.length() << std::endl; } else { // FIXME: we could verify that we did in fact race with a trim... ceph_assert(r == -ENOENT); } done = true; } context->kick(); } bool finished() override { return done; } std::string getType() override { return "HitSetListOp"; } }; class UndirtyOp : public TestOp { public: librados::AioCompletion *completion; librados::ObjectWriteOperation op; std::string oid; UndirtyOp(int n, RadosTestContext *context, const std::string &oid, TestOpStat *stat = 0) : TestOp(n, context, stat), completion(NULL), oid(oid) {} void _begin() override { context->state_lock.lock(); std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg = new std::pair<TestOp*, TestOp::CallbackInfo*>(this, new TestOp::CallbackInfo(0)); completion = context->rados.aio_create_completion((void *) cb_arg, &write_callback); context->oid_in_use.insert(oid); context->oid_not_in_use.erase(oid); context->update_object_undirty(oid); context->state_lock.unlock(); op.undirty(); int r = context->io_ctx.aio_operate(context->prefix+oid, completion, &op, 0); ceph_assert(!r); } void _finish(CallbackInfo *info) override { std::lock_guard state_locker{context->state_lock}; ceph_assert(!done); ceph_assert(completion->is_complete()); context->oid_in_use.erase(oid); context->oid_not_in_use.insert(oid); context->update_object_version(oid, completion->get_version64()); context->kick(); done = true; } bool finished() override { return done; } std::string getType() override { return "UndirtyOp"; } }; class IsDirtyOp : public TestOp { public: librados::AioCompletion *completion; 
librados::ObjectReadOperation op; std::string oid; bool dirty; ObjectDesc old_value; int snap = 0; std::shared_ptr<int> in_use; IsDirtyOp(int n, RadosTestContext *context, const std::string &oid, TestOpStat *stat = 0) : TestOp(n, context, stat), completion(NULL), oid(oid), dirty(false) {} void _begin() override { context->state_lock.lock(); if (!(rand() % 4) && !context->snaps.empty()) { snap = rand_choose(context->snaps)->first; in_use = context->snaps_in_use.lookup_or_create(snap, snap); } else { snap = -1; } std::cout << num << ": is_dirty oid " << oid << " snap " << snap << std::endl; std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg = new std::pair<TestOp*, TestOp::CallbackInfo*>(this, new TestOp::CallbackInfo(0)); completion = context->rados.aio_create_completion((void *) cb_arg, &write_callback); context->oid_in_use.insert(oid); context->oid_not_in_use.erase(oid); context->state_lock.unlock(); if (snap >= 0) { context->io_ctx.snap_set_read(context->snaps[snap]); } op.is_dirty(&dirty, NULL); int r = context->io_ctx.aio_operate(context->prefix+oid, completion, &op, 0); ceph_assert(!r); if (snap >= 0) { context->io_ctx.snap_set_read(0); } } void _finish(CallbackInfo *info) override { std::lock_guard state_locker{context->state_lock}; ceph_assert(!done); ceph_assert(completion->is_complete()); context->oid_in_use.erase(oid); context->oid_not_in_use.insert(oid); ceph_assert(context->find_object(oid, &old_value, snap)); int r = completion->get_return_value(); if (r == 0) { std::cout << num << ": " << (dirty ? 
"dirty" : "clean") << std::endl; ceph_assert(!old_value.deleted()); ceph_assert(dirty == old_value.dirty); } else { std::cout << num << ": got " << r << std::endl; ceph_assert(r == -ENOENT); ceph_assert(old_value.deleted()); } context->kick(); done = true; } bool finished() override { return done; } std::string getType() override { return "IsDirtyOp"; } }; class CacheFlushOp : public TestOp { public: librados::AioCompletion *completion; librados::ObjectReadOperation op; std::string oid; bool blocking; int snap; bool can_fail; std::shared_ptr<int> in_use; CacheFlushOp(int n, RadosTestContext *context, const std::string &oid, TestOpStat *stat, bool b) : TestOp(n, context, stat), completion(NULL), oid(oid), blocking(b), snap(0), can_fail(false) {} void _begin() override { context->state_lock.lock(); if (!(rand() % 4) && !context->snaps.empty()) { snap = rand_choose(context->snaps)->first; in_use = context->snaps_in_use.lookup_or_create(snap, snap); } else { snap = -1; } // not being particularly specific here about knowing which // flushes are on the oldest clean snap and which ones are not. can_fail = !blocking || !context->snaps.empty(); // FIXME: we could fail if we've ever removed a snap due to // the async snap trimming. can_fail = true; std::cout << num << ": " << (blocking ? 
"cache_flush" : "cache_try_flush") << " oid " << oid << " snap " << snap << std::endl; if (snap >= 0) { context->io_ctx.snap_set_read(context->snaps[snap]); } std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg = new std::pair<TestOp*, TestOp::CallbackInfo*>(this, new TestOp::CallbackInfo(0)); completion = context->rados.aio_create_completion((void *) cb_arg, &write_callback); context->oid_flushing.insert(oid); context->oid_not_flushing.erase(oid); context->state_lock.unlock(); unsigned flags = librados::OPERATION_IGNORE_CACHE; if (blocking) { op.cache_flush(); } else { op.cache_try_flush(); flags = librados::OPERATION_SKIPRWLOCKS; } int r = context->io_ctx.aio_operate(context->prefix+oid, completion, &op, flags, NULL); ceph_assert(!r); if (snap >= 0) { context->io_ctx.snap_set_read(0); } } void _finish(CallbackInfo *info) override { std::lock_guard state_locker{context->state_lock}; ceph_assert(!done); ceph_assert(completion->is_complete()); context->oid_flushing.erase(oid); context->oid_not_flushing.insert(oid); int r = completion->get_return_value(); std::cout << num << ": got " << cpp_strerror(r) << std::endl; if (r == 0) { context->update_object_version(oid, 0, snap); } else if (r == -EBUSY) { ceph_assert(can_fail); } else if (r == -EINVAL) { // caching not enabled? } else if (r == -ENOENT) { // may have raced with a remove? 
} else { ceph_abort_msg("shouldn't happen"); } context->kick(); done = true; } bool finished() override { return done; } std::string getType() override { return "CacheFlushOp"; } }; class CacheEvictOp : public TestOp { public: librados::AioCompletion *completion; librados::ObjectReadOperation op; std::string oid; std::shared_ptr<int> in_use; CacheEvictOp(int n, RadosTestContext *context, const std::string &oid, TestOpStat *stat) : TestOp(n, context, stat), completion(NULL), oid(oid) {} void _begin() override { context->state_lock.lock(); int snap; if (!(rand() % 4) && !context->snaps.empty()) { snap = rand_choose(context->snaps)->first; in_use = context->snaps_in_use.lookup_or_create(snap, snap); } else { snap = -1; } std::cout << num << ": cache_evict oid " << oid << " snap " << snap << std::endl; if (snap >= 0) { context->io_ctx.snap_set_read(context->snaps[snap]); } std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg = new std::pair<TestOp*, TestOp::CallbackInfo*>(this, new TestOp::CallbackInfo(0)); completion = context->rados.aio_create_completion((void *) cb_arg, &write_callback); context->state_lock.unlock(); op.cache_evict(); int r = context->io_ctx.aio_operate(context->prefix+oid, completion, &op, librados::OPERATION_IGNORE_CACHE, NULL); ceph_assert(!r); if (snap >= 0) { context->io_ctx.snap_set_read(0); } } void _finish(CallbackInfo *info) override { std::lock_guard state_locker{context->state_lock}; ceph_assert(!done); ceph_assert(completion->is_complete()); int r = completion->get_return_value(); std::cout << num << ": got " << cpp_strerror(r) << std::endl; if (r == 0) { // yay! } else if (r == -EBUSY) { // raced with something that dirtied the object } else if (r == -EINVAL) { // caching not enabled? } else if (r == -ENOENT) { // may have raced with a remove? } else { ceph_abort_msg("shouldn't happen"); } context->kick(); done = true; } bool finished() override { return done; } std::string getType() override { return "CacheEvictOp"; } }; #endif
98,817
27.065322
115
h
null
ceph-main/src/test/osd/TestECBackend.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2013 Inktank Storage, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <iostream> #include <sstream> #include <errno.h> #include <signal.h> #include "osd/ECBackend.h" #include "gtest/gtest.h" using namespace std; TEST(ECUtil, stripe_info_t) { const uint64_t swidth = 4096; const uint64_t ssize = 4; ECUtil::stripe_info_t s(ssize, swidth); ASSERT_EQ(s.get_stripe_width(), swidth); ASSERT_EQ(s.logical_to_next_chunk_offset(0), 0u); ASSERT_EQ(s.logical_to_next_chunk_offset(1), s.get_chunk_size()); ASSERT_EQ(s.logical_to_next_chunk_offset(swidth - 1), s.get_chunk_size()); ASSERT_EQ(s.logical_to_prev_chunk_offset(0), 0u); ASSERT_EQ(s.logical_to_prev_chunk_offset(swidth), s.get_chunk_size()); ASSERT_EQ(s.logical_to_prev_chunk_offset((swidth * 2) - 1), s.get_chunk_size()); ASSERT_EQ(s.logical_to_next_stripe_offset(0), 0u); ASSERT_EQ(s.logical_to_next_stripe_offset(swidth - 1), s.get_stripe_width()); ASSERT_EQ(s.logical_to_prev_stripe_offset(swidth), s.get_stripe_width()); ASSERT_EQ(s.logical_to_prev_stripe_offset(swidth), s.get_stripe_width()); ASSERT_EQ(s.logical_to_prev_stripe_offset((swidth * 2) - 1), s.get_stripe_width()); ASSERT_EQ(s.aligned_logical_offset_to_chunk_offset(2*swidth), 2*s.get_chunk_size()); ASSERT_EQ(s.aligned_chunk_offset_to_logical_offset(2*s.get_chunk_size()), 2*s.get_stripe_width()); ASSERT_EQ(s.aligned_offset_len_to_chunk(make_pair(swidth, 10*swidth)), make_pair(s.get_chunk_size(), 10*s.get_chunk_size())); ASSERT_EQ(s.offset_len_to_stripe_bounds(make_pair(swidth-10, (uint64_t)20)), make_pair((uint64_t)0, 2*swidth)); }
2,005
30.84127
78
cc
null
ceph-main/src/test/osd/TestMClockScheduler.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- #include <chrono> #include "gtest/gtest.h" #include "global/global_context.h" #include "global/global_init.h" #include "common/common_init.h" #include "osd/scheduler/mClockScheduler.h" #include "osd/scheduler/OpSchedulerItem.h" using namespace ceph::osd::scheduler; int main(int argc, char **argv) { std::vector<const char*> args(argv, argv+argc); auto cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_OSD, CODE_ENVIRONMENT_UTILITY, CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); common_init_finish(g_ceph_context); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } class mClockSchedulerTest : public testing::Test { public: int whoami; uint32_t num_shards; int shard_id; bool is_rotational; MonClient *monc; mClockScheduler q; uint64_t client1; uint64_t client2; uint64_t client3; mClockSchedulerTest() : whoami(0), num_shards(1), shard_id(0), is_rotational(false), monc(nullptr), q(g_ceph_context, whoami, num_shards, shard_id, is_rotational, monc), client1(1001), client2(9999), client3(100000001) {} struct MockDmclockItem : public PGOpQueueable { op_scheduler_class scheduler_class; MockDmclockItem(op_scheduler_class _scheduler_class) : PGOpQueueable(spg_t()), scheduler_class(_scheduler_class) {} MockDmclockItem() : MockDmclockItem(op_scheduler_class::background_best_effort) {} ostream &print(ostream &rhs) const final { return rhs; } std::string print() const final { return std::string(); } std::optional<OpRequestRef> maybe_get_op() const final { return std::nullopt; } op_scheduler_class get_scheduler_class() const final { return scheduler_class; } void run(OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) final {} }; }; template <typename... Args> OpSchedulerItem create_item( epoch_t e, uint64_t owner, Args&&... args) { return OpSchedulerItem( std::make_unique<mClockSchedulerTest::MockDmclockItem>( std::forward<Args>(args)...), 12, 12, utime_t(), owner, e); } template <typename... 
Args> OpSchedulerItem create_high_prio_item( unsigned priority, epoch_t e, uint64_t owner, Args&&... args) { // Create high priority item for testing high prio queue return OpSchedulerItem( std::make_unique<mClockSchedulerTest::MockDmclockItem>( std::forward<Args>(args)...), 12, priority, utime_t(), owner, e); } OpSchedulerItem get_item(WorkItem item) { return std::move(std::get<OpSchedulerItem>(item)); } TEST_F(mClockSchedulerTest, TestEmpty) { ASSERT_TRUE(q.empty()); for (unsigned i = 100; i < 105; i+=2) { q.enqueue(create_item(i, client1, op_scheduler_class::client)); std::this_thread::sleep_for(std::chrono::microseconds(1)); } ASSERT_FALSE(q.empty()); std::list<OpSchedulerItem> reqs; reqs.push_back(get_item(q.dequeue())); reqs.push_back(get_item(q.dequeue())); ASSERT_EQ(2u, reqs.size()); ASSERT_FALSE(q.empty()); for (auto &&i : reqs) { q.enqueue_front(std::move(i)); } reqs.clear(); ASSERT_FALSE(q.empty()); for (int i = 0; i < 3; ++i) { ASSERT_FALSE(q.empty()); q.dequeue(); } ASSERT_TRUE(q.empty()); } TEST_F(mClockSchedulerTest, TestSingleClientOrderedEnqueueDequeue) { ASSERT_TRUE(q.empty()); for (unsigned i = 100; i < 105; ++i) { q.enqueue(create_item(i, client1, op_scheduler_class::client)); std::this_thread::sleep_for(std::chrono::microseconds(1)); } auto r = get_item(q.dequeue()); ASSERT_EQ(100u, r.get_map_epoch()); r = get_item(q.dequeue()); ASSERT_EQ(101u, r.get_map_epoch()); r = get_item(q.dequeue()); ASSERT_EQ(102u, r.get_map_epoch()); r = get_item(q.dequeue()); ASSERT_EQ(103u, r.get_map_epoch()); r = get_item(q.dequeue()); ASSERT_EQ(104u, r.get_map_epoch()); } TEST_F(mClockSchedulerTest, TestMultiClientOrderedEnqueueDequeue) { const unsigned NUM = 1000; for (unsigned i = 0; i < NUM; ++i) { for (auto &&c: {client1, client2, client3}) { q.enqueue(create_item(i, c)); std::this_thread::sleep_for(std::chrono::microseconds(1)); } } std::map<uint64_t, epoch_t> next; for (auto &&c: {client1, client2, client3}) { next[c] = 0; } for (unsigned i = 0; i < NUM * 3; 
++i) { ASSERT_FALSE(q.empty()); auto r = get_item(q.dequeue()); auto owner = r.get_owner(); auto niter = next.find(owner); ASSERT_FALSE(niter == next.end()); ASSERT_EQ(niter->second, r.get_map_epoch()); niter->second++; } ASSERT_TRUE(q.empty()); } TEST_F(mClockSchedulerTest, TestHighPriorityQueueEnqueueDequeue) { ASSERT_TRUE(q.empty()); for (unsigned i = 200; i < 205; ++i) { q.enqueue(create_high_prio_item(i, i, client1, op_scheduler_class::client)); std::this_thread::sleep_for(std::chrono::milliseconds(1)); } ASSERT_FALSE(q.empty()); // Higher priority ops should be dequeued first auto r = get_item(q.dequeue()); ASSERT_EQ(204u, r.get_map_epoch()); r = get_item(q.dequeue()); ASSERT_EQ(203u, r.get_map_epoch()); r = get_item(q.dequeue()); ASSERT_EQ(202u, r.get_map_epoch()); r = get_item(q.dequeue()); ASSERT_EQ(201u, r.get_map_epoch()); r = get_item(q.dequeue()); ASSERT_EQ(200u, r.get_map_epoch()); ASSERT_TRUE(q.empty()); } TEST_F(mClockSchedulerTest, TestAllQueuesEnqueueDequeue) { ASSERT_TRUE(q.empty()); // Insert ops into the mClock queue for (unsigned i = 100; i < 102; ++i) { q.enqueue(create_item(i, client1, op_scheduler_class::client)); std::this_thread::sleep_for(std::chrono::microseconds(1)); } // Insert Immediate ops for (unsigned i = 103; i < 105; ++i) { q.enqueue(create_item(i, client1, op_scheduler_class::immediate)); std::this_thread::sleep_for(std::chrono::microseconds(1)); } // Insert ops into the high queue for (unsigned i = 200; i < 202; ++i) { q.enqueue(create_high_prio_item(i, i, client1, op_scheduler_class::client)); std::this_thread::sleep_for(std::chrono::milliseconds(1)); } ASSERT_FALSE(q.empty()); auto r = get_item(q.dequeue()); // Ops classified as Immediate should be dequeued first ASSERT_EQ(103u, r.get_map_epoch()); r = get_item(q.dequeue()); ASSERT_EQ(104u, r.get_map_epoch()); // High priority queue should be dequeued second // higher priority operation first r = get_item(q.dequeue()); ASSERT_EQ(201u, r.get_map_epoch()); r = 
get_item(q.dequeue()); ASSERT_EQ(200u, r.get_map_epoch()); // mClock queue will be dequeued last r = get_item(q.dequeue()); ASSERT_EQ(100u, r.get_map_epoch()); r = get_item(q.dequeue()); ASSERT_EQ(101u, r.get_map_epoch()); ASSERT_TRUE(q.empty()); }
6,769
24.938697
89
cc
null
ceph-main/src/test/osd/TestOSDMap.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- #include "gtest/gtest.h" #include "osd/OSDMap.h" #include "osd/OSDMapMapping.h" #include "mon/OSDMonitor.h" #include "mon/PGMap.h" #include "global/global_context.h" #include "global/global_init.h" #include "common/common_init.h" #include "common/ceph_argparse.h" #include "common/ceph_json.h" #include <iostream> #include <cmath> using namespace std; int main(int argc, char **argv) { map<string,string> defaults = { // make sure we have 3 copies, or some tests won't work { "osd_pool_default_size", "3" }, // our map is flat, so just try and split across OSDs, not hosts or whatever { "osd_crush_chooseleaf_type", "0" }, }; std::vector<const char*> args(argv, argv+argc); auto cct = global_init(&defaults, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); common_init_finish(g_ceph_context); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } class OSDMapTest : public testing::Test, public ::testing::WithParamInterface<std::pair<int, int>> { int num_osds = 6; public: OSDMap osdmap; OSDMapMapping mapping; const uint64_t my_ec_pool = 1; const uint64_t my_rep_pool = 2; // Blacklist testing lists // I pulled the first two ranges and their start/end points from // https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation static const string range_addrs[]; static const string ip_addrs[]; static const string unblocked_ip_addrs[]; const string EC_RULE_NAME = "erasure"; OSDMapTest() {} void set_up_map(int new_num_osds = 6, bool no_default_pools = false) { num_osds = new_num_osds; uuid_d fsid; osdmap.build_simple(g_ceph_context, 0, fsid, num_osds); OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1); pending_inc.fsid = osdmap.get_fsid(); entity_addrvec_t sample_addrs; sample_addrs.v.push_back(entity_addr_t()); uuid_d sample_uuid; for (int i = 0; i < num_osds; ++i) { sample_uuid.generate_random(); sample_addrs.v[0].nonce = i; 
pending_inc.new_state[i] = CEPH_OSD_EXISTS | CEPH_OSD_NEW; pending_inc.new_up_client[i] = sample_addrs; pending_inc.new_up_cluster[i] = sample_addrs; pending_inc.new_hb_back_up[i] = sample_addrs; pending_inc.new_hb_front_up[i] = sample_addrs; pending_inc.new_weight[i] = CEPH_OSD_IN; pending_inc.new_uuid[i] = sample_uuid; } osdmap.apply_incremental(pending_inc); if (no_default_pools) // do not create any default pool(s) return; OSDMap::Incremental new_pool_inc(osdmap.get_epoch() + 1); new_pool_inc.new_pool_max = osdmap.get_pool_max(); new_pool_inc.fsid = osdmap.get_fsid(); // make an ec pool set_ec_pool("ec", new_pool_inc); // and a replicated pool set_rep_pool("reppool",new_pool_inc); osdmap.apply_incremental(new_pool_inc); } int get_ec_crush_rule() { int r = osdmap.crush->get_rule_id(EC_RULE_NAME); if (r < 0) { r = osdmap.crush->add_simple_rule( EC_RULE_NAME, "default", "osd", "", "indep", pg_pool_t::TYPE_ERASURE, &cerr); } return r; } uint64_t set_ec_pool(const string &name, OSDMap::Incremental &new_pool_inc, bool assert_pool_id = true) { pg_pool_t empty; uint64_t pool_id = ++new_pool_inc.new_pool_max; if (assert_pool_id) ceph_assert(pool_id == my_ec_pool); pg_pool_t *p = new_pool_inc.get_new_pool(pool_id, &empty); p->size = 3; p->set_pg_num(64); p->set_pgp_num(64); p->type = pg_pool_t::TYPE_ERASURE; p->crush_rule = get_ec_crush_rule(); new_pool_inc.new_pool_names[pool_id] = name;//"ec"; return pool_id; } uint64_t set_rep_pool(const string name, OSDMap::Incremental &new_pool_inc, bool assert_pool_id = true) { pg_pool_t empty; uint64_t pool_id = ++new_pool_inc.new_pool_max; if (assert_pool_id) ceph_assert(pool_id == my_rep_pool); pg_pool_t *p = new_pool_inc.get_new_pool(pool_id, &empty); p->size = 3; p->set_pg_num(64); p->set_pgp_num(64); p->type = pg_pool_t::TYPE_REPLICATED; p->crush_rule = 0; p->set_flag(pg_pool_t::FLAG_HASHPSPOOL); new_pool_inc.new_pool_names[pool_id] = name;//"reppool"; return pool_id; } unsigned int get_num_osds() { return num_osds; } void 
get_crush(const OSDMap& tmap, CrushWrapper& newcrush) { bufferlist bl; tmap.crush->encode(bl, CEPH_FEATURES_SUPPORTED_DEFAULT); auto p = bl.cbegin(); newcrush.decode(p); } int crush_move(OSDMap& tmap, const string &name, const vector<string> &argvec) { map<string,string> loc; CrushWrapper::parse_loc_map(argvec, &loc); CrushWrapper newcrush; get_crush(tmap, newcrush); if (!newcrush.name_exists(name)) { return -ENOENT; } int id = newcrush.get_item_id(name); int err; if (!newcrush.check_item_loc(g_ceph_context, id, loc, (int *)NULL)) { if (id >= 0) { err = newcrush.create_or_move_item(g_ceph_context, id, 0, name, loc); } else { err = newcrush.move_bucket(g_ceph_context, id, loc); } if (err >= 0) { OSDMap::Incremental pending_inc(tmap.get_epoch() + 1); pending_inc.crush.clear(); newcrush.encode(pending_inc.crush, CEPH_FEATURES_SUPPORTED_DEFAULT); tmap.apply_incremental(pending_inc); err = 0; } } else { // already there err = 0; } return err; } int crush_rule_create_replicated(const string &name, const string &root, const string &type) { if (osdmap.crush->rule_exists(name)) { return osdmap.crush->get_rule_id(name); } CrushWrapper newcrush; get_crush(osdmap, newcrush); string device_class; stringstream ss; int ruleno = newcrush.add_simple_rule( name, root, type, device_class, "firstn", pg_pool_t::TYPE_REPLICATED, &ss); if (ruleno >= 0) { OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1); pending_inc.crush.clear(); newcrush.encode(pending_inc.crush, CEPH_FEATURES_SUPPORTED_DEFAULT); osdmap.apply_incremental(pending_inc); } return ruleno; } void test_mappings(int pool, int num, vector<int> *any, vector<int> *first, vector<int> *primary) { mapping.update(osdmap); for (int i=0; i<num; ++i) { vector<int> up, acting; int up_primary, acting_primary; pg_t pgid(i, pool); osdmap.pg_to_up_acting_osds(pgid, &up, &up_primary, &acting, &acting_primary); for (unsigned j=0; j<acting.size(); ++j) (*any)[acting[j]]++; if (!acting.empty()) (*first)[acting[0]]++; if (acting_primary >= 
0) (*primary)[acting_primary]++; // compare to precalc mapping vector<int> up2, acting2; int up_primary2, acting_primary2; pgid = osdmap.raw_pg_to_pg(pgid); mapping.get(pgid, &up2, &up_primary2, &acting2, &acting_primary2); ASSERT_EQ(up, up2); ASSERT_EQ(up_primary, up_primary2); ASSERT_EQ(acting, acting2); ASSERT_EQ(acting_primary, acting_primary2); } cout << "any: " << *any << std::endl;; cout << "first: " << *first << std::endl;; cout << "primary: " << *primary << std::endl;; } void clean_pg_upmaps(CephContext *cct, const OSDMap& om, OSDMap::Incremental& pending_inc) { int cpu_num = 8; int pgs_per_chunk = 256; ThreadPool tp(cct, "BUG_40104::clean_upmap_tp", "clean_upmap_tp", cpu_num); tp.start(); ParallelPGMapper mapper(cct, &tp); vector<pg_t> pgs_to_check; om.get_upmap_pgs(&pgs_to_check); OSDMonitor::CleanUpmapJob job(cct, om, pending_inc); mapper.queue(&job, pgs_per_chunk, pgs_to_check); job.wait(); tp.stop(); } void set_primary_affinity_all(float pa) { for (uint i = 0 ; i < get_num_osds() ; i++) { osdmap.set_primary_affinity(i, int(pa * CEPH_OSD_MAX_PRIMARY_AFFINITY)); } } bool score_in_range(float score, uint nosds = 0) { if (nosds == 0) { nosds = get_num_osds(); } return score >= 1.0 && score <= float(nosds); } }; TEST_F(OSDMapTest, Create) { set_up_map(); ASSERT_EQ(get_num_osds(), (unsigned)osdmap.get_max_osd()); ASSERT_EQ(get_num_osds(), osdmap.get_num_in_osds()); } TEST_F(OSDMapTest, Features) { // with EC pool set_up_map(); uint64_t features = osdmap.get_features(CEPH_ENTITY_TYPE_OSD, NULL); ASSERT_TRUE(features & CEPH_FEATURE_CRUSH_TUNABLES); ASSERT_TRUE(features & CEPH_FEATURE_CRUSH_TUNABLES2); ASSERT_TRUE(features & CEPH_FEATURE_CRUSH_TUNABLES3); ASSERT_TRUE(features & CEPH_FEATURE_CRUSH_V2); ASSERT_TRUE(features & CEPH_FEATURE_OSDHASHPSPOOL); ASSERT_TRUE(features & CEPH_FEATURE_OSD_PRIMARY_AFFINITY); // clients have a slightly different view features = osdmap.get_features(CEPH_ENTITY_TYPE_CLIENT, NULL); ASSERT_TRUE(features & 
CEPH_FEATURE_CRUSH_TUNABLES); ASSERT_TRUE(features & CEPH_FEATURE_CRUSH_TUNABLES2); ASSERT_TRUE(features & CEPH_FEATURE_CRUSH_TUNABLES3); ASSERT_TRUE(features & CEPH_FEATURE_CRUSH_V2); ASSERT_TRUE(features & CEPH_FEATURE_OSDHASHPSPOOL); ASSERT_TRUE(features & CEPH_FEATURE_OSD_PRIMARY_AFFINITY); // remove teh EC pool, but leave the rule. add primary affinity. { OSDMap::Incremental new_pool_inc(osdmap.get_epoch() + 1); new_pool_inc.old_pools.insert(osdmap.lookup_pg_pool_name("ec")); new_pool_inc.new_primary_affinity[0] = 0x8000; osdmap.apply_incremental(new_pool_inc); } features = osdmap.get_features(CEPH_ENTITY_TYPE_MON, NULL); ASSERT_TRUE(features & CEPH_FEATURE_CRUSH_TUNABLES); ASSERT_TRUE(features & CEPH_FEATURE_CRUSH_TUNABLES2); ASSERT_TRUE(features & CEPH_FEATURE_CRUSH_TUNABLES3); // shared bit with primary affinity ASSERT_FALSE(features & CEPH_FEATURE_CRUSH_V2); ASSERT_TRUE(features & CEPH_FEATURE_OSDHASHPSPOOL); ASSERT_TRUE(features & CEPH_FEATURE_OSD_PRIMARY_AFFINITY); // FIXME: test tiering feature bits } TEST_F(OSDMapTest, MapPG) { set_up_map(); std::cerr << " osdmap.pool_max==" << osdmap.get_pool_max() << std::endl; pg_t rawpg(0, my_rep_pool); pg_t pgid = osdmap.raw_pg_to_pg(rawpg); vector<int> up_osds, acting_osds; int up_primary, acting_primary; osdmap.pg_to_up_acting_osds(pgid, &up_osds, &up_primary, &acting_osds, &acting_primary); vector<int> old_up_osds, old_acting_osds; osdmap.pg_to_up_acting_osds(pgid, old_up_osds, old_acting_osds); ASSERT_EQ(old_up_osds, up_osds); ASSERT_EQ(old_acting_osds, acting_osds); ASSERT_EQ(osdmap.get_pg_pool(my_rep_pool)->get_size(), up_osds.size()); } TEST_F(OSDMapTest, MapFunctionsMatch) { // TODO: make sure pg_to_up_acting_osds and pg_to_acting_osds match set_up_map(); pg_t rawpg(0, my_rep_pool); pg_t pgid = osdmap.raw_pg_to_pg(rawpg); vector<int> up_osds, acting_osds; int up_primary, acting_primary; osdmap.pg_to_up_acting_osds(pgid, &up_osds, &up_primary, &acting_osds, &acting_primary); vector<int> up_osds_two, 
acting_osds_two; osdmap.pg_to_up_acting_osds(pgid, up_osds_two, acting_osds_two); ASSERT_EQ(up_osds, up_osds_two); ASSERT_EQ(acting_osds, acting_osds_two); int acting_primary_two; osdmap.pg_to_acting_osds(pgid, &acting_osds_two, &acting_primary_two); EXPECT_EQ(acting_osds, acting_osds_two); EXPECT_EQ(acting_primary, acting_primary_two); osdmap.pg_to_acting_osds(pgid, acting_osds_two); EXPECT_EQ(acting_osds, acting_osds_two); } /** This test must be removed or modified appropriately when we allow * other ways to specify a primary. */ TEST_F(OSDMapTest, PrimaryIsFirst) { set_up_map(); pg_t rawpg(0, my_rep_pool); pg_t pgid = osdmap.raw_pg_to_pg(rawpg); vector<int> up_osds, acting_osds; int up_primary, acting_primary; osdmap.pg_to_up_acting_osds(pgid, &up_osds, &up_primary, &acting_osds, &acting_primary); EXPECT_EQ(up_osds[0], up_primary); EXPECT_EQ(acting_osds[0], acting_primary); } TEST_F(OSDMapTest, PGTempRespected) { set_up_map(); pg_t rawpg(0, my_rep_pool); pg_t pgid = osdmap.raw_pg_to_pg(rawpg); vector<int> up_osds, acting_osds; int up_primary, acting_primary; osdmap.pg_to_up_acting_osds(pgid, &up_osds, &up_primary, &acting_osds, &acting_primary); // copy and swap first and last element in acting_osds vector<int> new_acting_osds(acting_osds); int first = new_acting_osds[0]; new_acting_osds[0] = *new_acting_osds.rbegin(); *new_acting_osds.rbegin() = first; // apply pg_temp to osdmap OSDMap::Incremental pgtemp_map(osdmap.get_epoch() + 1); pgtemp_map.new_pg_temp[pgid] = mempool::osdmap::vector<int>( new_acting_osds.begin(), new_acting_osds.end()); osdmap.apply_incremental(pgtemp_map); osdmap.pg_to_up_acting_osds(pgid, &up_osds, &up_primary, &acting_osds, &acting_primary); EXPECT_EQ(new_acting_osds, acting_osds); } TEST_F(OSDMapTest, PrimaryTempRespected) { set_up_map(); pg_t rawpg(0, my_rep_pool); pg_t pgid = osdmap.raw_pg_to_pg(rawpg); vector<int> up_osds; vector<int> acting_osds; int up_primary, acting_primary; osdmap.pg_to_up_acting_osds(pgid, &up_osds, 
&up_primary, &acting_osds, &acting_primary); // make second OSD primary via incremental OSDMap::Incremental pgtemp_map(osdmap.get_epoch() + 1); pgtemp_map.new_primary_temp[pgid] = acting_osds[1]; osdmap.apply_incremental(pgtemp_map); osdmap.pg_to_up_acting_osds(pgid, &up_osds, &up_primary, &acting_osds, &acting_primary); EXPECT_EQ(acting_primary, acting_osds[1]); } TEST_F(OSDMapTest, CleanTemps) { set_up_map(); OSDMap::Incremental pgtemp_map(osdmap.get_epoch() + 1); OSDMap::Incremental pending_inc(osdmap.get_epoch() + 2); pg_t pga = osdmap.raw_pg_to_pg(pg_t(0, my_rep_pool)); { vector<int> up_osds, acting_osds; int up_primary, acting_primary; osdmap.pg_to_up_acting_osds(pga, &up_osds, &up_primary, &acting_osds, &acting_primary); pgtemp_map.new_pg_temp[pga] = mempool::osdmap::vector<int>( up_osds.begin(), up_osds.end()); pgtemp_map.new_primary_temp[pga] = up_primary; } pg_t pgb = osdmap.raw_pg_to_pg(pg_t(1, my_rep_pool)); { vector<int> up_osds, acting_osds; int up_primary, acting_primary; osdmap.pg_to_up_acting_osds(pgb, &up_osds, &up_primary, &acting_osds, &acting_primary); pending_inc.new_pg_temp[pgb] = mempool::osdmap::vector<int>( up_osds.begin(), up_osds.end()); pending_inc.new_primary_temp[pgb] = up_primary; } osdmap.apply_incremental(pgtemp_map); OSDMap tmpmap; tmpmap.deepish_copy_from(osdmap); tmpmap.apply_incremental(pending_inc); OSDMap::clean_temps(g_ceph_context, osdmap, tmpmap, &pending_inc); EXPECT_TRUE(pending_inc.new_pg_temp.count(pga) && pending_inc.new_pg_temp[pga].size() == 0); EXPECT_EQ(-1, pending_inc.new_primary_temp[pga]); EXPECT_TRUE(!pending_inc.new_pg_temp.count(pgb) && !pending_inc.new_primary_temp.count(pgb)); } TEST_F(OSDMapTest, KeepsNecessaryTemps) { set_up_map(); pg_t rawpg(0, my_rep_pool); pg_t pgid = osdmap.raw_pg_to_pg(rawpg); vector<int> up_osds, acting_osds; int up_primary, acting_primary; osdmap.pg_to_up_acting_osds(pgid, &up_osds, &up_primary, &acting_osds, &acting_primary); // find unused OSD and stick it in there 
OSDMap::Incremental pgtemp_map(osdmap.get_epoch() + 1); // find an unused osd and put it in place of the first one int i = 0; for(; i != (int)get_num_osds(); ++i) { bool in_use = false; for (vector<int>::iterator osd_it = up_osds.begin(); osd_it != up_osds.end(); ++osd_it) { if (i == *osd_it) { in_use = true; break; } } if (!in_use) { up_osds[1] = i; break; } } if (i == (int)get_num_osds()) FAIL() << "did not find unused OSD for temp mapping"; pgtemp_map.new_pg_temp[pgid] = mempool::osdmap::vector<int>( up_osds.begin(), up_osds.end()); pgtemp_map.new_primary_temp[pgid] = up_osds[1]; osdmap.apply_incremental(pgtemp_map); OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1); OSDMap tmpmap; tmpmap.deepish_copy_from(osdmap); tmpmap.apply_incremental(pending_inc); OSDMap::clean_temps(g_ceph_context, osdmap, tmpmap, &pending_inc); EXPECT_FALSE(pending_inc.new_pg_temp.count(pgid)); EXPECT_FALSE(pending_inc.new_primary_temp.count(pgid)); } TEST_F(OSDMapTest, PrimaryAffinity) { set_up_map(); int n = get_num_osds(); for (map<int64_t,pg_pool_t>::const_iterator p = osdmap.get_pools().begin(); p != osdmap.get_pools().end(); ++p) { int pool = p->first; int expect_primary = 10000 / n; cout << "pool " << pool << " size " << (int)p->second.size << " expect_primary " << expect_primary << std::endl; { vector<int> any(n, 0); vector<int> first(n, 0); vector<int> primary(n, 0); test_mappings(pool, 10000, &any, &first, &primary); for (int i=0; i<n; ++i) { ASSERT_LT(0, any[i]); ASSERT_LT(0, first[i]); ASSERT_LT(0, primary[i]); } } osdmap.set_primary_affinity(0, 0); osdmap.set_primary_affinity(1, 0); { vector<int> any(n, 0); vector<int> first(n, 0); vector<int> primary(n, 0); test_mappings(pool, 10000, &any, &first, &primary); for (int i=0; i<n; ++i) { ASSERT_LT(0, any[i]); if (i >= 2) { ASSERT_LT(0, first[i]); ASSERT_LT(0, primary[i]); } else { if (p->second.is_replicated()) { ASSERT_EQ(0, first[i]); } ASSERT_EQ(0, primary[i]); } } } osdmap.set_primary_affinity(0, 0x8000); 
osdmap.set_primary_affinity(1, 0); { vector<int> any(n, 0); vector<int> first(n, 0); vector<int> primary(n, 0); test_mappings(pool, 10000, &any, &first, &primary); int expect = (10000 / (n-2)) / 2; // half weight cout << "expect " << expect << std::endl; for (int i=0; i<n; ++i) { ASSERT_LT(0, any[i]); if (i >= 2) { ASSERT_LT(0, first[i]); ASSERT_LT(0, primary[i]); } else if (i == 1) { if (p->second.is_replicated()) { ASSERT_EQ(0, first[i]); } ASSERT_EQ(0, primary[i]); } else { ASSERT_LT(expect *2/3, primary[0]); ASSERT_GT(expect *4/3, primary[0]); } } } osdmap.set_primary_affinity(0, 0x10000); osdmap.set_primary_affinity(1, 0x10000); } } TEST_F(OSDMapTest, get_osd_crush_node_flags) { set_up_map(); for (unsigned i=0; i<get_num_osds(); ++i) { ASSERT_EQ(0u, osdmap.get_osd_crush_node_flags(i)); } OSDMap::Incremental inc(osdmap.get_epoch() + 1); inc.new_crush_node_flags[-1] = 123u; osdmap.apply_incremental(inc); for (unsigned i=0; i<get_num_osds(); ++i) { ASSERT_EQ(123u, osdmap.get_osd_crush_node_flags(i)); } ASSERT_EQ(0u, osdmap.get_osd_crush_node_flags(1000)); OSDMap::Incremental inc3(osdmap.get_epoch() + 1); inc3.new_crush_node_flags[-1] = 456u; osdmap.apply_incremental(inc3); for (unsigned i=0; i<get_num_osds(); ++i) { ASSERT_EQ(456u, osdmap.get_osd_crush_node_flags(i)); } ASSERT_EQ(0u, osdmap.get_osd_crush_node_flags(1000)); OSDMap::Incremental inc2(osdmap.get_epoch() + 1); inc2.new_crush_node_flags[-1] = 0; osdmap.apply_incremental(inc2); for (unsigned i=0; i<get_num_osds(); ++i) { ASSERT_EQ(0u, osdmap.get_crush_node_flags(i)); } } TEST_F(OSDMapTest, parse_osd_id_list) { set_up_map(); set<int> out; set<int> all; osdmap.get_all_osds(all); ASSERT_EQ(0, osdmap.parse_osd_id_list({"osd.0"}, &out, &cout)); ASSERT_EQ(1u, out.size()); ASSERT_EQ(0, *out.begin()); ASSERT_EQ(0, osdmap.parse_osd_id_list({"1"}, &out, &cout)); ASSERT_EQ(1u, out.size()); ASSERT_EQ(1, *out.begin()); ASSERT_EQ(0, osdmap.parse_osd_id_list({"osd.0","osd.1"}, &out, &cout)); ASSERT_EQ(2u, out.size()); 
ASSERT_EQ(0, *out.begin()); ASSERT_EQ(1, *out.rbegin()); ASSERT_EQ(0, osdmap.parse_osd_id_list({"osd.0","1"}, &out, &cout)); ASSERT_EQ(2u, out.size()); ASSERT_EQ(0, *out.begin()); ASSERT_EQ(1, *out.rbegin()); ASSERT_EQ(0, osdmap.parse_osd_id_list({"*"}, &out, &cout)); ASSERT_EQ(all.size(), out.size()); ASSERT_EQ(all, out); ASSERT_EQ(0, osdmap.parse_osd_id_list({"all"}, &out, &cout)); ASSERT_EQ(all, out); ASSERT_EQ(0, osdmap.parse_osd_id_list({"any"}, &out, &cout)); ASSERT_EQ(all, out); ASSERT_EQ(-EINVAL, osdmap.parse_osd_id_list({"foo"}, &out, &cout)); ASSERT_EQ(-EINVAL, osdmap.parse_osd_id_list({"-12"}, &out, &cout)); } TEST_F(OSDMapTest, CleanPGUpmaps) { set_up_map(); // build a crush rule of type host const int expected_host_num = 3; int osd_per_host = get_num_osds() / expected_host_num; ASSERT_GE(2, osd_per_host); int index = 0; for (int i = 0; i < (int)get_num_osds(); i++) { if (i && i % osd_per_host == 0) { ++index; } stringstream osd_name; stringstream host_name; vector<string> move_to; osd_name << "osd." 
<< i; host_name << "host-" << index; move_to.push_back("root=default"); string host_loc = "host=" + host_name.str(); move_to.push_back(host_loc); int r = crush_move(osdmap, osd_name.str(), move_to); ASSERT_EQ(0, r); } const string upmap_rule = "upmap"; int upmap_rule_no = crush_rule_create_replicated( upmap_rule, "default", "host"); ASSERT_LT(0, upmap_rule_no); // create a replicated pool which references the above rule OSDMap::Incremental new_pool_inc(osdmap.get_epoch() + 1); new_pool_inc.new_pool_max = osdmap.get_pool_max(); new_pool_inc.fsid = osdmap.get_fsid(); pg_pool_t empty; uint64_t upmap_pool_id = ++new_pool_inc.new_pool_max; pg_pool_t *p = new_pool_inc.get_new_pool(upmap_pool_id, &empty); p->size = 2; p->set_pg_num(64); p->set_pgp_num(64); p->type = pg_pool_t::TYPE_REPLICATED; p->crush_rule = upmap_rule_no; p->set_flag(pg_pool_t::FLAG_HASHPSPOOL); new_pool_inc.new_pool_names[upmap_pool_id] = "upmap_pool"; osdmap.apply_incremental(new_pool_inc); pg_t rawpg(0, upmap_pool_id); pg_t pgid = osdmap.raw_pg_to_pg(rawpg); vector<int> up; int up_primary; osdmap.pg_to_raw_up(pgid, &up, &up_primary); ASSERT_LT(1U, up.size()); { // validate we won't have two OSDs from a same host int parent_0 = osdmap.crush->get_parent_of_type(up[0], osdmap.crush->get_type_id("host")); int parent_1 = osdmap.crush->get_parent_of_type(up[1], osdmap.crush->get_type_id("host")); ASSERT_TRUE(parent_0 != parent_1); } { // cancel stale upmaps osdmap.pg_to_raw_up(pgid, &up, &up_primary); int from = -1; for (int i = 0; i < (int)get_num_osds(); i++) { if (std::find(up.begin(), up.end(), i) == up.end()) { from = i; break; } } ASSERT_TRUE(from >= 0); int to = -1; for (int i = 0; i < (int)get_num_osds(); i++) { if (std::find(up.begin(), up.end(), i) == up.end() && i != from) { to = i; break; } } ASSERT_TRUE(to >= 0); vector<pair<int32_t,int32_t>> new_pg_upmap_items; new_pg_upmap_items.push_back(make_pair(from, to)); OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1); 
pending_inc.new_pg_upmap_items[pgid] = mempool::osdmap::vector<pair<int32_t,int32_t>>( new_pg_upmap_items.begin(), new_pg_upmap_items.end()); OSDMap nextmap; nextmap.deepish_copy_from(osdmap); nextmap.apply_incremental(pending_inc); ASSERT_TRUE(nextmap.have_pg_upmaps(pgid)); OSDMap::Incremental new_pending_inc(nextmap.get_epoch() + 1); clean_pg_upmaps(g_ceph_context, nextmap, new_pending_inc); nextmap.apply_incremental(new_pending_inc); ASSERT_TRUE(!nextmap.have_pg_upmaps(pgid)); } { // https://tracker.ceph.com/issues/37493 pg_t ec_pg(0, my_ec_pool); pg_t ec_pgid = osdmap.raw_pg_to_pg(ec_pg); OSDMap tmpmap; // use a tmpmap here, so we do not dirty origin map.. int from = -1; int to = -1; { // insert a valid pg_upmap_item vector<int> ec_up; int ec_up_primary; osdmap.pg_to_raw_up(ec_pgid, &ec_up, &ec_up_primary); ASSERT_TRUE(!ec_up.empty()); from = *(ec_up.begin()); ASSERT_TRUE(from >= 0); for (int i = 0; i < (int)get_num_osds(); i++) { if (std::find(ec_up.begin(), ec_up.end(), i) == ec_up.end()) { to = i; break; } } ASSERT_TRUE(to >= 0); ASSERT_TRUE(from != to); vector<pair<int32_t,int32_t>> new_pg_upmap_items; new_pg_upmap_items.push_back(make_pair(from, to)); OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1); pending_inc.new_pg_upmap_items[ec_pgid] = mempool::osdmap::vector<pair<int32_t,int32_t>>( new_pg_upmap_items.begin(), new_pg_upmap_items.end()); tmpmap.deepish_copy_from(osdmap); tmpmap.apply_incremental(pending_inc); ASSERT_TRUE(tmpmap.have_pg_upmaps(ec_pgid)); } { // mark one of the target OSDs of the above pg_upmap_item as down OSDMap::Incremental pending_inc(tmpmap.get_epoch() + 1); pending_inc.new_state[to] = CEPH_OSD_UP; tmpmap.apply_incremental(pending_inc); ASSERT_TRUE(!tmpmap.is_up(to)); ASSERT_TRUE(tmpmap.have_pg_upmaps(ec_pgid)); } { // confirm *clean_pg_upmaps* won't do anything bad OSDMap::Incremental pending_inc(tmpmap.get_epoch() + 1); clean_pg_upmaps(g_ceph_context, tmpmap, pending_inc); tmpmap.apply_incremental(pending_inc); 
ASSERT_TRUE(tmpmap.have_pg_upmaps(ec_pgid)); } } { // http://tracker.ceph.com/issues/37501 pg_t ec_pg(0, my_ec_pool); pg_t ec_pgid = osdmap.raw_pg_to_pg(ec_pg); OSDMap tmpmap; // use a tmpmap here, so we do not dirty origin map.. int from = -1; int to = -1; { // insert a valid pg_upmap_item vector<int> ec_up; int ec_up_primary; osdmap.pg_to_raw_up(ec_pgid, &ec_up, &ec_up_primary); ASSERT_TRUE(!ec_up.empty()); from = *(ec_up.begin()); ASSERT_TRUE(from >= 0); for (int i = 0; i < (int)get_num_osds(); i++) { if (std::find(ec_up.begin(), ec_up.end(), i) == ec_up.end()) { to = i; break; } } ASSERT_TRUE(to >= 0); ASSERT_TRUE(from != to); vector<pair<int32_t,int32_t>> new_pg_upmap_items; new_pg_upmap_items.push_back(make_pair(from, to)); OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1); pending_inc.new_pg_upmap_items[ec_pgid] = mempool::osdmap::vector<pair<int32_t,int32_t>>( new_pg_upmap_items.begin(), new_pg_upmap_items.end()); tmpmap.deepish_copy_from(osdmap); tmpmap.apply_incremental(pending_inc); ASSERT_TRUE(tmpmap.have_pg_upmaps(ec_pgid)); } { // mark one of the target OSDs of the above pg_upmap_item as out OSDMap::Incremental pending_inc(tmpmap.get_epoch() + 1); pending_inc.new_weight[to] = CEPH_OSD_OUT; tmpmap.apply_incremental(pending_inc); ASSERT_TRUE(tmpmap.is_out(to)); ASSERT_TRUE(tmpmap.have_pg_upmaps(ec_pgid)); } { // *clean_pg_upmaps* should be able to remove the above *bad* mapping OSDMap::Incremental pending_inc(tmpmap.get_epoch() + 1); clean_pg_upmaps(g_ceph_context, tmpmap, pending_inc); tmpmap.apply_incremental(pending_inc); ASSERT_TRUE(!tmpmap.have_pg_upmaps(ec_pgid)); } } { // http://tracker.ceph.com/issues/37968 // build a temporary crush topology of 2 hosts, 3 osds per host OSDMap tmp; // use a tmpmap here, so we do not dirty origin map.. 
tmp.deepish_copy_from(osdmap); const int expected_host_num = 2; int osd_per_host = get_num_osds() / expected_host_num; ASSERT_GE(osd_per_host, 3); int index = 0; for (int i = 0; i < (int)get_num_osds(); i++) { if (i && i % osd_per_host == 0) { ++index; } stringstream osd_name; stringstream host_name; vector<string> move_to; osd_name << "osd." << i; host_name << "host-" << index; move_to.push_back("root=default"); string host_loc = "host=" + host_name.str(); move_to.push_back(host_loc); auto r = crush_move(tmp, osd_name.str(), move_to); ASSERT_EQ(0, r); } // build crush rule CrushWrapper crush; get_crush(tmp, crush); string rule_name = "rule_37968"; int rule_type = pg_pool_t::TYPE_ERASURE; ASSERT_TRUE(!crush.rule_exists(rule_name)); int rno; for (rno = 0; rno < crush.get_max_rules(); rno++) { if (!crush.rule_exists(rno)) break; } string root_name = "default"; int root = crush.get_item_id(root_name); int steps = 6; crush_rule *rule = crush_make_rule(steps, rule_type); int step = 0; crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSELEAF_TRIES, 5, 0); crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSE_TRIES, 100, 0); crush_rule_set_step(rule, step++, CRUSH_RULE_TAKE, root, 0); crush_rule_set_step(rule, step++, CRUSH_RULE_CHOOSE_INDEP, 2, 1 /* host*/); crush_rule_set_step(rule, step++, CRUSH_RULE_CHOOSE_INDEP, 2, 0 /* osd */); crush_rule_set_step(rule, step++, CRUSH_RULE_EMIT, 0, 0); ASSERT_TRUE(step == steps); auto r = crush_add_rule(crush.get_crush_map(), rule, rno); ASSERT_TRUE(r >= 0); crush.set_rule_name(rno, rule_name); { OSDMap::Incremental pending_inc(tmp.get_epoch() + 1); pending_inc.crush.clear(); crush.encode(pending_inc.crush, CEPH_FEATURES_SUPPORTED_DEFAULT); tmp.apply_incremental(pending_inc); } // create a erasuce-coded pool referencing the above rule int64_t pool_37968; { OSDMap::Incremental new_pool_inc(tmp.get_epoch() + 1); new_pool_inc.new_pool_max = tmp.get_pool_max(); new_pool_inc.fsid = tmp.get_fsid(); pg_pool_t empty; pool_37968 = 
++new_pool_inc.new_pool_max; pg_pool_t *p = new_pool_inc.get_new_pool(pool_37968, &empty); p->size = 4; p->set_pg_num(8); p->set_pgp_num(8); p->type = pg_pool_t::TYPE_ERASURE; p->crush_rule = rno; p->set_flag(pg_pool_t::FLAG_HASHPSPOOL); new_pool_inc.new_pool_names[pool_37968] = "pool_37968"; tmp.apply_incremental(new_pool_inc); } pg_t ec_pg(0, pool_37968); pg_t ec_pgid = tmp.raw_pg_to_pg(ec_pg); int from = -1; int to = -1; { // insert a valid pg_upmap_item vector<int> ec_up; int ec_up_primary; tmp.pg_to_raw_up(ec_pgid, &ec_up, &ec_up_primary); ASSERT_TRUE(ec_up.size() == 4); from = *(ec_up.begin()); ASSERT_TRUE(from >= 0); auto parent = tmp.crush->get_parent_of_type(from, 1 /* host */, rno); ASSERT_TRUE(parent < 0); // pick an osd of the same parent with *from* for (int i = 0; i < (int)get_num_osds(); i++) { if (std::find(ec_up.begin(), ec_up.end(), i) == ec_up.end()) { auto p = tmp.crush->get_parent_of_type(i, 1 /* host */, rno); if (p == parent) { to = i; break; } } } ASSERT_TRUE(to >= 0); ASSERT_TRUE(from != to); vector<pair<int32_t,int32_t>> new_pg_upmap_items; new_pg_upmap_items.push_back(make_pair(from, to)); OSDMap::Incremental pending_inc(tmp.get_epoch() + 1); pending_inc.new_pg_upmap_items[ec_pgid] = mempool::osdmap::vector<pair<int32_t,int32_t>>( new_pg_upmap_items.begin(), new_pg_upmap_items.end()); tmp.apply_incremental(pending_inc); ASSERT_TRUE(tmp.have_pg_upmaps(ec_pgid)); } { // *clean_pg_upmaps* should not remove the above upmap_item OSDMap::Incremental pending_inc(tmp.get_epoch() + 1); clean_pg_upmaps(g_ceph_context, tmp, pending_inc); tmp.apply_incremental(pending_inc); ASSERT_TRUE(tmp.have_pg_upmaps(ec_pgid)); } } { // TEST pg_upmap { // STEP-1: enumerate all children of up[0]'s parent, // replace up[1] with one of them (other than up[0]) int parent = osdmap.crush->get_parent_of_type(up[0], osdmap.crush->get_type_id("host")); set<int> candidates; osdmap.crush->get_leaves(osdmap.crush->get_item_name(parent), &candidates); ASSERT_LT(1U, 
candidates.size()); int replaced_by = -1; for (auto c: candidates) { if (c != up[0]) { replaced_by = c; break; } } { // Check we can handle a negative pg_upmap value vector<int32_t> new_pg_upmap; new_pg_upmap.push_back(up[0]); new_pg_upmap.push_back(-823648512); OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1); pending_inc.new_pg_upmap[pgid] = mempool::osdmap::vector<int32_t>( new_pg_upmap.begin(), new_pg_upmap.end()); osdmap.apply_incremental(pending_inc); vector<int> new_up; int new_up_primary; // crucial call - _apply_upmap should ignore the negative value osdmap.pg_to_raw_up(pgid, &new_up, &new_up_primary); } ASSERT_NE(-1, replaced_by); // generate a new pg_upmap item and apply vector<int32_t> new_pg_upmap; new_pg_upmap.push_back(up[0]); new_pg_upmap.push_back(replaced_by); // up[1] -> replaced_by OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1); pending_inc.new_pg_upmap[pgid] = mempool::osdmap::vector<int32_t>( new_pg_upmap.begin(), new_pg_upmap.end()); osdmap.apply_incremental(pending_inc); { // validate pg_upmap is there vector<int> new_up; int new_up_primary; osdmap.pg_to_raw_up(pgid, &new_up, &new_up_primary); ASSERT_EQ(new_up.size(), up.size()); ASSERT_EQ(new_up[0], new_pg_upmap[0]); ASSERT_EQ(new_up[1], new_pg_upmap[1]); // and we shall have two OSDs from a same host now.. 
int parent_0 = osdmap.crush->get_parent_of_type(new_up[0], osdmap.crush->get_type_id("host")); int parent_1 = osdmap.crush->get_parent_of_type(new_up[1], osdmap.crush->get_type_id("host")); ASSERT_EQ(parent_0, parent_1); } } { // STEP-2: apply cure OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1); clean_pg_upmaps(g_ceph_context, osdmap, pending_inc); osdmap.apply_incremental(pending_inc); { // validate pg_upmap is gone (reverted) vector<int> new_up; int new_up_primary; osdmap.pg_to_raw_up(pgid, &new_up, &new_up_primary); ASSERT_EQ(new_up, up); ASSERT_EQ(new_up_primary, up_primary); } } } { // TEST pg_upmap_items // enumerate all used hosts first set<int> parents; for (auto u: up) { int parent = osdmap.crush->get_parent_of_type(u, osdmap.crush->get_type_id("host")); ASSERT_GT(0, parent); parents.insert(parent); } int candidate_parent = 0; set<int> candidate_children; vector<int> up_after_out; { // STEP-1: try mark out up[1] and all other OSDs from the same host int parent = osdmap.crush->get_parent_of_type(up[1], osdmap.crush->get_type_id("host")); set<int> children; osdmap.crush->get_leaves(osdmap.crush->get_item_name(parent), &children); OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1); for (auto c: children) { pending_inc.new_weight[c] = CEPH_OSD_OUT; } OSDMap tmpmap; tmpmap.deepish_copy_from(osdmap); tmpmap.apply_incremental(pending_inc); vector<int> new_up; int new_up_primary; tmpmap.pg_to_raw_up(pgid, &new_up, &new_up_primary); // verify that we'll have OSDs from a different host.. int will_choose = -1; for (auto o: new_up) { int parent = tmpmap.crush->get_parent_of_type(o, osdmap.crush->get_type_id("host")); if (!parents.count(parent)) { will_choose = o; candidate_parent = parent; // record break; } } ASSERT_LT(-1, will_choose); // it is an OSD! 
ASSERT_NE(candidate_parent, 0); osdmap.crush->get_leaves(osdmap.crush->get_item_name(candidate_parent), &candidate_children); ASSERT_TRUE(candidate_children.count(will_choose)); candidate_children.erase(will_choose); ASSERT_FALSE(candidate_children.empty()); up_after_out = new_up; // needed for verification.. } { // Make sure we can handle a negative pg_upmap_item int victim = up[0]; int replaced_by = -823648512; vector<pair<int32_t,int32_t>> new_pg_upmap_items; new_pg_upmap_items.push_back(make_pair(victim, replaced_by)); // apply OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1); pending_inc.new_pg_upmap_items[pgid] = mempool::osdmap::vector<pair<int32_t,int32_t>>( new_pg_upmap_items.begin(), new_pg_upmap_items.end()); osdmap.apply_incremental(pending_inc); vector<int> new_up; int new_up_primary; // crucial call - _apply_upmap should ignore the negative value osdmap.pg_to_raw_up(pgid, &new_up, &new_up_primary); } { // STEP-2: generating a new pg_upmap_items entry by // replacing up[0] with one coming from candidate_children int victim = up[0]; int replaced_by = *candidate_children.begin(); vector<pair<int32_t,int32_t>> new_pg_upmap_items; new_pg_upmap_items.push_back(make_pair(victim, replaced_by)); // apply OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1); pending_inc.new_pg_upmap_items[pgid] = mempool::osdmap::vector<pair<int32_t,int32_t>>( new_pg_upmap_items.begin(), new_pg_upmap_items.end()); osdmap.apply_incremental(pending_inc); { // validate pg_upmap_items is there vector<int> new_up; int new_up_primary; osdmap.pg_to_raw_up(pgid, &new_up, &new_up_primary); ASSERT_EQ(new_up.size(), up.size()); ASSERT_TRUE(std::find(new_up.begin(), new_up.end(), replaced_by) != new_up.end()); // and up[1] too ASSERT_TRUE(std::find(new_up.begin(), new_up.end(), up[1]) != new_up.end()); } } { // STEP-3: mark out up[1] and all other OSDs from the same host int parent = osdmap.crush->get_parent_of_type(up[1], osdmap.crush->get_type_id("host")); set<int> children; 
// (tail of the pg_upmap_items test: STEP-3 continues -- mark out every OSD
// on up[1]'s host, verify the resulting placement, then revert via
// clean_pg_upmaps)
      osdmap.crush->get_leaves(osdmap.crush->get_item_name(parent), &children);
      OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
      for (auto c: children) {
        pending_inc.new_weight[c] = CEPH_OSD_OUT;
      }
      osdmap.apply_incremental(pending_inc);
      {
        // validate we have two OSDs from the same host now..
        vector<int> new_up;
        int new_up_primary;
        osdmap.pg_to_raw_up(pgid, &new_up, &new_up_primary);
        ASSERT_EQ(up.size(), new_up.size());
        int parent_0 = osdmap.crush->get_parent_of_type(new_up[0],
          osdmap.crush->get_type_id("host"));
        int parent_1 = osdmap.crush->get_parent_of_type(new_up[1],
          osdmap.crush->get_type_id("host"));
        ASSERT_EQ(parent_0, parent_1);
      }
    }
    {
      // STEP-4: apply cure
      OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
      clean_pg_upmaps(g_ceph_context, osdmap, pending_inc);
      osdmap.apply_incremental(pending_inc);
      {
        // validate pg_upmap_items is gone (reverted)
        vector<int> new_up;
        int new_up_primary;
        osdmap.pg_to_raw_up(pgid, &new_up, &new_up_primary);
        ASSERT_EQ(new_up, up_after_out);
      }
    }
  }
}

// Reproducer for http://tracker.ceph.com/issues/38897: two pools with
// custom crush rules; pool1's PGs are remapped so that osd.0 looks
// *underfull* and osd.1 *overfull* before the balancer is exercised.
TEST_F(OSDMapTest, BUG_38897) {
  // http://tracker.ceph.com/issues/38897
  // build a fresh map with 12 OSDs, without any default pools
  set_up_map(12, true);
  const string pool_1("pool1");
  const string pool_2("pool2");
  int64_t pool_1_id = -1;

  {
    // build customized crush rule for "pool1"
    string host_name = "host_for_pool_1";
    // build a customized host to capture osd.1~5
    for (int i = 1; i < 5; i++) {
      stringstream osd_name;
      vector<string> move_to;
      osd_name << "osd." << i;
      move_to.push_back("root=default");
      string host_loc = "host=" + host_name;
      move_to.push_back(host_loc);
      auto r = crush_move(osdmap, osd_name.str(), move_to);
      ASSERT_EQ(0, r);
    }
    CrushWrapper crush;
    get_crush(osdmap, crush);
    auto host_id = crush.get_item_id(host_name);
    ASSERT_TRUE(host_id < 0); // buckets have negative crush ids
    string rule_name = "rule_for_pool1";
    int rule_type = pg_pool_t::TYPE_REPLICATED;
    ASSERT_TRUE(!crush.rule_exists(rule_name));
    int rno;
    for (rno = 0; rno < crush.get_max_rules(); rno++) {
      if (!crush.rule_exists(rno))
        break;
    }
    int steps = 7;
    crush_rule *rule = crush_make_rule(steps, rule_type);
    int step = 0;
    crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSELEAF_TRIES, 5, 0);
    crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSE_TRIES, 100, 0);
    // always choose osd.0
    crush_rule_set_step(rule, step++, CRUSH_RULE_TAKE, 0, 0);
    crush_rule_set_step(rule, step++, CRUSH_RULE_EMIT, 0, 0);
    // then pick any other random osds
    crush_rule_set_step(rule, step++, CRUSH_RULE_TAKE, host_id, 0);
    crush_rule_set_step(rule, step++, CRUSH_RULE_CHOOSELEAF_FIRSTN, 2, 0);
    crush_rule_set_step(rule, step++, CRUSH_RULE_EMIT, 0, 0);
    ASSERT_TRUE(step == steps);
    auto r = crush_add_rule(crush.get_crush_map(), rule, rno);
    ASSERT_TRUE(r >= 0);
    crush.set_rule_name(rno, rule_name);
    {
      OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
      pending_inc.crush.clear();
      crush.encode(pending_inc.crush, CEPH_FEATURES_SUPPORTED_DEFAULT);
      osdmap.apply_incremental(pending_inc);
    }

    // create "pool1"
    OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
    pending_inc.new_pool_max = osdmap.get_pool_max();
    auto pool_id = ++pending_inc.new_pool_max;
    pool_1_id = pool_id;
    pg_pool_t empty;
    auto p = pending_inc.get_new_pool(pool_id, &empty);
    p->size = 3;
    p->min_size = 1;
    p->set_pg_num(3);
    p->set_pgp_num(3);
    p->type = pg_pool_t::TYPE_REPLICATED;
    p->crush_rule = rno;
    p->set_flag(pg_pool_t::FLAG_HASHPSPOOL);
    pending_inc.new_pool_names[pool_id] = pool_1;
    osdmap.apply_incremental(pending_inc);
    ASSERT_TRUE(osdmap.have_pg_pool(pool_id));
    ASSERT_TRUE(osdmap.get_pool_name(pool_id) == pool_1);
    {
      for (unsigned i = 0; i < 3; i++) {
        // 1.x -> [1]
        pg_t rawpg(i, pool_id);
        pg_t pgid = osdmap.raw_pg_to_pg(rawpg);
        vector<int> up;
        int up_primary;
        osdmap.pg_to_raw_up(pgid, &up, &up_primary);
        ASSERT_TRUE(up.size() == 3);
        ASSERT_TRUE(up[0] == 0);
        // insert a new pg_upmap
        vector<int32_t> new_up;
        // and remap 1.x to osd.1 only
        // this way osd.0 is deemed to be *underfull*
        // and osd.1 is deemed to be *overfull*
        new_up.push_back(1);
        {
          OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
          pending_inc.new_pg_upmap[pgid] = mempool::osdmap::vector<int32_t>(
            new_up.begin(), new_up.end());
          osdmap.apply_incremental(pending_inc);
        }
        osdmap.pg_to_raw_up(pgid, &up, &up_primary);
        ASSERT_TRUE(up.size() == 1);
        ASSERT_TRUE(up[0] == 1);
      }
    }
  }
  {
    // build customized crush rule for "pool2"
    string host_name = "host_for_pool_2";
    // build a customized host to capture osd.6~11
    for (int i = 6; i < (int)get_num_osds(); i++) {
      stringstream osd_name;
      vector<string> move_to;
      osd_name << "osd." << i;
      move_to.push_back("root=default");
      string host_loc = "host=" + host_name;
      move_to.push_back(host_loc);
      auto r = crush_move(osdmap, osd_name.str(), move_to);
      ASSERT_EQ(0, r);
    }
    CrushWrapper crush;
    get_crush(osdmap, crush);
    auto host_id = crush.get_item_id(host_name);
    ASSERT_TRUE(host_id < 0); // buckets have negative crush ids
    string rule_name = "rule_for_pool2";
    int rule_type = pg_pool_t::TYPE_REPLICATED;
    ASSERT_TRUE(!crush.rule_exists(rule_name));
    int rno;
    for (rno = 0; rno < crush.get_max_rules(); rno++) {
      if (!crush.rule_exists(rno))
        break;
    }
    int steps = 7;
    crush_rule *rule = crush_make_rule(steps, rule_type);
    int step = 0;
    crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSELEAF_TRIES, 5, 0);
    crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSE_TRIES, 100, 0);
    // always choose osd.0
    crush_rule_set_step(rule, step++, CRUSH_RULE_TAKE, 0, 0);
    crush_rule_set_step(rule, step++, CRUSH_RULE_EMIT, 0, 0);
    // then pick any other random osds
    crush_rule_set_step(rule, step++, CRUSH_RULE_TAKE, host_id, 0);
    crush_rule_set_step(rule, step++, CRUSH_RULE_CHOOSELEAF_FIRSTN, 2, 0);
    crush_rule_set_step(rule, step++, CRUSH_RULE_EMIT, 0, 0);
    ASSERT_TRUE(step == steps);
    auto r = crush_add_rule(crush.get_crush_map(), rule, rno);
    ASSERT_TRUE(r >= 0);
    crush.set_rule_name(rno, rule_name);
    {
      OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
      pending_inc.crush.clear();
      crush.encode(pending_inc.crush, CEPH_FEATURES_SUPPORTED_DEFAULT);
      osdmap.apply_incremental(pending_inc);
    }

    // create "pool2"
    OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
    pending_inc.new_pool_max = osdmap.get_pool_max();
    auto pool_id = ++pending_inc.new_pool_max;
    pg_pool_t empty;
    auto p = pending_inc.get_new_pool(pool_id, &empty);
    p->size = 3;
    // include a single PG
    p->set_pg_num(1);
    p->set_pgp_num(1);
    p->type = pg_pool_t::TYPE_REPLICATED;
    p->crush_rule = rno;
    p->set_flag(pg_pool_t::FLAG_HASHPSPOOL);
    pending_inc.new_pool_names[pool_id] = pool_2;
    osdmap.apply_incremental(pending_inc);
    ASSERT_TRUE(osdmap.have_pg_pool(pool_id));
    ASSERT_TRUE(osdmap.get_pool_name(pool_id) == pool_2);
    pg_t rawpg(0, pool_id);
    pg_t pgid = osdmap.raw_pg_to_pg(rawpg);
    EXPECT_TRUE(!osdmap.have_pg_upmaps(pgid));
    vector<int> up;
    int up_primary;
    osdmap.pg_to_raw_up(pgid, &up, &up_primary);
    ASSERT_TRUE(up.size() == 3);
    ASSERT_TRUE(up[0] == 0);
    {
      // build a pg_upmap_item that will
      // remap pg out from *underfull* osd.0
      vector<pair<int32_t,int32_t>> new_pg_upmap_items;
      new_pg_upmap_items.push_back(make_pair(0, 10)); // osd.0 -> osd.10
      OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
      pending_inc.new_pg_upmap_items[pgid] =
        mempool::osdmap::vector<pair<int32_t,int32_t>>(
        new_pg_upmap_items.begin(), new_pg_upmap_items.end());
      osdmap.apply_incremental(pending_inc);
      ASSERT_TRUE(osdmap.have_pg_upmaps(pgid));
      vector<int> up;
      int up_primary;
      osdmap.pg_to_raw_up(pgid, &up, &up_primary);
      ASSERT_TRUE(up.size() == 3);
      ASSERT_TRUE(up[0] == 10);
    }
  }
  // ready to go
  {
    set<int64_t> only_pools;
    ASSERT_TRUE(pool_1_id >= 0);
    only_pools.insert(pool_1_id);
    OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
    // require perfect distribution!
// (max deviation 0)
    osdmap.calc_pg_upmaps(g_ceph_context,
                          0, // so we can force optimizing
                          100,
                          only_pools,
                          &pending_inc);
    osdmap.apply_incremental(pending_inc);
  }
}

// Reproducer for http://tracker.ceph.com/issues/40104: clean_pg_upmaps
// must stay fast even with ~10k pg_upmap_items on a 5000-OSD map; the
// pass is timed and its latency printed.
TEST_F(OSDMapTest, BUG_40104) {
  // http://tracker.ceph.com/issues/40104
  int big_osd_num = 5000;
  int big_pg_num = 10000;
  set_up_map(big_osd_num, true);
  int pool_id;
  {
    // create one big replicated pool holding all the PGs
    OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
    pending_inc.new_pool_max = osdmap.get_pool_max();
    pool_id = ++pending_inc.new_pool_max;
    pg_pool_t empty;
    auto p = pending_inc.get_new_pool(pool_id, &empty);
    p->size = 3;
    p->min_size = 1;
    p->set_pg_num(big_pg_num);
    p->set_pgp_num(big_pg_num);
    p->type = pg_pool_t::TYPE_REPLICATED;
    p->crush_rule = 0;
    p->set_flag(pg_pool_t::FLAG_HASHPSPOOL);
    pending_inc.new_pool_names[pool_id] = "big_pool";
    osdmap.apply_incremental(pending_inc);
    ASSERT_TRUE(osdmap.have_pg_pool(pool_id));
    ASSERT_TRUE(osdmap.get_pool_name(pool_id) == "big_pool");
  }
  {
    // generate pg_upmap_items for each pg
    OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
    for (int i = 0; i < big_pg_num; i++) {
      pg_t rawpg(i, pool_id);
      pg_t pgid = osdmap.raw_pg_to_pg(rawpg);
      vector<int> up;
      int up_primary;
      osdmap.pg_to_raw_up(pgid, &up, &up_primary);
      ASSERT_TRUE(up.size() == 3);
      int victim = up[0];
      int replaced_by = random() % big_osd_num;
      vector<pair<int32_t,int32_t>> new_pg_upmap_items;
      // note that it might or might not be valid, we don't care
      new_pg_upmap_items.push_back(make_pair(victim, replaced_by));
      pending_inc.new_pg_upmap_items[pgid] =
        mempool::osdmap::vector<pair<int32_t,int32_t>>(
        new_pg_upmap_items.begin(), new_pg_upmap_items.end());
    }
    osdmap.apply_incremental(pending_inc);
  }
  {
    // time one full clean_pg_upmaps pass over all those entries
    OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
    auto start = mono_clock::now();
    clean_pg_upmaps(g_ceph_context, osdmap, pending_inc);
    auto latency = mono_clock::now() - start;
    std::cout << "clean_pg_upmaps (~" << big_pg_num
              << " pg_upmap_items) latency:" << timespan_str(latency)
              << std::endl;
  }
}

TEST_F(OSDMapTest, BUG_42052) {
  //
// https://tracker.ceph.com/issues/42052
  // A PG that carries both a pg_upmap and pg_upmap_items; clean_pg_upmaps
  // must remove both.
  set_up_map(6, true);
  const string pool_name("pool");
  // build customized crush rule for "pool"
  CrushWrapper crush;
  get_crush(osdmap, crush);
  string rule_name = "rule";
  int rule_type = pg_pool_t::TYPE_REPLICATED;
  ASSERT_TRUE(!crush.rule_exists(rule_name));
  int rno;
  for (rno = 0; rno < crush.get_max_rules(); rno++) {
    if (!crush.rule_exists(rno))
      break;
  }
  int steps = 8;
  crush_rule *rule = crush_make_rule(steps, rule_type);
  int step = 0;
  crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSELEAF_TRIES, 5, 0);
  crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSE_TRIES, 100, 0);
  // always choose osd.0, osd.1, osd.2
  crush_rule_set_step(rule, step++, CRUSH_RULE_TAKE, 0, 0);
  crush_rule_set_step(rule, step++, CRUSH_RULE_EMIT, 0, 0);
  crush_rule_set_step(rule, step++, CRUSH_RULE_TAKE, 0, 1);
  crush_rule_set_step(rule, step++, CRUSH_RULE_EMIT, 0, 0);
  crush_rule_set_step(rule, step++, CRUSH_RULE_TAKE, 0, 2);
  crush_rule_set_step(rule, step++, CRUSH_RULE_EMIT, 0, 0);
  ASSERT_TRUE(step == steps);
  auto r = crush_add_rule(crush.get_crush_map(), rule, rno);
  ASSERT_TRUE(r >= 0);
  crush.set_rule_name(rno, rule_name);
  {
    OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
    pending_inc.crush.clear();
    crush.encode(pending_inc.crush, CEPH_FEATURES_SUPPORTED_DEFAULT);
    osdmap.apply_incremental(pending_inc);
  }
  // create "pool"
  OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
  pending_inc.new_pool_max = osdmap.get_pool_max();
  auto pool_id = ++pending_inc.new_pool_max;
  pg_pool_t empty;
  auto p = pending_inc.get_new_pool(pool_id, &empty);
  p->size = 3;
  p->min_size = 1;
  p->set_pg_num(1);
  p->set_pgp_num(1);
  p->type = pg_pool_t::TYPE_REPLICATED;
  p->crush_rule = rno;
  p->set_flag(pg_pool_t::FLAG_HASHPSPOOL);
  pending_inc.new_pool_names[pool_id] = pool_name;
  osdmap.apply_incremental(pending_inc);
  ASSERT_TRUE(osdmap.have_pg_pool(pool_id));
  ASSERT_TRUE(osdmap.get_pool_name(pool_id) == pool_name);
  pg_t rawpg(0, pool_id);
  pg_t pgid = osdmap.raw_pg_to_pg(rawpg);
  {
    // pg_upmap 1.0 [2,3,5]
    vector<int32_t> new_up{2,3,5};
    OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
    pending_inc.new_pg_upmap[pgid] = mempool::osdmap::vector<int32_t>(
      new_up.begin(), new_up.end());
    osdmap.apply_incremental(pending_inc);
  }
  {
    // pg_upmap_items 1.0 [0,3,4,5]
    vector<pair<int32_t,int32_t>> new_pg_upmap_items;
    new_pg_upmap_items.push_back(make_pair(0, 3));
    new_pg_upmap_items.push_back(make_pair(4, 5));
    OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
    pending_inc.new_pg_upmap_items[pgid] =
      mempool::osdmap::vector<pair<int32_t,int32_t>>(
      new_pg_upmap_items.begin(), new_pg_upmap_items.end());
    osdmap.apply_incremental(pending_inc);
  }
  {
    // both of the above must be dropped by the cleaner
    OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
    clean_pg_upmaps(g_ceph_context, osdmap, pending_inc);
    osdmap.apply_incremental(pending_inc);
    ASSERT_FALSE(osdmap.have_pg_upmaps(pgid));
  }
}

// Reproducer for https://tracker.ceph.com/issues/42485: upmap items with
// a multi-take (per-datacenter) crush rule.
TEST_F(OSDMapTest, BUG_42485) {
  set_up_map(60);
  {
    // build a temporary crush topology of 2 datacenters, 3 racks per dc,
    // 1 host per rack, 10 osds per host
    OSDMap tmp; // use a tmpmap here, so we do not dirty origin map..
    tmp.deepish_copy_from(osdmap);
    const int expected_host_num = 6;
    int osd_per_host = (int)get_num_osds() / expected_host_num;
    ASSERT_GE(osd_per_host, 10);
    int host_per_dc = 3;
    int index = 0;
    int dc_index = 0;
    for (int i = 0; i < (int)get_num_osds(); i++) {
      if (i && i % osd_per_host == 0) {
        ++index;
      }
      if (i && i % (host_per_dc * osd_per_host) == 0) {
        ++dc_index;
      }
      stringstream osd_name;
      stringstream host_name;
      stringstream rack_name;
      stringstream dc_name;
      vector<string> move_to;
      osd_name << "osd."
// (continuation of the BUG_42485 topology loop: finish the osd name and
// move the osd under root/datacenter/rack/host)
        << i;
      host_name << "host-" << index;
      rack_name << "rack-" << index;
      dc_name << "dc-" << dc_index;
      move_to.push_back("root=default");
      string dc_loc = "datacenter=" + dc_name.str();
      move_to.push_back(dc_loc);
      string rack_loc = "rack=" + rack_name.str();
      move_to.push_back(rack_loc);
      string host_loc = "host=" + host_name.str();
      move_to.push_back(host_loc);
      auto r = crush_move(tmp, osd_name.str(), move_to);
      ASSERT_EQ(0, r);
    }
    // build crush rule
    CrushWrapper crush;
    get_crush(tmp, crush);
    string rule_name = "rule_xeus_993_1";
    int rule_type = pg_pool_t::TYPE_REPLICATED;
    ASSERT_TRUE(!crush.rule_exists(rule_name));
    int rno;
    for (rno = 0; rno < crush.get_max_rules(); rno++) {
      if (!crush.rule_exists(rno))
        break;
    }
    string root_name = "default";
    string dc_1 = "dc-0";
    int dc1 = crush.get_item_id(dc_1);
    string dc_2 = "dc-1";
    int dc2 = crush.get_item_id(dc_2);
    int steps = 8;
    crush_rule *rule = crush_make_rule(steps, rule_type);
    int step = 0;
    crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSELEAF_TRIES, 5, 0);
    crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSE_TRIES, 100, 0);
    // multi-take rule: two racks from each of the two datacenters
    crush_rule_set_step(rule, step++, CRUSH_RULE_TAKE, dc1, 0);
    crush_rule_set_step(rule, step++, CRUSH_RULE_CHOOSELEAF_FIRSTN, 2, 3 /* rack */);
    crush_rule_set_step(rule, step++, CRUSH_RULE_EMIT, 0, 0);
    crush_rule_set_step(rule, step++, CRUSH_RULE_TAKE, dc2, 0);
    crush_rule_set_step(rule, step++, CRUSH_RULE_CHOOSELEAF_FIRSTN, 2, 3 /* rack */);
    crush_rule_set_step(rule, step++, CRUSH_RULE_EMIT, 0, 0);
    ASSERT_TRUE(step == steps);
    auto r = crush_add_rule(crush.get_crush_map(), rule, rno);
    ASSERT_TRUE(r >= 0);
    crush.set_rule_name(rno, rule_name);
    {
      OSDMap::Incremental pending_inc(tmp.get_epoch() + 1);
      pending_inc.crush.clear();
      crush.encode(pending_inc.crush, CEPH_FEATURES_SUPPORTED_DEFAULT);
      tmp.apply_incremental(pending_inc);
    }
    // create a replicated pool referencing the above rule
    int64_t pool_xeus_993;
    {
      OSDMap::Incremental new_pool_inc(tmp.get_epoch() + 1);
      new_pool_inc.new_pool_max =
tmp.get_pool_max(); new_pool_inc.fsid = tmp.get_fsid(); pg_pool_t empty; pool_xeus_993 = ++new_pool_inc.new_pool_max; pg_pool_t *p = new_pool_inc.get_new_pool(pool_xeus_993, &empty); p->size = 4; p->set_pg_num(4096); p->set_pgp_num(4096); p->type = pg_pool_t::TYPE_REPLICATED; p->crush_rule = rno; p->set_flag(pg_pool_t::FLAG_HASHPSPOOL); new_pool_inc.new_pool_names[pool_xeus_993] = "pool_xeus_993"; tmp.apply_incremental(new_pool_inc); } pg_t rep_pg(0, pool_xeus_993); pg_t rep_pgid = tmp.raw_pg_to_pg(rep_pg); { int from = -1; int to = -1; vector<int> rep_up; int rep_up_primary; tmp.pg_to_raw_up(rep_pgid, &rep_up, &rep_up_primary); std::cout << "pgid " << rep_up << " up " << rep_up << std::endl; ASSERT_TRUE(rep_up.size() == 4); from = *(rep_up.begin()); ASSERT_TRUE(from >= 0); auto dc_parent = tmp.crush->get_parent_of_type(from, 8 /* dc */, rno); if (dc_parent == dc1) dc_parent = dc2; else dc_parent = dc1; auto rack_parent = tmp.crush->get_parent_of_type(from, 3 /* rack */, rno); ASSERT_TRUE(dc_parent < 0); ASSERT_TRUE(rack_parent < 0); set<int> rack_parents; for (auto &i: rep_up) { if (i == from) continue; auto rack_parent = tmp.crush->get_parent_of_type(i, 3 /* rack */, rno); rack_parents.insert(rack_parent); } for (int i = 0; i < (int)get_num_osds(); i++) { if (std::find(rep_up.begin(), rep_up.end(), i) == rep_up.end()) { auto dc_p = tmp.crush->get_parent_of_type(i, 8 /* dc */, rno); auto rack_p = tmp.crush->get_parent_of_type(i, 3 /* rack */, rno); if (dc_p == dc_parent && rack_parents.find(rack_p) == rack_parents.end()) { to = i; break; } } } ASSERT_TRUE(to >= 0); ASSERT_TRUE(from != to); std::cout << "from " << from << " to " << to << std::endl; vector<pair<int32_t,int32_t>> new_pg_upmap_items; new_pg_upmap_items.push_back(make_pair(from, to)); OSDMap::Incremental pending_inc(tmp.get_epoch() + 1); pending_inc.new_pg_upmap_items[rep_pgid] = mempool::osdmap::vector<pair<int32_t,int32_t>>( new_pg_upmap_items.begin(), new_pg_upmap_items.end()); 
// (BUG_42485 continues: apply the first upmap item, build a second pair of
// items for pg 2, then verify clean_pg_upmaps removes them all)
      tmp.apply_incremental(pending_inc);
      ASSERT_TRUE(tmp.have_pg_upmaps(rep_pgid));
    }
    pg_t rep_pg2(2, pool_xeus_993);
    pg_t rep_pgid2 = tmp.raw_pg_to_pg(rep_pg2);
    {
      pg_t rep_pgid = rep_pgid2;
      vector<int> from_osds{-1, -1};
      vector<int> rep_up;
      int rep_up_primary;
      tmp.pg_to_raw_up(rep_pgid, &rep_up, &rep_up_primary);
      ASSERT_TRUE(rep_up.size() == 4);
      from_osds[0] = *(rep_up.begin());
      from_osds[1] = *(rep_up.rbegin());
      std::cout << "pgid " << rep_pgid2 << " up " << rep_up << std::endl;
      ASSERT_TRUE(*(from_osds.begin()) >= 0);
      ASSERT_TRUE(*(from_osds.rbegin()) >= 0);
      vector<pair<int32_t,int32_t>> new_pg_upmap_items;
      for (auto &from: from_osds) {
        int to = -1;
        // target the other datacenter
        auto dc_parent = tmp.crush->get_parent_of_type(from, 8 /* dc */, rno);
        if (dc_parent == dc1)
          dc_parent = dc2;
        else
          dc_parent = dc1;
        auto rack_parent = tmp.crush->get_parent_of_type(from, 3 /* rack */, rno);
        ASSERT_TRUE(dc_parent < 0);
        ASSERT_TRUE(rack_parent < 0);
        // racks already occupied by the up-set..
        set<int> rack_parents;
        for (auto &i: rep_up) {
          if (i == from) continue;
          auto rack_parent = tmp.crush->get_parent_of_type(i, 3 /* rack */, rno);
          rack_parents.insert(rack_parent);
        }
        // ..plus racks already claimed by earlier upmap items
        for (auto &i: new_pg_upmap_items) {
          auto rack_from = tmp.crush->get_parent_of_type(i.first, 3, rno);
          auto rack_to = tmp.crush->get_parent_of_type(i.second, 3, rno);
          rack_parents.insert(rack_from);
          rack_parents.insert(rack_to);
        }
        for (int i = 0; i < (int)get_num_osds(); i++) {
          if (std::find(rep_up.begin(), rep_up.end(), i) == rep_up.end()) {
            auto dc_p = tmp.crush->get_parent_of_type(i, 8 /* dc */, rno);
            auto rack_p = tmp.crush->get_parent_of_type(i, 3 /* rack */, rno);
            if (dc_p == dc_parent &&
                rack_parents.find(rack_p) == rack_parents.end()) {
              to = i;
              break;
            }
          }
        }
        ASSERT_TRUE(to >= 0);
        ASSERT_TRUE(from != to);
        std::cout << "from " << from << " to " << to << std::endl;
        new_pg_upmap_items.push_back(make_pair(from, to));
      }
      OSDMap::Incremental pending_inc(tmp.get_epoch() + 1);
      pending_inc.new_pg_upmap_items[rep_pgid] =
        mempool::osdmap::vector<pair<int32_t,int32_t>>(
          new_pg_upmap_items.begin(), new_pg_upmap_items.end());
      tmp.apply_incremental(pending_inc);
      ASSERT_TRUE(tmp.have_pg_upmaps(rep_pgid));
    }
    {
      // *maybe_remove_pg_upmaps* should remove the above upmap_item
      OSDMap::Incremental pending_inc(tmp.get_epoch() + 1);
      clean_pg_upmaps(g_ceph_context, tmp, pending_inc);
      tmp.apply_incremental(pending_inc);
      ASSERT_FALSE(tmp.have_pg_upmaps(rep_pgid));
      ASSERT_FALSE(tmp.have_pg_upmaps(rep_pgid2));
    }
  }
}

// Basic sanity for PGTempMap: pgs 3.1..999.1 plus 1.1 are inserted
// (998 entries total); 2.1 is never inserted and must not be found.
TEST(PGTempMap, basic) {
  PGTempMap m;
  pg_t a(1,1);
  for (auto i=3; i<1000; ++i) {
    pg_t x(i, 1);
    m.set(x, {static_cast<int>(i)});
  }
  pg_t b(2,1);
  m.set(a, {1, 2});
  ASSERT_NE(m.find(a), m.end());
  ASSERT_EQ(m.find(a), m.begin());
  ASSERT_EQ(m.find(b), m.end());
  ASSERT_EQ(998u, m.size());
}

TEST_F(OSDMapTest, BUG_43124) {
  set_up_map(200);
  {
    // https://tracker.ceph.com/issues/43124
    // build a temporary crush topology of 5 racks,
    // 4 hosts per rack, 10 osds per host
    OSDMap tmp; // use a tmpmap here, so we do not dirty origin map..
    tmp.deepish_copy_from(osdmap);
    const int expected_host_num = 20;
    int osd_per_host = (int)get_num_osds() / expected_host_num;
    ASSERT_GE(osd_per_host, 10);
    int host_per_rack = 4;
    int index = 0;
    int rack_index = 0;
    for (int i = 0; i < (int)get_num_osds(); i++) {
      if (i && i % osd_per_host == 0) {
        ++index;
      }
      if (i && i % (host_per_rack * osd_per_host) == 0) {
        ++rack_index;
      }
      stringstream osd_name;
      stringstream host_name;
      stringstream rack_name;
      vector<string> move_to;
      osd_name << "osd."
// (continuation of the BUG_43124 topology loop)
        << i;
      host_name << "host-" << index;
      rack_name << "rack-" << rack_index;
      move_to.push_back("root=default");
      string rack_loc = "rack=" + rack_name.str();
      move_to.push_back(rack_loc);
      string host_loc = "host=" + host_name.str();
      move_to.push_back(host_loc);
      auto r = crush_move(tmp, osd_name.str(), move_to);
      ASSERT_EQ(0, r);
    }
    // build crush rule
    CrushWrapper crush;
    get_crush(tmp, crush);
    string rule_name = "rule_angel_1944";
    int rule_type = pg_pool_t::TYPE_ERASURE;
    ASSERT_TRUE(!crush.rule_exists(rule_name));
    int rno;
    for (rno = 0; rno < crush.get_max_rules(); rno++) {
      if (!crush.rule_exists(rno))
        break;
    }
    int steps = 6;
    string root_name = "default";
    int root = crush.get_item_id(root_name);
    crush_rule *rule = crush_make_rule(steps, rule_type);
    int step = 0;
    crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSELEAF_TRIES, 5, 0);
    crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSE_TRIES, 100, 0);
    crush_rule_set_step(rule, step++, CRUSH_RULE_TAKE, root, 0);
    // 4 racks, then 3 osds (one per host) from each chosen rack
    crush_rule_set_step(rule, step++, CRUSH_RULE_CHOOSE_FIRSTN, 4, 3 /* rack */);
    crush_rule_set_step(rule, step++, CRUSH_RULE_CHOOSELEAF_INDEP, 3, 1 /* host */);
    crush_rule_set_step(rule, step++, CRUSH_RULE_EMIT, 0, 0);
    ASSERT_TRUE(step == steps);
    auto r = crush_add_rule(crush.get_crush_map(), rule, rno);
    ASSERT_TRUE(r >= 0);
    crush.set_rule_name(rno, rule_name);
    {
      OSDMap::Incremental pending_inc(tmp.get_epoch() + 1);
      pending_inc.crush.clear();
      crush.encode(pending_inc.crush, CEPH_FEATURES_SUPPORTED_DEFAULT);
      tmp.apply_incremental(pending_inc);
    }
    {
      // dump the topology and rules for debugging
      stringstream oss;
      crush.dump_tree(&oss, NULL);
      std::cout << oss.str() << std::endl;
      Formatter *f = Formatter::create("json-pretty");
      f->open_object_section("crush_rules");
      crush.dump_rules(f);
      f->close_section();
      f->flush(cout);
      delete f;
    }
    // create an erasure-coded pool referencing the above rule
    int64_t pool_angel_1944;
    {
      OSDMap::Incremental new_pool_inc(tmp.get_epoch() + 1);
      new_pool_inc.new_pool_max = tmp.get_pool_max();
      new_pool_inc.fsid = tmp.get_fsid();
      pg_pool_t empty;
      pool_angel_1944 = ++new_pool_inc.new_pool_max;
      pg_pool_t *p = new_pool_inc.get_new_pool(pool_angel_1944, &empty);
      p->size = 12;
      p->set_pg_num(4096);
      p->set_pgp_num(4096);
      p->type = pg_pool_t::TYPE_ERASURE;
      p->crush_rule = rno;
      p->set_flag(pg_pool_t::FLAG_HASHPSPOOL);
      new_pool_inc.new_pool_names[pool_angel_1944] = "pool_angel_1944";
      tmp.apply_incremental(new_pool_inc);
    }
    pg_t rep_pg(0, pool_angel_1944);
    pg_t rep_pgid = tmp.raw_pg_to_pg(rep_pg);
    {
      // insert a pg_upmap_item
      int from = -1;
      int to = -1;
      vector<int> rep_up;
      int rep_up_primary;
      tmp.pg_to_raw_up(rep_pgid, &rep_up, &rep_up_primary);
      std::cout << "pgid " << rep_pgid << " up " << rep_up << std::endl;
      ASSERT_TRUE(rep_up.size() == 12);
      from = *(rep_up.begin());
      ASSERT_TRUE(from >= 0);
      auto from_rack = tmp.crush->get_parent_of_type(from, 3 /* rack */, rno);
      // collect the hosts already serving this PG
      set<int> failure_domains;
      for (auto &osd : rep_up) {
        failure_domains.insert(tmp.crush->get_parent_of_type(osd, 1 /* host */, rno));
      }
      // pick a target on a different rack and an unused host
      for (int i = 0; i < (int)get_num_osds(); i++) {
        if (std::find(rep_up.begin(), rep_up.end(), i) == rep_up.end()) {
          auto to_rack = tmp.crush->get_parent_of_type(i, 3 /* rack */, rno);
          auto to_host = tmp.crush->get_parent_of_type(i, 1 /* host */, rno);
          if (to_rack != from_rack && failure_domains.count(to_host) == 0) {
            to = i;
            break;
          }
        }
      }
      ASSERT_TRUE(to >= 0);
      ASSERT_TRUE(from != to);
      std::cout << "from " << from << " to " << to << std::endl;
      vector<pair<int32_t,int32_t>> new_pg_upmap_items;
      new_pg_upmap_items.push_back(make_pair(from, to));
      OSDMap::Incremental pending_inc(tmp.get_epoch() + 1);
      pending_inc.new_pg_upmap_items[rep_pgid] =
        mempool::osdmap::vector<pair<int32_t,int32_t>>(
          new_pg_upmap_items.begin(), new_pg_upmap_items.end());
      tmp.apply_incremental(pending_inc);
      ASSERT_TRUE(tmp.have_pg_upmaps(rep_pgid));
    }
    {
      // *maybe_remove_pg_upmaps* should not remove the above upmap_item
      OSDMap::Incremental pending_inc(tmp.get_epoch() + 1);
      clean_pg_upmaps(g_ceph_context, tmp, pending_inc);
      tmp.apply_incremental(pending_inc);
ASSERT_TRUE(tmp.have_pg_upmaps(rep_pgid)); } } } TEST_F(OSDMapTest, BUG_48884) { set_up_map(12); unsigned int host_index = 1; for (unsigned int x=0; x < get_num_osds();) { // Create three hosts with four osds each for (unsigned int y=0; y < 4; y++) { stringstream osd_name; stringstream host_name; vector<string> move_to; osd_name << "osd." << x; host_name << "host-" << host_index; move_to.push_back("root=default"); move_to.push_back("rack=localrack"); string host_loc = "host=" + host_name.str(); move_to.push_back(host_loc); int r = crush_move(osdmap, osd_name.str(), move_to); ASSERT_EQ(0, r); x++; } host_index++; } CrushWrapper crush; get_crush(osdmap, crush); auto host_id = crush.get_item_id("localhost"); crush.remove_item(g_ceph_context, host_id, false); OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1); pending_inc.crush.clear(); crush.encode(pending_inc.crush, CEPH_FEATURES_SUPPORTED_DEFAULT); osdmap.apply_incremental(pending_inc); PGMap pgmap; osd_stat_t stats, stats_null; stats.statfs.total = 500000; stats.statfs.available = 50000; stats.statfs.omap_allocated = 50000; stats.statfs.internal_metadata = 50000; stats_null.statfs.total = 0; stats_null.statfs.available = 0; stats_null.statfs.omap_allocated = 0; stats_null.statfs.internal_metadata = 0; for (unsigned int x=0; x < get_num_osds(); x++) { if (x > 3 && x < 8) { pgmap.osd_stat.insert({x,stats_null}); } else { pgmap.osd_stat.insert({x,stats}); } } stringstream ss; boost::scoped_ptr<Formatter> f(Formatter::create("json-pretty")); print_osd_utilization(osdmap, pgmap, ss, f.get(), true, "root"); JSONParser parser; parser.parse(ss.str().c_str(), static_cast<int>(ss.str().size())); auto iter = parser.find_first(); for (const auto& bucket : (*iter)->get_array_elements()) { JSONParser parser2; parser2.parse(bucket.c_str(), static_cast<int>(bucket.size())); auto* obj = parser2.find_obj("name"); if (obj->get_data().compare("localrack") == 0) { obj = parser2.find_obj("kb"); ASSERT_EQ(obj->get_data(), "3904"); 
// Remaining BUG_48884 checks on the "localrack" bucket's JSON fields.
obj = parser2.find_obj("kb_used");
      ASSERT_EQ(obj->get_data(), "3512");
      obj = parser2.find_obj("kb_used_omap");
      ASSERT_EQ(obj->get_data(), "384");
      obj = parser2.find_obj("kb_used_meta");
      ASSERT_EQ(obj->get_data(), "384");
      obj = parser2.find_obj("kb_avail");
      ASSERT_EQ(obj->get_data(), "384");
    }
  }
}

// BUG_51842 (parameterized): builds a custom crush rule whose chooseleaf
// step uses the (rep_num, domain) pair from GetParam(), then exercises
// clean_pg_upmaps() against pool-size changes (see the following chunks).
TEST_P(OSDMapTest, BUG_51842) {
    set_up_map(3, true);
    OSDMap tmp; // use a tmpmap here, so we do not dirty origin map..
    tmp.deepish_copy_from(osdmap);
    // move every osd under root=infra-1706, one host each
    for (int i = 0; i < (int)get_num_osds(); i++) {
      stringstream osd_name;
      stringstream host_name;
      vector<string> move_to;
      osd_name << "osd." << i;
      host_name << "host=host-" << i;
      move_to.push_back("root=infra-1706");
      move_to.push_back(host_name.str());
      auto r = crush_move(tmp, osd_name.str(), move_to);
      ASSERT_EQ(0, r);
    }

    // build crush rule
    CrushWrapper crush;
    get_crush(tmp, crush);
    string rule_name = "infra-1706";
    int rule_type = pg_pool_t::TYPE_REPLICATED;
    ASSERT_TRUE(!crush.rule_exists(rule_name));
    // pick the first free rule id
    int rno;
    for (rno = 0; rno < crush.get_max_rules(); rno++) {
      if (!crush.rule_exists(rno))
        break;
    }
    string root_bucket = "infra-1706";
    int root = crush.get_item_id(root_bucket);
    int steps = 5;
    crush_rule *rule = crush_make_rule(steps, rule_type);
    int step = 0;
    crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSELEAF_TRIES, 5, 0);
    crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSE_TRIES, 100, 0);
    crush_rule_set_step(rule, step++, CRUSH_RULE_TAKE, root, 0);
    // note: it's ok to set like 'step chooseleaf_firstn 0 host'
    std::pair<int, int> param = GetParam();
    int rep_num = std::get<0>(param);
    int domain = std::get<1>(param);
    crush_rule_set_step(rule, step++, CRUSH_RULE_CHOOSELEAF_FIRSTN, rep_num, domain);
    crush_rule_set_step(rule, step++, CRUSH_RULE_EMIT, 0, 0);
    ASSERT_TRUE(step == steps);
    auto r = crush_add_rule(crush.get_crush_map(), rule, rno);
    ASSERT_TRUE(r >= 0);
    crush.set_rule_name(rno, rule_name);
    {
      OSDMap::Incremental pending_inc(tmp.get_epoch() + 1);
      pending_inc.crush.clear();
      crush.encode(pending_inc.crush,
CEPH_FEATURES_SUPPORTED_DEFAULT);
      tmp.apply_incremental(pending_inc);
    }
    {
      // dump the tree and the rules for debugging
      stringstream oss;
      crush.dump_tree(&oss, NULL);
      std::cout << oss.str() << std::endl;
      Formatter *f = Formatter::create("json-pretty");
      f->open_object_section("crush_rules");
      crush.dump_rules(f);
      f->close_section();
      f->flush(cout);
      delete f;
    }
    // create a replicated pool referencing the above rule
    int64_t pool_infra_1706;
    {
      OSDMap::Incremental new_pool_inc(tmp.get_epoch() + 1);
      new_pool_inc.new_pool_max = tmp.get_pool_max();
      new_pool_inc.fsid = tmp.get_fsid();
      pg_pool_t empty;
      pool_infra_1706 = ++new_pool_inc.new_pool_max;
      pg_pool_t *p = new_pool_inc.get_new_pool(pool_infra_1706, &empty);
      p->size = 3;
      p->min_size = 1;
      p->set_pg_num(256);
      p->set_pgp_num(256);
      p->type = pg_pool_t::TYPE_REPLICATED;
      p->crush_rule = rno;
      p->set_flag(pg_pool_t::FLAG_HASHPSPOOL);
      new_pool_inc.new_pool_names[pool_infra_1706] = "pool_infra_1706";
      tmp.apply_incremental(new_pool_inc);
    }

    // add upmaps
    pg_t rep_pg(3, pool_infra_1706);
    pg_t rep_pgid = tmp.raw_pg_to_pg(rep_pg);
    pg_t rep_pg2(4, pool_infra_1706);
    pg_t rep_pgid2 = tmp.raw_pg_to_pg(rep_pg2);
    pg_t rep_pg3(6, pool_infra_1706);
    pg_t rep_pgid3 = tmp.raw_pg_to_pg(rep_pg3);
    {
      OSDMap::Incremental pending_inc(tmp.get_epoch() + 1);
      pending_inc.new_pg_upmap[rep_pgid] = mempool::osdmap::vector<int32_t>({1,0,2});
      pending_inc.new_pg_upmap[rep_pgid2] = mempool::osdmap::vector<int32_t>({1,2,0});
      pending_inc.new_pg_upmap[rep_pgid3] = mempool::osdmap::vector<int32_t>({1,2,0});
      tmp.apply_incremental(pending_inc);
      ASSERT_TRUE(tmp.have_pg_upmaps(rep_pgid));
      ASSERT_TRUE(tmp.have_pg_upmaps(rep_pgid2));
      ASSERT_TRUE(tmp.have_pg_upmaps(rep_pgid3));
    }
    {
      // now, set pool size to 1
      // (upmaps sized for 3 replicas become stale and must be cleaned)
      OSDMap tmpmap;
      tmpmap.deepish_copy_from(tmp);
      OSDMap::Incremental new_pool_inc(tmpmap.get_epoch() + 1);
      pg_pool_t p = *tmpmap.get_pg_pool(pool_infra_1706);
      p.size = 1;
      p.last_change = new_pool_inc.epoch;
      new_pool_inc.new_pools[pool_infra_1706] = p;
      tmpmap.apply_incremental(new_pool_inc);
      OSDMap::Incremental
new_pending_inc(tmpmap.get_epoch() + 1);
      clean_pg_upmaps(g_ceph_context, tmpmap, new_pending_inc);
      tmpmap.apply_incremental(new_pending_inc);
      // check pg upmaps
      ASSERT_TRUE(!tmpmap.have_pg_upmaps(rep_pgid));
      ASSERT_TRUE(!tmpmap.have_pg_upmaps(rep_pgid2));
      ASSERT_TRUE(!tmpmap.have_pg_upmaps(rep_pgid3));
    }
    {
      // now, set pool size to 4
      // (growing the pool also invalidates the 3-wide explicit upmaps)
      OSDMap tmpmap;
      tmpmap.deepish_copy_from(tmp);
      OSDMap::Incremental new_pool_inc(tmpmap.get_epoch() + 1);
      pg_pool_t p = *tmpmap.get_pg_pool(pool_infra_1706);
      p.size = 4;
      p.last_change = new_pool_inc.epoch;
      new_pool_inc.new_pools[pool_infra_1706] = p;
      tmpmap.apply_incremental(new_pool_inc);
      OSDMap::Incremental new_pending_inc(tmpmap.get_epoch() + 1);
      clean_pg_upmaps(g_ceph_context, tmpmap, new_pending_inc);
      tmpmap.apply_incremental(new_pending_inc);
      // check pg upmaps
      ASSERT_TRUE(!tmpmap.have_pg_upmaps(rep_pgid));
      ASSERT_TRUE(!tmpmap.have_pg_upmaps(rep_pgid2));
      ASSERT_TRUE(!tmpmap.have_pg_upmaps(rep_pgid3));
    }
}

// CIDR ranges used by the blocklisting tests below (IPv4 and IPv6,
// assorted prefix lengths including /32, /127 and /128 edge cases).
const string OSDMapTest::range_addrs[] = {"198.51.100.0/22", "10.2.5.102/32", "2001:db8::/48",
  "3001:db8::/72", "4001:db8::/30", "5001:db8::/64", "6001:db8::/128", "7001:db8::/127"};
// Addresses that fall INSIDE the ranges above (boundary values included).
const string OSDMapTest::ip_addrs[] = {"198.51.100.14", "198.51.100.0", "198.51.103.255",
  "10.2.5.102",
  "2001:db8:0:0:0:0:0:0", "2001:db8:0:0:0:0001:ffff:ffff",
  "2001:db8:0:ffff:ffff:ffff:ffff:ffff",
  "3001:db8:0:0:0:0:0:0", "3001:db8:0:0:0:0001:ffff:ffff",
  "3001:db8:0:0:00ff:ffff:ffff:ffff",
  "4001:db8::", "4001:db8:0:0:0:0001:ffff:ffff",
  "4001:dbb:ffff:ffff:ffff:ffff:ffff:ffff",
  "5001:db8:0:0:0:0:0:0", "5001:db8:0:0:0:0:ffff:ffff",
  "5001:db8:0:0:ffff:ffff:ffff:ffff",
  "6001:db8:0:0:0:0:0:0",
  "7001:db8:0:0:0:0:0:0", "7001:db8:0:0:0:0:0:0001"
};
// Addresses that fall just OUTSIDE the ranges (one-off boundary values).
const string OSDMapTest::unblocked_ip_addrs[] = { "0.0.0.0", "1.1.1.1", "192.168.1.1",
  "198.51.99.255", "198.51.104.0",
  "10.2.5.101", "10.2.5.103",
  "2001:db7:ffff:ffff:ffff:ffff:ffff:ffff", "2001:db8:0001::",
  "3001:db7:ffff:ffff:ffff:ffff:ffff:ffff", "3001:db8:0:0:0100::",
  "4001:db7:ffff:ffff:ffff:ffff:ffff:ffff",
"4001:dbc::",
  "5001:db7:ffff:ffff:ffff:ffff:ffff:ffff", "5001:db8:0:0001:0:0:0:0",
  "6001:db8:0:0:0:0:0:0001",
  "7001:db7:ffff:ffff:ffff:ffff:ffff:ffff", "7001:db8:0:0:0:0:0:0002"
};

// Exact-address blocklisting: add every ip_addrs entry, verify each is
// blocklisted and every unblocked_ip_addrs entry is not, then remove them
// all and verify nothing remains blocklisted.
TEST_F(OSDMapTest, blocklisting_ips) {
  set_up_map(6); //whatever
  OSDMap::Incremental new_blocklist_inc(osdmap.get_epoch() + 1);
  for (const auto& a : ip_addrs) {
    entity_addr_t addr;
    addr.parse(a);
    addr.set_type(entity_addr_t::TYPE_LEGACY);
    new_blocklist_inc.new_blocklist[addr] = ceph_clock_now();
  }
  osdmap.apply_incremental(new_blocklist_inc);
  for (const auto& a: ip_addrs) {
    entity_addr_t addr;
    addr.parse(a);
    addr.set_type(entity_addr_t::TYPE_LEGACY);
    ASSERT_TRUE(osdmap.is_blocklisted(addr, g_ceph_context));
  }
  for (const auto& a: unblocked_ip_addrs) {
    entity_addr_t addr;
    addr.parse(a);
    addr.set_type(entity_addr_t::TYPE_LEGACY);
    ASSERT_FALSE(osdmap.is_blocklisted(addr, g_ceph_context));
  }

  OSDMap::Incremental rm_blocklist_inc(osdmap.get_epoch() + 1);
  for (const auto& a : ip_addrs) {
    entity_addr_t addr;
    addr.parse(a);
    addr.set_type(entity_addr_t::TYPE_LEGACY);
    rm_blocklist_inc.old_blocklist.push_back(addr);
  }
  osdmap.apply_incremental(rm_blocklist_inc);
  for (const auto& a: ip_addrs) {
    entity_addr_t addr;
    addr.parse(a);
    addr.set_type(entity_addr_t::TYPE_LEGACY);
    ASSERT_FALSE(osdmap.is_blocklisted(addr, g_ceph_context));
  }
  for (const auto& a: unblocked_ip_addrs) {
    entity_addr_t addr;
    addr.parse(a);
    addr.set_type(entity_addr_t::TYPE_LEGACY);
    bool blocklisted = osdmap.is_blocklisted(addr, g_ceph_context);
    if (blocklisted) {
      cout << "erroneously blocklisted " << addr << std::endl;
    }
    EXPECT_FALSE(blocklisted);
  }
}

// CIDR-range blocklisting: add every range_addrs entry as TYPE_CIDR, then
// check membership of the in-range and out-of-range address lists.
TEST_F(OSDMapTest, blocklisting_ranges) {
  set_up_map(6); //whatever
  OSDMap::Incremental range_blocklist_inc(osdmap.get_epoch() + 1);
  for (const auto& a : range_addrs) {
    entity_addr_t addr;
    addr.parse(a);
    addr.type = entity_addr_t::TYPE_CIDR;
    range_blocklist_inc.new_range_blocklist[addr] = ceph_clock_now();
  }
  osdmap.apply_incremental(range_blocklist_inc);
  for (const auto& a: ip_addrs) {
entity_addr_t addr;
    addr.parse(a);
    addr.set_type(entity_addr_t::TYPE_LEGACY);
    bool blocklisted = osdmap.is_blocklisted(addr, g_ceph_context);
    if (!blocklisted) {
      cout << "erroneously not blocklisted " << addr << std::endl;
    }
    ASSERT_TRUE(blocklisted);
  }
  for (const auto& a: unblocked_ip_addrs) {
    entity_addr_t addr;
    addr.parse(a);
    addr.set_type(entity_addr_t::TYPE_LEGACY);
    bool blocklisted = osdmap.is_blocklisted(addr, g_ceph_context);
    if (blocklisted) {
      cout << "erroneously blocklisted " << addr << std::endl;
    }
    EXPECT_FALSE(blocklisted);
  }

  // remove all the ranges again and verify nothing is left blocklisted
  OSDMap::Incremental rm_range_blocklist(osdmap.get_epoch() + 1);
  for (const auto& a : range_addrs) {
    entity_addr_t addr;
    addr.parse(a);
    addr.type = entity_addr_t::TYPE_CIDR;
    rm_range_blocklist.old_range_blocklist.push_back(addr);
  }
  osdmap.apply_incremental(rm_range_blocklist);
  for (const auto& a: ip_addrs) {
    entity_addr_t addr;
    addr.parse(a);
    addr.set_type(entity_addr_t::TYPE_LEGACY);
    ASSERT_FALSE(osdmap.is_blocklisted(addr, g_ceph_context));
  }
  for (const auto& a: unblocked_ip_addrs) {
    entity_addr_t addr;
    addr.parse(a);
    addr.set_type(entity_addr_t::TYPE_LEGACY);
    bool blocklisted = osdmap.is_blocklisted(addr, g_ceph_context);
    if (blocklisted) {
      cout << "erroneously blocklisted " << addr << std::endl;
    }
    EXPECT_FALSE(blocklisted);
  }
}

// /0-range blocklisting: an IPv6 /0 must match every IPv6 address, and
// (after swapping) an IPv4 /0 must match every IPv4 address.
TEST_F(OSDMapTest, blocklisting_everything) {
  set_up_map(6); //whatever
  OSDMap::Incremental range_blocklist_inc(osdmap.get_epoch() + 1);
  entity_addr_t baddr;
  baddr.parse("2001:db8::/0");
  baddr.type = entity_addr_t::TYPE_CIDR;
  range_blocklist_inc.new_range_blocklist[baddr] = ceph_clock_now();
  osdmap.apply_incremental(range_blocklist_inc);
  for (const auto& a: ip_addrs) {
    entity_addr_t addr;
    addr.parse(a);
    addr.set_type(entity_addr_t::TYPE_LEGACY);
    if (addr.is_ipv4()) continue;
    bool blocklisted = osdmap.is_blocklisted(addr, g_ceph_context);
    if (!blocklisted) {
      cout << "erroneously not blocklisted " << addr << std::endl;
    }
    ASSERT_TRUE(blocklisted);
  }
  for (const auto& a: unblocked_ip_addrs) {
    entity_addr_t
addr;
    addr.parse(a);
    addr.set_type(entity_addr_t::TYPE_LEGACY);
    if (addr.is_ipv4()) continue;
    bool blocklisted = osdmap.is_blocklisted(addr, g_ceph_context);
    if (!blocklisted) {
      cout << "erroneously not blocklisted " << addr << std::endl;
    }
    ASSERT_TRUE(blocklisted);
  }
  // swap the IPv6 /0 range for an IPv4 /0 range and re-check the v4 side
  OSDMap::Incremental swap_blocklist_inc(osdmap.get_epoch()+1);
  swap_blocklist_inc.old_range_blocklist.push_back(baddr);
  entity_addr_t caddr;
  caddr.parse("1.1.1.1/0");
  caddr.type = entity_addr_t::TYPE_CIDR;
  swap_blocklist_inc.new_range_blocklist[caddr] = ceph_clock_now();
  osdmap.apply_incremental(swap_blocklist_inc);
  for (const auto& a: ip_addrs) {
    entity_addr_t addr;
    addr.parse(a);
    addr.set_type(entity_addr_t::TYPE_LEGACY);
    if (!addr.is_ipv4()) continue;
    bool blocklisted = osdmap.is_blocklisted(addr, g_ceph_context);
    if (!blocklisted) {
      cout << "erroneously not blocklisted " << addr << std::endl;
    }
    ASSERT_TRUE(blocklisted);
  }
  for (const auto& a: unblocked_ip_addrs) {
    entity_addr_t addr;
    addr.parse(a);
    addr.set_type(entity_addr_t::TYPE_LEGACY);
    if (!addr.is_ipv4()) continue;
    bool blocklisted = osdmap.is_blocklisted(addr, g_ceph_context);
    if (!blocklisted) {
      cout << "erroneously not blocklisted " << addr << std::endl;
    }
    ASSERT_TRUE(blocklisted);
  }
}

// ReadBalanceScore1: sanity-check calc_read_balance_score() on a random-size
// map, including the degenerate case where all primary affinities are 0
// (the call must fail and report a zero score plus an error message).
TEST_F(OSDMapTest, ReadBalanceScore1) {
  std::srand ( unsigned ( std::time(0) ) );
  uint osd_rand = rand() % 13;
  set_up_map(6 + osd_rand); //whatever
  auto pools = osdmap.get_pools();
  for (auto &[pid, pg_pool] : pools) {
    const pg_pool_t *pi = osdmap.get_pg_pool(pid);
    if (pi->is_replicated()) {
      //cout << "pool " << pid << " " << pg_pool << std::endl;
      auto replica_count = pi->get_size();
      OSDMap::read_balance_info_t rbi;
      auto rc = osdmap.calc_read_balance_score(g_ceph_context, pid, &rbi);

      // "Normal" score is between 1 and num_osds
      ASSERT_TRUE(rc == 0);
      ASSERT_TRUE(score_in_range(rbi.adjusted_score));
      ASSERT_TRUE(score_in_range(rbi.acting_adj_score));
      ASSERT_TRUE(rbi.err_msg.empty());

      // When all OSDs have primary_affinity 0, score should be 0
      auto num_osds =
get_num_osds();
      set_primary_affinity_all(0.);
      rc = osdmap.calc_read_balance_score(g_ceph_context, pid, &rbi);
      ASSERT_TRUE(rc < 0);
      ASSERT_TRUE(rbi.adjusted_score == 0.);
      ASSERT_TRUE(rbi.acting_adj_score == 0.);
      ASSERT_FALSE(rbi.err_msg.empty());

      std::vector<uint> osds;
      for (uint i = 0 ; i < num_osds ; i++) {
        osds.push_back(i);
      }

      // Change primary_affinity of some OSDs to 1 others are 0
      float fratio = 1. / (float)replica_count;
      for (int iter = 0 ; iter < 100 ; iter++) {  // run the test 100 times
        // Create random shuffle of OSDs
        std::random_shuffle (osds.begin(), osds.end());
        for (uint i = 0 ; i < num_osds ; i++) {
          if ((float(i + 1) / float(num_osds)) < fratio) {
            // not enough affinity-1 OSDs yet: score must still be invalid
            ASSERT_TRUE(osds[i] < num_osds);
            osdmap.set_primary_affinity(osds[i], CEPH_OSD_MAX_PRIMARY_AFFINITY);
            rc = osdmap.calc_read_balance_score(g_ceph_context, pid, &rbi);
            ASSERT_TRUE(rc < 0);
            ASSERT_TRUE(rbi.adjusted_score == 0.);
            ASSERT_TRUE(rbi.acting_adj_score == 0.);
            ASSERT_FALSE(rbi.err_msg.empty());
          } else {
            if (rc < 0) {
              ASSERT_TRUE(rbi.adjusted_score == 0.);
              ASSERT_TRUE(rbi.acting_adj_score == 0.);
              ASSERT_FALSE(rbi.err_msg.empty());
            } else {
              ASSERT_TRUE(score_in_range(rbi.acting_adj_score, i + 1));
              ASSERT_TRUE(rbi.err_msg.empty());
            }
          }
        }
        set_primary_affinity_all(0.);
      }
    }
  }
}

// ReadBalanceScore2: 100 iterations with randomized per-OSD primary
// affinities; creates one fresh replicated pool per iteration and checks
// the score is invalid exactly when too few OSDs can be primaries.
TEST_F(OSDMapTest, ReadBalanceScore2) {
  std::srand ( unsigned ( std::time(0) ) );
  uint osd_num = 6 + rand() % 13;
  set_up_map(osd_num, true);
  for (int i = 0 ; i < 100 ; i++) {  //running 100 random tests
    uint num_pa_osds = 0;
    float pa_sum = 0.;
    OSDMap::read_balance_info_t rbi;

    // set pa for all osds
    for (uint j = 0 ; j < osd_num ; j++) {
      uint pa = 1 + rand() % 100;
      if (pa > 80)
        pa = 100;
      if (pa < 20)
        pa = 0;
      float fpa = (float)pa / 100.;
      if (pa > 0) {
        num_pa_osds++;
        pa_sum += fpa;
      }
      osdmap.set_primary_affinity(j, int(fpa * CEPH_OSD_MAX_PRIMARY_AFFINITY));
    }
    float pa_ratio = pa_sum / (float) osd_num;

    // create a pool with the current osdmap configuration
    OSDMap::Incremental new_pool_inc(osdmap.get_epoch() + 1);
    new_pool_inc.new_pool_max
= osdmap.get_pool_max();
    new_pool_inc.fsid = osdmap.get_fsid();
    string pool_name = "rep_pool" + stringify(i);
    uint64_t new_pid = set_rep_pool(pool_name, new_pool_inc, false);
    ASSERT_TRUE(new_pid > 0);
    osdmap.apply_incremental(new_pool_inc);

    // now run the test on the pool.
    const pg_pool_t *pi = osdmap.get_pg_pool(new_pid);
    ASSERT_NE(pi, nullptr);
    ASSERT_TRUE(pi->is_replicated());
    float fratio = 1. / (float)pi->get_size();
    auto rc = osdmap.calc_read_balance_score(g_ceph_context, new_pid, &rbi);
    if (pa_ratio < fratio) {
      // not enough primary-capable OSDs for the replica count
      ASSERT_TRUE(rc < 0);
      ASSERT_FALSE(rbi.err_msg.empty());
      ASSERT_TRUE(rbi.acting_adj_score == 0.);
      ASSERT_TRUE(rbi.adjusted_score == 0.);
    } else {
      if (rc < 0) {
        ASSERT_TRUE(rbi.adjusted_score == 0.);
        ASSERT_TRUE(rbi.acting_adj_score == 0.);
        ASSERT_FALSE(rbi.err_msg.empty());
      } else {
        if (rbi.err_msg.empty()) {
          ASSERT_TRUE(score_in_range(rbi.acting_adj_score, num_pa_osds));
        }
      }
    }
  }
  //TODO add ReadBalanceScore3 - with weighted osds.
}

// read_balance_small_map: on a 4-OSD map, balance capacity then primaries
// and assert the read-balance score never worsens; the "prim_affinity"
// variant pins osd.0's primary affinity to 0 and checks it stays
// primary-free throughout.
TEST_F(OSDMapTest, read_balance_small_map) {
  // Set up a map with 4 OSDs and default pools
  set_up_map(4);
  const vector<string> test_cases = {"basic", "prim_affinity"};
  for (const auto & test : test_cases) {
    if (test == "prim_affinity") {
      // Make osd.0 off-limits for primaries by giving it prim affinity 0
      OSDMap::Incremental pending_inc0(osdmap.get_epoch() + 1);
      pending_inc0.new_primary_affinity[0] = 0;
      osdmap.apply_incremental(pending_inc0);

      // Ensure osd.0 has no primaries assigned to it
      map<uint64_t,set<pg_t>> prim_pgs_by_osd, acting_prims_by_osd;
      osdmap.get_pgs_by_osd(g_ceph_context, my_rep_pool, &prim_pgs_by_osd,
                            &acting_prims_by_osd);
      ASSERT_TRUE(prim_pgs_by_osd[0].size() == 0);
      ASSERT_TRUE(acting_prims_by_osd[0].size() == 0);
    }

    // Make sure capacity is balanced first
    set<int64_t> only_pools;
    only_pools.insert(my_rep_pool);
    OSDMap::Incremental pending_inc(osdmap.get_epoch()+1);
    osdmap.calc_pg_upmaps(g_ceph_context,
                          0,
                          100,
                          only_pools,
                          &pending_inc);
    osdmap.apply_incremental(pending_inc);

    // Get read balance score before
// balancing
    OSDMap::read_balance_info_t rb_info;
    auto rc = osdmap.calc_read_balance_score(g_ceph_context, my_rep_pool,
                                             &rb_info);
    ASSERT_TRUE(rc >= 0);
    float read_balance_score_before = rb_info.adjusted_score;

    // Calculate desired prim distributions to verify later
    map<uint64_t,set<pg_t>> prim_pgs_by_osd_2, acting_prims_by_osd_2;
    osdmap.get_pgs_by_osd(g_ceph_context, my_rep_pool, &prim_pgs_by_osd_2,
                          &acting_prims_by_osd_2);
    vector<uint64_t> osds_to_check;
    for (const auto & [osd, pgs] : prim_pgs_by_osd_2) {
      osds_to_check.push_back(osd);
    }
    map<uint64_t,float> desired_prim_dist;
    rc = osdmap.calc_desired_primary_distribution(g_ceph_context, my_rep_pool,
                                                  osds_to_check, desired_prim_dist);
    ASSERT_TRUE(rc >= 0);

    // Balance reads
    OSDMap::Incremental pending_inc_2(osdmap.get_epoch()+1);
    int num_changes = osdmap.balance_primaries(g_ceph_context, my_rep_pool,
                                               &pending_inc_2, osdmap);
    osdmap.apply_incremental(pending_inc_2);

    if (test == "prim_affinity") {
      // Ensure osd.0 still has no primaries assigned to it
      map<uint64_t,set<pg_t>> prim_pgs_by_osd_3, acting_prims_by_osd_3;
      osdmap.get_pgs_by_osd(g_ceph_context, my_rep_pool, &prim_pgs_by_osd_3,
                            &acting_prims_by_osd_3);
      ASSERT_TRUE(prim_pgs_by_osd_3[0].size() == 0);
      ASSERT_TRUE(acting_prims_by_osd_3[0].size() == 0);
    }

    // Get read balance score after balancing
    rc = osdmap.calc_read_balance_score(g_ceph_context, my_rep_pool,
                                        &rb_info);
    ASSERT_TRUE(rc >= 0);
    float read_balance_score_after = rb_info.adjusted_score;

    // Ensure the score hasn't gotten worse
    ASSERT_TRUE(read_balance_score_after <= read_balance_score_before);

    // Check for improvements
    if (num_changes > 0) {
      ASSERT_TRUE(read_balance_score_after < read_balance_score_before);

      // Check num primaries for each OSD is within range
      map<uint64_t,set<pg_t>> prim_pgs_by_osd_4, acting_prims_by_osd_4;
      osdmap.get_pgs_by_osd(g_ceph_context, my_rep_pool, &prim_pgs_by_osd_4,
                            &acting_prims_by_osd_4);
      for (const auto & [osd, primaries] : prim_pgs_by_osd_4) {
        ASSERT_TRUE(primaries.size() >=
floor(desired_prim_dist[osd] - 1));
        ASSERT_TRUE(primaries.size() <= ceil(desired_prim_dist[osd] + 1));
      }
    }
  }
}

// read_balance_large_map: same scenario as read_balance_small_map but on a
// 60-OSD map (basic + prim_affinity variants).
TEST_F(OSDMapTest, read_balance_large_map) {
  // Set up a map with 60 OSDs and default pools
  set_up_map(60);
  const vector<string> test_cases = {"basic", "prim_affinity"};
  for (const auto & test : test_cases) {
    if (test == "prim_affinity") {
      // Make osd.0 off-limits for primaries by giving it prim affinity 0
      OSDMap::Incremental pending_inc0(osdmap.get_epoch() + 1);
      pending_inc0.new_primary_affinity[0] = 0;
      osdmap.apply_incremental(pending_inc0);

      // Ensure osd.0 has no primaries assigned to it
      map<uint64_t,set<pg_t>> prim_pgs_by_osd, acting_prims_by_osd;
      osdmap.get_pgs_by_osd(g_ceph_context, my_rep_pool, &prim_pgs_by_osd,
                            &acting_prims_by_osd);
      ASSERT_TRUE(prim_pgs_by_osd[0].size() == 0);
      ASSERT_TRUE(acting_prims_by_osd[0].size() == 0);
    }

    // Make sure capacity is balanced first
    set<int64_t> only_pools;
    only_pools.insert(my_rep_pool);
    OSDMap::Incremental pending_inc(osdmap.get_epoch()+1);
    osdmap.calc_pg_upmaps(g_ceph_context,
                          0,
                          100,
                          only_pools,
                          &pending_inc);
    osdmap.apply_incremental(pending_inc);

    // Get read balance score before balancing
    OSDMap::read_balance_info_t rb_info;
    auto rc = osdmap.calc_read_balance_score(g_ceph_context, my_rep_pool,
                                             &rb_info);
    ASSERT_TRUE(rc >= 0);
    float read_balance_score_before = rb_info.adjusted_score;

    // Calculate desired prim distributions to verify later
    map<uint64_t,set<pg_t>> prim_pgs_by_osd_2, acting_prims_by_osd_2;
    osdmap.get_pgs_by_osd(g_ceph_context, my_rep_pool, &prim_pgs_by_osd_2,
                          &acting_prims_by_osd_2);
    vector<uint64_t> osds_to_check;
    for (auto [osd, pgs] : prim_pgs_by_osd_2) {
      osds_to_check.push_back(osd);
    }
    map<uint64_t,float> desired_prim_dist;
    rc = osdmap.calc_desired_primary_distribution(g_ceph_context, my_rep_pool,
                                                  osds_to_check, desired_prim_dist);
    ASSERT_TRUE(rc >= 0);

    // Balance reads
    OSDMap::Incremental pending_inc_2(osdmap.get_epoch()+1);
    int num_changes = osdmap.balance_primaries(g_ceph_context,
my_rep_pool,
                                               &pending_inc_2, osdmap);
    osdmap.apply_incremental(pending_inc_2);

    if (test == "prim_affinity") {
      // Ensure osd.0 still has no primaries assigned to it
      map<uint64_t,set<pg_t>> prim_pgs_by_osd_3, acting_prims_by_osd_3;
      osdmap.get_pgs_by_osd(g_ceph_context, my_rep_pool, &prim_pgs_by_osd_3,
                            &acting_prims_by_osd_3);
      ASSERT_TRUE(prim_pgs_by_osd_3[0].size() == 0);
      ASSERT_TRUE(acting_prims_by_osd_3[0].size() == 0);
    }

    // Get read balance score after balancing
    rc = osdmap.calc_read_balance_score(g_ceph_context, my_rep_pool,
                                        &rb_info);
    ASSERT_TRUE(rc >= 0);
    float read_balance_score_after = rb_info.adjusted_score;

    // Ensure the score hasn't gotten worse
    ASSERT_TRUE(read_balance_score_after <= read_balance_score_before);

    // Check for improvements
    if (num_changes > 0) {
      ASSERT_TRUE(read_balance_score_after < read_balance_score_before);

      // Check num primaries for each OSD is within range
      map<uint64_t,set<pg_t>> prim_pgs_by_osd_4, acting_prims_by_osd_4;
      osdmap.get_pgs_by_osd(g_ceph_context, my_rep_pool, &prim_pgs_by_osd_4,
                            &acting_prims_by_osd_4);
      for (const auto & [osd, primaries] : prim_pgs_by_osd_4) {
        ASSERT_TRUE(primaries.size() >= floor(desired_prim_dist[osd] - 1));
        ASSERT_TRUE(primaries.size() <= ceil(desired_prim_dist[osd] + 1));
      }
    }
  }
}

// read_balance_random_map: same scenario with a random OSD count (3..12)
// and, for the prim_affinity variant, a randomly chosen affinity-0 OSD.
TEST_F(OSDMapTest, read_balance_random_map) {
  // Set up map with random number of OSDs
  std::srand ( unsigned ( std::time(0) ) );
  uint num_osds = 3 + (rand() % 10);
  ASSERT_TRUE(num_osds >= 3);
  set_up_map(num_osds);
  const vector<string> test_cases = {"basic", "prim_affinity"};
  for (const auto & test : test_cases) {
    uint rand_osd = rand() % num_osds;
    if (test == "prim_affinity") {
      // Make a random OSD off-limits for primaries by giving it prim affinity 0
      ASSERT_TRUE(rand_osd < num_osds);
      OSDMap::Incremental pending_inc0(osdmap.get_epoch() + 1);
      pending_inc0.new_primary_affinity[rand_osd] = 0;
      osdmap.apply_incremental(pending_inc0);

      // Ensure the random OSD has no primaries assigned to it
      map<uint64_t,set<pg_t>> prim_pgs_by_osd,
acting_prims_by_osd;
      osdmap.get_pgs_by_osd(g_ceph_context, my_rep_pool, &prim_pgs_by_osd,
                            &acting_prims_by_osd);
      ASSERT_TRUE(prim_pgs_by_osd[rand_osd].size() == 0);
      ASSERT_TRUE(acting_prims_by_osd[rand_osd].size() == 0);
    }

    // Make sure capacity is balanced first
    set<int64_t> only_pools;
    only_pools.insert(my_rep_pool);
    OSDMap::Incremental pending_inc(osdmap.get_epoch()+1);
    osdmap.calc_pg_upmaps(g_ceph_context,
                          0,
                          100,
                          only_pools,
                          &pending_inc);
    osdmap.apply_incremental(pending_inc);

    // Get read balance score before balancing
    OSDMap::read_balance_info_t rb_info;
    auto rc = osdmap.calc_read_balance_score(g_ceph_context, my_rep_pool,
                                             &rb_info);
    ASSERT_TRUE(rc >= 0);
    float read_balance_score_before = rb_info.adjusted_score;

    // Calculate desired prim distributions to verify later
    map<uint64_t,set<pg_t>> prim_pgs_by_osd_2, acting_prims_by_osd_2;
    osdmap.get_pgs_by_osd(g_ceph_context, my_rep_pool, &prim_pgs_by_osd_2,
                          &acting_prims_by_osd_2);
    vector<uint64_t> osds_to_check;
    for (const auto & [osd, pgs] : prim_pgs_by_osd_2) {
      osds_to_check.push_back(osd);
    }
    map<uint64_t,float> desired_prim_dist;
    rc = osdmap.calc_desired_primary_distribution(g_ceph_context, my_rep_pool,
                                                  osds_to_check, desired_prim_dist);
    ASSERT_TRUE(rc >= 0);

    // Balance reads
    OSDMap::Incremental pending_inc_2(osdmap.get_epoch()+1);
    int num_changes = osdmap.balance_primaries(g_ceph_context, my_rep_pool,
                                               &pending_inc_2, osdmap);
    osdmap.apply_incremental(pending_inc_2);

    if (test == "prim_affinity") {
      // Ensure the random OSD still has no primaries assigned to it
      map<uint64_t,set<pg_t>> prim_pgs_by_osd_3, acting_prims_by_osd_3;
      osdmap.get_pgs_by_osd(g_ceph_context, my_rep_pool, &prim_pgs_by_osd_3,
                            &acting_prims_by_osd_3);
      ASSERT_TRUE(prim_pgs_by_osd_3[rand_osd].size() == 0);
      ASSERT_TRUE(acting_prims_by_osd_3[rand_osd].size() == 0);
    }

    // Get read balance score after balancing
    rc = osdmap.calc_read_balance_score(g_ceph_context, my_rep_pool,
                                        &rb_info);
    ASSERT_TRUE(rc >= 0);
    float read_balance_score_after =
rb_info.adjusted_score;

    // Ensure the score hasn't gotten worse
    ASSERT_TRUE(read_balance_score_after <= read_balance_score_before);

    // Check for improvements
    if (num_changes > 0) {
      ASSERT_TRUE(read_balance_score_after < read_balance_score_before);

      // Check num primaries for each OSD is within range
      map<uint64_t,set<pg_t>> prim_pgs_by_osd_4, acting_prims_by_osd_4;
      osdmap.get_pgs_by_osd(g_ceph_context, my_rep_pool, &prim_pgs_by_osd_4,
                            &acting_prims_by_osd_4);
      for (auto [osd, primaries] : prim_pgs_by_osd_4) {
        ASSERT_TRUE(primaries.size() >= floor(desired_prim_dist[osd] - 1));
        ASSERT_TRUE(primaries.size() <= ceil(desired_prim_dist[osd] + 1));
      }
      // NOTE(review): this loop is an exact duplicate of the one above and
      // re-runs the same assertions; it looks redundant and could be removed.
      for (auto [osd, primaries] : prim_pgs_by_osd_4) {
        ASSERT_TRUE(primaries.size() >= floor(desired_prim_dist[osd] - 1));
        ASSERT_TRUE(primaries.size() <= ceil(desired_prim_dist[osd] + 1));
      }
    }
  }
}

// Parameter sets for the TEST_P cases above: (rep_num, domain) pairs, i.e.
// 'chooseleaf firstn <rep_num> <host|osd>' with domain 1 = host, 0 = osd.
INSTANTIATE_TEST_SUITE_P(
  OSDMap,
  OSDMapTest,
  ::testing::Values(
    std::make_pair<int, int>(0, 1),   // chooseleaf firstn 0 host
    std::make_pair<int, int>(3, 1),   // chooseleaf firstn 3 host
    std::make_pair<int, int>(0, 0),   // chooseleaf firstn 0 osd
    std::make_pair<int, int>(3, 0)    // chooseleaf firstn 3 osd
  )
);
98,246
35.146799
101
cc
null
ceph-main/src/test/osd/TestOSDScrub.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2013 Cloudwatt <libre.licensing@cloudwatt.com> * * Author: Loic Dachary <loic@dachary.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU Library Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Library Public License for more details. * */ #include <stdio.h> #include <signal.h> #include <gtest/gtest.h> #include "common/async/context_pool.h" #include "osd/OSD.h" #include "os/ObjectStore.h" #include "mon/MonClient.h" #include "common/ceph_argparse.h" #include "msg/Messenger.h" class TestOSDScrub: public OSD { public: TestOSDScrub(CephContext *cct_, std::unique_ptr<ObjectStore> store_, int id, Messenger *internal, Messenger *external, Messenger *hb_front_client, Messenger *hb_back_client, Messenger *hb_front_server, Messenger *hb_back_server, Messenger *osdc_messenger, MonClient *mc, const std::string &dev, const std::string &jdev, ceph::async::io_context_pool& ictx) : OSD(cct_, std::move(store_), id, internal, external, hb_front_client, hb_back_client, hb_front_server, hb_back_server, osdc_messenger, mc, dev, jdev, ictx) { } bool scrub_time_permit(utime_t now) { return service.get_scrub_services().scrub_time_permit(now); } }; TEST(TestOSDScrub, scrub_time_permit) { ceph::async::io_context_pool icp(1); std::unique_ptr<ObjectStore> store = ObjectStore::create(g_ceph_context, g_conf()->osd_objectstore, g_conf()->osd_data, g_conf()->osd_journal); std::string cluster_msgr_type = g_conf()->ms_cluster_type.empty() ? 
g_conf().get_val<std::string>("ms_type") : g_conf()->ms_cluster_type; Messenger *ms = Messenger::create(g_ceph_context, cluster_msgr_type, entity_name_t::OSD(0), "make_checker", getpid()); ms->set_cluster_protocol(CEPH_OSD_PROTOCOL); ms->set_default_policy(Messenger::Policy::stateless_server(0)); ms->bind(g_conf()->public_addr); MonClient mc(g_ceph_context, icp); mc.build_initial_monmap(); TestOSDScrub* osd = new TestOSDScrub(g_ceph_context, std::move(store), 0, ms, ms, ms, ms, ms, ms, ms, &mc, "", "", icp); // These are now invalid int err = g_ceph_context->_conf.set_val("osd_scrub_begin_hour", "24"); ASSERT_TRUE(err < 0); //GTEST_LOG_(INFO) << " osd_scrub_begin_hour = " << g_ceph_context->_conf.get_val<int64_t>("osd_scrub_begin_hour"); err = g_ceph_context->_conf.set_val("osd_scrub_end_hour", "24"); ASSERT_TRUE(err < 0); //GTEST_LOG_(INFO) << " osd_scrub_end_hour = " << g_ceph_context->_conf.get_val<int64_t>("osd_scrub_end_hour"); err = g_ceph_context->_conf.set_val("osd_scrub_begin_week_day", "7"); ASSERT_TRUE(err < 0); //GTEST_LOG_(INFO) << " osd_scrub_begin_week_day = " << g_ceph_context->_conf.get_val<int64_t>("osd_scrub_begin_week_day"); err = g_ceph_context->_conf.set_val("osd_scrub_end_week_day", "7"); ASSERT_TRUE(err < 0); //GTEST_LOG_(INFO) << " osd_scrub_end_week_day = " << g_ceph_context->_conf.get_val<int64_t>("osd_scrub_end_week_day"); // Test all day g_ceph_context->_conf.set_val("osd_scrub_begin_hour", "0"); g_ceph_context->_conf.set_val("osd_scrub_end_hour", "0"); g_ceph_context->_conf.apply_changes(nullptr); tm tm; tm.tm_isdst = -1; strptime("2015-01-16 12:05:13", "%Y-%m-%d %H:%M:%S", &tm); utime_t now = utime_t(mktime(&tm), 0); bool ret = osd->scrub_time_permit(now); ASSERT_TRUE(ret); g_ceph_context->_conf.set_val("osd_scrub_begin_hour", "20"); g_ceph_context->_conf.set_val("osd_scrub_end_hour", "07"); g_ceph_context->_conf.apply_changes(nullptr); strptime("2015-01-16 01:05:13", "%Y-%m-%d %H:%M:%S", &tm); now = utime_t(mktime(&tm), 0); ret = 
osd->scrub_time_permit(now); ASSERT_TRUE(ret); g_ceph_context->_conf.set_val("osd_scrub_begin_hour", "20"); g_ceph_context->_conf.set_val("osd_scrub_end_hour", "07"); g_ceph_context->_conf.apply_changes(nullptr); strptime("2015-01-16 20:05:13", "%Y-%m-%d %H:%M:%S", &tm); now = utime_t(mktime(&tm), 0); ret = osd->scrub_time_permit(now); ASSERT_TRUE(ret); g_ceph_context->_conf.set_val("osd_scrub_begin_hour", "20"); g_ceph_context->_conf.set_val("osd_scrub_end_hour", "07"); g_ceph_context->_conf.apply_changes(nullptr); strptime("2015-01-16 08:05:13", "%Y-%m-%d %H:%M:%S", &tm); now = utime_t(mktime(&tm), 0); ret = osd->scrub_time_permit(now); ASSERT_FALSE(ret); g_ceph_context->_conf.set_val("osd_scrub_begin_hour", "01"); g_ceph_context->_conf.set_val("osd_scrub_end_hour", "07"); g_ceph_context->_conf.apply_changes(nullptr); strptime("2015-01-16 20:05:13", "%Y-%m-%d %H:%M:%S", &tm); now = utime_t(mktime(&tm), 0); ret = osd->scrub_time_permit(now); ASSERT_FALSE(ret); g_ceph_context->_conf.set_val("osd_scrub_begin_hour", "01"); g_ceph_context->_conf.set_val("osd_scrub_end_hour", "07"); g_ceph_context->_conf.apply_changes(nullptr); strptime("2015-01-16 00:05:13", "%Y-%m-%d %H:%M:%S", &tm); now = utime_t(mktime(&tm), 0); ret = osd->scrub_time_permit(now); ASSERT_FALSE(ret); g_ceph_context->_conf.set_val("osd_scrub_begin_hour", "01"); g_ceph_context->_conf.set_val("osd_scrub_end_hour", "07"); g_ceph_context->_conf.apply_changes(nullptr); strptime("2015-01-16 04:05:13", "%Y-%m-%d %H:%M:%S", &tm); now = utime_t(mktime(&tm), 0); ret = osd->scrub_time_permit(now); ASSERT_TRUE(ret); // Sun = 0, Mon = 1, Tue = 2, Wed = 3, Thu = 4m, Fri = 5, Sat = 6 // Jan 16, 2015 is a Friday (5) // every day g_ceph_context->_conf.set_val("osd_scrub_begin_week day", "0"); // inclusive g_ceph_context->_conf.set_val("osd_scrub_end_week_day", "0"); // not inclusive g_ceph_context->_conf.apply_changes(nullptr); strptime("2015-01-16 04:05:13", "%Y-%m-%d %H:%M:%S", &tm); now = utime_t(mktime(&tm), 0); 
ret = osd->scrub_time_permit(now); ASSERT_TRUE(ret); // test Sun - Thu g_ceph_context->_conf.set_val("osd_scrub_begin_week day", "0"); // inclusive g_ceph_context->_conf.set_val("osd_scrub_end_week_day", "5"); // not inclusive g_ceph_context->_conf.apply_changes(nullptr); strptime("2015-01-16 04:05:13", "%Y-%m-%d %H:%M:%S", &tm); now = utime_t(mktime(&tm), 0); ret = osd->scrub_time_permit(now); ASSERT_FALSE(ret); // test Fri - Sat g_ceph_context->_conf.set_val("osd_scrub_begin_week day", "5"); // inclusive g_ceph_context->_conf.set_val("osd_scrub_end_week_day", "0"); // not inclusive g_ceph_context->_conf.apply_changes(nullptr); strptime("2015-01-16 04:05:13", "%Y-%m-%d %H:%M:%S", &tm); now = utime_t(mktime(&tm), 0); ret = osd->scrub_time_permit(now); ASSERT_TRUE(ret); // Jan 14, 2015 is a Wednesday (3) // test Tue - Fri g_ceph_context->_conf.set_val("osd_scrub_begin_week day", "2"); // inclusive g_ceph_context->_conf.set_val("osd_scrub_end_week_day", "6"); // not inclusive g_ceph_context->_conf.apply_changes(nullptr); strptime("2015-01-14 04:05:13", "%Y-%m-%d %H:%M:%S", &tm); now = utime_t(mktime(&tm), 0); ret = osd->scrub_time_permit(now); ASSERT_TRUE(ret); // Test Sat - Sun g_ceph_context->_conf.set_val("osd_scrub_begin_week day", "6"); // inclusive g_ceph_context->_conf.set_val("osd_scrub_end_week_day", "1"); // not inclusive g_ceph_context->_conf.apply_changes(nullptr); strptime("2015-01-14 04:05:13", "%Y-%m-%d %H:%M:%S", &tm); now = utime_t(mktime(&tm), 0); ret = osd->scrub_time_permit(now); ASSERT_FALSE(ret); } // Local Variables: // compile-command: "cd ../.. ; make unittest_osdscrub ; ./unittest_osdscrub --log-to-stderr=true --debug-osd=20 # --gtest_filter=*.* " // End:
8,055
38.490196
139
cc
null
ceph-main/src/test/osd/TestOpStat.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- #include "include/interval_set.h" #include "include/buffer.h" #include <list> #include <map> #include <set> #include "RadosModel.h" #include "TestOpStat.h" void TestOpStat::begin(TestOp *in) { std::lock_guard l{stat_lock}; stats[in->getType()].begin(in); } void TestOpStat::end(TestOp *in) { std::lock_guard l{stat_lock}; stats[in->getType()].end(in); } void TestOpStat::TypeStatus::export_latencies(std::map<double,uint64_t> &in) const { auto i = in.begin(); auto j = latencies.begin(); int count = 0; while (j != latencies.end() && i != in.end()) { count++; if ((((double)count)/((double)latencies.size())) * 100 >= i->first) { i->second = *j; ++i; } ++j; } } std::ostream & operator<<(std::ostream &out, const TestOpStat &rhs) { std::lock_guard l{rhs.stat_lock}; for (auto i = rhs.stats.begin(); i != rhs.stats.end(); ++i) { std::map<double,uint64_t> latency; latency[10] = 0; latency[50] = 0; latency[90] = 0; latency[99] = 0; i->second.export_latencies(latency); out << i->first << " latency: " << std::endl; for (auto j = latency.begin(); j != latency.end(); ++j) { if (j->second == 0) break; out << "\t" << j->first << "th percentile: " << j->second / 1000 << "ms" << std::endl; } } return out; }
1,405
22.830508
82
cc
null
ceph-main/src/test/osd/TestOpStat.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- #include "common/ceph_mutex.h" #include "common/Cond.h" #include "include/rados/librados.hpp" #ifndef TESTOPSTAT_H #define TESTOPSTAT_H class TestOp; class TestOpStat { public: mutable ceph::mutex stat_lock = ceph::make_mutex("TestOpStat lock"); TestOpStat() = default; static uint64_t gettime() { timeval t; gettimeofday(&t,0); return (1000000*t.tv_sec) + t.tv_usec; } class TypeStatus { public: std::map<TestOp*,uint64_t> inflight; std::multiset<uint64_t> latencies; void begin(TestOp *in) { ceph_assert(!inflight.count(in)); inflight[in] = gettime(); } void end(TestOp *in) { ceph_assert(inflight.count(in)); uint64_t curtime = gettime(); latencies.insert(curtime - inflight[in]); inflight.erase(in); } void export_latencies(std::map<double,uint64_t> &in) const; }; std::map<std::string,TypeStatus> stats; void begin(TestOp *in); void end(TestOp *in); friend std::ostream & operator<<(std::ostream &, const TestOpStat &); }; std::ostream & operator<<(std::ostream &out, const TestOpStat &rhs); #endif
1,199
21.222222
71
h
null
ceph-main/src/test/osd/TestPGLog.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2013 Cloudwatt <libre.licensing@cloudwatt.com> * * Author: Loic Dachary <loic@dachary.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU Library Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Library Public License for more details. * */ #include <stdio.h> #include <signal.h> #include "gtest/gtest.h" #include "osd/PGLog.h" #include "osd/OSDMap.h" #include "include/coredumpctl.h" #include "../objectstore/store_test_fixture.h" using namespace std; struct PGLogTestBase { static hobject_t mk_obj(unsigned id) { hobject_t hoid; stringstream ss; ss << "obj_" << id; hoid.oid = ss.str(); hoid.set_hash(id); hoid.pool = 1; return hoid; } static eversion_t mk_evt(unsigned ep, unsigned v) { return eversion_t(ep, v); } static pg_log_entry_t mk_ple_mod( const hobject_t &hoid, eversion_t v, eversion_t pv, osd_reqid_t reqid) { pg_log_entry_t e; e.mark_unrollbackable(); e.op = pg_log_entry_t::MODIFY; e.soid = hoid; e.version = v; e.prior_version = pv; e.reqid = reqid; return e; } static pg_log_entry_t mk_ple_dt( const hobject_t &hoid, eversion_t v, eversion_t pv, osd_reqid_t reqid) { pg_log_entry_t e; e.mark_unrollbackable(); e.op = pg_log_entry_t::DELETE; e.soid = hoid; e.version = v; e.prior_version = pv; e.reqid = reqid; return e; } static pg_log_entry_t mk_ple_ldt( const hobject_t &hoid, eversion_t v, eversion_t pv) { pg_log_entry_t e; e.mark_unrollbackable(); e.op = pg_log_entry_t::LOST_DELETE; e.soid = hoid; e.version = v; e.prior_version = pv; return e; } static pg_log_entry_t 
mk_ple_mod_rb( const hobject_t &hoid, eversion_t v, eversion_t pv, osd_reqid_t reqid) { pg_log_entry_t e; e.op = pg_log_entry_t::MODIFY; e.soid = hoid; e.version = v; e.prior_version = pv; e.reqid = reqid; return e; } static pg_log_entry_t mk_ple_dt_rb( const hobject_t &hoid, eversion_t v, eversion_t pv, osd_reqid_t reqid) { pg_log_entry_t e; e.op = pg_log_entry_t::DELETE; e.soid = hoid; e.version = v; e.prior_version = pv; e.reqid = reqid; return e; } static pg_log_entry_t mk_ple_err( const hobject_t &hoid, eversion_t v, osd_reqid_t reqid) { pg_log_entry_t e; e.op = pg_log_entry_t::ERROR; e.soid = hoid; e.version = v; e.prior_version = eversion_t(0, 0); e.reqid = reqid; return e; } static pg_log_entry_t mk_ple_mod( const hobject_t &hoid, eversion_t v, eversion_t pv) { return mk_ple_mod(hoid, v, pv, osd_reqid_t()); } static pg_log_entry_t mk_ple_dt( const hobject_t &hoid, eversion_t v, eversion_t pv) { return mk_ple_dt(hoid, v, pv, osd_reqid_t()); } static pg_log_entry_t mk_ple_mod_rb( const hobject_t &hoid, eversion_t v, eversion_t pv) { return mk_ple_mod_rb(hoid, v, pv, osd_reqid_t()); } static pg_log_entry_t mk_ple_dt_rb( const hobject_t &hoid, eversion_t v, eversion_t pv) { return mk_ple_dt_rb(hoid, v, pv, osd_reqid_t()); } static pg_log_entry_t mk_ple_err( const hobject_t &hoid, eversion_t v) { return mk_ple_err(hoid, v, osd_reqid_t()); } }; // PGLogTestBase class PGLogTest : virtual public ::testing::Test, protected PGLog, public PGLogTestBase { public: PGLogTest() : PGLog(g_ceph_context) {} void SetUp() override { missing.may_include_deletes = true; } #include "common/ceph_context.h" #include "common/config.h" void TearDown() override { clear(); } struct TestCase { list<pg_log_entry_t> base; list<pg_log_entry_t> auth; list<pg_log_entry_t> div; pg_missing_t init; pg_missing_t final; set<hobject_t> toremove; list<pg_log_entry_t> torollback; bool deletes_during_peering; private: IndexedLog fullauth; IndexedLog fulldiv; pg_info_t authinfo; pg_info_t divinfo; 
public: TestCase() : deletes_during_peering(false) {} void setup() { init.may_include_deletes = !deletes_during_peering; final.may_include_deletes = !deletes_during_peering; fullauth.log.insert(fullauth.log.end(), base.begin(), base.end()); fullauth.log.insert(fullauth.log.end(), auth.begin(), auth.end()); fulldiv.log.insert(fulldiv.log.end(), base.begin(), base.end()); fulldiv.log.insert(fulldiv.log.end(), div.begin(), div.end()); fullauth.head = authinfo.last_update = fullauth.log.rbegin()->version; authinfo.last_complete = fullauth.log.rbegin()->version; authinfo.log_tail = fullauth.log.begin()->version; authinfo.log_tail.version--; fullauth.tail = authinfo.log_tail; authinfo.last_backfill = hobject_t::get_max(); fulldiv.head = divinfo.last_update = fulldiv.log.rbegin()->version; divinfo.last_complete = eversion_t(); divinfo.log_tail = fulldiv.log.begin()->version; divinfo.log_tail.version--; fulldiv.tail = divinfo.log_tail; divinfo.last_backfill = hobject_t::get_max(); if (init.get_items().empty()) { divinfo.last_complete = divinfo.last_update; } else { eversion_t fmissing = init.get_items().at(init.get_rmissing().begin()->second).need; for (list<pg_log_entry_t>::const_iterator i = fulldiv.log.begin(); i != fulldiv.log.end(); ++i) { if (i->version < fmissing) divinfo.last_complete = i->version; else break; } } fullauth.index(); fulldiv.index(); } void set_div_bounds(eversion_t head, eversion_t tail) { fulldiv.tail = divinfo.log_tail = tail; fulldiv.head = divinfo.last_update = head; } void set_auth_bounds(eversion_t head, eversion_t tail) { fullauth.tail = authinfo.log_tail = tail; fullauth.head = authinfo.last_update = head; } const IndexedLog &get_fullauth() const { return fullauth; } const IndexedLog &get_fulldiv() const { return fulldiv; } const pg_info_t &get_authinfo() const { return authinfo; } const pg_info_t &get_divinfo() const { return divinfo; } }; // struct TestCase struct LogHandler : public PGLog::LogEntryHandler { set<hobject_t> removed; 
list<pg_log_entry_t> rolledback; void rollback( const pg_log_entry_t &entry) override { rolledback.push_back(entry); } void rollforward( const pg_log_entry_t &entry) override {} void remove( const hobject_t &hoid) override { removed.insert(hoid); } void try_stash(const hobject_t &, version_t) override { // lost/unfound cases are not tested yet } void trim( const pg_log_entry_t &entry) override {} }; template <typename missing_t> void verify_missing( const TestCase &tcase, const missing_t &missing) { ASSERT_EQ(tcase.final.get_items().size(), missing.get_items().size()); for (auto i = missing.get_items().begin(); i != missing.get_items().end(); ++i) { EXPECT_TRUE(tcase.final.get_items().count(i->first)); EXPECT_EQ(tcase.final.get_items().find(i->first)->second.need, i->second.need); EXPECT_EQ(tcase.final.get_items().find(i->first)->second.have, i->second.have); } bool correct = missing.debug_verify_from_init(tcase.init, &(std::cout)); ASSERT_TRUE(correct); } void verify_sideeffects( const TestCase &tcase, const LogHandler &handler) { ASSERT_EQ(tcase.toremove.size(), handler.removed.size()); ASSERT_EQ(tcase.torollback.size(), handler.rolledback.size()); { list<pg_log_entry_t>::const_iterator titer = tcase.torollback.begin(); list<pg_log_entry_t>::const_iterator hiter = handler.rolledback.begin(); for (; titer != tcase.torollback.end(); ++titer, ++hiter) { EXPECT_EQ(titer->version, hiter->version); } } { set<hobject_t>::const_iterator titer = tcase.toremove.begin(); set<hobject_t>::const_iterator hiter = handler.removed.begin(); for (; titer != tcase.toremove.end(); ++titer, ++hiter) { EXPECT_EQ(*titer, *hiter); } } } void test_merge_log(const TestCase &tcase) { clear(); log = tcase.get_fulldiv(); pg_info_t info = tcase.get_divinfo(); missing = tcase.init; missing.flush(); IndexedLog olog; olog = tcase.get_fullauth(); pg_info_t oinfo = tcase.get_authinfo(); LogHandler h; bool dirty_info = false; bool dirty_big_info = false; merge_log( oinfo, std::move(olog), 
pg_shard_t(1, shard_id_t(0)), info, &h, dirty_info, dirty_big_info); ASSERT_EQ(info.last_update, oinfo.last_update); verify_missing(tcase, missing); verify_sideeffects(tcase, h); } void test_proc_replica_log(const TestCase &tcase) { clear(); log = tcase.get_fullauth(); pg_info_t info = tcase.get_authinfo(); pg_missing_t omissing = tcase.init; IndexedLog olog; olog = tcase.get_fulldiv(); pg_info_t oinfo = tcase.get_divinfo(); proc_replica_log( oinfo, olog, omissing, pg_shard_t(1, shard_id_t(0))); ceph_assert(oinfo.last_update >= log.tail); if (!tcase.base.empty()) { ASSERT_EQ(tcase.base.rbegin()->version, oinfo.last_update); } for (list<pg_log_entry_t>::const_iterator i = tcase.auth.begin(); i != tcase.auth.end(); ++i) { if (i->version > oinfo.last_update) { if (i->is_delete() && tcase.deletes_during_peering) { omissing.rm(i->soid, i->version); } else { omissing.add_next_event(*i); } } } verify_missing(tcase, omissing); } // test_proc_replica_log void run_test_case(const TestCase &tcase) { test_merge_log(tcase); test_proc_replica_log(tcase); } }; // class PGLogTest struct TestHandler : public PGLog::LogEntryHandler { list<hobject_t> &removed; explicit TestHandler(list<hobject_t> &removed) : removed(removed) {} void rollback( const pg_log_entry_t &entry) override {} void rollforward( const pg_log_entry_t &entry) override {} void remove( const hobject_t &hoid) override { removed.push_back(hoid); } void cant_rollback(const pg_log_entry_t &entry) {} void try_stash(const hobject_t &, version_t) override { // lost/unfound cases are not tested yet } void trim( const pg_log_entry_t &entry) override {} }; TEST_F(PGLogTest, rewind_divergent_log) { /* +----------------+ | log | +--------+-------+ | |object | |version | hash | | | | tail > (1,1) | x5 | | | | | | | | (1,4) | x9 < newhead | MODIFY | | | | | head > (1,5) | x9 | | DELETE | | | | | +--------+-------+ */ { clear(); pg_info_t info; list<hobject_t> remove_snap; bool dirty_info = false; bool dirty_big_info = false; 
hobject_t divergent_object; eversion_t divergent_version; eversion_t newhead; hobject_t divergent; divergent.set_hash(0x9); { pg_log_entry_t e; e.mark_unrollbackable(); e.version = eversion_t(1, 1); e.soid.set_hash(0x5); log.tail = e.version; log.log.push_back(e); e.version = newhead = eversion_t(1, 4); e.soid = divergent; e.op = pg_log_entry_t::MODIFY; log.log.push_back(e); e.version = divergent_version = eversion_t(1, 5); e.prior_version = eversion_t(1, 4); e.soid = divergent; divergent_object = e.soid; e.op = pg_log_entry_t::DELETE; log.log.push_back(e); log.head = e.version; log.index(); info.last_update = log.head; info.last_complete = log.head; } EXPECT_FALSE(missing.have_missing()); EXPECT_EQ(3U, log.log.size()); EXPECT_TRUE(remove_snap.empty()); EXPECT_EQ(log.head, info.last_update); EXPECT_EQ(log.head, info.last_complete); EXPECT_FALSE(is_dirty()); EXPECT_FALSE(dirty_info); EXPECT_FALSE(dirty_big_info); TestHandler h(remove_snap); rewind_divergent_log(newhead, info, &h, dirty_info, dirty_big_info); EXPECT_TRUE(log.objects.count(divergent)); EXPECT_TRUE(missing.is_missing(divergent_object)); EXPECT_EQ(1U, log.objects.count(divergent_object)); EXPECT_EQ(2U, log.log.size()); EXPECT_TRUE(remove_snap.empty()); EXPECT_EQ(newhead, info.last_update); EXPECT_EQ(newhead, info.last_complete); EXPECT_TRUE(is_dirty()); EXPECT_TRUE(dirty_info); EXPECT_TRUE(dirty_big_info); } /* +----------------+ | log | +--------+-------+ | |object | |version | hash | | | | tail > (1,1) | NULL | | | | | (1,4) | NULL < newhead | | | head > (1,5) | x9 | | | | +--------+-------+ */ { clear(); pg_info_t info; list<hobject_t> remove_snap; bool dirty_info = false; bool dirty_big_info = false; hobject_t divergent_object; eversion_t divergent_version; eversion_t prior_version; eversion_t newhead; { pg_log_entry_t e; e.mark_unrollbackable(); info.log_tail = log.tail = eversion_t(1, 1); newhead = eversion_t(1, 3); e.version = divergent_version = eversion_t(1, 5); e.soid.set_hash(0x9); 
divergent_object = e.soid; e.op = pg_log_entry_t::DELETE; e.prior_version = prior_version = eversion_t(0, 2); log.log.push_back(e); log.head = e.version; } EXPECT_FALSE(missing.have_missing()); EXPECT_EQ(1U, log.log.size()); EXPECT_TRUE(remove_snap.empty()); EXPECT_FALSE(is_dirty()); EXPECT_FALSE(dirty_info); EXPECT_FALSE(dirty_big_info); TestHandler h(remove_snap); rewind_divergent_log(newhead, info, &h, dirty_info, dirty_big_info); EXPECT_TRUE(missing.is_missing(divergent_object)); EXPECT_EQ(0U, log.objects.count(divergent_object)); EXPECT_TRUE(log.empty()); EXPECT_TRUE(remove_snap.empty()); EXPECT_TRUE(is_dirty()); EXPECT_TRUE(dirty_info); EXPECT_TRUE(dirty_big_info); } // Test for 13965 { clear(); list<hobject_t> remove_snap; pg_info_t info; info.log_tail = log.tail = eversion_t(1, 5); info.last_update = eversion_t(1, 6); bool dirty_info = false; bool dirty_big_info = false; { pg_log_entry_t e; e.mark_unrollbackable(); e.version = eversion_t(1, 5); e.soid.set_hash(0x9); add(e); } { pg_log_entry_t e; e.mark_unrollbackable(); e.version = eversion_t(1, 6); e.soid.set_hash(0x10); add(e); } TestHandler h(remove_snap); roll_forward_to(eversion_t(1, 6), &h); rewind_divergent_log(eversion_t(1, 5), info, &h, dirty_info, dirty_big_info); pg_log_t log; reset_backfill_claim_log(log, &h); } } TEST_F(PGLogTest, merge_old_entry) { // entries > last_backfill are silently ignored { clear(); ObjectStore::Transaction t; pg_log_entry_t oe; oe.mark_unrollbackable(); pg_info_t info; list<hobject_t> remove_snap; info.last_backfill = hobject_t(); info.last_backfill.set_hash(100); oe.soid.set_hash(2); ASSERT_GT(oe.soid, info.last_backfill); EXPECT_FALSE(is_dirty()); EXPECT_TRUE(remove_snap.empty()); EXPECT_TRUE(t.empty()); EXPECT_FALSE(missing.have_missing()); EXPECT_TRUE(log.empty()); TestHandler h(remove_snap); merge_old_entry(t, oe, info, &h); EXPECT_FALSE(is_dirty()); EXPECT_TRUE(remove_snap.empty()); EXPECT_TRUE(t.empty()); EXPECT_FALSE(missing.have_missing()); 
EXPECT_TRUE(log.empty()); } // the new entry (from the logs) has a version that is higher than // the old entry (from the log entry given in argument) : do // nothing and return false { clear(); ObjectStore::Transaction t; pg_info_t info; list<hobject_t> remove_snap; pg_log_entry_t ne; ne.mark_unrollbackable(); ne.version = eversion_t(2,1); log.add(ne); EXPECT_FALSE(is_dirty()); EXPECT_TRUE(remove_snap.empty()); EXPECT_TRUE(t.empty()); EXPECT_FALSE(missing.have_missing()); EXPECT_EQ(1U, log.log.size()); EXPECT_EQ(ne.version, log.log.front().version); // the newer entry ( from the logs ) can be DELETE { log.log.front().op = pg_log_entry_t::DELETE; pg_log_entry_t oe; oe.mark_unrollbackable(); oe.version = eversion_t(1,1); TestHandler h(remove_snap); merge_old_entry(t, oe, info, &h); } // if the newer entry is not DELETE, the object must be in missing { pg_log_entry_t &ne = log.log.front(); ne.op = pg_log_entry_t::MODIFY; missing.add_next_event(ne); pg_log_entry_t oe; oe.mark_unrollbackable(); oe.version = eversion_t(1,1); TestHandler h(remove_snap); merge_old_entry(t, oe, info, &h); missing.rm(ne.soid, ne.version); } missing.flush(); EXPECT_FALSE(is_dirty()); EXPECT_FALSE(remove_snap.empty()); EXPECT_TRUE(t.empty()); EXPECT_FALSE(missing.have_missing()); EXPECT_EQ(1U, log.log.size()); EXPECT_EQ(ne.version, log.log.front().version); } // the new entry (from the logs) has a version that is lower than // the old entry (from the log entry given in argument) and // old and new are delete : do nothing and return false { clear(); ObjectStore::Transaction t; pg_log_entry_t oe; oe.mark_unrollbackable(); pg_info_t info; list<hobject_t> remove_snap; pg_log_entry_t ne; ne.mark_unrollbackable(); ne.version = eversion_t(1,1); ne.op = pg_log_entry_t::DELETE; log.add(ne); oe.version = eversion_t(2,1); oe.op = pg_log_entry_t::DELETE; EXPECT_FALSE(is_dirty()); EXPECT_TRUE(remove_snap.empty()); EXPECT_TRUE(t.empty()); EXPECT_FALSE(missing.have_missing()); EXPECT_EQ(1U, log.log.size()); 
TestHandler h(remove_snap); merge_old_entry(t, oe, info, &h); EXPECT_FALSE(is_dirty()); EXPECT_TRUE(remove_snap.empty()); EXPECT_TRUE(t.empty()); EXPECT_FALSE(missing.have_missing()); EXPECT_EQ(1U, log.log.size()); } // the new entry (from the logs) has a version that is lower than // the old entry (from the log entry given in argument) and // old is update and new is DELETE : // if the object is in missing, it is removed { clear(); ObjectStore::Transaction t; pg_log_entry_t oe; oe.mark_unrollbackable(); pg_info_t info; list<hobject_t> remove_snap; pg_log_entry_t ne; ne.mark_unrollbackable(); ne.version = eversion_t(1,1); ne.op = pg_log_entry_t::DELETE; log.add(ne); oe.version = eversion_t(2,1); oe.op = pg_log_entry_t::MODIFY; missing.add_next_event(oe); missing.flush(); EXPECT_FALSE(is_dirty()); EXPECT_TRUE(remove_snap.empty()); EXPECT_TRUE(t.empty()); EXPECT_TRUE(missing.is_missing(oe.soid)); EXPECT_EQ(1U, log.log.size()); TestHandler h(remove_snap); merge_old_entry(t, oe, info, &h); missing.flush(); EXPECT_FALSE(is_dirty()); EXPECT_TRUE(remove_snap.size() > 0); EXPECT_TRUE(t.empty()); EXPECT_FALSE(missing.have_missing()); EXPECT_EQ(1U, log.log.size()); } // there is no new entry (from the logs) and // the old entry (from the log entry given in argument) is not a CLONE and // the old entry prior_version is greater than the tail of the log : // do nothing and return false { clear(); ObjectStore::Transaction t; pg_log_entry_t oe; oe.mark_unrollbackable(); pg_info_t info; list<hobject_t> remove_snap; info.log_tail = eversion_t(1,1); oe.op = pg_log_entry_t::MODIFY; oe.prior_version = eversion_t(2,1); missing_add(oe.soid, oe.prior_version, eversion_t()); missing.flush(); EXPECT_FALSE(is_dirty()); EXPECT_TRUE(remove_snap.empty()); EXPECT_TRUE(t.empty()); EXPECT_TRUE(log.empty()); TestHandler h(remove_snap); merge_old_entry(t, oe, info, &h); missing.flush(); EXPECT_FALSE(is_dirty()); EXPECT_TRUE(remove_snap.empty()); EXPECT_TRUE(t.empty()); EXPECT_TRUE(log.empty()); } 
  // there is no new entry (from the logs) and
  // the old entry (from the log entry given in argument) is not a CLONE and
  // the old entry (from the log entry given in argument) is not a DELETE and
  // the old entry prior_version is lower than the tail of the log :
  // add the old object to the remove_snap list and
  // add the old object to divergent priors and
  // add or update the prior_version of the object to missing and
  // return false
  {
    clear();
    ObjectStore::Transaction t;
    pg_log_entry_t oe;
    oe.mark_unrollbackable();
    pg_info_t info;
    list<hobject_t> remove_snap;
    info.log_tail = eversion_t(2,1);
    oe.soid.set_hash(1);
    oe.op = pg_log_entry_t::MODIFY;
    oe.prior_version = eversion_t(1,1);
    EXPECT_FALSE(is_dirty());
    EXPECT_TRUE(remove_snap.empty());
    EXPECT_TRUE(t.empty());
    EXPECT_FALSE(missing.have_missing());
    EXPECT_TRUE(log.empty());
    TestHandler h(remove_snap);
    merge_old_entry(t, oe, info, &h);
    EXPECT_TRUE(is_dirty());
    EXPECT_EQ(oe.soid, remove_snap.front());
    EXPECT_TRUE(t.empty());
    EXPECT_TRUE(missing.is_missing(oe.soid));
    EXPECT_TRUE(log.empty());
  }

  // there is no new entry (from the logs) and
  // the old entry (from the log entry given in argument) is not a CLONE and
  // the old entry (from the log entry given in argument) is a DELETE and
  // the old entry prior_version is lower than the tail of the log :
  // add the old object to divergent priors and
  // add or update the prior_version of the object to missing and
  // return false
  {
    clear();
    ObjectStore::Transaction t;
    pg_log_entry_t oe;
    oe.mark_unrollbackable();
    pg_info_t info;
    list<hobject_t> remove_snap;
    info.log_tail = eversion_t(2,1);
    oe.soid.set_hash(1);
    oe.op = pg_log_entry_t::DELETE;
    oe.prior_version = eversion_t(1,1);
    EXPECT_FALSE(is_dirty());
    EXPECT_TRUE(remove_snap.empty());
    EXPECT_TRUE(t.empty());
    EXPECT_FALSE(missing.have_missing());
    EXPECT_TRUE(log.empty());
    TestHandler h(remove_snap);
    merge_old_entry(t, oe, info, &h);
    // unlike the MODIFY case above, a divergent DELETE does NOT queue the
    // object on remove_snap; it only lands in missing
    EXPECT_TRUE(is_dirty());
    EXPECT_TRUE(remove_snap.empty());
    EXPECT_TRUE(t.empty());
    EXPECT_TRUE(missing.is_missing(oe.soid));
    EXPECT_TRUE(log.empty());
  }

  // there is no new entry (from the logs) and
  // the old entry (from the log entry given in argument) is not a CLONE and
  // the old entry (from the log entry given in argument) is not a DELETE and
  // the old entry prior_version is eversion_t() :
  // add the old object to the remove_snap list and
  // remove the prior_version of the object from missing, if any and
  // return false
  {
    clear();
    ObjectStore::Transaction t;
    pg_log_entry_t oe;
    oe.mark_unrollbackable();
    pg_info_t info;
    list<hobject_t> remove_snap;
    info.log_tail = eversion_t(10,1);
    oe.soid.set_hash(1);
    oe.op = pg_log_entry_t::MODIFY;
    oe.prior_version = eversion_t();
    missing.add(oe.soid, eversion_t(1,1), eversion_t(), false);
    missing.flush();
    EXPECT_FALSE(is_dirty());
    EXPECT_TRUE(remove_snap.empty());
    EXPECT_TRUE(t.empty());
    EXPECT_TRUE(missing.is_missing(oe.soid));
    EXPECT_TRUE(log.empty());
    TestHandler h(remove_snap);
    merge_old_entry(t, oe, info, &h);
    missing.flush();
    EXPECT_FALSE(is_dirty());
    EXPECT_EQ(oe.soid, remove_snap.front());
    EXPECT_TRUE(t.empty());
    EXPECT_FALSE(missing.have_missing());
    EXPECT_TRUE(log.empty());
  }
}

// Exercises merge_log() over a matrix of relationships between the local
// log and an authoritative log (olog): matching bounds, extended tails,
// divergent entries and divergent heads.
TEST_F(PGLogTest, merge_log) {
  // head and tail match, last_backfill is set:
  // noop
  {
    clear();
    pg_log_t olog;
    pg_info_t oinfo;
    pg_shard_t fromosd;
    pg_info_t info;
    list<hobject_t> remove_snap;
    bool dirty_info = false;
    bool dirty_big_info = false;

    hobject_t last_backfill(object_t("oname"), string("key"), 1, 234, 1, "");
    info.last_backfill = last_backfill;
    eversion_t stat_version(10, 1);
    info.stats.version = stat_version;
    log.tail = olog.tail = eversion_t(1, 1);
    log.head = olog.head = eversion_t(2, 1);

    EXPECT_FALSE(missing.have_missing());
    EXPECT_EQ(0U, log.log.size());
    EXPECT_EQ(stat_version, info.stats.version);
    EXPECT_TRUE(remove_snap.empty());
    EXPECT_EQ(last_backfill, info.last_backfill);
    EXPECT_TRUE(info.purged_snaps.empty());
    EXPECT_FALSE(is_dirty());
    EXPECT_FALSE(dirty_info);
    EXPECT_FALSE(dirty_big_info);

    TestHandler // declaration continues on the following source line
h(remove_snap); // completes the 'TestHandler h(remove_snap);' declaration begun on the previous source line
    merge_log(oinfo, std::move(olog), fromosd, info, &h,
              dirty_info, dirty_big_info);

    // identical logs: nothing changed, nothing marked dirty
    EXPECT_FALSE(missing.have_missing());
    EXPECT_EQ(0U, log.log.size());
    EXPECT_EQ(stat_version, info.stats.version);
    EXPECT_TRUE(remove_snap.empty());
    EXPECT_TRUE(info.purged_snaps.empty());
    EXPECT_FALSE(is_dirty());
    EXPECT_FALSE(dirty_info);
    EXPECT_FALSE(dirty_big_info);
  }

  // head and tail match, last_backfill is not set: info.stats is
  // copied from oinfo.stats but info.stats.reported_* is guaranteed to
  // never be replaced by a lower version
  {
    clear();
    pg_log_t olog;
    pg_info_t oinfo;
    pg_shard_t fromosd;
    pg_info_t info;
    list<hobject_t> remove_snap;
    bool dirty_info = false;
    bool dirty_big_info = false;

    eversion_t stat_version(10, 1);
    oinfo.stats.version = stat_version;
    info.stats.reported_seq = 1;
    info.stats.reported_epoch = 10;
    oinfo.stats.reported_seq = 1;
    oinfo.stats.reported_epoch = 1;
    log.tail = olog.tail = eversion_t(1, 1);
    log.head = olog.head = eversion_t(2, 1);
    missing.may_include_deletes = false;

    EXPECT_FALSE(missing.have_missing());
    EXPECT_EQ(0U, log.log.size());
    EXPECT_EQ(eversion_t(), info.stats.version);
    EXPECT_EQ(1ull, info.stats.reported_seq);
    EXPECT_EQ(10u, info.stats.reported_epoch);
    EXPECT_TRUE(remove_snap.empty());
    EXPECT_TRUE(info.last_backfill.is_max());
    EXPECT_TRUE(info.purged_snaps.empty());
    EXPECT_FALSE(is_dirty());
    EXPECT_FALSE(dirty_info);
    EXPECT_FALSE(dirty_big_info);

    TestHandler h(remove_snap);
    merge_log(oinfo, std::move(olog), fromosd, info, &h,
              dirty_info, dirty_big_info);

    // stats.version is taken from oinfo, but reported_* keeps the local
    // (higher) values
    EXPECT_FALSE(missing.have_missing());
    EXPECT_EQ(0U, log.log.size());
    EXPECT_EQ(stat_version, info.stats.version);
    EXPECT_EQ(1ull, info.stats.reported_seq);
    EXPECT_EQ(10u, info.stats.reported_epoch);
    EXPECT_TRUE(remove_snap.empty());
    EXPECT_TRUE(info.purged_snaps.empty());
    EXPECT_FALSE(is_dirty());
    EXPECT_FALSE(dirty_info);
    EXPECT_FALSE(dirty_big_info);
  }

  /* Before
    +--------------------------+
    | log             olog     |
    +--------+-------+---------+
    |        |object |         |
    |version | hash  | version |
    |        |       |         |
    |        |  x5   |  (1,1)  < tail
    |        |       |         |
    | tail > (1,4)   |  x7     |
    |        |       |         |
    | head > (1,5)   |  x9     |  (1,5)  < head
    |        |       |         |
    +--------+-------+---------+

     After
    +-----------------+
    | log             |
    +--------+--------+
    |        |object  |
    |version | hash   |
    |        |        |
    | tail > (1,1)    |  x5
    |        |        |
    |  (1,4) |  x7    |
    |        |        |
    | head > (1,5)    |  x9
    |        |        |
    +--------+--------+
  */
  // olog's tail reaches further back: merging extends the local log's tail
  {
    clear();
    pg_log_t olog;
    pg_info_t oinfo;
    pg_shard_t fromosd;
    pg_info_t info;
    list<hobject_t> remove_snap;
    bool dirty_info = false;
    bool dirty_big_info = false;
    missing.may_include_deletes = false;

    {
      pg_log_entry_t e;
      e.mark_unrollbackable();

      e.version = eversion_t(1, 4);
      e.soid.set_hash(0x5);
      log.tail = e.version;
      log.log.push_back(e);
      e.version = eversion_t(1, 5);
      e.soid.set_hash(0x9);
      log.log.push_back(e);
      log.head = e.version;
      log.index();

      info.last_update = log.head;

      e.version = eversion_t(1, 1);
      e.soid.set_hash(0x5);
      olog.tail = e.version;
      olog.log.push_back(e);
      e.version = eversion_t(1, 5);
      e.soid.set_hash(0x9);
      olog.log.push_back(e);
      olog.head = e.version;
    }
    hobject_t last_backfill(object_t("oname"), string("key"), 1, 234, 1, "");
    info.last_backfill = last_backfill;
    eversion_t stat_version(10, 1);
    info.stats.version = stat_version;

    EXPECT_FALSE(missing.have_missing());
    EXPECT_EQ(2U, log.log.size());
    EXPECT_EQ(stat_version, info.stats.version);
    EXPECT_TRUE(remove_snap.empty());
    EXPECT_EQ(last_backfill, info.last_backfill);
    EXPECT_TRUE(info.purged_snaps.empty());
    EXPECT_FALSE(is_dirty());
    EXPECT_FALSE(dirty_info);
    EXPECT_FALSE(dirty_big_info);

    TestHandler h(remove_snap);
    merge_log(oinfo, std::move(olog), fromosd, info, &h,
              dirty_info, dirty_big_info);

    // the (1,1) entry from olog has been prepended to the local log
    EXPECT_FALSE(missing.have_missing());
    EXPECT_EQ(3U, log.log.size());
    EXPECT_EQ(stat_version, info.stats.version);
    EXPECT_TRUE(remove_snap.empty());
    EXPECT_TRUE(info.purged_snaps.empty());
    EXPECT_TRUE(is_dirty());
    EXPECT_TRUE(dirty_info);
    EXPECT_TRUE(dirty_big_info);
  }

  /*
    +--------------------------+
    | log             olog     |
    +--------+-------+---------+
    |        |object |         |
    |version | hash  | version |
    |        |       |         |
    | tail > (1,1)   |  x5     |  (1,1)  < tail
    |        |       |         |
    |  (1,2) |  x3   |  (1,2)  < lower_bound
    |        |       |         |
    | head > (1,3)   |  x9     |  DELETE
    |        |       |         |
    |        |  x9   |  (2,3)  |  MODIFY
    |        |       |         |
    |        |  x7   |  (2,4)  < head, DELETE
    +--------+-------+---------+

    The log entry (1,3) deletes the object x9 but the olog entry
    (2,3) modifies it and is authoritative : the log entry (1,3) is
    divergent.
  */
  {
    clear();
    pg_log_t olog;
    pg_info_t oinfo;
    pg_shard_t fromosd;
    pg_info_t info;
    list<hobject_t> remove_snap;
    bool dirty_info = false;
    bool dirty_big_info = false;

    hobject_t divergent_object;
    missing.may_include_deletes = true;

    {
      pg_log_entry_t e;
      e.mark_unrollbackable();

      e.version = eversion_t(1, 1);
      e.soid.set_hash(0x5);
      log.tail = e.version;
      log.log.push_back(e);
      e.version = eversion_t(1, 2);
      e.soid.set_hash(0x3);
      log.log.push_back(e);
      e.version = eversion_t(1,3);
      e.soid.set_hash(0x9);
      divergent_object = e.soid;
      e.op = pg_log_entry_t::DELETE;
      log.log.push_back(e);
      log.head = e.version;
      log.index();

      info.last_update = log.head;

      e.version = eversion_t(1, 1);
      e.soid.set_hash(0x5);
      olog.tail = e.version;
      olog.log.push_back(e);
      e.version = eversion_t(1, 2);
      e.soid.set_hash(0x3);
      olog.log.push_back(e);
      e.version = eversion_t(2, 3);
      e.soid.set_hash(0x9);
      e.op = pg_log_entry_t::MODIFY;
      olog.log.push_back(e);
      e.version = eversion_t(2, 4);
      e.soid.set_hash(0x7);
      e.op = pg_log_entry_t::DELETE;
      olog.log.push_back(e);
      olog.head = e.version;
    }
    snapid_t purged_snap(1);
    {
      oinfo.last_update = olog.head;
      oinfo.purged_snaps.insert(purged_snap);
    }

    EXPECT_FALSE(missing.have_missing());
    EXPECT_EQ(1U, log.objects.count(divergent_object));
    EXPECT_EQ(3U, log.log.size());
    EXPECT_TRUE(remove_snap.empty());
    EXPECT_EQ(log.head, info.last_update);
    EXPECT_TRUE(info.purged_snaps.empty());
    EXPECT_FALSE(is_dirty());
    EXPECT_FALSE(dirty_info);
    EXPECT_FALSE(dirty_big_info);

    TestHandler h(remove_snap);
    merge_log(oinfo, std::move(olog), fromosd, info, &h, dirty_info,
dirty_big_info); // completes the merge_log() call begun on the previous source line

    /* When the divergent entry is a DELETE and the authoritative
       entry is a MODIFY, the object will be added to missing : it is
       a verifiable side effect proving the entry was identified
       to be divergent. */
    EXPECT_TRUE(missing.is_missing(divergent_object));
    EXPECT_EQ(1U, log.objects.count(divergent_object));
    EXPECT_EQ(4U, log.log.size());
    /* DELETE entries from olog that are appended to the head of the
       log, and the divergent version of the object is removed (added
       to remove_snap) */
    EXPECT_EQ(0x9U, remove_snap.front().get_hash());
    EXPECT_EQ(log.head, info.last_update);
    EXPECT_TRUE(info.purged_snaps.contains(purged_snap));
    EXPECT_TRUE(is_dirty());
    EXPECT_TRUE(dirty_info);
    EXPECT_TRUE(dirty_big_info);
  }

  /* Same scenario as the previous case, but run with
     missing.may_include_deletes = false (deletes handled during peering):

    +--------------------------+
    | log             olog     |
    +--------+-------+---------+
    |        |object |         |
    |version | hash  | version |
    |        |       |         |
    | tail > (1,1)   |  x5     |  (1,1)  < tail
    |        |       |         |
    |  (1,2) |  x3   |  (1,2)  < lower_bound
    |        |       |         |
    | head > (1,3)   |  x9     |  DELETE
    |        |       |         |
    |        |  x9   |  (2,3)  |  MODIFY
    |        |       |         |
    |        |  x7   |  (2,4)  < head, DELETE
    +--------+-------+---------+

    The log entry (1,3) deletes the object x9 but the olog entry
    (2,3) modifies it and is authoritative : the log entry (1,3) is
    divergent.
  */
  {
    clear();
    pg_log_t olog;
    pg_info_t oinfo;
    pg_shard_t fromosd;
    pg_info_t info;
    list<hobject_t> remove_snap;
    bool dirty_info = false;
    bool dirty_big_info = false;

    hobject_t divergent_object;

    {
      pg_log_entry_t e;
      e.mark_unrollbackable();

      e.version = eversion_t(1, 1);
      e.soid.set_hash(0x5);
      log.tail = e.version;
      log.log.push_back(e);
      e.version = eversion_t(1, 2);
      e.soid.set_hash(0x3);
      log.log.push_back(e);
      e.version = eversion_t(1,3);
      e.soid.set_hash(0x9);
      divergent_object = e.soid;
      e.op = pg_log_entry_t::DELETE;
      log.log.push_back(e);
      log.head = e.version;
      log.index();

      info.last_update = log.head;

      e.version = eversion_t(1, 1);
      e.soid.set_hash(0x5);
      olog.tail = e.version;
      olog.log.push_back(e);
      e.version = eversion_t(1, 2);
      e.soid.set_hash(0x3);
      olog.log.push_back(e);
      e.version = eversion_t(2, 3);
      e.soid.set_hash(0x9);
      e.op = pg_log_entry_t::MODIFY;
      olog.log.push_back(e);
      e.version = eversion_t(2, 4);
      e.soid.set_hash(0x7);
      e.op = pg_log_entry_t::DELETE;
      olog.log.push_back(e);
      olog.head = e.version;
    }
    snapid_t purged_snap(1);
    {
      oinfo.last_update = olog.head;
      oinfo.purged_snaps.insert(purged_snap);
    }

    EXPECT_FALSE(missing.have_missing());
    EXPECT_EQ(1U, log.objects.count(divergent_object));
    EXPECT_EQ(3U, log.log.size());
    EXPECT_TRUE(remove_snap.empty());
    EXPECT_EQ(log.head, info.last_update);
    EXPECT_TRUE(info.purged_snaps.empty());
    EXPECT_FALSE(is_dirty());
    EXPECT_FALSE(dirty_info);
    EXPECT_FALSE(dirty_big_info);

    TestHandler h(remove_snap);
    missing.may_include_deletes = false;
    merge_log(oinfo, std::move(olog), fromosd, info, &h, dirty_info,
              dirty_big_info);

    /* When the divergent entry is a DELETE and the authoritative
       entry is a MODIFY, the object will be added to missing : it is
       a verifiable side effect proving the entry was identified
       to be divergent.
    */
    EXPECT_TRUE(missing.is_missing(divergent_object));
    EXPECT_EQ(1U, log.objects.count(divergent_object));
    EXPECT_EQ(4U, log.log.size());
    /* DELETE entries from olog that are appended to the head of the
       log, and the divergent version of the object is removed (added
       to remove_snap). When peering handles deletes, it is the earlier
       version that is in the removed list. */
    EXPECT_EQ(0x7U, remove_snap.front().get_hash());
    EXPECT_EQ(log.head, info.last_update);
    EXPECT_TRUE(info.purged_snaps.contains(purged_snap));
    EXPECT_TRUE(is_dirty());
    EXPECT_TRUE(dirty_info);
    EXPECT_TRUE(dirty_big_info);
  }

  /*
    +--------------------------+
    | log             olog     |
    +--------+-------+---------+
    |        |object |         |
    |version | hash  | version |
    |        |       |         |
    | tail > (1,1)   |  x5     |  (1,1)  < tail
    |        |       |         |
    |  (1,4) |  x7   |  (1,4)  < head
    |        |       |         |
    | head > (1,5)   |  x9     |
    |        |       |         |
    +--------+-------+---------+

    The head of the log entry (1,5) is divergent because it is
    greater than the head of olog.
  */
  {
    clear();
    pg_log_t olog;
    pg_info_t oinfo;
    pg_shard_t fromosd;
    pg_info_t info;
    list<hobject_t> remove_snap;
    bool dirty_info = false;
    bool dirty_big_info = false;

    {
      pg_log_entry_t e;
      e.mark_unrollbackable();

      e.version = eversion_t(1, 1);
      e.soid.set_hash(0x5);
      log.tail = e.version;
      log.log.push_back(e);
      e.version = eversion_t(1, 4);
      e.soid.set_hash(0x7);
      log.log.push_back(e);
      e.version = eversion_t(1, 5);
      e.soid.set_hash(0x9);
      log.log.push_back(e);
      log.head = e.version;
      log.index();

      info.last_update = log.head;

      e.version = eversion_t(1, 1);
      e.soid.set_hash(0x5);
      olog.tail = e.version;
      olog.log.push_back(e);
      e.version = eversion_t(1, 4);
      e.soid.set_hash(0x7);
      olog.log.push_back(e);
      olog.head = e.version;
    }
    hobject_t last_backfill(object_t("oname"), string("key"), 1, 234, 1, "");
    info.last_backfill = last_backfill;
    eversion_t stat_version(10, 1);
    info.stats.version = stat_version;

    EXPECT_FALSE(missing.have_missing());
    EXPECT_EQ(3U, log.log.size());
    EXPECT_EQ(stat_version, info.stats.version);
EXPECT_TRUE(remove_snap.empty());
    EXPECT_EQ(last_backfill, info.last_backfill);
    EXPECT_TRUE(info.purged_snaps.empty());
    EXPECT_FALSE(is_dirty());
    EXPECT_FALSE(dirty_info);
    EXPECT_FALSE(dirty_big_info);

    TestHandler h(remove_snap);
    missing.may_include_deletes = false;
    merge_log(oinfo, std::move(olog), fromosd, info, &h,
              dirty_info, dirty_big_info);

    // the divergent local head (1,5) is dropped: x9 ends up on remove_snap
    EXPECT_FALSE(missing.have_missing());
    EXPECT_EQ(2U, log.log.size());
    EXPECT_EQ(stat_version, info.stats.version);
    EXPECT_EQ(0x9U, remove_snap.front().get_hash());
    EXPECT_TRUE(info.purged_snaps.empty());
    EXPECT_TRUE(is_dirty());
    EXPECT_TRUE(dirty_info);
    EXPECT_TRUE(dirty_big_info);
  }
}

// Exercises proc_replica_log(): given a replica's log/info, compute what
// that replica is missing and trim its last_update/last_complete back to
// the last point of agreement with the local (authoritative) log.
TEST_F(PGLogTest, proc_replica_log) {
  // empty log : no side effect
  {
    clear();
    pg_log_t olog;
    pg_info_t oinfo;
    pg_missing_t omissing;
    pg_shard_t from;

    eversion_t last_update(1, 1);
    log.head = olog.head = oinfo.last_update = last_update;
    eversion_t last_complete(1, 1);
    oinfo.last_complete = last_complete;

    EXPECT_FALSE(omissing.have_missing());
    EXPECT_EQ(last_update, oinfo.last_update);
    EXPECT_EQ(last_complete, oinfo.last_complete);

    missing.may_include_deletes = false;
    proc_replica_log(oinfo, olog, omissing, from);

    EXPECT_FALSE(omissing.have_missing());
    EXPECT_EQ(last_update, oinfo.last_update);
    EXPECT_EQ(last_update, oinfo.last_complete);
  }

  /*
    +--------------------------+
    | log             olog     |
    +--------+-------+---------+
    |        |object |         |
    |version | hash  | version |
    |        |       |         |
    |        |  x3   |  (1,1)  < tail
    |        |       |         |
    | tail > (1,2)   |  x5     |
    |        |       |         |
    | head > (1,3)   |  x9     |  DELETE
    |        |       |         |
    |        |  x9   |  (2,3)  < head, DELETE
    |        |       |         |
    +--------+-------+---------+

    The log entry (1,3) deletes the object x9 and the olog entry
    (2,3) also deletes it : do nothing. The olog tail is ignored
    because it is before the log tail.
  */
  {
    clear();
    pg_log_t olog;
    pg_info_t oinfo;
    pg_missing_t omissing;
    pg_shard_t from;

    {
      pg_log_entry_t e;
      e.mark_unrollbackable();

      e.version = eversion_t(1, 2);
      e.soid.set_hash(0x5);
      log.tail = e.version;
      log.log.push_back(e);
      e.version = eversion_t(1, 3);
      e.soid.set_hash(0x9);
      e.op = pg_log_entry_t::DELETE;
      log.log.push_back(e);
      log.head = e.version;
      log.index();

      e.version = eversion_t(1, 1);
      e.soid.set_hash(0x3);
      olog.tail = e.version;
      olog.log.push_back(e);
      e.version = eversion_t(2, 3);
      e.soid.set_hash(0x9);
      e.op = pg_log_entry_t::DELETE;
      olog.log.push_back(e);
      olog.head = e.version;

      oinfo.last_update = olog.head;
      oinfo.last_complete = olog.head;
    }
    EXPECT_FALSE(omissing.have_missing());
    EXPECT_EQ(olog.head, oinfo.last_update);
    EXPECT_EQ(olog.head, oinfo.last_complete);
    missing.may_include_deletes = false;
    proc_replica_log(oinfo, olog, omissing, from);
    EXPECT_FALSE(omissing.have_missing());
  }

  // a replica log that diverges after (1,2): the divergent object must be
  // reported missing at its last agreed-upon version (1,2)
  {
    clear();
    pg_log_t olog;
    pg_info_t oinfo;
    pg_missing_t omissing;
    pg_shard_t from;

    hobject_t divergent_object;

    {
      pg_log_entry_t e;
      e.mark_unrollbackable();

      {
        // local (authoritative) log
        e.soid = divergent_object;
        e.soid.set_hash(0x1);
        e.version = eversion_t(1, 1);
        log.tail = e.version;
        log.log.push_back(e);
        e.soid = divergent_object;
        e.prior_version = eversion_t(1, 1);
        e.version = eversion_t(1, 2);
        log.tail = e.version;
        log.log.push_back(e);

        e.soid.set_hash(0x3);
        e.version = eversion_t(1, 4);
        log.log.push_back(e);
        e.soid.set_hash(0x7);
        e.version = eversion_t(1, 5);
        log.log.push_back(e);
        e.soid.set_hash(0x8);
        e.version = eversion_t(1, 6);
        log.log.push_back(e);
        e.soid.set_hash(0x9);
        e.op = pg_log_entry_t::DELETE;
        e.version = eversion_t(2, 7);
        log.log.push_back(e);
        e.soid.set_hash(0xa);
        e.version = eversion_t(2, 8);
        log.head = e.version;
        log.log.push_back(e);
      }
      log.index();
      {
        // replica log: agrees up to (1,2), then diverges
        e.soid = divergent_object;
        e.soid.set_hash(0x1);
        e.version = eversion_t(1, 1);
        olog.tail = e.version;
        olog.log.push_back(e);
        e.soid = divergent_object;
        e.prior_version = eversion_t(1, 1);
        e.version = eversion_t(1, 2);
olog.log.push_back(e);

        e.prior_version = eversion_t(0, 0);
        e.soid.set_hash(0x3);
        e.version = eversion_t(1, 4);
        olog.log.push_back(e);
        e.soid.set_hash(0x7);
        e.version = eversion_t(1, 5);
        olog.log.push_back(e);
        e.soid.set_hash(0x8);
        e.version = eversion_t(1, 6);
        olog.log.push_back(e);
        e.soid.set_hash(0x9); // should not be added to missing, create
        e.op = pg_log_entry_t::MODIFY;
        e.version = eversion_t(1, 7);
        olog.log.push_back(e);
        e.soid = divergent_object; // should be added to missing at 1,2
        e.op = pg_log_entry_t::MODIFY;
        e.version = eversion_t(1, 8);
        e.prior_version = eversion_t(1, 2);
        olog.log.push_back(e);
        olog.head = e.version;
      }
      oinfo.last_update = olog.head;
      oinfo.last_complete = olog.head;
    }

    EXPECT_FALSE(omissing.have_missing());
    EXPECT_EQ(olog.head, oinfo.last_update);
    EXPECT_EQ(olog.head, oinfo.last_complete);
    missing.may_include_deletes = false;
    proc_replica_log(oinfo, olog, omissing, from);
    // the replica's info is trimmed back to the last point of agreement
    EXPECT_TRUE(omissing.have_missing());
    EXPECT_TRUE(omissing.is_missing(divergent_object));
    EXPECT_EQ(eversion_t(1, 2), omissing.get_items().at(divergent_object).need);
    EXPECT_EQ(eversion_t(1, 6), oinfo.last_update);
    EXPECT_EQ(eversion_t(1, 1), oinfo.last_complete);
  }

  /*
    +--------------------------+
    | olog            log      |
    +--------+-------+---------+
    |        |object |         |
    |version | hash  | version |
    |        |       |         |
    | tail > (1,1)   |  x9     |  (1,1)  < tail
    |        |       |         |
    |  (1,2) |  x3   |  (1,2)  |
    |        |       |         |
    | head > (1,3)   |  x9     |  DELETE
    |        |       |         |
    |        |  x9   |  (2,3)  < head, DELETE
    |        |       |         |
    +--------+-------+---------+

    The log entry (1,3) deletes the object x9 and the olog entry
    (2,3) also deletes it : do nothing.
  */
  {
    clear();
    pg_log_t olog;
    pg_info_t oinfo;
    pg_missing_t omissing;
    pg_shard_t from;

    eversion_t last_update(1, 2);
    hobject_t divergent_object;
    divergent_object.set_hash(0x9);

    {
      pg_log_entry_t e;
      e.mark_unrollbackable();

      e.version = eversion_t(1, 1);
      e.soid = divergent_object;
      log.tail = e.version;
      log.log.push_back(e);
      e.version = last_update;
      e.soid.set_hash(0x3);
      log.log.push_back(e);
      e.version = eversion_t(2, 3);
      e.prior_version = eversion_t(1, 1);
      e.soid = divergent_object;
      e.op = pg_log_entry_t::DELETE;
      log.log.push_back(e);
      log.head = e.version;
      log.index();

      e.version = eversion_t(1, 1);
      e.soid = divergent_object;
      olog.tail = e.version;
      olog.log.push_back(e);
      e.version = last_update;
      e.soid.set_hash(0x3);
      olog.log.push_back(e);
      e.version = eversion_t(1, 3);
      e.prior_version = eversion_t(1, 1);
      e.soid = divergent_object;
      e.op = pg_log_entry_t::DELETE;
      olog.log.push_back(e);
      olog.head = e.version;

      oinfo.last_update = olog.head;
      oinfo.last_complete = olog.head;
    }

    EXPECT_FALSE(omissing.have_missing());
    EXPECT_EQ(olog.head, oinfo.last_update);
    EXPECT_EQ(olog.head, oinfo.last_complete);
    missing.may_include_deletes = false;
    proc_replica_log(oinfo, olog, omissing, from);
    EXPECT_TRUE(omissing.have_missing());
    EXPECT_TRUE(omissing.is_missing(divergent_object));
    EXPECT_EQ(omissing.get_items().at(divergent_object).have, eversion_t(0, 0));
    EXPECT_EQ(omissing.get_items().at(divergent_object).need, eversion_t(1, 1));
    EXPECT_EQ(last_update, oinfo.last_update);
  }

  /*
    +--------------------------+
    | olog            log      |
    +--------+-------+---------+
    |        |object |         |
    |version | hash  | version |
    |        |       |         |
    | tail > (1,1)   |  x9     |  (1,1)  < tail
    |        |       |         |
    |  (1,2) |  x3   |  (1,2)  |
    |        |       |         |
    | head > (1,3)   |  x9     |  MODIFY
    |        |       |         |
    |        |  x9   |  (2,3)  < head, DELETE
    |        |       |         |
    +--------+-------+---------+

    The log entry (2,3) deletes the object x9 but the olog entry
    (1,3) modifies it : remove it from omissing.
  */
  {
    clear();
    pg_log_t olog;
    pg_info_t oinfo;
    pg_missing_t omissing;
    pg_shard_t from;

    eversion_t last_update(1, 2);
    hobject_t divergent_object;

    {
      pg_log_entry_t e;
      e.mark_unrollbackable();

      e.version = eversion_t(1, 1);
      e.soid = divergent_object;
      log.tail = e.version;
      log.log.push_back(e);
      e.version = last_update;
      e.soid.set_hash(0x3);
      log.log.push_back(e);
      e.version = eversion_t(2, 3);
      e.prior_version = eversion_t(1, 1);
      e.soid = divergent_object;
      e.op = pg_log_entry_t::DELETE;
      log.log.push_back(e);
      log.head = e.version;
      log.index();

      e.version = eversion_t(1, 1);
      e.soid = divergent_object;
      olog.tail = e.version;
      olog.log.push_back(e);
      e.version = last_update;
      e.soid.set_hash(0x3);
      olog.log.push_back(e);
      e.version = eversion_t(1, 3);
      e.prior_version = eversion_t(1, 1);
      e.soid = divergent_object;
      divergent_object = e.soid;
      omissing.add(divergent_object, e.version, eversion_t(), false);
      e.op = pg_log_entry_t::MODIFY;
      olog.log.push_back(e);
      olog.head = e.version;

      oinfo.last_update = olog.head;
      oinfo.last_complete = olog.head;
    }

    EXPECT_TRUE(omissing.have_missing());
    EXPECT_TRUE(omissing.is_missing(divergent_object));
    EXPECT_EQ(eversion_t(1, 3), omissing.get_items().at(divergent_object).need);
    EXPECT_EQ(olog.head, oinfo.last_update);
    EXPECT_EQ(olog.head, oinfo.last_complete);
    missing.may_include_deletes = false;
    proc_replica_log(oinfo, olog, omissing, from);
    EXPECT_TRUE(omissing.have_missing());
    EXPECT_TRUE(omissing.is_missing(divergent_object));
    EXPECT_EQ(omissing.get_items().at(divergent_object).have, eversion_t(0, 0));
    EXPECT_EQ(omissing.get_items().at(divergent_object).need, eversion_t(1, 1));
    EXPECT_EQ(last_update, oinfo.last_update);
  }

  /*
    +--------------------------+
    | log             olog     |
    +--------+-------+---------+
    |        |object |         |
    |version | hash  | version |
    |        |       |         |
    | tail > (1,1)   |  x9     |  (1,1)  < tail
    |        |       |         |
    |  (1,2) |  x3   |  (1,2)  |
    |        |       |         |
    |        |  x9   |  (1,3)  < head, MODIFY
    |        |       |         |
    | head > (2,3)   |  x9     |  DELETE
    |        |       |         |
    +--------+-------+---------+

    The log entry (2,3) deletes the object x9 but the olog entry
    (1,3) modifies it : proc_replica_log should adjust missing to
    1,1 for that object until add_next_event in PG::activate
    processes the delete.
  */
  {
    clear();
    pg_log_t olog;
    pg_info_t oinfo;
    pg_missing_t omissing;
    pg_shard_t from;

    eversion_t last_update(1, 2);
    hobject_t divergent_object;
    eversion_t new_version(2, 3);
    eversion_t divergent_version(1, 3);

    {
      pg_log_entry_t e;
      e.mark_unrollbackable();

      e.version = eversion_t(1, 1);
      e.soid.set_hash(0x9);
      log.tail = e.version;
      log.log.push_back(e);
      e.version = last_update;
      e.soid.set_hash(0x3);
      log.log.push_back(e);
      e.version = new_version;
      e.prior_version = eversion_t(1, 1);
      e.soid.set_hash(0x9);
      e.op = pg_log_entry_t::DELETE;
      log.log.push_back(e);
      log.head = e.version;
      log.index();

      e.op = pg_log_entry_t::MODIFY;
      e.version = eversion_t(1, 1);
      e.soid.set_hash(0x9);
      olog.tail = e.version;
      olog.log.push_back(e);
      e.version = last_update;
      e.soid.set_hash(0x3);
      olog.log.push_back(e);
      e.version = divergent_version;
      e.prior_version = eversion_t(1, 1);
      e.soid.set_hash(0x9);
      divergent_object = e.soid;
      omissing.add(divergent_object, e.version, eversion_t(), false);
      e.op = pg_log_entry_t::MODIFY;
      olog.log.push_back(e);
      olog.head = e.version;

      oinfo.last_update = olog.head;
      oinfo.last_complete = olog.head;
    }

    EXPECT_TRUE(omissing.have_missing());
    EXPECT_TRUE(omissing.is_missing(divergent_object));
    EXPECT_EQ(divergent_version, omissing.get_items().at(divergent_object).need);
    EXPECT_EQ(olog.head, oinfo.last_update);
    EXPECT_EQ(olog.head, oinfo.last_complete);
    missing.may_include_deletes = false;
    proc_replica_log(oinfo, olog, omissing, from);
    EXPECT_TRUE(omissing.have_missing());
    EXPECT_TRUE(omissing.get_items().begin()->second.need == eversion_t(1, 1));
    EXPECT_EQ(last_update, oinfo.last_update);
    EXPECT_EQ(eversion_t(0, 0), oinfo.last_complete);
  }
}

// divergent unrollbackable MODIFY: object reverts to (10,100) and the
// divergent version is removed
TEST_F(PGLogTest, merge_log_1) {
  TestCase t;
  t.base.push_back(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80)));
  t.div.push_back(mk_ple_mod(mk_obj(1), mk_evt(10, 101), mk_evt(10, 100)));

  t.final.add(mk_obj(1), mk_evt(10, 100), mk_evt(0, 0), false);

  t.toremove.insert(mk_obj(1));

  t.setup();
  run_test_case(t);
}

// divergent rollbackable entries: both are queued for rollback instead of
// removal
TEST_F(PGLogTest, merge_log_2) {
  TestCase t;
  t.base.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80)));

  t.div.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 101), mk_evt(10, 100)));
  t.div.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 102), mk_evt(10, 101)));

  t.torollback.insert(
    t.torollback.begin(), t.div.rbegin(), t.div.rend());

  t.setup();
  run_test_case(t);
}

// mixed divergent entries (first unrollbackable): rollback is impossible,
// so the object is removed and ends up missing at (10,100)
TEST_F(PGLogTest, merge_log_3) {
  TestCase t;
  t.base.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80)));

  t.div.push_back(mk_ple_mod(mk_obj(1), mk_evt(10, 101), mk_evt(10, 100)));
  t.div.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 102), mk_evt(10, 101)));

  t.final.add(mk_obj(1), mk_evt(10, 100), mk_evt(0, 0), false);

  t.toremove.insert(mk_obj(1));

  t.setup();
  run_test_case(t);
}

// object already missing at the divergent version: stays missing at the
// pre-divergence version
TEST_F(PGLogTest, merge_log_4) {
  TestCase t;
  t.base.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80)));

  t.div.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 101), mk_evt(10, 100)));
  t.div.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 102), mk_evt(10, 101)));

  t.init.add(mk_obj(1), mk_evt(10, 102), mk_evt(0, 0), false);
  t.final.add(mk_obj(1), mk_evt(10, 100), mk_evt(0, 0), false);

  t.setup();
  run_test_case(t);
}

// divergent entries superseded by an authoritative MODIFY at (11,101)
TEST_F(PGLogTest, merge_log_5) {
  TestCase t;
  t.base.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80)));

  t.div.push_back(mk_ple_mod(mk_obj(1), mk_evt(10, 101), mk_evt(10, 100)));
  t.div.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 102), mk_evt(10, 101)));

  t.auth.push_back(mk_ple_mod(mk_obj(1), mk_evt(11, 101), mk_evt(10, 100)));

  t.final.add(mk_obj(1), mk_evt(11, 101), mk_evt(0, 0), false);

  t.toremove.insert(mk_obj(1));

  t.setup();
  run_test_case(t);
}

// no divergence: a new authoritative MODIFY simply makes the object missing
TEST_F(PGLogTest, merge_log_6) {
  TestCase t;
  t.base.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80)));
  t.auth.push_back(mk_ple_mod(mk_obj(1), mk_evt(11, 101), mk_evt(10, 100)));

  t.final.add(mk_obj(1), mk_evt(11, 101), mk_evt(10, 100), false);

  t.setup();
  run_test_case(t);
}

// already missing at (10,100): the authoritative MODIFY advances need,
// 'have' is preserved
TEST_F(PGLogTest, merge_log_7) {
  TestCase t;
  t.base.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80)));
  t.auth.push_back(mk_ple_mod(mk_obj(1), mk_evt(11, 101), mk_evt(10, 100)));

  t.init.add(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80), false);
  t.final.add(mk_obj(1), mk_evt(11, 101), mk_evt(8, 80), false);

  t.setup();
  run_test_case(t);
}

// authoritative DELETE with may_include_deletes: recorded as a missing
// delete (the trailing 'true')
TEST_F(PGLogTest, merge_log_8) {
  TestCase t;
  t.base.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80)));
  t.auth.push_back(mk_ple_dt(mk_obj(1), mk_evt(11, 101), mk_evt(10, 100)));

  t.init.add(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80), false);
  t.final.add(mk_obj(1), mk_evt(11, 101), mk_evt(8, 80), true);

  t.setup();
  run_test_case(t);
}

// authoritative DELETE handled during peering: the object is removed
// outright instead of being tracked in missing
TEST_F(PGLogTest, merge_log_9) {
  TestCase t;
  t.base.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80)));
  t.auth.push_back(mk_ple_dt(mk_obj(1), mk_evt(11, 101), mk_evt(10, 100)));

  t.init.add(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80), false);

  t.toremove.insert(mk_obj(1));
  t.deletes_during_peering = true;

  t.setup();
  run_test_case(t);
}

// authoritative lost-delete (mk_ple_ldt): tracked as a missing delete
TEST_F(PGLogTest, merge_log_10) {
  TestCase t;
  t.base.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80)));
  t.auth.push_back(mk_ple_ldt(mk_obj(1), mk_evt(11, 101), mk_evt(10, 100)));

  t.init.add(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80), false);
  t.final.add(mk_obj(1), mk_evt(11, 101), mk_evt(8, 80), true);

  t.setup();
  run_test_case(t);
}

// missing entry whose 'have' equals the divergent entry's prior_version:
// merging must leave the missing set unchanged
TEST_F(PGLogTest, merge_log_prior_version_have) {
  TestCase t;
  t.base.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80)));

  t.div.push_back(mk_ple_mod(mk_obj(1), mk_evt(10, 101), mk_evt(10, 100)));

  t.init.add(mk_obj(1), mk_evt(10, 101), mk_evt(10, 100), false);

  t.setup();
  run_test_case(t);
}

// divergent log whose bounds come from a PG split: missing entries at the
// head must be computed against the authoritative bounds
TEST_F(PGLogTest, merge_log_split_missing_entries_at_head) {
  TestCase t;
  t.auth.push_back(mk_ple_mod_rb(mk_obj(1), // argument list continues on the following source line
mk_evt(10, 100), mk_evt(8, 70))); // completes the push_back() call begun on the previous source line
  t.auth.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(15, 150), mk_evt(10, 100)));

  t.div.push_back(mk_ple_mod(mk_obj(1), mk_evt(8, 70), mk_evt(8, 65)));

  t.setup();
  t.set_div_bounds(mk_evt(9, 79), mk_evt(8, 69));
  t.set_auth_bounds(mk_evt(15, 160), mk_evt(9, 77));
  t.final.add(mk_obj(1), mk_evt(15, 150), mk_evt(8, 70), false);
  run_test_case(t);
}

// olog tail newer than the local (post-split) log tail, no divergent
// entries: the object is missing at the authoritative head
TEST_F(PGLogTest, olog_tail_gt_log_tail_split) {
  TestCase t;
  t.auth.push_back(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(8, 70)));
  t.auth.push_back(mk_ple_mod(mk_obj(1), mk_evt(15, 150), mk_evt(10, 100)));
  t.auth.push_back(mk_ple_mod(mk_obj(1), mk_evt(15, 155), mk_evt(15, 150)));

  t.setup();
  t.set_div_bounds(mk_evt(15, 153), mk_evt(15, 151));
  t.set_auth_bounds(mk_evt(15, 156), mk_evt(10, 99));
  t.final.add(mk_obj(1), mk_evt(15, 155), mk_evt(15, 150), false);
  run_test_case(t);
}

// as above, but with a divergent entry (15,153): the object is removed and
// becomes missing with no 'have' version
TEST_F(PGLogTest, olog_tail_gt_log_tail_split2) {
  TestCase t;
  t.auth.push_back(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(8, 70)));
  t.auth.push_back(mk_ple_mod(mk_obj(1), mk_evt(15, 150), mk_evt(10, 100)));
  t.auth.push_back(mk_ple_mod(mk_obj(1), mk_evt(16, 155), mk_evt(15, 150)));

  t.div.push_back(mk_ple_mod(mk_obj(1), mk_evt(15, 153), mk_evt(15, 150)));

  t.setup();
  t.set_div_bounds(mk_evt(15, 153), mk_evt(15, 151));
  t.set_auth_bounds(mk_evt(16, 156), mk_evt(10, 99));
  t.final.add(mk_obj(1), mk_evt(16, 155), mk_evt(0, 0), false);
  t.toremove.insert(mk_obj(1));
  run_test_case(t);
}

// Exercises pg_log_t::filter_log(): entries that no longer map to this PG
// (e.g. after a split) are filtered out, while entries in the hit_set
// (internal) namespace are always retained.
TEST_F(PGLogTest, filter_log_1) {
  {
    clear();
    int osd_id = 1;
    epoch_t epoch = 40;
    int64_t pool_id = 1;
    int bits = 2;
    int max_osd = 4;
    int pg_num = max_osd << bits;
    int num_objects = 1000;
    int num_internal = 10;

    // Set up splitting map
    std::unique_ptr<OSDMap> osdmap(new OSDMap);
    uuid_d test_uuid;
    test_uuid.generate_random();
    osdmap->build_simple_with_pool(g_ceph_context, epoch, test_uuid,
                                   max_osd, bits, bits);
    osdmap->set_state(osd_id, CEPH_OSD_EXISTS);

    const string hit_set_namespace("internal");

    {
      pg_log_entry_t e;
      e.mark_unrollbackable();
      e.op = pg_log_entry_t::MODIFY;
      e.soid.pool = pool_id;

      uuid_d uuid_name;
      int i;
      for (i = 1; i <= num_objects; ++i) {
        e.version = eversion_t(epoch, i);
        // Use this to generate random file names
        uuid_name.generate_random();
        ostringstream name;
        name << uuid_name;
        e.soid.oid.name = name.str();
        // First has no namespace
        if (i != 1) {
          // num_internal have the internal namespace
          if (i <= num_internal + 1) {
            e.soid.nspace = hit_set_namespace;
          } else { // rest have different namespaces
            ostringstream ns;
            ns << "ns" << i;
            e.soid.nspace = ns.str();
          }
        }
        log.log.push_back(e);
        if (i == 1)
          log.tail = e.version;
      }
      log.head = e.version;
      log.index();
    }

    spg_t pgid(pg_t(2, pool_id), shard_id_t::NO_SHARD);

    // See if we created the right number of entries
    int total = log.log.size();
    ASSERT_EQ(total, num_objects);

    // Some should be removed
    {
      pg_log_t filtered, reject;
      pg_log_t::filter_log(
        pgid, *osdmap, hit_set_namespace, log, filtered, reject);
      log = IndexedLog(filtered);
    }
    EXPECT_LE(log.log.size(), (size_t)total);

    // If we filter a second time, there should be the same total
    total = log.log.size();
    {
      pg_log_t filtered, reject;
      pg_log_t::filter_log(
        pgid, *osdmap, hit_set_namespace, log, filtered, reject);
      log = IndexedLog(filtered);
    }
    EXPECT_EQ(log.log.size(), (size_t)total);

    // Increase pg_num as if there would be a split
    int new_pg_num = pg_num * 16;
    OSDMap::Incremental inc(epoch + 1);
    inc.fsid = test_uuid;
    const pg_pool_t *pool = osdmap->get_pg_pool(pool_id);
    pg_pool_t newpool;
    newpool = *pool;
    newpool.set_pg_num(new_pg_num);
    newpool.set_pgp_num(new_pg_num);
    inc.new_pools[pool_id] = newpool;
    int ret = osdmap->apply_incremental(inc);
    ASSERT_EQ(ret, 0);

    // We should have fewer entries after a filter
    {
      pg_log_t filtered, reject;
      pg_log_t::filter_log(
        pgid, *osdmap, hit_set_namespace, log, filtered, reject);
      log = IndexedLog(filtered);
    }
    EXPECT_LE(log.log.size(), (size_t)total);

    // Make sure all internal entries are retained
    int count = 0;
    // loop continues past the end of this chunk
    for (list<pg_log_entry_t>::iterator i = log.log.begin();
i != log.log.end(); ++i) { if (i->soid.nspace == hit_set_namespace) count++; } EXPECT_EQ(count, num_internal); } } TEST_F(PGLogTest, get_request) { clear(); // make sure writes, deletes, and errors are found vector<pg_log_entry_t> entries; hobject_t oid(object_t("objname"), "key", 123, 456, 0, ""); entries.push_back( pg_log_entry_t(pg_log_entry_t::ERROR, oid, eversion_t(6,2), eversion_t(3,4), 1, osd_reqid_t(entity_name_t::CLIENT(777), 8, 1), utime_t(0,1), -ENOENT)); entries.push_back( pg_log_entry_t(pg_log_entry_t::MODIFY, oid, eversion_t(6,3), eversion_t(3,4), 2, osd_reqid_t(entity_name_t::CLIENT(777), 8, 2), utime_t(1,2), 0)); entries.push_back( pg_log_entry_t(pg_log_entry_t::DELETE, oid, eversion_t(7,4), eversion_t(7,4), 3, osd_reqid_t(entity_name_t::CLIENT(777), 8, 3), utime_t(10,2), 0)); entries.push_back( pg_log_entry_t(pg_log_entry_t::ERROR, oid, eversion_t(7,5), eversion_t(7,4), 3, osd_reqid_t(entity_name_t::CLIENT(777), 8, 4), utime_t(20,1), -ENOENT)); for (auto &entry : entries) { log.add(entry); } for (auto &entry : entries) { eversion_t replay_version; version_t user_version; int return_code = 0; vector<pg_log_op_return_item_t> op_returns; bool got = log.get_request( entry.reqid, &replay_version, &user_version, &return_code, &op_returns); EXPECT_TRUE(got); EXPECT_EQ(entry.return_code, return_code); EXPECT_EQ(entry.version, replay_version); EXPECT_EQ(entry.user_version, user_version); } } TEST_F(PGLogTest, ErrorNotIndexedByObject) { clear(); // make sure writes, deletes, and errors are found hobject_t oid(object_t("objname"), "key", 123, 456, 0, ""); log.add( pg_log_entry_t(pg_log_entry_t::ERROR, oid, eversion_t(6,2), eversion_t(3,4), 1, osd_reqid_t(entity_name_t::CLIENT(777), 8, 1), utime_t(0,1), -ENOENT)); EXPECT_FALSE(log.logged_object(oid)); pg_log_entry_t modify(pg_log_entry_t::MODIFY, oid, eversion_t(6,3), eversion_t(3,4), 2, osd_reqid_t(entity_name_t::CLIENT(777), 8, 2), utime_t(1,2), 0); log.add(modify); EXPECT_TRUE(log.logged_object(oid)); 
pg_log_entry_t *entry = log.objects[oid]; EXPECT_EQ(modify.op, entry->op); EXPECT_EQ(modify.version, entry->version); EXPECT_EQ(modify.prior_version, entry->prior_version); EXPECT_EQ(modify.user_version, entry->user_version); EXPECT_EQ(modify.reqid, entry->reqid); pg_log_entry_t del(pg_log_entry_t::DELETE, oid, eversion_t(7,4), eversion_t(7,4), 3, osd_reqid_t(entity_name_t::CLIENT(777), 8, 3), utime_t(10,2), 0); log.add(del); EXPECT_TRUE(log.logged_object(oid)); entry = log.objects[oid]; EXPECT_EQ(del.op, entry->op); EXPECT_EQ(del.version, entry->version); EXPECT_EQ(del.prior_version, entry->prior_version); EXPECT_EQ(del.user_version, entry->user_version); EXPECT_EQ(del.reqid, entry->reqid); log.add( pg_log_entry_t(pg_log_entry_t::ERROR, oid, eversion_t(7,5), eversion_t(7,4), 3, osd_reqid_t(entity_name_t::CLIENT(777), 8, 4), utime_t(20,1), -ENOENT)); EXPECT_TRUE(log.logged_object(oid)); entry = log.objects[oid]; EXPECT_EQ(del.op, entry->op); EXPECT_EQ(del.version, entry->version); EXPECT_EQ(del.prior_version, entry->prior_version); EXPECT_EQ(del.user_version, entry->user_version); EXPECT_EQ(del.reqid, entry->reqid); } TEST_F(PGLogTest, split_into_preserves_may_include_deletes) { clear(); { may_include_deletes_in_missing_dirty = false; missing.may_include_deletes = true; PGLog child_log(cct); pg_t child_pg; split_into(child_pg, 6, &child_log); ASSERT_TRUE(child_log.get_missing().may_include_deletes); ASSERT_TRUE(child_log.get_may_include_deletes_in_missing_dirty()); } { may_include_deletes_in_missing_dirty = false; missing.may_include_deletes = false; PGLog child_log(cct); pg_t child_pg; split_into(child_pg, 6, &child_log); ASSERT_FALSE(child_log.get_missing().may_include_deletes); ASSERT_FALSE(child_log.get_may_include_deletes_in_missing_dirty()); } } class PGLogTestRebuildMissing : public PGLogTest, public StoreTestFixture { public: PGLogTestRebuildMissing() : PGLogTest(), StoreTestFixture("memstore") {} void SetUp() override { StoreTestFixture::SetUp(); 
ObjectStore::Transaction t; test_coll = coll_t(spg_t(pg_t(1, 1))); ch = store->create_new_collection(test_coll); t.create_collection(test_coll, 0); store->queue_transaction(ch, std::move(t)); existing_oid = mk_obj(0); nonexistent_oid = mk_obj(1); ghobject_t existing_ghobj(existing_oid); object_info_t existing_info; existing_info.version = eversion_t(6, 2); bufferlist enc_oi; encode(existing_info, enc_oi, 0); ObjectStore::Transaction t2; t2.touch(test_coll, ghobject_t(existing_oid)); t2.setattr(test_coll, ghobject_t(existing_oid), OI_ATTR, enc_oi); ASSERT_EQ(0, store->queue_transaction(ch, std::move(t2))); info.last_backfill = hobject_t::get_max(); info.last_complete = eversion_t(); } void TearDown() override { clear(); missing.may_include_deletes = false; StoreTestFixture::TearDown(); } pg_info_t info; coll_t test_coll; hobject_t existing_oid, nonexistent_oid; void run_rebuild_missing_test(const map<hobject_t, pg_missing_item> &expected_missing_items) { rebuild_missing_set_with_deletes(store.get(), ch, info); ASSERT_EQ(expected_missing_items, missing.get_items()); } }; TEST_F(PGLogTestRebuildMissing, EmptyLog) { missing.add(existing_oid, mk_evt(6, 2), mk_evt(6, 3), false); missing.add(nonexistent_oid, mk_evt(7, 4), mk_evt(0, 0), false); map<hobject_t, pg_missing_item> orig_missing = missing.get_items(); run_rebuild_missing_test(orig_missing); } TEST_F(PGLogTestRebuildMissing, SameVersionMod) { missing.add(existing_oid, mk_evt(6, 2), mk_evt(6, 1), false); log.add(mk_ple_mod(existing_oid, mk_evt(6, 2), mk_evt(6, 1))); map<hobject_t, pg_missing_item> empty_missing; run_rebuild_missing_test(empty_missing); } TEST_F(PGLogTestRebuildMissing, DelExisting) { missing.add(existing_oid, mk_evt(6, 3), mk_evt(6, 2), false); log.add(mk_ple_dt(existing_oid, mk_evt(7, 5), mk_evt(7, 4))); map<hobject_t, pg_missing_item> expected; expected[existing_oid] = pg_missing_item(mk_evt(7, 5), mk_evt(6, 2), true); run_rebuild_missing_test(expected); } TEST_F(PGLogTestRebuildMissing, 
DelNonexistent) { log.add(mk_ple_dt(nonexistent_oid, mk_evt(7, 5), mk_evt(7, 4))); map<hobject_t, pg_missing_item> expected; expected[nonexistent_oid] = pg_missing_item(mk_evt(7, 5), mk_evt(0, 0), true); run_rebuild_missing_test(expected); } TEST_F(PGLogTestRebuildMissing, MissingNotInLog) { missing.add(mk_obj(10), mk_evt(8, 12), mk_evt(8, 10), false); log.add(mk_ple_dt(nonexistent_oid, mk_evt(7, 5), mk_evt(7, 4))); map<hobject_t, pg_missing_item> expected; expected[nonexistent_oid] = pg_missing_item(mk_evt(7, 5), mk_evt(0, 0), true); expected[mk_obj(10)] = pg_missing_item(mk_evt(8, 12), mk_evt(8, 10), false); run_rebuild_missing_test(expected); } class PGLogMergeDupsTest : protected PGLog, public StoreTestFixture { public: PGLogMergeDupsTest() : PGLog(g_ceph_context), StoreTestFixture("memstore") { } void SetUp() override { StoreTestFixture::SetUp(); ObjectStore::Transaction t; test_coll = coll_t(spg_t(pg_t(1, 1))); auto ch = store->create_new_collection(test_coll); t.create_collection(test_coll, 0); store->queue_transaction(ch, std::move(t)); } void TearDown() override { test_disk_roundtrip(); clear(); StoreTestFixture::TearDown(); } static pg_log_dup_t create_dup_entry(uint a, uint b) { // make each dup_entry unique by using different client id's static uint client_id = 777; return pg_log_dup_t(eversion_t(a, b), a, osd_reqid_t(entity_name_t::CLIENT(client_id++), 8, 1), 0); } static std::vector<pg_log_dup_t> example_dups_1() { std::vector<pg_log_dup_t> result = { create_dup_entry(10, 11), create_dup_entry(10, 12), create_dup_entry(11, 1), create_dup_entry(12, 3), create_dup_entry(13, 99) }; return result; } static std::vector<pg_log_dup_t> example_dups_2() { std::vector<pg_log_dup_t> result = { create_dup_entry(12, 3), create_dup_entry(13, 99), create_dup_entry(15, 11), create_dup_entry(16, 14), create_dup_entry(16, 32) }; return result; } void add_dups(uint a, uint b) { log.dups.push_back(create_dup_entry(a, b)); write_from_dups = std::min(write_from_dups, 
log.dups.back().version); } void add_dups(const std::vector<pg_log_dup_t>& l) { for (auto& i : l) { log.dups.push_back(i); write_from_dups = std::min(write_from_dups, log.dups.back().version); } } static void add_dups(IndexedLog& log, const std::vector<pg_log_dup_t>& dups) { for (auto& i : dups) { log.dups.push_back(i); } } void check_order() { eversion_t prev(0, 0); for (auto& i : log.dups) { EXPECT_LT(prev, i.version) << "verify versions monotonically increase"; prev = i.version; } } void check_index() { EXPECT_EQ(log.dups.size(), log.dup_index.size()); for (auto& i : log.dups) { EXPECT_EQ(1u, log.dup_index.count(i.reqid)); } } void test_disk_roundtrip() { ObjectStore::Transaction t; hobject_t hoid; hoid.pool = 1; hoid.oid = "log"; ghobject_t log_oid(hoid); map<string, bufferlist> km; write_log_and_missing(t, &km, test_coll, log_oid, false); if (!km.empty()) { t.omap_setkeys(test_coll, log_oid, km); } auto ch = store->open_collection(test_coll); ASSERT_EQ(0, store->queue_transaction(ch, std::move(t))); auto orig_dups = log.dups; clear(); ostringstream err; read_log_and_missing(store.get(), ch, log_oid, pg_info_t(), err, false); ASSERT_EQ(orig_dups.size(), log.dups.size()); ASSERT_EQ(orig_dups, log.dups); auto dups_it = log.dups.begin(); for (auto orig_dup : orig_dups) { ASSERT_EQ(orig_dup, *dups_it); ++dups_it; } } coll_t test_coll; }; TEST_F(PGLogMergeDupsTest, OtherEmpty) { log.tail = eversion_t(14, 5); IndexedLog olog; add_dups(example_dups_1()); index(); bool changed = merge_log_dups(olog); EXPECT_FALSE(changed); EXPECT_EQ(5u, log.dups.size()); if (5 == log.dups.size()) { EXPECT_EQ(10u, log.dups.front().version.epoch); EXPECT_EQ(11u, log.dups.front().version.version); EXPECT_EQ(13u, log.dups.back().version.epoch); EXPECT_EQ(99u, log.dups.back().version.version); } check_order(); check_index(); } TEST_F(PGLogMergeDupsTest, AmEmpty) { log.tail = eversion_t(14, 5); index(); IndexedLog olog; add_dups(olog, example_dups_1()); bool changed = merge_log_dups(olog); 
EXPECT_TRUE(changed); EXPECT_EQ(5u, log.dups.size()); if (5 == log.dups.size()) { EXPECT_EQ(10u, log.dups.front().version.epoch); EXPECT_EQ(11u, log.dups.front().version.version); EXPECT_EQ(13u, log.dups.back().version.epoch); EXPECT_EQ(99u, log.dups.back().version.version); } check_order(); check_index(); } TEST_F(PGLogMergeDupsTest, AmEmptyOverlap) { log.tail = eversion_t(12, 3); index(); IndexedLog olog; add_dups(olog, example_dups_1()); bool changed = merge_log_dups(olog); EXPECT_TRUE(changed); EXPECT_EQ(4u, log.dups.size()); if (4 == log.dups.size()) { EXPECT_EQ(10u, log.dups.front().version.epoch); EXPECT_EQ(11u, log.dups.front().version.version); EXPECT_EQ(12u, log.dups.back().version.epoch); EXPECT_EQ(3u, log.dups.back().version.version); } check_order(); check_index(); } TEST_F(PGLogMergeDupsTest, Same) { log.tail = eversion_t(14, 1); IndexedLog olog; add_dups(example_dups_1()); index(); add_dups(olog, example_dups_1()); bool changed = merge_log_dups(olog); EXPECT_FALSE(changed); EXPECT_EQ(5u, log.dups.size()); if (5 == log.dups.size()) { EXPECT_EQ(10u, log.dups.front().version.epoch); EXPECT_EQ(11u, log.dups.front().version.version); EXPECT_EQ(13u, log.dups.back().version.epoch); EXPECT_EQ(99u, log.dups.back().version.version); } check_order(); check_index(); } TEST_F(PGLogMergeDupsTest, Later) { log.tail = eversion_t(16, 14); IndexedLog olog; add_dups(example_dups_1()); index(); add_dups(olog, example_dups_2()); bool changed = merge_log_dups(olog); EXPECT_TRUE(changed); EXPECT_EQ(7u, log.dups.size()); if (7 == log.dups.size()) { EXPECT_EQ(10u, log.dups.front().version.epoch); EXPECT_EQ(11u, log.dups.front().version.version); EXPECT_EQ(16u, log.dups.back().version.epoch); EXPECT_EQ(14u, log.dups.back().version.version); } check_order(); check_index(); } TEST_F(PGLogMergeDupsTest, Earlier) { log.tail = eversion_t(17, 2); IndexedLog olog; add_dups(example_dups_2()); index(); add_dups(olog, example_dups_1()); bool changed = merge_log_dups(olog); 
EXPECT_TRUE(changed); EXPECT_EQ(8u, log.dups.size()); if (6 == log.dups.size()) { EXPECT_EQ(10u, log.dups.front().version.epoch); EXPECT_EQ(11u, log.dups.front().version.version); EXPECT_EQ(16u, log.dups.back().version.epoch); EXPECT_EQ(32u, log.dups.back().version.version); } check_order(); check_index(); } TEST_F(PGLogMergeDupsTest, Superset) { log.tail = eversion_t(17, 2); IndexedLog olog; add_dups(example_dups_1()); index(); olog.dups.push_back(create_dup_entry(9, 5)); olog.dups.push_back(create_dup_entry(15, 11)); bool changed = merge_log_dups(olog); EXPECT_TRUE(changed); EXPECT_EQ(7u, log.dups.size()); if (7 == log.dups.size()) { EXPECT_EQ(9u, log.dups.front().version.epoch); EXPECT_EQ(5u, log.dups.front().version.version); EXPECT_EQ(15u, log.dups.back().version.epoch); EXPECT_EQ(11u, log.dups.back().version.version); } check_order(); check_index(); } struct PGLogTrimTest : public ::testing::Test, public PGLogTestBase, public PGLog::IndexedLog { CephContext *cct = g_ceph_context; using ::testing::Test::SetUp; void SetUp(unsigned dup_track) { constexpr size_t size = 10; char dup_track_s[size]; snprintf(dup_track_s, size, "%u", dup_track); cct->_conf.set_val_or_die("osd_pg_log_dups_tracked", dup_track_s); } }; // struct PGLogTrimTest TEST_F(PGLogTrimTest, TestMakingCephContext) { SetUp(5); EXPECT_EQ(5u, cct->_conf->osd_pg_log_dups_tracked); } TEST_F(PGLogTrimTest, TestPartialTrim) { SetUp(20); PGLog::IndexedLog log; log.head = mk_evt(24, 0); log.skip_can_rollback_to_to_head(); log.head = mk_evt(9, 0); log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(8, 70))); log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 150), mk_evt(10, 100))); log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 155), mk_evt(15, 150))); log.add(mk_ple_mod(mk_obj(1), mk_evt(19, 160), mk_evt(25, 152))); log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 165), mk_evt(26, 160))); log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 167), mk_evt(31, 166))); std::set<eversion_t> trimmed; std::set<std::string> trimmed_dups; 
eversion_t write_from_dups = eversion_t::max(); log.trim(cct, mk_evt(19, 157), &trimmed, &trimmed_dups, &write_from_dups); EXPECT_EQ(eversion_t(15, 150), write_from_dups); EXPECT_EQ(3u, log.log.size()); EXPECT_EQ(3u, trimmed.size()); EXPECT_EQ(2u, log.dups.size()); EXPECT_EQ(0u, trimmed_dups.size()); SetUp(15); std::set<eversion_t> trimmed2; std::set<std::string> trimmed_dups2; eversion_t write_from_dups2 = eversion_t::max(); log.trim(cct, mk_evt(20, 164), &trimmed2, &trimmed_dups2, &write_from_dups2); EXPECT_EQ(eversion_t(19, 160), write_from_dups2); EXPECT_EQ(2u, log.log.size()); EXPECT_EQ(1u, trimmed2.size()); EXPECT_EQ(3u, log.dups.size()); EXPECT_EQ(0u, trimmed_dups2.size()); } TEST_F(PGLogTrimTest, TestTrimNoTrimmed) { SetUp(20); PGLog::IndexedLog log; log.head = mk_evt(20, 0); log.skip_can_rollback_to_to_head(); log.head = mk_evt(9, 0); log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(8, 70))); log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 150), mk_evt(10, 100))); log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 155), mk_evt(15, 150))); log.add(mk_ple_mod(mk_obj(1), mk_evt(20, 160), mk_evt(25, 152))); log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 165), mk_evt(26, 160))); log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 167), mk_evt(31, 166))); eversion_t write_from_dups = eversion_t::max(); log.trim(cct, mk_evt(19, 157), nullptr, nullptr, &write_from_dups); EXPECT_EQ(eversion_t(15, 150), write_from_dups); EXPECT_EQ(3u, log.log.size()); EXPECT_EQ(2u, log.dups.size()); } TEST_F(PGLogTrimTest, TestTrimNoDups) { SetUp(10); PGLog::IndexedLog log; log.head = mk_evt(20, 0); log.skip_can_rollback_to_to_head(); log.head = mk_evt(9, 0); log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(8, 70))); log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 150), mk_evt(10, 100))); log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 155), mk_evt(15, 150))); log.add(mk_ple_mod(mk_obj(1), mk_evt(20, 160), mk_evt(25, 152))); log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 165), mk_evt(26, 160))); 
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 167), mk_evt(31, 166))); std::set<eversion_t> trimmed; std::set<std::string> trimmed_dups; eversion_t write_from_dups = eversion_t::max(); log.trim(cct, mk_evt(19, 157), &trimmed, &trimmed_dups, &write_from_dups); EXPECT_EQ(eversion_t::max(), write_from_dups); EXPECT_EQ(3u, log.log.size()); EXPECT_EQ(3u, trimmed.size()); EXPECT_EQ(0u, log.dups.size()); EXPECT_EQ(0u, trimmed_dups.size()); } TEST_F(PGLogTrimTest, TestNoTrim) { SetUp(20); PGLog::IndexedLog log; log.head = mk_evt(24, 0); log.skip_can_rollback_to_to_head(); log.head = mk_evt(9, 0); log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(8, 70))); log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 150), mk_evt(10, 100))); log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 155), mk_evt(15, 150))); log.add(mk_ple_mod(mk_obj(1), mk_evt(19, 160), mk_evt(25, 152))); log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 165), mk_evt(26, 160))); log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 167), mk_evt(31, 166))); std::set<eversion_t> trimmed; std::set<std::string> trimmed_dups; eversion_t write_from_dups = eversion_t::max(); log.trim(cct, mk_evt(9, 99), &trimmed, &trimmed_dups, &write_from_dups); EXPECT_EQ(eversion_t::max(), write_from_dups); EXPECT_EQ(6u, log.log.size()); EXPECT_EQ(0u, trimmed.size()); EXPECT_EQ(0u, log.dups.size()); EXPECT_EQ(0u, trimmed_dups.size()); } TEST_F(PGLogTrimTest, TestTrimAll) { SetUp(20); PGLog::IndexedLog log; EXPECT_EQ(0u, log.dup_index.size()); // Sanity check log.head = mk_evt(24, 0); log.skip_can_rollback_to_to_head(); log.head = mk_evt(9, 0); log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(8, 70))); log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 150), mk_evt(10, 100))); log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 155), mk_evt(15, 150))); log.add(mk_ple_mod(mk_obj(1), mk_evt(19, 160), mk_evt(25, 152))); log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 165), mk_evt(26, 160))); log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 167), mk_evt(31, 166))); std::set<eversion_t> trimmed; 
std::set<std::string> trimmed_dups; eversion_t write_from_dups = eversion_t::max(); log.trim(cct, mk_evt(22, 180), &trimmed, &trimmed_dups, &write_from_dups); EXPECT_EQ(eversion_t(15, 150), write_from_dups); EXPECT_EQ(0u, log.log.size()); EXPECT_EQ(6u, trimmed.size()); EXPECT_EQ(5u, log.dups.size()); EXPECT_EQ(0u, trimmed_dups.size()); EXPECT_EQ(0u, log.dup_index.size()); // dup_index entry should be trimmed } TEST_F(PGLogTrimTest, TestGetRequest) { SetUp(20); PGLog::IndexedLog log; log.head = mk_evt(20, 0); log.skip_can_rollback_to_to_head(); log.head = mk_evt(9, 0); entity_name_t client = entity_name_t::CLIENT(777); log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(8, 70), osd_reqid_t(client, 8, 1))); log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 150), mk_evt(10, 100), osd_reqid_t(client, 8, 2))); log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 155), mk_evt(15, 150), osd_reqid_t(client, 8, 3))); log.add(mk_ple_mod(mk_obj(1), mk_evt(20, 160), mk_evt(25, 152), osd_reqid_t(client, 8, 4))); log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 165), mk_evt(26, 160), osd_reqid_t(client, 8, 5))); log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 167), mk_evt(31, 166), osd_reqid_t(client, 8, 6))); eversion_t write_from_dups = eversion_t::max(); log.trim(cct, mk_evt(19, 157), nullptr, nullptr, &write_from_dups); EXPECT_EQ(eversion_t(15, 150), write_from_dups); EXPECT_EQ(3u, log.log.size()); EXPECT_EQ(2u, log.dups.size()); eversion_t version; version_t user_version; int return_code; vector<pg_log_op_return_item_t> op_returns; osd_reqid_t log_reqid = osd_reqid_t(client, 8, 5); osd_reqid_t dup_reqid = osd_reqid_t(client, 8, 3); osd_reqid_t bad_reqid = osd_reqid_t(client, 8, 1); bool result; result = log.get_request(log_reqid, &version, &user_version, &return_code, &op_returns); EXPECT_EQ(true, result); EXPECT_EQ(mk_evt(21, 165), version); result = log.get_request(dup_reqid, &version, &user_version, &return_code, &op_returns); EXPECT_EQ(true, result); EXPECT_EQ(mk_evt(15, 155), version); result = 
log.get_request(bad_reqid, &version, &user_version, &return_code, &op_returns); EXPECT_FALSE(result); } TEST_F(PGLogTest, _merge_object_divergent_entries) { { // Test for issue 20843 clear(); hobject_t hoid(object_t(/*name*/"notify.7"), /*key*/string(""), /*snap*/7, /*hash*/77, /*pool*/5, /*nspace*/string("")); mempool::osd_pglog::list<pg_log_entry_t> orig_entries; orig_entries.push_back(mk_ple_mod(hoid, eversion_t(8336, 957), eversion_t(8336, 952))); orig_entries.push_back(mk_ple_err(hoid, eversion_t(8336, 958))); orig_entries.push_back(mk_ple_err(hoid, eversion_t(8336, 959))); orig_entries.push_back(mk_ple_mod(hoid, eversion_t(8336, 960), eversion_t(8336, 957))); log.add(mk_ple_mod(hoid, eversion_t(8973, 1075), eversion_t(8971, 1070))); missing.add(hoid, /*need*/eversion_t(8971, 1070), /*have*/eversion_t(8336, 952), false); pg_info_t oinfo; LogHandler rollbacker; _merge_object_divergent_entries(log, hoid, orig_entries, oinfo, log.get_can_rollback_to(), missing, &rollbacker, this); // No core dump } { // skip leading error entries clear(); hobject_t hoid(object_t(/*name*/"notify.7"), /*key*/string(""), /*snap*/7, /*hash*/77, /*pool*/5, /*nspace*/string("")); mempool::osd_pglog::list<pg_log_entry_t> orig_entries; orig_entries.push_back(mk_ple_err(hoid, eversion_t(8336, 956))); orig_entries.push_back(mk_ple_mod(hoid, eversion_t(8336, 957), eversion_t(8336, 952))); log.add(mk_ple_mod(hoid, eversion_t(8973, 1075), eversion_t(8971, 1070))); missing.add(hoid, /*need*/eversion_t(8971, 1070), /*have*/eversion_t(8336, 952), false); pg_info_t oinfo; LogHandler rollbacker; _merge_object_divergent_entries(log, hoid, orig_entries, oinfo, log.get_can_rollback_to(), missing, &rollbacker, this); // No core dump } } TEST(eversion_t, get_key_name) { eversion_t a(1234, 5678); std::string a_key_name = a.get_key_name(); EXPECT_EQ("0000001234.00000000000000005678", a_key_name); } TEST(pg_log_dup_t, get_key_name) { pg_log_dup_t a(eversion_t(1234, 5678), 13, 
osd_reqid_t(entity_name_t::CLIENT(777), 8, 999), 15); std::string a_key_name = a.get_key_name(); EXPECT_EQ("dup_0000001234.00000000000000005678", a_key_name); } // This tests trim() to make copies of // 2 log entries (107, 106) and 3 additional for a total // of 5 dups. Nothing from the original dups is copied. TEST_F(PGLogTrimTest, TestTrimDups) { SetUp(5); PGLog::IndexedLog log; log.head = mk_evt(21, 107); log.skip_can_rollback_to_to_head(); log.tail = mk_evt(9, 99); log.head = mk_evt(9, 99); entity_name_t client = entity_name_t::CLIENT(777); log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1), mk_evt(9, 99), mk_evt(8, 98), osd_reqid_t(client, 8, 1)))); log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(9, 99), osd_reqid_t(client, 8, 1))); log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 101), mk_evt(10, 100), osd_reqid_t(client, 8, 2))); log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 102), mk_evt(15, 101), osd_reqid_t(client, 8, 3))); log.add(mk_ple_mod(mk_obj(1), mk_evt(20, 103), mk_evt(15, 102), osd_reqid_t(client, 8, 4))); log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 104), mk_evt(20, 103), osd_reqid_t(client, 8, 5))); log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 105), mk_evt(21, 104), osd_reqid_t(client, 8, 6))); log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 106), mk_evt(21, 105), osd_reqid_t(client, 8, 6))); log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 107), mk_evt(21, 106), osd_reqid_t(client, 8, 6))); eversion_t write_from_dups = eversion_t::max(); log.trim(cct, mk_evt(21, 105), nullptr, nullptr, &write_from_dups); EXPECT_EQ(eversion_t(20, 103), write_from_dups) << log; EXPECT_EQ(2u, log.log.size()) << log; EXPECT_EQ(4u, log.dups.size()) << log; } // This tests trim() to make copies of // 4 log entries (107, 106, 105, 104) and 5 additional for a total // of 9 dups. Only 1 of 2 existing dups are copied. 
TEST_F(PGLogTrimTest, TestTrimDups2) { SetUp(9); PGLog::IndexedLog log; log.head = mk_evt(21, 107); log.skip_can_rollback_to_to_head(); log.tail = mk_evt(9, 99); log.head = mk_evt(9, 99); entity_name_t client = entity_name_t::CLIENT(777); log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1), mk_evt(9, 98), mk_evt(8, 97), osd_reqid_t(client, 8, 1)))); log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1), mk_evt(9, 99), mk_evt(8, 98), osd_reqid_t(client, 8, 1)))); log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(9, 99), osd_reqid_t(client, 8, 1))); log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 101), mk_evt(10, 100), osd_reqid_t(client, 8, 2))); log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 102), mk_evt(15, 101), osd_reqid_t(client, 8, 3))); log.add(mk_ple_mod(mk_obj(1), mk_evt(20, 103), mk_evt(15, 102), osd_reqid_t(client, 8, 4))); log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 104), mk_evt(20, 103), osd_reqid_t(client, 8, 5))); log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 105), mk_evt(21, 104), osd_reqid_t(client, 8, 6))); log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 106), mk_evt(21, 105), osd_reqid_t(client, 8, 6))); log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 107), mk_evt(21, 106), osd_reqid_t(client, 8, 6))); eversion_t write_from_dups = eversion_t::max(); log.trim(cct, mk_evt(20, 103), nullptr, nullptr, &write_from_dups); EXPECT_EQ(eversion_t(10, 100), write_from_dups) << log; EXPECT_EQ(4u, log.log.size()) << log; EXPECT_EQ(6u, log.dups.size()) << log; } // This tests copy_up_to() to make copies of // 2 log entries (107, 106) and 3 additional for a total // of 5 dups. Nothing from the original dups is copied. 
TEST_F(PGLogTrimTest, TestCopyUpTo) { SetUp(5); PGLog::IndexedLog log, copy; log.tail = mk_evt(9, 99); log.head = mk_evt(9, 99); entity_name_t client = entity_name_t::CLIENT(777); log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1), mk_evt(9, 99), mk_evt(8, 98), osd_reqid_t(client, 8, 1)))); log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(9, 99), osd_reqid_t(client, 8, 1))); log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 101), mk_evt(10, 100), osd_reqid_t(client, 8, 2))); log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 102), mk_evt(15, 101), osd_reqid_t(client, 8, 3))); log.add(mk_ple_mod(mk_obj(1), mk_evt(20, 103), mk_evt(15, 102), osd_reqid_t(client, 8, 4))); log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 104), mk_evt(20, 103), osd_reqid_t(client, 8, 5))); log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 105), mk_evt(21, 104), osd_reqid_t(client, 8, 6))); log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 106), mk_evt(21, 105), osd_reqid_t(client, 8, 6))); log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 107), mk_evt(21, 106), osd_reqid_t(client, 8, 6))); copy.copy_up_to(cct, log, 2); EXPECT_EQ(2u, copy.log.size()) << copy; EXPECT_EQ(copy.head, mk_evt(21, 107)) << copy; EXPECT_EQ(copy.tail, mk_evt(21, 105)) << copy; // Tracking 5 means 3 additional as dups EXPECT_EQ(3u, copy.dups.size()) << copy; } // This tests copy_up_to() to make copies of // 4 log entries (107, 106, 105, 104) and 5 additional for a total // of 5 dups. Only 1 of 2 existing dups are copied. 
TEST_F(PGLogTrimTest, TestCopyUpTo2) { SetUp(9); PGLog::IndexedLog log, copy; log.tail = mk_evt(9, 99); log.head = mk_evt(9, 99); entity_name_t client = entity_name_t::CLIENT(777); log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1), mk_evt(8, 98), mk_evt(8, 97), osd_reqid_t(client, 8, 1)))); log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1), mk_evt(9, 99), mk_evt(8, 98), osd_reqid_t(client, 8, 1)))); log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(9, 99), osd_reqid_t(client, 8, 1))); log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 101), mk_evt(10, 100), osd_reqid_t(client, 8, 2))); log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 102), mk_evt(15, 101), osd_reqid_t(client, 8, 3))); log.add(mk_ple_mod(mk_obj(1), mk_evt(20, 103), mk_evt(15, 102), osd_reqid_t(client, 8, 4))); log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 104), mk_evt(20, 103), osd_reqid_t(client, 8, 5))); log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 105), mk_evt(21, 104), osd_reqid_t(client, 8, 6))); log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 106), mk_evt(21, 105), osd_reqid_t(client, 8, 6))); log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 107), mk_evt(21, 106), osd_reqid_t(client, 8, 6))); copy.copy_up_to(cct, log, 4); EXPECT_EQ(4u, copy.log.size()) << copy; EXPECT_EQ(copy.head, mk_evt(21, 107)) << copy; EXPECT_EQ(copy.tail, mk_evt(20, 103)) << copy; // Tracking 5 means 3 additional as dups EXPECT_EQ(5u, copy.dups.size()) << copy; } // This tests copy_after() by specifying a version that copies // 2 log entries (107, 106) and 3 additional for a total // of 5 dups. Nothing of the original dups is copied. 
TEST_F(PGLogTrimTest, TestCopyAfter) { SetUp(5); PGLog::IndexedLog log, copy; log.tail = mk_evt(9, 99); log.head = mk_evt(9, 99); entity_name_t client = entity_name_t::CLIENT(777); log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1), mk_evt(9, 99), mk_evt(8, 98), osd_reqid_t(client, 8, 1)))); log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(9, 99), osd_reqid_t(client, 8, 1))); log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 101), mk_evt(10, 100), osd_reqid_t(client, 8, 2))); log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 102), mk_evt(15, 101), osd_reqid_t(client, 8, 3))); log.add(mk_ple_mod(mk_obj(1), mk_evt(20, 103), mk_evt(15, 102), osd_reqid_t(client, 8, 4))); log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 104), mk_evt(20, 103), osd_reqid_t(client, 8, 5))); log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 105), mk_evt(21, 104), osd_reqid_t(client, 8, 6))); log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 106), mk_evt(21, 105), osd_reqid_t(client, 8, 6))); log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 107), mk_evt(21, 106), osd_reqid_t(client, 8, 6))); copy.copy_after(cct, log, mk_evt(21, 105)); EXPECT_EQ(2u, copy.log.size()) << copy; EXPECT_EQ(copy.head, mk_evt(21, 107)) << copy; EXPECT_EQ(copy.tail, mk_evt(21, 105)) << copy; // Tracking 5 means 3 additional as dups EXPECT_EQ(3u, copy.dups.size()) << copy; } // This copies everything dups and log because of the large max dups // and value passed to copy_after(). 
TEST_F(PGLogTrimTest, TestCopyAfter2) { SetUp(3000); PGLog::IndexedLog log, copy; log.tail = mk_evt(9, 99); log.head = mk_evt(9, 99); entity_name_t client = entity_name_t::CLIENT(777); log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1), mk_evt(8, 93), mk_evt(8, 92), osd_reqid_t(client, 8, 1)))); log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1), mk_evt(8, 94), mk_evt(8, 93), osd_reqid_t(client, 8, 1)))); log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1), mk_evt(8, 95), mk_evt(8, 94), osd_reqid_t(client, 8, 1)))); log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1), mk_evt(8, 96), mk_evt(8, 95), osd_reqid_t(client, 8, 1)))); log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1), mk_evt(8, 97), mk_evt(8, 96), osd_reqid_t(client, 8, 1)))); log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1), mk_evt(8, 98), mk_evt(8, 97), osd_reqid_t(client, 8, 1)))); log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1), mk_evt(9, 99), mk_evt(8, 98), osd_reqid_t(client, 8, 1)))); log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(9, 99), osd_reqid_t(client, 8, 1))); log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 101), mk_evt(10, 100), osd_reqid_t(client, 8, 2))); log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 102), mk_evt(15, 101), osd_reqid_t(client, 8, 3))); log.add(mk_ple_mod(mk_obj(1), mk_evt(20, 103), mk_evt(15, 102), osd_reqid_t(client, 8, 4))); log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 104), mk_evt(20, 103), osd_reqid_t(client, 8, 5))); log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 105), mk_evt(21, 104), osd_reqid_t(client, 8, 6))); log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 106), mk_evt(21, 105), osd_reqid_t(client, 8, 6))); log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 107), mk_evt(21, 106), osd_reqid_t(client, 8, 6))); copy.copy_after(cct, log, mk_evt(9, 99)); EXPECT_EQ(8u, copy.log.size()) << copy; EXPECT_EQ(copy.head, mk_evt(21, 107)) << copy; EXPECT_EQ(copy.tail, mk_evt(9, 99)) << copy; // Tracking 3000 is larger than all entries, so all dups copied EXPECT_EQ(7u, 
copy.dups.size()) << copy; } // Local Variables: // compile-command: "cd ../.. ; make unittest_pglog ; ./unittest_pglog --log-to-stderr=true --debug-osd=20 # --gtest_filter=*.* " // End:
98,502
29.308615
130
cc
null
ceph-main/src/test/osd/TestRados.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "common/Cond.h" #include "common/errno.h" #include "common/version.h" #include <iostream> #include <sstream> #include <map> #include <numeric> #include <string> #include <vector> #include <stdlib.h> #include <unistd.h> #include "test/osd/RadosModel.h" using namespace std; class WeightedTestGenerator : public TestOpGenerator { public: WeightedTestGenerator(int ops, int objects, map<TestOpType, unsigned int> op_weights, TestOpStat *stats, int max_seconds, bool ec_pool, bool balance_reads, bool localize_reads, bool set_redirect, bool set_chunk, bool enable_dedup) : m_nextop(NULL), m_op(0), m_ops(ops), m_seconds(max_seconds), m_objects(objects), m_stats(stats), m_total_weight(0), m_ec_pool(ec_pool), m_balance_reads(balance_reads), m_localize_reads(localize_reads), m_set_redirect(set_redirect), m_set_chunk(set_chunk), m_enable_dedup(enable_dedup) { m_start = time(0); for (map<TestOpType, unsigned int>::const_iterator it = op_weights.begin(); it != op_weights.end(); ++it) { m_total_weight += it->second; m_weight_sums.insert(pair<TestOpType, unsigned int>(it->first, m_total_weight)); } if (m_set_redirect || m_set_chunk) { if (m_set_redirect) { m_ops = ops+m_objects+m_objects; } else { /* create 10 chunks per an object*/ m_ops = ops+m_objects+m_objects*10; } } } TestOp *next(RadosTestContext &context) override { TestOp *retval = NULL; ++m_op; if (m_op <= m_objects && !m_set_redirect && !m_set_chunk ) { stringstream oid; oid << m_op; /*if (m_op % 2) { // make it a long name oid << " " << string(300, 'o'); }*/ cout << m_op << ": write initial oid " << oid.str() << std::endl; context.oid_not_flushing.insert(oid.str()); if (m_ec_pool) { return new WriteOp(m_op, &context, oid.str(), true, true); } else { return new WriteOp(m_op, &context, oid.str(), false, true); } } else if (m_op >= m_ops) { return NULL; } if (m_set_redirect || m_set_chunk) { if 
(init_extensible_tier(context, retval)) { return retval; } } if (m_nextop) { retval = m_nextop; m_nextop = NULL; return retval; } while (retval == NULL) { unsigned int rand_val = rand() % m_total_weight; time_t now = time(0); if (m_seconds && now - m_start > m_seconds) break; for (map<TestOpType, unsigned int>::const_iterator it = m_weight_sums.begin(); it != m_weight_sums.end(); ++it) { if (rand_val < it->second) { retval = gen_op(context, it->first); break; } } } return retval; } bool init_extensible_tier(RadosTestContext &context, TestOp *& op) { /* * set-redirect or set-chunk test (manifest test) * 0. make default objects (using create op) * 1. set-redirect or set-chunk * 2. initialize target objects (using write op) * 3. wait for set-* completion */ int copy_manifest_end = 0; if (m_set_chunk) { copy_manifest_end = m_objects*2; } else { copy_manifest_end = m_objects*3; } int make_manifest_end = copy_manifest_end; if (m_set_chunk) { /* make 10 chunks per an object*/ make_manifest_end = make_manifest_end + m_objects * 10; } else { /* redirect */ make_manifest_end = make_manifest_end + m_objects; } if (m_op <= m_objects) { stringstream oid; oid << m_op; /*if (m_op % 2) { oid << " " << string(300, 'o'); }*/ cout << m_op << ": write initial oid " << oid.str() << std::endl; context.oid_not_flushing.insert(oid.str()); if (m_ec_pool) { op = new WriteOp(m_op, &context, oid.str(), true, true); } else { op = new WriteOp(m_op, &context, oid.str(), false, true); } return true; } else if (m_op <= copy_manifest_end) { stringstream oid, oid2; //int _oid = m_op-m_objects; int _oid = m_op % m_objects + 1; oid << _oid; /*if ((_oid) % 2) { oid << " " << string(300, 'o'); }*/ if (context.oid_in_use.count(oid.str())) { /* previous write is not finished */ op = NULL; m_op--; cout << m_op << " wait for completion of write op! 
" << std::endl; return true; } int _oid2 = m_op - m_objects + 1; if (_oid2 > copy_manifest_end - m_objects) { _oid2 -= (copy_manifest_end - m_objects); } oid2 << _oid2 << " " << context.low_tier_pool_name; if ((_oid2) % 2) { oid2 << " " << string(300, 'm'); } cout << m_op << ": " << "copy oid " << oid.str() << " target oid " << oid2.str() << std::endl; op = new CopyOp(m_op, &context, oid.str(), oid2.str(), context.low_tier_pool_name); return true; } else if (m_op <= make_manifest_end) { if (m_set_redirect) { stringstream oid, oid2; int _oid = m_op-copy_manifest_end; oid << _oid; /*if ((_oid) % 2) { oid << " " << string(300, 'o'); }*/ oid2 << _oid << " " << context.low_tier_pool_name; if ((_oid) % 2) { oid2 << " " << string(300, 'm'); } if (context.oid_in_use.count(oid.str())) { /* previous copy is not finished */ op = NULL; m_op--; cout << m_op << " retry set_redirect !" << std::endl; return true; } cout << m_op << ": " << "set_redirect oid " << oid.str() << " target oid " << oid2.str() << std::endl; op = new SetRedirectOp(m_op, &context, oid.str(), oid2.str(), context.pool_name); return true; } else if (m_set_chunk) { stringstream oid; int _oid = m_op % m_objects +1; oid << _oid; /*if ((_oid) % 2) { oid << " " << string(300, 'o'); }*/ if (context.oid_in_use.count(oid.str())) { /* previous set-chunk is not finished */ op = NULL; m_op--; cout << m_op << " retry set_chunk !" 
<< std::endl; return true; } stringstream oid2; oid2 << _oid << " " << context.low_tier_pool_name; if ((_oid) % 2) { oid2 << " " << string(300, 'm'); } cout << m_op << ": " << "set_chunk oid " << oid.str() << " target oid " << oid2.str() << std::endl; op = new SetChunkOp(m_op, &context, oid.str(), oid2.str(), m_stats); return true; } } else if (m_op == make_manifest_end + 1) { int set_size = context.oid_not_in_use.size(); int set_manifest_size = context.oid_redirect_not_in_use.size(); cout << m_op << " oid_not_in_use " << set_size << " oid_redirect_not_in_use " << set_manifest_size << std::endl; /* wait for redirect or set_chunk initialization */ if (set_size != m_objects || set_manifest_size != 0) { op = NULL; m_op--; cout << m_op << " wait for manifest initialization " << std::endl; return true; } for (int t_op = m_objects+1; t_op <= m_objects*2; t_op++) { stringstream oid; oid << t_op << " " << context.low_tier_pool_name; if (t_op % 2) { oid << " " << string(300, 'm'); } cout << " redirect_not_in_use: " << oid.str() << std::endl; context.oid_redirect_not_in_use.insert(oid.str()); } } return false; } private: TestOp *gen_op(RadosTestContext &context, TestOpType type) { string oid, oid2; ceph_assert(context.oid_not_in_use.size()); switch (type) { case TEST_OP_READ: oid = *(rand_choose(context.oid_not_in_use)); return new ReadOp(m_op, &context, oid, m_balance_reads, m_localize_reads, m_stats); case TEST_OP_WRITE: oid = *(rand_choose(context.oid_not_in_use)); cout << m_op << ": " << "write oid " << oid << " current snap is " << context.current_snap << std::endl; return new WriteOp(m_op, &context, oid, false, false, m_stats); case TEST_OP_WRITE_EXCL: oid = *(rand_choose(context.oid_not_in_use)); cout << m_op << ": " << "write (excl) oid " << oid << " current snap is " << context.current_snap << std::endl; return new WriteOp(m_op, &context, oid, false, true, m_stats); case TEST_OP_WRITESAME: oid = *(rand_choose(context.oid_not_in_use)); cout << m_op << ": " << 
"writesame oid " << oid << " current snap is " << context.current_snap << std::endl; return new WriteSameOp(m_op, &context, oid, m_stats); case TEST_OP_DELETE: oid = *(rand_choose(context.oid_not_in_use)); cout << m_op << ": " << "delete oid " << oid << " current snap is " << context.current_snap << std::endl; return new DeleteOp(m_op, &context, oid, m_stats); case TEST_OP_SNAP_CREATE: cout << m_op << ": " << "snap_create" << std::endl; return new SnapCreateOp(m_op, &context, m_stats); case TEST_OP_SNAP_REMOVE: if (context.snaps.size() <= context.snaps_in_use.size()) { return NULL; } while (true) { int snap = rand_choose(context.snaps)->first; if (context.snaps_in_use.lookup(snap)) continue; // in use; try again! cout << m_op << ": " << "snap_remove snap " << snap << std::endl; return new SnapRemoveOp(m_op, &context, snap, m_stats); } case TEST_OP_ROLLBACK: { string oid = *(rand_choose(context.oid_not_in_use)); cout << m_op << ": " << "rollback oid " << oid << " current snap is " << context.current_snap << std::endl; return new RollbackOp(m_op, &context, oid); } case TEST_OP_SETATTR: oid = *(rand_choose(context.oid_not_in_use)); cout << m_op << ": " << "setattr oid " << oid << " current snap is " << context.current_snap << std::endl; return new SetAttrsOp(m_op, &context, oid, m_stats); case TEST_OP_RMATTR: oid = *(rand_choose(context.oid_not_in_use)); cout << m_op << ": " << "rmattr oid " << oid << " current snap is " << context.current_snap << std::endl; return new RemoveAttrsOp(m_op, &context, oid, m_stats); case TEST_OP_WATCH: oid = *(rand_choose(context.oid_not_in_use)); cout << m_op << ": " << "watch oid " << oid << " current snap is " << context.current_snap << std::endl; return new WatchOp(m_op, &context, oid, m_stats); case TEST_OP_COPY_FROM: oid = *(rand_choose(context.oid_not_in_use)); do { oid2 = *(rand_choose(context.oid_not_in_use)); } while (oid == oid2); cout << m_op << ": " << "copy_from oid " << oid << " from oid " << oid2 << " current snap is " << 
context.current_snap << std::endl; return new CopyFromOp(m_op, &context, oid, oid2, m_stats); case TEST_OP_HIT_SET_LIST: { uint32_t hash = rjhash32(rand()); cout << m_op << ": " << "hit_set_list " << hash << std::endl; return new HitSetListOp(m_op, &context, hash, m_stats); } case TEST_OP_UNDIRTY: { oid = *(rand_choose(context.oid_not_in_use)); cout << m_op << ": " << "undirty oid " << oid << std::endl; return new UndirtyOp(m_op, &context, oid, m_stats); } case TEST_OP_IS_DIRTY: { oid = *(rand_choose(context.oid_not_flushing)); return new IsDirtyOp(m_op, &context, oid, m_stats); } case TEST_OP_CACHE_FLUSH: { oid = *(rand_choose(context.oid_not_in_use)); return new CacheFlushOp(m_op, &context, oid, m_stats, true); } case TEST_OP_CACHE_TRY_FLUSH: { oid = *(rand_choose(context.oid_not_in_use)); return new CacheFlushOp(m_op, &context, oid, m_stats, false); } case TEST_OP_CACHE_EVICT: { oid = *(rand_choose(context.oid_not_in_use)); return new CacheEvictOp(m_op, &context, oid, m_stats); } case TEST_OP_APPEND: oid = *(rand_choose(context.oid_not_in_use)); cout << "append oid " << oid << " current snap is " << context.current_snap << std::endl; return new WriteOp(m_op, &context, oid, true, false, m_stats); case TEST_OP_APPEND_EXCL: oid = *(rand_choose(context.oid_not_in_use)); cout << "append oid (excl) " << oid << " current snap is " << context.current_snap << std::endl; return new WriteOp(m_op, &context, oid, true, true, m_stats); case TEST_OP_CHUNK_READ: oid = *(rand_choose(context.oid_not_in_use)); cout << m_op << ": " << "chunk read oid " << oid << " target oid " << oid2 << std::endl; return new ChunkReadOp(m_op, &context, oid, context.pool_name, false, m_stats); case TEST_OP_TIER_PROMOTE: oid = *(rand_choose(context.oid_not_in_use)); cout << m_op << ": " << "tier_promote oid " << oid << std::endl; return new TierPromoteOp(m_op, &context, oid, m_stats); case TEST_OP_TIER_FLUSH: oid = *(rand_choose(context.oid_not_in_use)); cout << m_op << ": " << "tier_flush oid " << 
oid << std::endl; return new TierFlushOp(m_op, &context, oid, m_stats); case TEST_OP_SET_REDIRECT: oid = *(rand_choose(context.oid_not_in_use)); oid2 = *(rand_choose(context.oid_redirect_not_in_use)); cout << m_op << ": " << "set_redirect oid " << oid << " target oid " << oid2 << std::endl; return new SetRedirectOp(m_op, &context, oid, oid2, context.pool_name, m_stats); case TEST_OP_UNSET_REDIRECT: oid = *(rand_choose(context.oid_not_in_use)); cout << m_op << ": " << "unset_redirect oid " << oid << std::endl; return new UnsetRedirectOp(m_op, &context, oid, m_stats); case TEST_OP_SET_CHUNK: { ceph_assert(m_enable_dedup); oid = *(rand_choose(context.oid_not_in_use)); cout << m_op << ": " << "set_chunk oid " << oid << " target oid " << std::endl; return new SetChunkOp(m_op, &context, oid, "", m_stats); } case TEST_OP_TIER_EVICT: oid = *(rand_choose(context.oid_not_in_use)); cout << m_op << ": " << "tier_evict oid " << oid << std::endl; return new TierEvictOp(m_op, &context, oid, m_stats); default: cerr << m_op << ": Invalid op type " << type << std::endl; ceph_abort(); return nullptr; } } TestOp *m_nextop; int m_op; int m_ops; int m_seconds; int m_objects; time_t m_start; TestOpStat *m_stats; map<TestOpType, unsigned int> m_weight_sums; unsigned int m_total_weight; bool m_ec_pool; bool m_balance_reads; bool m_localize_reads; bool m_set_redirect; bool m_set_chunk; bool m_enable_dedup; }; int main(int argc, char **argv) { int ops = 1000; int objects = 50; int max_in_flight = 16; int64_t size = 4000000; // 4 MB int64_t min_stride_size = -1, max_stride_size = -1; int max_seconds = 0; bool pool_snaps = false; bool write_fadvise_dontneed = false; struct { TestOpType op; const char *name; bool ec_pool_valid; } op_types[] = { { TEST_OP_READ, "read", true }, { TEST_OP_WRITE, "write", false }, { TEST_OP_WRITE_EXCL, "write_excl", false }, { TEST_OP_WRITESAME, "writesame", false }, { TEST_OP_DELETE, "delete", true }, { TEST_OP_SNAP_CREATE, "snap_create", true }, { 
TEST_OP_SNAP_REMOVE, "snap_remove", true }, { TEST_OP_ROLLBACK, "rollback", true }, { TEST_OP_SETATTR, "setattr", true }, { TEST_OP_RMATTR, "rmattr", true }, { TEST_OP_WATCH, "watch", true }, { TEST_OP_COPY_FROM, "copy_from", true }, { TEST_OP_HIT_SET_LIST, "hit_set_list", true }, { TEST_OP_IS_DIRTY, "is_dirty", true }, { TEST_OP_UNDIRTY, "undirty", true }, { TEST_OP_CACHE_FLUSH, "cache_flush", true }, { TEST_OP_CACHE_TRY_FLUSH, "cache_try_flush", true }, { TEST_OP_CACHE_EVICT, "cache_evict", true }, { TEST_OP_APPEND, "append", true }, { TEST_OP_APPEND_EXCL, "append_excl", true }, { TEST_OP_SET_REDIRECT, "set_redirect", true }, { TEST_OP_UNSET_REDIRECT, "unset_redirect", true }, { TEST_OP_CHUNK_READ, "chunk_read", true }, { TEST_OP_TIER_PROMOTE, "tier_promote", true }, { TEST_OP_TIER_FLUSH, "tier_flush", true }, { TEST_OP_SET_CHUNK, "set_chunk", true }, { TEST_OP_TIER_EVICT, "tier_evict", true }, { TEST_OP_READ /* grr */, NULL }, }; struct { const char *name; } chunk_algo_types[] = { { "fastcdc" }, { "fixcdc" }, }; map<TestOpType, unsigned int> op_weights; string pool_name = "rbd"; string low_tier_pool_name = ""; bool ec_pool = false; bool no_omap = false; bool no_sparse = false; bool balance_reads = false; bool localize_reads = false; bool set_redirect = false; bool set_chunk = false; bool enable_dedup = false; string chunk_algo = ""; string chunk_size = ""; for (int i = 1; i < argc; ++i) { if (strcmp(argv[i], "--max-ops") == 0) ops = atoi(argv[++i]); else if (strcmp(argv[i], "--pool") == 0) pool_name = argv[++i]; else if (strcmp(argv[i], "--max-seconds") == 0) max_seconds = atoi(argv[++i]); else if (strcmp(argv[i], "--objects") == 0) objects = atoi(argv[++i]); else if (strcmp(argv[i], "--max-in-flight") == 0) max_in_flight = atoi(argv[++i]); else if (strcmp(argv[i], "--size") == 0) size = atoi(argv[++i]); else if (strcmp(argv[i], "--min-stride-size") == 0) min_stride_size = atoi(argv[++i]); else if (strcmp(argv[i], "--max-stride-size") == 0) max_stride_size = 
atoi(argv[++i]); else if (strcmp(argv[i], "--no-omap") == 0) no_omap = true; else if (strcmp(argv[i], "--no-sparse") == 0) no_sparse = true; else if (strcmp(argv[i], "--balance-reads") == 0) balance_reads = true; else if (strcmp(argv[i], "--localize-reads") == 0) localize_reads = true; else if (strcmp(argv[i], "--pool-snaps") == 0) pool_snaps = true; else if (strcmp(argv[i], "--write-fadvise-dontneed") == 0) write_fadvise_dontneed = true; else if (strcmp(argv[i], "--ec-pool") == 0) { if (!op_weights.empty()) { cerr << "--ec-pool must be specified prior to any ops" << std::endl; exit(1); } ec_pool = true; no_omap = true; no_sparse = true; } else if (strcmp(argv[i], "--op") == 0) { i++; if (i == argc) { cerr << "Missing op after --op" << std::endl; return 1; } int j; for (j = 0; op_types[j].name; ++j) { if (strcmp(op_types[j].name, argv[i]) == 0) { break; } } if (!op_types[j].name) { cerr << "unknown op " << argv[i] << std::endl; exit(1); } i++; if (i == argc) { cerr << "Weight unspecified." << std::endl; return 1; } int weight = atoi(argv[i]); if (weight < 0) { cerr << "Weights must be nonnegative." << std::endl; return 1; } else if (weight > 0) { if (ec_pool && !op_types[j].ec_pool_valid) { cerr << "Error: cannot use op type " << op_types[j].name << " with --ec-pool" << std::endl; exit(1); } cout << "adding op weight " << op_types[j].name << " -> " << weight << std::endl; op_weights.insert(pair<TestOpType, unsigned int>(op_types[j].op, weight)); } } else if (strcmp(argv[i], "--set_redirect") == 0) { set_redirect = true; } else if (strcmp(argv[i], "--set_chunk") == 0) { set_chunk = true; } else if (strcmp(argv[i], "--low_tier_pool") == 0) { /* * disallow redirect or chunk object into the same pool * to prevent the race. 
see https://github.com/ceph/ceph/pull/20096 */ low_tier_pool_name = argv[++i]; } else if (strcmp(argv[i], "--enable_dedup") == 0) { enable_dedup = true; } else if (strcmp(argv[i], "--dedup_chunk_algo") == 0) { i++; if (i == argc) { cerr << "Missing chunking algorithm after --dedup_chunk_algo" << std::endl; return 1; } int j; for (j = 0; chunk_algo_types[j].name; ++j) { if (strcmp(chunk_algo_types[j].name, argv[i]) == 0) { break; } } if (!chunk_algo_types[j].name) { cerr << "unknown op " << argv[i] << std::endl; exit(1); } chunk_algo = chunk_algo_types[j].name; } else if (strcmp(argv[i], "--dedup_chunk_size") == 0) { chunk_size = argv[++i]; } else { cerr << "unknown arg " << argv[i] << std::endl; exit(1); } } if (set_redirect || set_chunk) { if (low_tier_pool_name == "") { cerr << "low_tier_pool is needed" << std::endl; exit(1); } } if (enable_dedup) { if (chunk_algo == "" || chunk_size == "") { cerr << "Missing chunking algorithm: " << chunk_algo << " or chunking size: " << chunk_size << std::endl; exit(1); } } if (op_weights.empty()) { cerr << "No operations specified" << std::endl; exit(1); } if (min_stride_size < 0) min_stride_size = size / 10; if (max_stride_size < 0) max_stride_size = size / 5; cout << pretty_version_to_str() << std::endl; cout << "Configuration:" << std::endl << "\tNumber of operations: " << ops << std::endl << "\tNumber of objects: " << objects << std::endl << "\tMax in flight operations: " << max_in_flight << std::endl << "\tObject size (in bytes): " << size << std::endl << "\tWrite stride min: " << min_stride_size << std::endl << "\tWrite stride max: " << max_stride_size << std::endl; if (min_stride_size >= max_stride_size) { cerr << "Error: max_stride_size must be more than min_stride_size" << std::endl; return 1; } if (min_stride_size > size || max_stride_size > size) { cerr << "Error: min_stride_size and max_stride_size must be " << "smaller than object size" << std::endl; return 1; } if (max_in_flight * 2 > objects) { cerr << "Error: 
max_in_flight must be <= than the number of objects / 2" << std::endl; return 1; } char *id = getenv("CEPH_CLIENT_ID"); RadosTestContext context( pool_name, max_in_flight, size, min_stride_size, max_stride_size, no_omap, no_sparse, pool_snaps, write_fadvise_dontneed, low_tier_pool_name, enable_dedup, chunk_algo, chunk_size, id); TestOpStat stats; WeightedTestGenerator gen = WeightedTestGenerator( ops, objects, op_weights, &stats, max_seconds, ec_pool, balance_reads, localize_reads, set_redirect, set_chunk, enable_dedup); int r = context.init(); if (r < 0) { cerr << "Error initializing rados test context: " << cpp_strerror(r) << std::endl; exit(1); } context.loop(&gen); if (enable_dedup) { if (!context.check_chunks_refcount(context.low_tier_io_ctx, context.io_ctx)) { cerr << " Invalid refcount " << std::endl; exit(1); } } context.shutdown(); cerr << context.errors << " errors." << std::endl; cerr << stats << std::endl; return 0; }
22,438
29.738356
119
cc
null
ceph-main/src/test/osd/ceph_test_osd_stale_read.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "gtest/gtest.h" #include "mds/mdstypes.h" #include "include/buffer.h" #include "include/rbd_types.h" #include "include/rados/librados.h" #include "include/rados/librados.hpp" #include "include/stringify.h" #include "include/types.h" #include "global/global_context.h" #include "global/global_init.h" #include "common/ceph_argparse.h" #include "common/common_init.h" #include "common/Cond.h" #include "json_spirit/json_spirit.h" #include <errno.h> #include <map> #include <sstream> #include <string> using namespace std; using namespace librados; int get_primary_osd(Rados& rados, const string& pool_name, const string& oid, int *pprimary) { bufferlist inbl; string cmd = string("{\"prefix\": \"osd map\",\"pool\":\"") + pool_name + string("\",\"object\": \"") + oid + string("\",\"format\": \"json\"}"); bufferlist outbl; if (int r = rados.mon_command(cmd, inbl, &outbl, nullptr); r < 0) { return r; } string outstr(outbl.c_str(), outbl.length()); json_spirit::Value v; if (!json_spirit::read(outstr, v)) { cerr <<" unable to parse json " << outstr << std::endl; return -1; } json_spirit::Object& o = v.get_obj(); for (json_spirit::Object::size_type i=0; i<o.size(); i++) { json_spirit::Pair& p = o[i]; if (p.name_ == "acting_primary") { cout << "primary = " << p.value_.get_int() << std::endl; *pprimary = p.value_.get_int(); return 0; } } cerr << "didn't find primary in " << outstr << std::endl; return -1; } int fence_osd(Rados& rados, int osd) { bufferlist inbl, outbl; string cmd("{\"prefix\": \"injectargs\",\"injected_args\":[" "\"--ms-blackhole-osd\", " "\"--ms-blackhole-mon\"]}"); return rados.osd_command(osd, cmd, inbl, &outbl, NULL); } int mark_down_osd(Rados& rados, int osd) { bufferlist inbl, outbl; string cmd("{\"prefix\": \"osd down\",\"ids\":[\"" + stringify(osd) + "\"]}"); return rados.mon_command(cmd, inbl, &outbl, NULL); } TEST(OSD, StaleRead) { // create two rados 
instances, one pool Rados rados1, rados2; IoCtx ioctx1, ioctx2; int r; r = rados1.init_with_context(g_ceph_context); ASSERT_EQ(0, r); r = rados1.connect(); ASSERT_EQ(0, r); srand(time(0)); string pool_name = "read-hole-test-" + stringify(rand()); r = rados1.pool_create(pool_name.c_str()); ASSERT_EQ(0, r); r = rados1.ioctx_create(pool_name.c_str(), ioctx1); ASSERT_EQ(0, r); r = rados2.init_with_context(g_ceph_context); ASSERT_EQ(0, r); r = rados2.connect(); ASSERT_EQ(0, r); r = rados2.ioctx_create(pool_name.c_str(), ioctx2); ASSERT_EQ(0, r); string oid = "foo"; bufferlist one; one.append("one"); { cout << "client1: writing 'one'" << std::endl; r = ioctx1.write_full(oid, one); ASSERT_EQ(0, r); } // make sure 2 can read it { cout << "client2: reading 'one'" << std::endl; bufferlist bl; r = ioctx2.read(oid, bl, 3, 0); ASSERT_EQ(3, r); ASSERT_EQ('o', bl[0]); ASSERT_EQ('n', bl[1]); ASSERT_EQ('e', bl[2]); } // find the primary int primary; r = get_primary_osd(rados1, pool_name, oid, &primary); ASSERT_EQ(0, r); // fence it cout << "client1: fencing primary" << std::endl; fence_osd(rados1, primary); mark_down_osd(rados1, primary); rados1.wait_for_latest_osdmap(); // should still be able to read the old value on 2 { cout << "client2: reading 'one' again from old primary" << std::endl; bufferlist bl; r = ioctx2.read(oid, bl, 3, 0); ASSERT_EQ(3, r); ASSERT_EQ('o', bl[0]); ASSERT_EQ('n', bl[1]); ASSERT_EQ('e', bl[2]); } // update object on 1 bufferlist two; two.append("two"); { cout << "client1: writing 'two' to new acting set" << std::endl; r = ioctx1.write_full(oid, two); ASSERT_EQ(0, r); } // make sure we can't still read the old value on 2 { cout << "client2: reading again from old primary" << std::endl; bufferlist bl; r = ioctx2.read(oid, bl, 3, 0); ASSERT_EQ(3, r); ASSERT_EQ('t', bl[0]); ASSERT_EQ('w', bl[1]); ASSERT_EQ('o', bl[2]); } rados1.shutdown(); rados2.shutdown(); } int main(int argc, char **argv) { auto args = argv_to_vec(argc, argv); auto cct = 
global_init(nullptr, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0); common_init_finish(g_ceph_context); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
4,536
24.488764
73
cc
null
ceph-main/src/test/osd/hitset.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * Copyright 2013 Inktank */ #include "gtest/gtest.h" #include "osd/HitSet.h" #include <iostream> class HitSetTestStrap { public: HitSet *hitset; explicit HitSetTestStrap(HitSet *h) : hitset(h) {} void fill(unsigned count) { char buf[50]; for (unsigned i = 0; i < count; ++i) { sprintf(buf, "hitsettest_%u", i); hobject_t obj(object_t(buf), "", 0, i, 0, ""); hitset->insert(obj); } EXPECT_EQ(count, hitset->insert_count()); } void verify_fill(unsigned count) { char buf[50]; for (unsigned i = 0; i < count; ++i) { sprintf(buf, "hitsettest_%u", i); hobject_t obj(object_t(buf), "", 0, i, 0, ""); EXPECT_TRUE(hitset->contains(obj)); } } }; class BloomHitSetTest : public testing::Test, public HitSetTestStrap { public: BloomHitSetTest() : HitSetTestStrap(new HitSet(new BloomHitSet)) {} void rebuild(double fp, uint64_t target, uint64_t seed) { BloomHitSet::Params *bparams = new BloomHitSet::Params(fp, target, seed); HitSet::Params param(bparams); HitSet new_set(param); *hitset = new_set; } BloomHitSet *get_hitset() { return static_cast<BloomHitSet*>(hitset->impl.get()); } }; TEST_F(BloomHitSetTest, Params) { BloomHitSet::Params params(0.01, 100, 5); EXPECT_EQ(.01, params.get_fpp()); EXPECT_EQ((unsigned)100, params.target_size); EXPECT_EQ((unsigned)5, params.seed); params.set_fpp(0.1); EXPECT_EQ(0.1, params.get_fpp()); bufferlist bl; params.encode(bl); BloomHitSet::Params p2; auto iter = bl.cbegin(); p2.decode(iter); EXPECT_EQ(0.1, p2.get_fpp()); EXPECT_EQ((unsigned)100, p2.target_size); EXPECT_EQ((unsigned)5, p2.seed); } TEST_F(BloomHitSetTest, Construct) { ASSERT_EQ(hitset->impl->get_type(), HitSet::TYPE_BLOOM); // success! 
} TEST_F(BloomHitSetTest, Rebuild) { rebuild(0.1, 100, 1); ASSERT_EQ(hitset->impl->get_type(), HitSet::TYPE_BLOOM); } TEST_F(BloomHitSetTest, InsertsMatch) { rebuild(0.1, 100, 1); fill(50); /* * the approx unique count is atrocious on bloom filters. Empirical * evidence suggests the current test will produce a value of 62 * regardless of hitset size */ EXPECT_TRUE(hitset->approx_unique_insert_count() >= 50 && hitset->approx_unique_insert_count() <= 62); verify_fill(50); EXPECT_FALSE(hitset->is_full()); } TEST_F(BloomHitSetTest, FillsUp) { rebuild(0.1, 20, 1); fill(20); verify_fill(20); EXPECT_TRUE(hitset->is_full()); } TEST_F(BloomHitSetTest, RejectsNoMatch) { rebuild(0.001, 100, 1); fill(100); verify_fill(100); EXPECT_TRUE(hitset->is_full()); char buf[50]; int matches = 0; for (int i = 100; i < 200; ++i) { sprintf(buf, "hitsettest_%d", i); hobject_t obj(object_t(buf), "", 0, i, 0, ""); if (hitset->contains(obj)) ++matches; } // we set a 1 in 1000 false positive; allow one in our 100 EXPECT_LT(matches, 2); } class ExplicitHashHitSetTest : public testing::Test, public HitSetTestStrap { public: ExplicitHashHitSetTest() : HitSetTestStrap(new HitSet(new ExplicitHashHitSet)) {} ExplicitHashHitSet *get_hitset() { return static_cast<ExplicitHashHitSet*>(hitset->impl.get()); } }; TEST_F(ExplicitHashHitSetTest, Construct) { ASSERT_EQ(hitset->impl->get_type(), HitSet::TYPE_EXPLICIT_HASH); // success! 
} TEST_F(ExplicitHashHitSetTest, InsertsMatch) { fill(50); verify_fill(50); EXPECT_EQ((unsigned)50, hitset->approx_unique_insert_count()); EXPECT_FALSE(hitset->is_full()); } TEST_F(ExplicitHashHitSetTest, RejectsNoMatch) { fill(100); verify_fill(100); EXPECT_FALSE(hitset->is_full()); char buf[50]; int matches = 0; for (int i = 100; i < 200; ++i) { sprintf(buf, "hitsettest_%d", i); hobject_t obj(object_t(buf), "", 0, i, 0, ""); if (hitset->contains(obj)) { ++matches; } } EXPECT_EQ(matches, 0); } class ExplicitObjectHitSetTest : public testing::Test, public HitSetTestStrap { public: ExplicitObjectHitSetTest() : HitSetTestStrap(new HitSet(new ExplicitObjectHitSet)) {} ExplicitObjectHitSet *get_hitset() { return static_cast<ExplicitObjectHitSet*>(hitset->impl.get()); } }; TEST_F(ExplicitObjectHitSetTest, Construct) { ASSERT_EQ(hitset->impl->get_type(), HitSet::TYPE_EXPLICIT_OBJECT); // success! } TEST_F(ExplicitObjectHitSetTest, InsertsMatch) { fill(50); verify_fill(50); EXPECT_EQ((unsigned)50, hitset->approx_unique_insert_count()); EXPECT_FALSE(hitset->is_full()); } TEST_F(ExplicitObjectHitSetTest, RejectsNoMatch) { fill(100); verify_fill(100); EXPECT_FALSE(hitset->is_full()); char buf[50]; int matches = 0; for (int i = 100; i < 200; ++i) { sprintf(buf, "hitsettest_%d", i); hobject_t obj(object_t(buf), "", 0, i, 0, ""); if (hitset->contains(obj)) { ++matches; } } EXPECT_EQ(matches, 0); }
5,183
25.181818
103
cc
null
ceph-main/src/test/osd/osdcap.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2012 Inktank * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <iostream> #include "include/stringify.h" #include "osd/OSDCap.h" #include "gtest/gtest.h" using namespace std; const char *parse_good[] = { "allow *", "allow r", "allow rwx", "allow r pool foo ", "allow r pool=foo", "allow wx pool taco", "allow pool foo r", "allow pool taco wx", "allow wx pool taco object_prefix obj", "allow wx pool taco object_prefix obj_with_underscores_and_no_quotes", "allow pool taco object_prefix obj wx", "allow pool taco object_prefix obj_with_underscores_and_no_quotes wx", "allow rwx pool 'weird name'", "allow rwx pool \"weird name with ''s\"", "allow rwx pool foo, allow r pool bar", "allow rwx pool foo ; allow r pool bar", "allow rwx pool foo ;allow r pool bar", "allow rwx pool foo; allow r pool bar", "allow pool foo rwx, allow pool bar r", "allow pool foo.froo.foo rwx, allow pool bar r", "allow pool foo rwx ; allow pool bar r", "allow pool foo rwx ;allow pool bar r", "allow pool foo rwx; allow pool bar r", "allow pool data rw, allow pool rbd rwx, allow pool images class rbd foo", "allow class-read", "allow class-write", "allow class-read class-write", "allow r class-read pool foo", "allow rw class-read class-write pool foo", "allow r class-read pool foo", "allow pool bar rwx; allow pool baz r class-read", "allow class foo", "allow class clsname \"clsthingidon'tunderstand\"", " allow rwx pool foo; allow r pool bar ", " allow rwx pool foo; allow r pool bar ", " allow pool foo rwx; allow pool bar r ", " allow pool foo rwx; allow pool bar r ", " allow wx pool taco", "\tallow\nwx\tpool \n taco\t", "allow class-read object_prefix rbd_children, allow pool 
libvirt-pool-test rwx", "allow class-read object_prefix rbd-children, allow pool libvirt_pool_test rwx", "allow pool foo namespace nfoo rwx, allow pool bar namespace=nbar r", "allow pool foo namespace=nfoo rwx ; allow pool bar namespace=nbar r", "allow pool foo namespace nfoo rwx ;allow pool bar namespace nbar r", "allow pool foo namespace=nfoo rwx; allow pool bar namespace nbar object_prefix rbd r", "allow rwx namespace=nfoo tag cephfs data=cephfs_a", "allow rwx namespace foo tag cephfs data =cephfs_a", "allow pool foo namespace=nfoo* rwx", "allow pool foo namespace=\"\" rwx; allow pool bar namespace='' object_prefix rbd r", "allow pool foo namespace \"\" rwx; allow pool bar namespace '' object_prefix rbd r", "profile abc, profile abc pool=bar, profile abc pool=bar namespace=foo", "allow rwx tag application key=value", "allow rwx tag application key = value", "allow rwx tag application key =value", "allow rwx tag application key= value", "allow rwx tag application key = value", "allow all tag application all=all", "allow rwx network 127.0.0.1/8", "allow rwx network ::1/128", "allow rwx network [ff::1]/128", "profile foo network 127.0.0.1/8", "allow rwx namespace foo tag cephfs data =cephfs_a network 127.0.0.1/8", "allow pool foo rwx network 1.2.3.4/24", 0 }; TEST(OSDCap, ParseGood) { for (int i=0; parse_good[i]; i++) { string str = parse_good[i]; OSDCap cap; std::cout << "Testing good input: '" << str << "'" << std::endl; ASSERT_TRUE(cap.parse(str, &cout)); } } const char *parse_bad[] = { "allow r poolfoo", "allow r w", "ALLOW r", "allow rwx,", "allow rwx x", "allow r pool foo r", "allow wwx pool taco", "allow wwx pool taco^funny&chars", "allow rwx pool 'weird name''", "allow rwx object_prefix \"beforepool\" pool weird", "allow rwx auid 123 pool asdf", "allow xrwx pool foo,, allow r pool bar", ";allow rwx pool foo rwx ; allow r pool bar", "allow rwx pool foo ;allow r pool bar gibberish", "allow rwx auid 123 pool asdf namespace=foo", "allow rwx auid 123 namespace", 
"allow rwx namespace", "allow namespace", "allow namespace=foo", "allow namespace=f*oo", "allow rwx auid 123 namespace asdf", "allow wwx pool ''", "allow rwx tag application key value", "allow rwx auid 123", "allow auid 123 rwx", "allow r pool foo object_prefix blah ; allow w auid 5", 0 }; TEST(OSDCap, ParseBad) { for (int i=0; parse_bad[i]; i++) { string str = parse_bad[i]; OSDCap cap; std::cout << "Testing bad input: '" << str << "'" << std::endl; ASSERT_FALSE(cap.parse(str, &cout)); } } TEST(OSDCap, AllowAll) { OSDCap cap; entity_addr_t addr; ASSERT_FALSE(cap.allow_all()); ASSERT_TRUE(cap.parse("allow r", NULL)); ASSERT_FALSE(cap.allow_all()); cap.grants.clear(); ASSERT_TRUE(cap.parse("allow w", NULL)); ASSERT_FALSE(cap.allow_all()); cap.grants.clear(); ASSERT_TRUE(cap.parse("allow x", NULL)); ASSERT_FALSE(cap.allow_all()); cap.grants.clear(); ASSERT_TRUE(cap.parse("allow rwx", NULL)); ASSERT_FALSE(cap.allow_all()); cap.grants.clear(); ASSERT_TRUE(cap.parse("allow rw", NULL)); ASSERT_FALSE(cap.allow_all()); cap.grants.clear(); ASSERT_TRUE(cap.parse("allow rx", NULL)); ASSERT_FALSE(cap.allow_all()); cap.grants.clear(); ASSERT_TRUE(cap.parse("allow wx", NULL)); ASSERT_FALSE(cap.allow_all()); cap.grants.clear(); ASSERT_TRUE(cap.parse("allow *", NULL)); ASSERT_TRUE(cap.allow_all()); ASSERT_TRUE(cap.is_capable("foo", "", {}, "asdf", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "anamespace", {}, "asdf", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "asdf", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "anamespace", {}, "asdf", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "asdf", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "anamespace", {{"application", {{"key", "value"}}}}, "asdf", true, true, {{"cls", "", true, 
true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "asdf", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "anamespace", {{"application", {{"key", "value"}}}}, "asdf", true, true, {{"cls", "", true, true, true}}, addr)); // 'allow *' overrides allow list ASSERT_TRUE(cap.is_capable("foo", "", {}, "asdf", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "anamespace", {}, "asdf", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "asdf", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "anamespace", {}, "asdf", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "asdf", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "anamespace", {{"application", {{"key", "value"}}}}, "asdf", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "asdf", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "anamespace", {{"application", {{"key", "value"}}}}, "asdf", true, true, {{"cls", "", true, true, false}}, addr)); } TEST(OSDCap, AllowPool) { OSDCap cap; entity_addr_t addr; bool r = cap.parse("allow rwx pool foo", NULL); ASSERT_TRUE(r); ASSERT_TRUE(cap.is_capable("foo", "", {}, "", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "ns", {{"application", {{"key", "value"}}}}, "", true, true, {{"cls", "", true, true, true}}, addr)); // true->false for classes not on allow list 
ASSERT_FALSE(cap.is_capable("foo", "", {}, "", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "ns", {{"application", {{"key", "value"}}}}, "", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "ns", {}, "", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "ns", {{"application", {{"key", "value"}}}}, "", true, true, {{"cls", "", true, true, true}}, addr)); } TEST(OSDCap, AllowPools) { entity_addr_t addr; OSDCap cap; bool r = cap.parse("allow rwx pool foo, allow r pool bar", NULL); ASSERT_TRUE(r); ASSERT_TRUE(cap.is_capable("foo", "", {}, "", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "ns", {{"application", {{"key", "value"}}}}, "", true, true, {{"cls", "", true, true, true}}, addr)); // true-false for classes not on allow list ASSERT_FALSE(cap.is_capable("foo", "", {}, "", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "ns", 
{{"application", {{"key", "value"}}}}, "", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "", true, false, {}, addr)); ASSERT_TRUE(cap.is_capable("bar", "ns", {}, "", true, false, {}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "", true, false, {}, addr)); ASSERT_TRUE(cap.is_capable("bar", "ns", {{"application", {{"key", "value"}}}}, "", true, false, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "ns", {}, "", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "ns", {{"application", {{"key", "value"}}}}, "", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("baz", "", {}, "", true, false, {}, addr)); ASSERT_FALSE(cap.is_capable("baz", "ns", {}, "", true, false, {}, addr)); ASSERT_FALSE(cap.is_capable("baz", "", {{"application", {{"key", "value"}}}}, "", true, false, {}, addr)); ASSERT_FALSE(cap.is_capable("baz", "ns", {{"application", {{"key", "value"}}}}, "", true, false, {}, addr)); } TEST(OSDCap, AllowPools2) { entity_addr_t addr; OSDCap cap; bool r = cap.parse("allow r, allow rwx pool foo", NULL); ASSERT_TRUE(r); ASSERT_TRUE(cap.is_capable("foo", "", {}, "", true, true, {{"cls", "", true, true, true}}, addr)); // true-false for classes not on allow list ASSERT_FALSE(cap.is_capable("foo", "", {}, "", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "", true, false, {}, addr)); } TEST(OSDCap, ObjectPrefix) { entity_addr_t addr; OSDCap cap; bool r = cap.parse("allow rwx object_prefix foo", NULL); ASSERT_TRUE(r); 
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "food", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo_bar", true, true, {{"cls", "", true, true, true}}, addr)); // true-false for classes not on allow list ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "food", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo_bar", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "_foo", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, " foo ", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "fo", true, true, {{"cls", "", true, true, true}}, addr)); } TEST(OSDCap, ObjectPoolAndPrefix) { entity_addr_t addr; OSDCap cap; bool r = cap.parse("allow rwx pool bar object_prefix foo", NULL); ASSERT_TRUE(r); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "food", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo_bar", true, true, {{"cls", "", true, true, true}}, addr)); // true-false for classes not on allow list ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "food", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo_bar", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("baz", "", {}, "food", true, true, {{"cls", "", true, true, 
true}}, addr)); ASSERT_FALSE(cap.is_capable("baz", "", {}, "fo", true, true, {{"cls", "", true, true, true}}, addr)); } TEST(OSDCap, Namespace) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow rw namespace=nfoo")); ASSERT_TRUE(cap.is_capable("bar", "nfoo", {}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "nfoobar", {}, "foo", true, true, {}, addr)); } TEST(OSDCap, NamespaceGlob) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow rw namespace=nfoo*")); ASSERT_TRUE(cap.is_capable("bar", "nfoo", {}, "foo", true, true, {}, addr)); ASSERT_TRUE(cap.is_capable("bar", "nfoobar", {}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "nfo", {}, "foo", true, true, {}, addr)); } TEST(OSDCap, BasicR) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow r", NULL)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {}, addr)); } TEST(OSDCap, BasicW) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow w", NULL)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, true, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, 
{{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {}, addr)); } TEST(OSDCap, BasicX) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow x", NULL)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, true, true}}, addr)); // true->false when class not on allow list ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {}, addr)); } TEST(OSDCap, BasicRW) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow rw", NULL)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, true, {}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); } TEST(OSDCap, BasicRX) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow rx", NULL)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", 
true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", true, true, true}}, addr)); // true->false for class not on allow list ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {}, addr)); } TEST(OSDCap, BasicWX) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow wx", NULL)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, true, {}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr)); // true->false for class not on allow list ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {}, addr)); } TEST(OSDCap, BasicRWX) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow rwx", NULL)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {}, addr)); 
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, false, true}}, addr)); // true->false for class not on allow list ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, false, false}}, addr)); } TEST(OSDCap, BasicRWClassRClassW) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow rw class-read class-write", NULL)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", false, true, true}}, addr)); 
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, false, true}}, addr)); // true->false when class not allow listed ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, false, false}}, addr)); } TEST(OSDCap, ClassR) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow class-read", NULL)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, true}}, addr)); // true->false when class not allow listed ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); } TEST(OSDCap, ClassW) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow class-write", NULL)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", false, true, true}}, addr)); // true->false when class not allow listed ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, 
{{"cls", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); } TEST(OSDCap, ClassRW) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow class-read class-write", NULL)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, true, true}}, addr)); // true->false when class not allow listed ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); } TEST(OSDCap, BasicRClassR) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow r class-read", NULL)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", 
{{"application", {{"key", "value"}}}}, "foo", true, false, {}, addr)); // true->false when class not allow listed ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "foo", true, false, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_TRUE(cap.is_capable("bar", "any", {}, "foo", false, false, {{"cls", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "any", {}, "foo", true, false, {{"cls", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "any", {}, "foo", true, false, {}, addr)); // true->false when class not allow listed ASSERT_FALSE(cap.is_capable("bar", "any", {}, "foo", false, false, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "any", {}, "foo", true, false, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "any", {{"application", {{"key", "value"}}}}, "foo", true, false, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "any", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "any", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "any", {}, "foo", false, true, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "any", {}, "foo", true, true, {}, addr)); 
ASSERT_FALSE(cap.is_capable("bar", "any", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr)); } TEST(OSDCap, PoolClassR) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow pool bar r class-read, allow pool foo rwx", NULL)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "foo", true, false, {}, addr)); // true->false when class not allow listed ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "foo", true, false, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_TRUE(cap.is_capable("bar", "ns", {}, "foo", false, false, {{"cls", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "ns", {}, "foo", true, false, {{"cls", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "ns", {}, "foo", true, false, {}, addr)); ASSERT_TRUE(cap.is_capable("bar", "ns", {{"application", {{"key", "value"}}}}, "foo", true, false, {}, addr)); // true->false when class not allow listed ASSERT_FALSE(cap.is_capable("bar", "ns", {}, 
"foo", false, false, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "ns", {}, "foo", true, false, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "ns", {{"application", {{"key", "value"}}}}, "foo", true, false, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "ns", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "ns", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "ns", {}, "foo", false, true, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "ns", {}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "ns", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", false, false, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", false, false, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", true, false, {{"cls", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", true, false, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, true}}, addr)); // true->false when class not allow listed ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", false, false, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", 
false, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, false, {{"cls", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, false}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", false, false, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", false, false, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", true, false, {{"cls", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", true, false, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", true, true, {{"cls", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", true, true, {{"cls", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "ns", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, true}}, addr)); // true->false when class not allow listed ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", false, false, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", false, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", true, false, {{"cls", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", true, true, {{"cls", "", false, true, 
false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", true, true, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "ns", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", false, false, {}, addr)); ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", false, false, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", true, false, {{"cls", "", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", true, false, {}, addr)); ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", true, true, {{"cls", "", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", true, true, {{"cls", "", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("baz", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, true}}, addr)); } TEST(OSDCap, PoolClassRNS) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow pool bar namespace='' r class-read, allow pool foo namespace=ns rwx", NULL)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "foo", true, false, {}, addr)); // true->false when class not allow listed ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", true, false, false}}, addr)); 
ASSERT_FALSE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "foo", true, false, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "ns", {}, "foo", false, false, {{"cls", "", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "ns", {}, "foo", true, false, {{"cls", "", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "ns", {}, "foo", true, false, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "ns", {{"application", {{"key", "value"}}}}, "foo", true, false, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "other", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "other", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "other", {}, "foo", false, true, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "other", {}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "other", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", false, false, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", false, false, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, false, {{"cls", "", false, true, true}}, addr)); 
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, false, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", false, false, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", false, false, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", true, false, {{"cls", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", true, false, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", true, true, {{"cls", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", true, true, {{"cls", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "ns", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, true}}, addr)); // true->false when class not allow listed ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", false, false, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", false, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", true, false, {{"cls", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", true, true, {{"cls", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", true, true, {{"cls", 
"", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "ns", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", false, false, {}, addr)); ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", false, false, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", true, false, {{"cls", "", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", true, false, {}, addr)); ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", true, true, {{"cls", "", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", true, true, {{"cls", "", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("baz", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, true}}, addr)); } TEST(OSDCap, NSClassR) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow namespace '' rw class-read class-write, allow namespace test r", NULL)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, 
false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, true}}, addr)); // true->false when class not allow listed ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, false}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", false, false, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", false, false, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", true, false, {{"cls", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", true, false, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, true}}, addr)); // true->false when class not allow listed ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", false, false, {{"cls", 
"", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", false, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, false, {{"cls", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, false}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "test", {}, "foo", true, false, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "test", {}, "foo", false, true, {{"cls", "", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "test", {}, "foo", true, false, {{"cls", "", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "test", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "test", {}, "foo", false, true, {{"cls", "", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "test", {}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("bar", "test", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "test", {}, "foo", true, false, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "test", {}, "foo", false, true, {{"cls", "", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "test", {}, "foo", true, false, {{"cls", "", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "test", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "test", {}, "foo", false, true, {{"cls", "", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "test", {}, "foo", true, true, {}, addr)); 
ASSERT_FALSE(cap.is_capable("foo", "test", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "bad", {}, "foo", true, false, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "bad", {}, "foo", false, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "bad", {}, "foo", false, false, {{"cls", "", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "bad", {}, "foo", false, false, {{"cls", "", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "bad", {{"application", {{"key", "value"}}}}, "foo", false, false, {{"cls", "", false, true, true}}, addr)); } TEST(OSDCap, PoolTagBasic) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow rwx tag application key=value", NULL)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}, {"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}, {"key", "value"}}}, {"app2", {{"foo", "bar"}}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key2", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {{"app2", {{"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}, {"key2", "value"}}}, {"app2", {{"foo", "bar"}}}}, "foo", true, true, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, false, {}, 
addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", false, true, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", false, false, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "ns", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, false, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", false, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", false, false, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", true, true, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", false, true, {{"cls", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", false, true, {{"cls", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", false, true, {{"cls", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", false, true, {{"cls", "", false, false, true}}, addr)); // true->false when class not allow listed ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", false, true, {{"cls", "", false, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", false, true, {{"cls", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", false, true, {{"cls", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", false, true, {{"cls", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", 
false, true, {{"cls", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", false, true, {{"cls", "", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", false, true, {{"cls", "", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", false, true, {{"cls", "", false, false, true}}, addr)); } TEST(OSDCap, PoolTagWildK) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow rwx tag application *=value", NULL)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}, {"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}, {"key", "value"}}}, {"app2", {{"foo", "bar"}}}}, "foo", true, true, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key2", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {{"app2", {{"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}, {"key2", "value"}}}, {"app2", {{"foo", "bar"}}}}, "foo", true, true, {}, addr)); } TEST(OSDCap, PoolTagWildV) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow rwx tag application key=*", NULL)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}, {"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}, {"key", "value"}}}, {"app2", {{"foo", 
"bar"}}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key2", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {{"app2", {{"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}, {"key2", "value"}}}, {"app2", {{"foo", "bar"}}}}, "foo", true, true, {}, addr)); } TEST(OSDCap, PoolTagWildKV) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow rwx tag application *=*", NULL)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}, {"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}, {"key", "value"}}}, {"app2", {{"foo", "bar"}}}}, "foo", true, true, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {}}}, "foo", true, true, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key2", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {{"app2", {{"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}, {"key2", "value"}}}, {"app2", {{"foo", "bar"}}}}, "foo", true, true, {}, addr)); } TEST(OSDCap, NSPool) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow rwx namespace ns tag application key=value", NULL)); ASSERT_TRUE(cap.is_capable("foo", "ns", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr)); 
ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "ns2", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "ns", {{"application", {{"key", "value2"}}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "ns", {{"application", {{"key2", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {}, addr)); } TEST(OSDCap, NSPoolGlob) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow rwx namespace ns* tag application key=value", NULL)); ASSERT_TRUE(cap.is_capable("foo", "ns", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "ns2", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "ns", {{"application", {{"key", "value2"}}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "ns", {{"application", {{"key2", "value"}}}}, "foo", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {}, addr)); } TEST(OSDCap, OutputParsed) { entity_addr_t addr; struct CapsTest { const char *input; const char *output; }; CapsTest test_values[] = { {"allow *", "osdcap[grant(*)]"}, {"allow r", "osdcap[grant(r)]"}, {"allow rx", "osdcap[grant(rx)]"}, {"allow rwx", "osdcap[grant(rwx)]"}, {"allow rw class-read class-write", "osdcap[grant(rwx)]"}, {"allow rw class-read", "osdcap[grant(rw class-read)]"}, {"allow rw class-write", "osdcap[grant(rw class-write)]"}, {"allow rwx pool images", "osdcap[grant(pool images rwx)]"}, {"allow r pool images", "osdcap[grant(pool 
images r)]"}, {"allow pool images rwx", "osdcap[grant(pool images rwx)]"}, {"allow pool images r", "osdcap[grant(pool images r)]"}, {"allow pool images w", "osdcap[grant(pool images w)]"}, {"allow pool images x", "osdcap[grant(pool images x)]"}, {"allow r pool images namespace ''", "osdcap[grant(pool images namespace \"\" r)]"}, {"allow r pool images namespace foo", "osdcap[grant(pool images namespace foo r)]"}, {"allow r pool images namespace \"\"", "osdcap[grant(pool images namespace \"\" r)]"}, {"allow r namespace foo", "osdcap[grant(namespace foo r)]"}, {"allow pool images r; allow pool rbd rwx", "osdcap[grant(pool images r),grant(pool rbd rwx)]"}, {"allow pool images r, allow pool rbd rwx", "osdcap[grant(pool images r),grant(pool rbd rwx)]"}, {"allow class-read object_prefix rbd_children, allow pool libvirt-pool-test rwx", "osdcap[grant(object_prefix rbd_children class-read),grant(pool libvirt-pool-test rwx)]"}, {"allow rwx tag application key=value", "osdcap[grant(app application key key val value rwx)]"}, {"allow rwx namespace ns* tag application key=value", "osdcap[grant(namespace ns* app application key key val value rwx)]"}, {"allow all", "osdcap[grant(*)]"}, {"allow rwx tag application all=all", "osdcap[grant(app application key * val * rwx)]"}, {"allow rwx network 1.2.3.4/24", "osdcap[grant(rwx network 1.2.3.4/24)]"}, }; size_t num_tests = sizeof(test_values) / sizeof(*test_values); for (size_t i = 0; i < num_tests; ++i) { OSDCap cap; std::cout << "Testing input '" << test_values[i].input << "'" << std::endl; ASSERT_TRUE(cap.parse(test_values[i].input)); ASSERT_EQ(test_values[i].output, stringify(cap)); } } TEST(OSDCap, AllowClass) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow class foo", NULL)); // can call any method on class foo regardless of allow list status ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", 
"", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}}, addr)); // does not permit invoking class bar ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, true, false}}, addr)); } TEST(OSDCap, AllowClassMethod) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow class foo xyz", NULL)); // can call the xyz method on class foo regardless of allow list status ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "xyz", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "xyz", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "xyz", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "xyz", true, false, false}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "xyz", false, true, false}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "xyz", true, true, false}}, addr)); // does not permit invoking 
class bar ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "xyz", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "xyz", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "xyz", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "xyz", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "xyz", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "xyz", true, true, false}}, addr)); } TEST(OSDCap, AllowClass2) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow class foo, allow class bar", NULL)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, 
"foo", false, false, {{"foo", "", true, true, false}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, false, false}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", false, true, false}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, true, false}}, addr)); } TEST(OSDCap, AllowClassRWX) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow rwx, allow class foo", NULL)); // can call any method on class foo regardless of allow list status ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}}, addr)); // does not permit invoking class bar ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, true, false}}, addr)); // allows class bar if it is allow listed ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, 
false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, true, true}}, addr)); } TEST(OSDCap, AllowClassMulti) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow class foo", NULL)); // can call any method on foo, but not bar, so the entire op is rejected // bar with allow list is rejected because it still needs rwx/class-read,write ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", false, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", true, false, 
false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", false, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", false, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", 
{}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", false, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", false, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, 
false}, {"bar", "", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", false, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", false, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", false, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", false, true, true}}, 
addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", false, false, false}}, addr)); // these are OK because 'bar' is on the allow list BUT the calls don't read or write ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", false, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", false, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", false, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", false, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", false, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", false, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", false, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", false, false, true}}, addr)); // can call any method on foo or bar regardless of allow list status OSDCap cap2; ASSERT_TRUE(cap2.parse("allow class foo, allow class bar", NULL)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", true, true, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", true, true, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, 
true}, {"bar", "", true, false, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", true, false, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", false, true, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", false, true, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", false, false, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", false, false, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", true, true, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", true, true, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", true, false, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", true, false, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", false, true, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", false, true, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", false, false, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", false, false, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", true, true, true}}, addr)); 
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", true, true, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", true, false, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", true, false, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", false, true, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", false, true, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", false, false, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", false, false, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", true, true, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", true, true, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", true, false, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", true, false, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", false, true, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", false, true, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", false, false, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", 
false, false, {{"foo", "", true, false, false}, {"bar", "", false, false, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", true, true, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", true, true, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", true, false, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", true, false, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", false, true, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", false, true, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", false, false, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", false, false, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", true, true, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", true, true, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", true, false, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", true, false, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", false, true, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", 
"", false, true, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", false, false, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", false, false, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", true, true, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", true, true, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", true, false, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", true, false, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", false, true, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", false, true, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", false, false, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", false, false, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", true, true, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", true, true, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", true, false, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", true, false, false}}, addr)); 
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", false, true, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", false, true, false}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", false, false, true}}, addr)); ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", false, false, false}}, addr)); } TEST(OSDCap, AllowClassMultiRWX) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("allow rwx, allow class foo", NULL)); // can call anything on foo, but only allow listed methods on bar ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", false, false, true}}, addr)); // fails because bar not allow listed ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", false, false, false}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", 
"", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", false, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", false, false, false}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", false, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", 
"", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", false, false, false}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", false, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", false, false, false}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", false, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, 
{"bar", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", false, false, false}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", false, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", false, false, false}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", false, false, true}}, addr)); 
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", false, false, false}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", false, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", false, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", true, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", true, false, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", false, true, false}}, addr)); ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", false, false, false}}, addr)); } TEST(OSDCap, AllowProfile) { entity_addr_t addr; OSDCap cap; ASSERT_TRUE(cap.parse("profile read-only, profile read-write pool abc", NULL)); ASSERT_FALSE(cap.allow_all()); ASSERT_FALSE(cap.is_capable("foo", "", {}, "asdf", true, true, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {}, "asdf", true, false, {}, 
addr)); ASSERT_TRUE(cap.is_capable("abc", "", {}, "asdf", false, true, {}, addr)); // RBD cap.grants.clear(); ASSERT_TRUE(cap.parse("profile rbd pool abc", NULL)); ASSERT_FALSE(cap.allow_all()); ASSERT_FALSE(cap.is_capable("foo", "", {}, "asdf", true, true, {}, addr)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "rbd_children", true, false, {}, addr)); ASSERT_TRUE(cap.is_capable("foo", "", {}, "rbd_children", false, false, {{"rbd", "", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("abc", "", {}, "asdf", true, true, {{"rbd", "", true, true, true}}, addr)); cap.grants.clear(); ASSERT_TRUE(cap.parse("profile rbd-read-only pool abc", NULL)); ASSERT_FALSE(cap.allow_all()); ASSERT_FALSE(cap.is_capable("foo", "", {}, "rbd_children", true, false, {}, addr)); ASSERT_TRUE(cap.is_capable("abc", "", {}, "asdf", true, false, {{"rbd", "", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("abc", "", {}, "asdf", true, true, {}, addr)); ASSERT_TRUE(cap.is_capable("abc", "", {}, "rbd_header.ABC", false, false, {{"rbd", "child_attach", true, true, true}}, addr)); ASSERT_TRUE(cap.is_capable("abc", "", {}, "rbd_header.ABC", false, false, {{"rbd", "child_detach", true, true, true}}, addr)); ASSERT_FALSE(cap.is_capable("abc", "", {}, "rbd_header.ABC", false, false, {{"rbd", "other function", true, true, true}}, addr)); cap.grants.clear(); ASSERT_TRUE(cap.parse("profile rbd pool pool1 namespace ns1", nullptr)); ASSERT_TRUE(cap.is_capable("pool1", "", {}, "rbd_info", false, false, {{"rbd", "metadata_list", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("pool1", "ns1", {}, "rbd_info", false, false, {{"rbd", "metadata_list", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("pool1", "ns2", {}, "rbd_info", false, false, {{"rbd", "metadata_list", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("pool2", "", {}, "rbd_info", false, false, {{"rbd", "metadata_list", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("pool1", "", {}, "asdf", false, 
false, {{"rbd", "metadata_list", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("pool1", "", {}, "rbd_info", false, false, {{"rbd", "other_method", true, false, true}}, addr)); cap.grants.clear(); ASSERT_TRUE(cap.parse("profile rbd-read-only pool pool1 namespace ns1", nullptr)); ASSERT_TRUE(cap.is_capable("pool1", "", {}, "rbd_info", false, false, {{"rbd", "metadata_list", true, false, true}}, addr)); ASSERT_TRUE(cap.is_capable("pool1", "ns1", {}, "rbd_info", false, false, {{"rbd", "metadata_list", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("pool1", "ns2", {}, "rbd_info", false, false, {{"rbd", "metadata_list", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("pool2", "", {}, "rbd_info", false, false, {{"rbd", "metadata_list", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("pool1", "", {}, "asdf", false, false, {{"rbd", "metadata_list", true, false, true}}, addr)); ASSERT_FALSE(cap.is_capable("pool1", "", {}, "rbd_info", false, false, {{"rbd", "other_method", true, false, true}}, addr)); } TEST(OSDCap, network) { entity_addr_t a, b, c; a.parse("10.1.2.3"); b.parse("192.168.2.3"); c.parse("192.167.2.3"); OSDCap cap; ASSERT_TRUE(cap.parse("allow * network 192.168.0.0/16, allow * network 10.0.0.0/8", NULL)); ASSERT_TRUE(cap.is_capable("foo", "", {}, "asdf", true, true, {{"cls", "", true, true, true}}, a)); ASSERT_TRUE(cap.is_capable("foo", "", {}, "asdf", true, true, {{"cls", "", true, true, true}}, b)); ASSERT_FALSE(cap.is_capable("foo", "", {}, "asdf", true, true, {{"cls", "", true, true, true}}, c)); }
98,791
69.616154
155
cc
null
ceph-main/src/test/osd/safe-to-destroy.sh
#!/usr/bin/env bash source $CEPH_ROOT/qa/standalone/ceph-helpers.sh set -e function run() { local dir=$1 shift export CEPH_MON="127.0.0.1:$(get_unused_port)" export CEPH_ARGS CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " CEPH_ARGS+="--mon-host=$CEPH_MON " set -e local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} for func in $funcs ; do setup $dir || return 1 $func $dir || return 1 teardown $dir || return 1 done } function TEST_safe_to_destroy() { local dir=$1 run_mon $dir a run_mgr $dir x run_osd $dir 0 run_osd $dir 1 run_osd $dir 2 run_osd $dir 3 flush_pg_stats ceph osd safe-to-destroy 0 ceph osd safe-to-destroy 1 ceph osd safe-to-destroy 2 ceph osd safe-to-destroy 3 ceph osd pool create foo 128 sleep 2 flush_pg_stats wait_for_clean expect_failure $dir 'pgs currently' ceph osd safe-to-destroy 0 expect_failure $dir 'pgs currently' ceph osd safe-to-destroy 1 expect_failure $dir 'pgs currently' ceph osd safe-to-destroy 2 expect_failure $dir 'pgs currently' ceph osd safe-to-destroy 3 ceph osd out 0 sleep 2 flush_pg_stats wait_for_clean ceph osd safe-to-destroy 0 # even osds without osd_stat are ok if all pgs are active+clean id=`ceph osd create` ceph osd safe-to-destroy $id } function TEST_ok_to_stop() { local dir=$1 run_mon $dir a run_mgr $dir x run_osd $dir 0 run_osd $dir 1 run_osd $dir 2 run_osd $dir 3 ceph osd pool create foo 128 ceph osd pool set foo size 3 ceph osd pool set foo min_size 2 sleep 1 flush_pg_stats wait_for_clean ceph osd ok-to-stop 0 ceph osd ok-to-stop 1 ceph osd ok-to-stop 2 ceph osd ok-to-stop 3 expect_failure $dir bad_become_inactive ceph osd ok-to-stop 0 1 ceph osd pool set foo min_size 1 sleep 1 flush_pg_stats wait_for_clean ceph osd ok-to-stop 0 1 ceph osd ok-to-stop 1 2 ceph osd ok-to-stop 2 3 ceph osd ok-to-stop 3 4 expect_failure $dir bad_become_inactive ceph osd ok-to-stop 0 1 2 expect_failure $dir bad_become_inactive ceph osd ok-to-stop 0 1 2 3 } main safe-to-destroy "$@"
2,262
21.63
73
sh
null
ceph-main/src/test/osd/scrubber_generators.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/osd/scrubber_generators.h" #include <fmt/ranges.h> using namespace ScrubGenerator; // ref: PGLogTestRebuildMissing() bufferptr create_object_info(const ScrubGenerator::RealObj& objver) { object_info_t oi{}; oi.soid = objver.ghobj.hobj; oi.version = eversion_t(objver.ghobj.generation, 0); oi.size = objver.data.size; bufferlist bl; oi.encode(bl, 0 /*get_osdmap()->get_features(CEPH_ENTITY_TYPE_OSD, nullptr)*/); bufferptr bp(bl.c_str(), bl.length()); return bp; } std::pair<bufferptr, std::vector<snapid_t>> create_object_snapset( const ScrubGenerator::RealObj& robj, const SnapsetMockData* snapset_mock_data) { if (!snapset_mock_data) { return {bufferptr(), {}}; } /// \todo fill in missing version/osd details from the robj auto sns = snapset_mock_data->make_snapset(); bufferlist bl; encode(sns, bl); bufferptr bp = bufferptr(bl.c_str(), bl.length()); // extract the set of object snaps return {bp, sns.snaps}; } RealObjsConfList ScrubGenerator::make_real_objs_conf( int64_t pool_id, const RealObjsConf& blueprint, std::vector<int32_t> active_osds) { RealObjsConfList all_osds; for (auto osd : active_osds) { RealObjsConfRef this_osd_fakes = std::make_unique<RealObjsConf>(blueprint); // now - fix & corrupt every "object" in the blueprint for (RealObj& robj : this_osd_fakes->objs) { robj.ghobj.hobj.pool = pool_id; } all_osds[osd] = std::move(this_osd_fakes); } return all_osds; // reconsider (maybe add a move ctor?) 
} ///\todo dispose of the created buffer pointers ScrubGenerator::SmapEntry ScrubGenerator::make_smobject( const ScrubGenerator::RealObj& blueprint, int osd_num) { ScrubGenerator::SmapEntry ret; ret.ghobj = blueprint.ghobj; ret.smobj.attrs[OI_ATTR] = create_object_info(blueprint); if (blueprint.snapset_mock_data) { auto [bp, snaps] = create_object_snapset(blueprint, blueprint.snapset_mock_data); ret.smobj.attrs[SS_ATTR] = bp; std::cout << fmt::format("{}: ({}) osd:{} snaps:{}", __func__, ret.ghobj.hobj, osd_num, snaps) << std::endl; } for (const auto& [at_k, at_v] : blueprint.data.attrs) { ret.smobj.attrs[at_k] = ceph::buffer::copy(at_v.c_str(), at_v.size()); { // verifying (to be removed after dev phase) auto bk = ret.smobj.attrs[at_k].begin_deep().get_ptr( ret.smobj.attrs[at_k].length()); std::string bkstr{bk.raw_c_str(), bk.raw_length()}; std::cout << fmt::format("{}: verification: {}", __func__, bkstr) << std::endl; } } ret.smobj.size = blueprint.data.size; ret.smobj.digest = blueprint.data.hash; /// \todo handle the 'present' etc' ret.smobj.object_omap_keys = blueprint.data.omap.size(); ret.smobj.object_omap_bytes = blueprint.data.omap_bytes; return ret; } all_clones_snaps_t ScrubGenerator::all_clones( const ScrubGenerator::RealObj& head_obj) { std::cout << fmt::format("{}: head_obj.ghobj.hobj:{}", __func__, head_obj.ghobj.hobj) << std::endl; std::map<hobject_t, std::vector<snapid_t>> ret; for (const auto& clone : head_obj.snapset_mock_data->clones) { auto clone_set_it = head_obj.snapset_mock_data->clone_snaps.find(clone); if (clone_set_it == head_obj.snapset_mock_data->clone_snaps.end()) { std::cout << "note: no clone_snaps for " << clone << std::endl; continue; } auto clone_set = clone_set_it->second; hobject_t clone_hobj{head_obj.ghobj.hobj}; clone_hobj.snap = clone; ret[clone_hobj] = clone_set_it->second; std::cout << fmt::format("{}: clone:{} clone_set:{}", __func__, clone_hobj, clone_set) << std::endl; } return ret; } void 
ScrubGenerator::add_object(ScrubMap& map, const ScrubGenerator::RealObj& real_obj, int osd_num) { // do we have data corruption recipe for this OSD? /// \todo c++20: use contains() CorruptFunc relevant_fix = crpt_do_nothing; auto p = real_obj.corrupt_funcs->find(osd_num); if (p != real_obj.corrupt_funcs->end()) { // yes, we have a corruption recepie for this OSD // \todo c++20: use at() relevant_fix = p->second; } // create a possibly-corrupted copy of the "real object" auto modified_obj = (relevant_fix)(real_obj, osd_num); std::cout << fmt::format("{}: modified: osd:{} ho:{} key:{}", __func__, osd_num, modified_obj.ghobj.hobj, modified_obj.ghobj.hobj.get_key()) << std::endl; auto entry = make_smobject(modified_obj, osd_num); std::cout << fmt::format("{}: osd:{} smap entry: {} {}", __func__, osd_num, entry.smobj.size, entry.smobj.attrs.size()) << std::endl; map.objects[entry.ghobj.hobj] = entry.smobj; }
4,867
27.804734
79
cc
null
ceph-main/src/test/osd/scrubber_generators.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once /// \file generating scrub-related maps & objects for unit tests #include <functional> #include <map> #include <sstream> #include <string> #include <variant> #include <vector> #include "include/buffer.h" #include "include/buffer_raw.h" #include "include/object_fmt.h" #include "osd/osd_types_fmt.h" #include "osd/scrubber/pg_scrubber.h" namespace ScrubGenerator { /// \todo enhance the MockLog to capture the log messages class MockLog : public LoggerSinkSet { public: void debug(std::stringstream& s) final { std::cout << "\n<<debug>> " << s.str() << std::endl; } void info(std::stringstream& s) final { std::cout << "\n<<info>> " << s.str() << std::endl; } void sec(std::stringstream& s) final { std::cout << "\n<<sec>> " << s.str() << std::endl; } void warn(std::stringstream& s) final { std::cout << "\n<<warn>> " << s.str() << std::endl; } void error(std::stringstream& s) final { err_count++; std::cout << "\n<<error>> " << s.str() << std::endl; } OstreamTemp info() final { return OstreamTemp(CLOG_INFO, this); } OstreamTemp warn() final { return OstreamTemp(CLOG_WARN, this); } OstreamTemp error() final { return OstreamTemp(CLOG_ERROR, this); } OstreamTemp sec() final { return OstreamTemp(CLOG_ERROR, this); } OstreamTemp debug() final { return OstreamTemp(CLOG_DEBUG, this); } void do_log(clog_type prio, std::stringstream& ss) final { switch (prio) { case CLOG_DEBUG: debug(ss); break; case CLOG_INFO: info(ss); break; case CLOG_SEC: sec(ss); break; case CLOG_WARN: warn(ss); break; case CLOG_ERROR: default: error(ss); break; } } void do_log(clog_type prio, const std::string& ss) final { switch (prio) { case CLOG_DEBUG: debug() << ss; break; case CLOG_INFO: info() << ss; break; case CLOG_SEC: sec() << ss; break; case CLOG_WARN: warn() << ss; break; case CLOG_ERROR: default: error() << ss; break; } } virtual ~MockLog() {} int err_count{0}; int expected_err_count{0}; 
void set_expected_err_count(int c) { expected_err_count = c; } }; // ///////////////////////////////////////////////////////////////////////// // // ///////////////////////////////////////////////////////////////////////// // struct pool_conf_t { int pg_num{3}; int pgp_num{3}; int size{3}; int min_size{3}; std::string name{"rep_pool"}; }; using attr_t = std::map<std::string, std::string>; using all_clones_snaps_t = std::map<hobject_t, std::vector<snapid_t>>; struct RealObj; // a function to manipulate (i.e. corrupt) an object in a specific OSD using CorruptFunc = std::function<RealObj(const RealObj& s, [[maybe_unused]] int osd_num)>; using CorruptFuncList = std::map<int, CorruptFunc>; // per OSD struct SnapsetMockData { using CookedCloneSnaps = std::tuple<std::map<snapid_t, uint64_t>, std::map<snapid_t, std::vector<snapid_t>>, std::map<snapid_t, interval_set<uint64_t>>>; // an auxiliary function to cook the data for the SnapsetMockData using clone_snaps_cooker = CookedCloneSnaps (*)(); snapid_t seq; std::vector<snapid_t> snaps; // descending std::vector<snapid_t> clones; // ascending std::map<snapid_t, interval_set<uint64_t>> clone_overlap; // overlap w/ next // newest std::map<snapid_t, uint64_t> clone_size; std::map<snapid_t, std::vector<snapid_t>> clone_snaps; // descending SnapsetMockData(snapid_t seq, std::vector<snapid_t> snaps, std::vector<snapid_t> clones, std::map<snapid_t, interval_set<uint64_t>> clone_overlap, std::map<snapid_t, uint64_t> clone_size, std::map<snapid_t, std::vector<snapid_t>> clone_snaps) : seq(seq) , snaps(snaps) , clones(clones) , clone_overlap(clone_overlap) , clone_size(clone_size) , clone_snaps(clone_snaps) {} SnapsetMockData(snapid_t seq, std::vector<snapid_t> snaps, std::vector<snapid_t> clones, clone_snaps_cooker func) : seq{seq} , snaps{snaps} , clones(clones) { auto [clone_size_, clone_snaps_, clone_overlap_] = func(); clone_size = clone_size_; clone_snaps = clone_snaps_; clone_overlap = clone_overlap_; } SnapSet make_snapset() 
const { SnapSet ss; ss.seq = seq; ss.snaps = snaps; ss.clones = clones; ss.clone_overlap = clone_overlap; ss.clone_size = clone_size; ss.clone_snaps = clone_snaps; return ss; } }; // an object in our "DB" - with its versioned snaps, "data" (size and hash), // and "omap" (size and hash) struct RealData { // not needed at this level of "data falsification": std::byte data; uint64_t size; uint32_t hash; uint32_t omap_digest; uint32_t omap_bytes; attr_t omap; attr_t attrs; }; struct RealObj { // the ghobject - oid, version, snap, hash, pool ghobject_t ghobj; RealData data; const CorruptFuncList* corrupt_funcs; const SnapsetMockData* snapset_mock_data; }; static inline RealObj crpt_do_nothing(const RealObj& s, int osdn) { return s; } struct SmapEntry { ghobject_t ghobj; ScrubMap::object smobj; }; ScrubGenerator::SmapEntry make_smobject( const ScrubGenerator::RealObj& blueprint, // the whole set of versions int osd_num); /** * returns the object's snap-set */ void add_object(ScrubMap& map, const RealObj& obj_versions, int osd_num); struct RealObjsConf { std::vector<RealObj> objs; }; using RealObjsConfRef = std::unique_ptr<RealObjsConf>; // RealObjsConf will be "developed" into the following of per-osd sets, // now with the correct pool ID, and with the corrupting functions // activated on the data using RealObjsConfList = std::map<int, RealObjsConfRef>; RealObjsConfList make_real_objs_conf(int64_t pool_id, const RealObjsConf& blueprint, std::vector<int32_t> active_osds); /** * create the snap-ids set for all clones appearing in the head * object's snapset (those will be injected into the scrubber's mock, * to be used as the 'snap_mapper') */ all_clones_snaps_t all_clones(const RealObj& head_obj); } // namespace ScrubGenerator template <> struct fmt::formatter<ScrubGenerator::RealObj> { constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); } template <typename FormatContext> auto format(const ScrubGenerator::RealObj& rlo, FormatContext& ctx) { using 
namespace ScrubGenerator; return fmt::format_to(ctx.out(), "RealObj(gh:{}, dt:{}, snaps:{})", rlo.ghobj, rlo.data.size, (rlo.snapset_mock_data ? rlo.snapset_mock_data->snaps : std::vector<snapid_t>{})); } };
6,753
24.29588
79
h
null
ceph-main/src/test/osd/scrubber_test_datasets.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /// \file data-sets used by the scrubber unit tests #include "./scrubber_test_datasets.h" using namespace ScrubGenerator; using namespace std::string_literals; namespace ScrubDatasets { static RealObj corrupt_object_size(const RealObj& s, [[maybe_unused]] int osdn) { RealObj ret = s; ret.data.size = s.data.size + 1; return ret; } static RealObj corrupt_nothing(const RealObj& s, int osdn) { return s; } static CorruptFuncList crpt_funcs_set0 = {{0, &corrupt_nothing}}; CorruptFuncList crpt_funcs_set1 = {{0, &corrupt_object_size}, {1, &corrupt_nothing}}; // object with head & two snaps static hobject_t hobj_ms1{object_t{"hobj_ms1"}, "keykey", // key CEPH_NOSNAP, // snap_id 0, // hash 0, // pool ""s}; // nspace SnapsetMockData::CookedCloneSnaps ms1_fn() { std::map<snapid_t, uint64_t> clnsz; clnsz[0x20] = 222; clnsz[0x30] = 333; std::map<snapid_t, std::vector<snapid_t>> clnsn; clnsn[0x20] = {0x20}; clnsn[0x30] = {0x30}; std::map<snapid_t, interval_set<uint64_t>> overlaps; overlaps[0x20] = {}; overlaps[0x30] = {}; return {clnsz, clnsn, overlaps}; } static SnapsetMockData hobj_ms1_snapset{/* seq */ 0x40, /* snaps */ {0x30, 0x20}, /* clones */ {0x20, 0x30}, ms1_fn}; hobject_t hobj_ms1_snp30{object_t{"hobj_ms1"}, "keykey", // key 0x30, // snap_id 0, // hash 0, // pool ""s}; // nspace static hobject_t hobj_ms1_snp20{object_t{"hobj_ms1"}, "keykey", // key 0x20, // snap_id 0, // hash 0, // pool ""s}; // nspace ScrubGenerator::RealObjsConf minimal_snaps_configuration{ /* RealObjsConf::objs */ { /* Clone 30 */ { ghobject_t{hobj_ms1_snp30, 0, shard_id_t{0}}, RealData{ 333, 0x17, 17, 21, attr_t{/*{"_om1k", "om1v"}, {"om1k", "om1v"},*/ {"om3k", "om3v"}}, attr_t{{"_at1k", "_at1v"}, {"_at2k", "at2v"}, {"at3k", "at3v"}}}, &crpt_funcs_set0, nullptr}, /* Clone 20 */ {ghobject_t{hobj_ms1_snp20, 0, shard_id_t{0}}, RealData{222, 0x17, 17, 21, attr_t{/*{"_om1k", "om1v"}, {"om1k", "om1v"},*/ 
{"om3k", "om3v"}}, attr_t{{"_at1k", "_at1v"}, {"_at2k", "at2v"}, {"at3k", "at3v"}}}, &crpt_funcs_set0, nullptr}, /* Head */ {ghobject_t{hobj_ms1, 0, shard_id_t{0}}, RealData{100, 0x17, 17, 21, attr_t{{"_om1k", "om1v"}, {"om1k", "om1v"}, {"om3k", "om3v"}}, attr_t{{"_at1k", "_at1v"}, {"_at2k", "at2v"}, {"at3k", "at3v"}} }, &crpt_funcs_set0, &hobj_ms1_snapset}} }; } // namespace ScrubDatasets
2,725
21.528926
79
cc
null
ceph-main/src/test/osd/scrubber_test_datasets.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once /// \file data-sets used by the scrubber unit tests #include "./scrubber_generators.h" namespace ScrubDatasets { /* * Two objects with some clones. No inconsitencies. */ extern ScrubGenerator::RealObjsConf minimal_snaps_configuration; // and a part of this configuration, one that we will corrupt in a test: extern hobject_t hobj_ms1_snp30; // a manipulation set used in TestTScrubberBe_data_2: extern ScrubGenerator::CorruptFuncList crpt_funcs_set1; } // namespace ScrubDatasets
600
26.318182
72
h
null
ceph-main/src/test/osd/test_ec_transaction.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2016 Red Hat * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <gtest/gtest.h> #include "osd/PGTransaction.h" #include "osd/ECTransaction.h" #include "test/unit.cc" struct mydpp : public DoutPrefixProvider { std::ostream& gen_prefix(std::ostream& out) const override { return out << "foo"; } CephContext *get_cct() const override { return g_ceph_context; } unsigned get_subsys() const override { return ceph_subsys_osd; } } dpp; #define dout_context g_ceph_context TEST(ectransaction, two_writes_separated) { hobject_t h; PGTransactionUPtr t(new PGTransaction); bufferlist a, b; t->create(h); a.append_zero(565760); t->write(h, 0, a.length(), a, 0); b.append_zero(2437120); t->write(h, 669856, b.length(), b, 0); ECUtil::stripe_info_t sinfo(2, 8192); auto plan = ECTransaction::get_write_plan( sinfo, std::move(t), [&](const hobject_t &i) { ECUtil::HashInfoRef ref(new ECUtil::HashInfo(1)); return ref; }, &dpp); generic_derr << "to_read " << plan.to_read << dendl; generic_derr << "will_write " << plan.will_write << dendl; ASSERT_EQ(0u, plan.to_read.size()); ASSERT_EQ(1u, plan.will_write.size()); } TEST(ectransaction, two_writes_nearby) { hobject_t h; PGTransactionUPtr t(new PGTransaction); bufferlist a, b; t->create(h); // two nearby writes, both partly touching the same 8192-byte stripe ECUtil::stripe_info_t sinfo(2, 8192); a.append_zero(565760); t->write(h, 0, a.length(), a, 0); b.append_zero(2437120); t->write(h, 569856, b.length(), b, 0); auto plan = ECTransaction::get_write_plan( sinfo, std::move(t), [&](const hobject_t &i) { ECUtil::HashInfoRef ref(new ECUtil::HashInfo(1)); return ref; }, &dpp); generic_derr << "to_read " << plan.to_read << 
dendl; generic_derr << "will_write " << plan.will_write << dendl; ASSERT_EQ(0u, plan.to_read.size()); ASSERT_EQ(1u, plan.will_write.size()); } TEST(ectransaction, many_writes) { hobject_t h; PGTransactionUPtr t(new PGTransaction); bufferlist a, b; a.append_zero(512); b.append_zero(4096); t->create(h); ECUtil::stripe_info_t sinfo(2, 8192); // write 2801664~512 // write 2802176~512 // write 2802688~512 // write 2803200~512 t->write(h, 2801664, a.length(), a, 0); t->write(h, 2802176, a.length(), a, 0); t->write(h, 2802688, a.length(), a, 0); t->write(h, 2803200, a.length(), a, 0); // write 2805760~4096 // write 2809856~4096 // write 2813952~4096 t->write(h, 2805760, b.length(), b, 0); t->write(h, 2809856, b.length(), b, 0); t->write(h, 2813952, b.length(), b, 0); auto plan = ECTransaction::get_write_plan( sinfo, std::move(t), [&](const hobject_t &i) { ECUtil::HashInfoRef ref(new ECUtil::HashInfo(1)); return ref; }, &dpp); generic_derr << "to_read " << plan.to_read << dendl; generic_derr << "will_write " << plan.will_write << dendl; ASSERT_EQ(0u, plan.to_read.size()); ASSERT_EQ(1u, plan.will_write.size()); }
3,356
25.856
85
cc
null
ceph-main/src/test/osd/test_extent_cache.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2016 Red Hat * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <gtest/gtest.h> #include "osd/ExtentCache.h" #include <iostream> using namespace std; extent_map imap_from_vector(vector<pair<uint64_t, uint64_t> > &&in) { extent_map out; for (auto &&tup: in) { bufferlist bl; bl.append_zero(tup.second); out.insert(tup.first, bl.length(), bl); } return out; } extent_map imap_from_iset(const extent_set &set) { extent_map out; for (auto &&iter: set) { bufferlist bl; bl.append_zero(iter.second); out.insert(iter.first, iter.second, bl); } return out; } extent_set iset_from_vector(vector<pair<uint64_t, uint64_t> > &&in) { extent_set out; for (auto &&tup: in) { out.insert(tup.first, tup.second); } return out; } TEST(extentcache, simple_write) { hobject_t oid; ExtentCache c; ExtentCache::write_pin pin; c.open_write_pin(pin); auto to_read = iset_from_vector( {{0, 2}, {8, 2}, {20, 2}}); auto to_write = iset_from_vector( {{0, 10}, {20, 4}}); auto must_read = c.reserve_extents_for_rmw( oid, pin, to_write, to_read); ASSERT_EQ( must_read, to_read); c.print(std::cerr); auto got = imap_from_iset(must_read); auto pending_read = to_read; pending_read.subtract(must_read); auto pending = c.get_remaining_extents_for_rmw( oid, pin, pending_read); ASSERT_TRUE(pending.empty()); auto write_map = imap_from_iset(to_write); c.present_rmw_update( oid, pin, write_map); c.release_write_pin(pin); } TEST(extentcache, write_write_overlap) { hobject_t oid; ExtentCache c; ExtentCache::write_pin pin; c.open_write_pin(pin); // start write 1 auto to_read = iset_from_vector( {{0, 2}, {8, 2}, {20, 2}}); auto to_write = iset_from_vector( {{0, 10}, {20, 4}}); auto must_read = 
c.reserve_extents_for_rmw( oid, pin, to_write, to_read); ASSERT_EQ( must_read, to_read); c.print(std::cerr); // start write 2 ExtentCache::write_pin pin2; c.open_write_pin(pin2); auto to_read2 = iset_from_vector( {{2, 4}, {10, 4}, {18, 4}}); auto to_write2 = iset_from_vector( {{2, 12}, {18, 12}}); auto must_read2 = c.reserve_extents_for_rmw( oid, pin2, to_write2, to_read2); ASSERT_EQ( must_read2, iset_from_vector({{10, 4}, {18, 2}})); c.print(std::cerr); // complete read for write 1 and start commit auto got = imap_from_iset(must_read); auto pending_read = to_read; pending_read.subtract(must_read); auto pending = c.get_remaining_extents_for_rmw( oid, pin, pending_read); ASSERT_TRUE(pending.empty()); auto write_map = imap_from_iset(to_write); c.present_rmw_update( oid, pin, write_map); c.print(std::cerr); // complete read for write 2 and start commit auto pending_read2 = to_read2; pending_read2.subtract(must_read2); auto pending2 = c.get_remaining_extents_for_rmw( oid, pin2, pending_read2); ASSERT_EQ( pending2, imap_from_iset(pending_read2)); auto write_map2 = imap_from_iset(to_write2); c.present_rmw_update( oid, pin2, write_map2); c.print(std::cerr); c.release_write_pin(pin); c.print(std::cerr); c.release_write_pin(pin2); } TEST(extentcache, write_write_overlap2) { hobject_t oid; ExtentCache c; ExtentCache::write_pin pin; c.open_write_pin(pin); // start write 1 auto to_read = extent_set(); auto to_write = iset_from_vector( {{659456, 4096}}); auto must_read = c.reserve_extents_for_rmw( oid, pin, to_write, to_read); ASSERT_EQ( must_read, to_read); c.print(std::cerr); // start write 2 ExtentCache::write_pin pin2; c.open_write_pin(pin2); auto to_read2 = extent_set(); auto to_write2 = iset_from_vector( {{663552, 4096}}); auto must_read2 = c.reserve_extents_for_rmw( oid, pin2, to_write2, to_read2); ASSERT_EQ( must_read2, to_read2); // start write 3 ExtentCache::write_pin pin3; c.open_write_pin(pin3); auto to_read3 = iset_from_vector({{659456, 8192}}); auto to_write3 = 
iset_from_vector({{659456, 8192}}); auto must_read3 = c.reserve_extents_for_rmw( oid, pin3, to_write3, to_read3); ASSERT_EQ( must_read3, extent_set()); c.print(std::cerr); // complete read for write 1 and start commit auto got = imap_from_iset(must_read); auto pending_read = to_read; pending_read.subtract(must_read); auto pending = c.get_remaining_extents_for_rmw( oid, pin, pending_read); ASSERT_TRUE(pending.empty()); auto write_map = imap_from_iset(to_write); c.present_rmw_update( oid, pin, write_map); c.print(std::cerr); // complete read for write 2 and start commit auto pending_read2 = to_read2; pending_read2.subtract(must_read2); auto pending2 = c.get_remaining_extents_for_rmw( oid, pin2, pending_read2); ASSERT_EQ( pending2, imap_from_iset(pending_read2)); auto write_map2 = imap_from_iset(to_write2); c.present_rmw_update( oid, pin2, write_map2); // complete read for write 2 and start commit auto pending_read3 = to_read3; pending_read3.subtract(must_read3); auto pending3 = c.get_remaining_extents_for_rmw( oid, pin3, pending_read3); ASSERT_EQ( pending3, imap_from_iset(pending_read3)); auto write_map3 = imap_from_iset(to_write3); c.present_rmw_update( oid, pin3, write_map3); c.print(std::cerr); c.release_write_pin(pin); c.print(std::cerr); c.release_write_pin(pin2); c.print(std::cerr); c.release_write_pin(pin3); }
5,929
19.954064
70
cc
null
ceph-main/src/test/osd/test_pg_transaction.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2016 Red Hat * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <gtest/gtest.h> #include "osd/PGTransaction.h" using namespace std; TEST(pgtransaction, simple) { hobject_t h; PGTransaction t; ASSERT_TRUE(t.empty()); t.nop(h); ASSERT_FALSE(t.empty()); unsigned num = 0; t.safe_create_traverse( [&](const pair<const hobject_t, PGTransaction::ObjectOperation> &p) { ASSERT_EQ(p.first, h); using T = PGTransaction::ObjectOperation::Init; ASSERT_TRUE(boost::get<T::None>(&p.second.init_type)); ++num; }); ASSERT_EQ(num, 1u); } TEST(pgtransaction, clone_safe_create_traverse) { hobject_t h, h2; h2.snap = 1; PGTransaction t; ASSERT_TRUE(t.empty()); t.nop(h2); ASSERT_FALSE(t.empty()); t.clone(h, h2); unsigned num = 0; t.safe_create_traverse( [&](const pair<const hobject_t, PGTransaction::ObjectOperation> &p) { using T = PGTransaction::ObjectOperation::Init; if (num == 0) { ASSERT_EQ(p.first, h); ASSERT_TRUE(boost::get<T::Clone>(&p.second.init_type)); ASSERT_EQ( boost::get<T::Clone>(&p.second.init_type)->source, h2); } else if (num == 1) { ASSERT_EQ(p.first, h2); ASSERT_TRUE(boost::get<T::None>(&p.second.init_type)); } else { ASSERT_LT(num, 2u); } ++num; }); } TEST(pgtransaction, clone_safe_create_traverse2) { hobject_t h, h2, h3; h.snap = 10; h2.snap = 5; h3.snap = 3; PGTransaction t; ASSERT_TRUE(t.empty()); t.nop(h3); ASSERT_FALSE(t.empty()); t.clone(h, h2); t.remove(h2); t.clone(h2, h3); unsigned num = 0; t.safe_create_traverse( [&](const pair<const hobject_t, PGTransaction::ObjectOperation> &p) { using T = PGTransaction::ObjectOperation::Init; if (num == 0) { ASSERT_EQ(p.first, h); ASSERT_TRUE(boost::get<T::Clone>(&p.second.init_type)); ASSERT_EQ( 
boost::get<T::Clone>(&p.second.init_type)->source, h2); } else if (num == 1) { ASSERT_EQ(p.first, h2); ASSERT_TRUE(boost::get<T::Clone>(&p.second.init_type)); ASSERT_EQ( boost::get<T::Clone>(&p.second.init_type)->source, h3); } else if (num == 2) { ASSERT_EQ(p.first, h3); ASSERT_TRUE(boost::get<T::None>(&p.second.init_type)); } else { ASSERT_LT(num, 3u); } ++num; }); } TEST(pgtransaction, clone_safe_create_traverse3) { hobject_t h, h2, h3; h.snap = 10; h2.snap = 5; h3.snap = 3; PGTransaction t; t.remove(h); t.remove(h2); t.clone(h2, h3); unsigned num = 0; t.safe_create_traverse( [&](const pair<const hobject_t, PGTransaction::ObjectOperation> &p) { using T = PGTransaction::ObjectOperation::Init; if (p.first == h) { ASSERT_TRUE(p.second.is_delete()); } else if (p.first == h2) { ASSERT_TRUE(boost::get<T::Clone>(&p.second.init_type)); ASSERT_EQ( boost::get<T::Clone>(&p.second.init_type)->source, h3); } ASSERT_LT(num, 2u); ++num; }); }
3,250
23.628788
73
cc
null
ceph-main/src/test/osd/test_scrub_sched.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /// \file testing the scrub scheduling algorithm #include <gtest/gtest.h> #include <algorithm> #include <map> #include "common/async/context_pool.h" #include "common/ceph_argparse.h" #include "global/global_context.h" #include "global/global_init.h" #include "include/utime_fmt.h" #include "mon/MonClient.h" #include "msg/Messenger.h" #include "os/ObjectStore.h" #include "osd/PG.h" #include "osd/osd_types.h" #include "osd/osd_types_fmt.h" #include "osd/scrubber/osd_scrub_sched.h" #include "osd/scrubber_common.h" int main(int argc, char** argv) { std::map<std::string, std::string> defaults = { // make sure we have 3 copies, or some tests won't work {"osd_pool_default_size", "3"}, // our map is flat, so just try and split across OSDs, not hosts or whatever {"osd_crush_chooseleaf_type", "0"}, }; std::vector<const char*> args(argv, argv + argc); auto cct = global_init(&defaults, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); common_init_finish(g_ceph_context); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } using schedule_result_t = Scrub::schedule_result_t; using ScrubJobRef = ScrubQueue::ScrubJobRef; using qu_state_t = ScrubQueue::qu_state_t; /// enabling access into ScrubQueue internals class ScrubSchedTestWrapper : public ScrubQueue { public: ScrubSchedTestWrapper(Scrub::ScrubSchedListener& osds) : ScrubQueue(g_ceph_context, osds) {} void rm_unregistered_jobs() { ScrubQueue::rm_unregistered_jobs(to_scrub); ScrubQueue::rm_unregistered_jobs(penalized); } ScrubQContainer collect_ripe_jobs() { return ScrubQueue::collect_ripe_jobs(to_scrub, time_now()); } /** * unit-test support for faking the current time. 
When * not activated specifically - the default is to use ceph_clock_now() */ void set_time_for_testing(long faked_now) { m_time_for_testing = utime_t{timeval{faked_now}}; } void clear_time_for_testing() { m_time_for_testing.reset(); } mutable std::optional<utime_t> m_time_for_testing; utime_t time_now() const final { if (m_time_for_testing) { m_time_for_testing->tv.tv_nsec += 1'000'000; } return m_time_for_testing.value_or(ceph_clock_now()); } ~ScrubSchedTestWrapper() override = default; }; /** * providing the small number of OSD services used when scheduling * a scrub */ class FakeOsd : public Scrub::ScrubSchedListener { public: FakeOsd(int osd_num) : m_osd_num(osd_num) {} int get_nodeid() const final { return m_osd_num; } schedule_result_t initiate_a_scrub(spg_t pgid, bool allow_requested_repair_only) final { std::ignore = allow_requested_repair_only; auto res = m_next_response.find(pgid); if (res == m_next_response.end()) { return schedule_result_t::no_such_pg; } return m_next_response[pgid]; } void set_initiation_response(spg_t pgid, schedule_result_t result) { m_next_response[pgid] = result; } private: int m_osd_num; std::map<spg_t, schedule_result_t> m_next_response; }; /// the static blueprint for creating a scrub job in the scrub queue struct sjob_config_t { spg_t spg; bool are_stats_valid; utime_t history_scrub_stamp; std::optional<double> pool_conf_min; std::optional<double> pool_conf_max; bool is_must; bool is_need_auto; ScrubQueue::scrub_schedule_t initial_schedule; }; /** * the runtime configuration for a scrub job. 
Created basde on the blueprint * above (sjob_config_t) */ struct sjob_dynamic_data_t { sjob_config_t initial_config; pg_info_t mocked_pg_info; pool_opts_t mocked_pool_opts; requested_scrub_t request_flags; ScrubQueue::ScrubJobRef job; }; class TestScrubSched : public ::testing::Test { public: TestScrubSched() = default; protected: int m_osd_num{1}; FakeOsd m_osds{m_osd_num}; std::unique_ptr<ScrubSchedTestWrapper> m_sched{ new ScrubSchedTestWrapper(m_osds)}; /// the pg-info is queried for stats validity and for the last-scrub-stamp pg_info_t pg_info{}; /// the pool configuration holds some per-pool scrub timing settings pool_opts_t pool_opts{}; /** * the scrub-jobs created for the tests, along with their corresponding * "pg info" and pool configuration. In real life - the scrub jobs * are owned by the respective PGs. */ std::vector<sjob_dynamic_data_t> m_scrub_jobs; protected: sjob_dynamic_data_t create_scrub_job(const sjob_config_t& sjob_data) { sjob_dynamic_data_t dyn_data; dyn_data.initial_config = sjob_data; // populate the 'pool options' object with the scrub timing settings if (sjob_data.pool_conf_min) { dyn_data.mocked_pool_opts.set<double>(pool_opts_t::SCRUB_MIN_INTERVAL, sjob_data.pool_conf_min.value()); } if (sjob_data.pool_conf_max) { dyn_data.mocked_pool_opts.set(pool_opts_t::SCRUB_MAX_INTERVAL, sjob_data.pool_conf_max.value()); } // create the 'pg info' object with the stats dyn_data.mocked_pg_info = pg_info_t{sjob_data.spg}; dyn_data.mocked_pg_info.history.last_scrub_stamp = sjob_data.history_scrub_stamp; dyn_data.mocked_pg_info.stats.stats_invalid = !sjob_data.are_stats_valid; // fake hust the required 'requested-scrub' flags std::cout << "request_flags: sjob_data.is_must " << sjob_data.is_must << std::endl; dyn_data.request_flags.must_scrub = sjob_data.is_must; dyn_data.request_flags.need_auto = sjob_data.is_need_auto; // create the scrub job dyn_data.job = ceph::make_ref<ScrubQueue::ScrubJob>(g_ceph_context, sjob_data.spg, m_osd_num); 
m_scrub_jobs.push_back(dyn_data); return dyn_data; } void register_job_set(const std::vector<sjob_config_t>& job_configs) { std::for_each(job_configs.begin(), job_configs.end(), [this](const sjob_config_t& sj) { auto dynjob = create_scrub_job(sj); m_sched->register_with_osd( dynjob.job, m_sched->determine_scrub_time(dynjob.request_flags, dynjob.mocked_pg_info, dynjob.mocked_pool_opts)); }); } /// count the scrub-jobs that are currently in a specific state int count_scrub_jobs_in_state(qu_state_t state) { return std::count_if(m_scrub_jobs.begin(), m_scrub_jobs.end(), [state](const sjob_dynamic_data_t& sj) { return sj.job->state == state; }); } void list_testers_jobs(std::string hdr) { std::cout << fmt::format("{}: {} jobs created for the test:", hdr, m_scrub_jobs.size()) << std::endl; for (const auto& job : m_scrub_jobs) { std::cout << fmt::format("\t{}: job {}", hdr, *job.job) << std::endl; } } void print_all_states(std::string hdr) { std::cout << fmt::format( "{}: Created:{}. Per state: not-reg:{} reg:{} unreg:{}", hdr, m_scrub_jobs.size(), count_scrub_jobs_in_state(qu_state_t::not_registered), count_scrub_jobs_in_state(qu_state_t::registered), count_scrub_jobs_in_state(qu_state_t::unregistering)) << std::endl; } void debug_print_jobs(std::string hdr, const ScrubQueue::ScrubQContainer& jobs) { std::cout << fmt::format("{}: time now {}", hdr, m_sched->time_now()) << std::endl; for (const auto& job : jobs) { std::cout << fmt::format( "\t{}: job {} ({}): scheduled {}", hdr, job->pgid, job->scheduling_state(m_sched->time_now(), false), job->get_sched_time()) << std::endl; } } }; // /////////////////////////////////////////////////////////////////////////// // test data. 
Scrub-job creation requires a PG-id, and a set of 'scrub request' // flags namespace { // the times used during the tests are offset to 1.1.2000, so that // utime_t formatting will treat them as absolute (not as a relative time) static const auto epoch_2000 = 946'684'800; std::vector<sjob_config_t> sjob_configs = { { spg_t{pg_t{1, 1}}, true, // PG has valid stats utime_t{std::time_t(epoch_2000 + 1'000'000), 0}, // last-scrub-stamp 100.0, // min scrub delay in pool config std::nullopt, // max scrub delay in pool config false, // must-scrub false, // need-auto ScrubQueue::scrub_schedule_t{} // initial schedule }, {spg_t{pg_t{4, 1}}, true, utime_t{epoch_2000 + 1'000'000, 0}, 100.0, std::nullopt, true, false, ScrubQueue::scrub_schedule_t{}}, {spg_t{pg_t{7, 1}}, true, utime_t{}, 1.0, std::nullopt, false, false, ScrubQueue::scrub_schedule_t{}}, {spg_t{pg_t{5, 1}}, true, utime_t{epoch_2000 + 1'900'000, 0}, 1.0, std::nullopt, false, false, ScrubQueue::scrub_schedule_t{}}}; } // anonymous namespace // //////////////////////////// tests //////////////////////////////////////// /// basic test: scheduling simple jobs, validating their calculated schedule TEST_F(TestScrubSched, populate_queue) { ASSERT_EQ(0, m_sched->list_registered_jobs().size()); auto dynjob_0 = create_scrub_job(sjob_configs[0]); auto suggested = m_sched->determine_scrub_time(dynjob_0.request_flags, dynjob_0.mocked_pg_info, dynjob_0.mocked_pool_opts); m_sched->register_with_osd(dynjob_0.job, suggested); std::cout << fmt::format("scheduled at: {}", dynjob_0.job->get_sched_time()) << std::endl; auto dynjob_1 = create_scrub_job(sjob_configs[1]); suggested = m_sched->determine_scrub_time(dynjob_1.request_flags, dynjob_1.mocked_pg_info, dynjob_1.mocked_pool_opts); m_sched->register_with_osd(dynjob_1.job, suggested); std::cout << fmt::format("scheduled at: {}", dynjob_1.job->get_sched_time()) << std::endl; EXPECT_EQ(dynjob_1.job->get_sched_time(), utime_t(1, 1)); EXPECT_EQ(2, 
m_sched->list_registered_jobs().size()); } /// validate the states of the scrub-jobs (as set in the jobs themselves) TEST_F(TestScrubSched, states) { m_sched->set_time_for_testing(epoch_2000); register_job_set(sjob_configs); list_testers_jobs("testing states"); EXPECT_EQ(sjob_configs.size(), m_sched->list_registered_jobs().size()); // check the initial state of the jobs print_all_states("<initial state>"); m_sched->rm_unregistered_jobs(); EXPECT_EQ(0, count_scrub_jobs_in_state(qu_state_t::not_registered)); // now - remove a couple of them m_sched->remove_from_osd_queue(m_scrub_jobs[2].job); m_sched->remove_from_osd_queue(m_scrub_jobs[1].job); m_sched->remove_from_osd_queue(m_scrub_jobs[2].job); // should have no effect print_all_states("<w/ 2 jobs removed>"); EXPECT_EQ(2, count_scrub_jobs_in_state(qu_state_t::registered)); EXPECT_EQ(2, count_scrub_jobs_in_state(qu_state_t::unregistering)); m_sched->rm_unregistered_jobs(); EXPECT_EQ(2, count_scrub_jobs_in_state(qu_state_t::not_registered)); std::cout << fmt::format("inp size: {}. 
In list-registered: {}", sjob_configs.size(), m_sched->list_registered_jobs().size()) << std::endl; EXPECT_EQ(sjob_configs.size() - 2, m_sched->list_registered_jobs().size()); } /// jobs that are ripe should be in the ready list, sorted by their scheduled /// time TEST_F(TestScrubSched, ready_list) { m_sched->set_time_for_testing(epoch_2000 + 900'000); register_job_set(sjob_configs); list_testers_jobs("testing states"); EXPECT_EQ(sjob_configs.size(), m_sched->list_registered_jobs().size()); m_sched->set_time_for_testing(epoch_2000 + 1'000'000); auto all_reg_jobs = m_sched->list_registered_jobs(); debug_print_jobs("registered", all_reg_jobs); auto ripe_jobs = m_sched->collect_ripe_jobs(); EXPECT_EQ(2, ripe_jobs.size()); debug_print_jobs("ready_list", ripe_jobs); m_sched->set_time_for_testing(epoch_2000 + 3'000'000); // all jobs should be in the ready list ripe_jobs = m_sched->collect_ripe_jobs(); EXPECT_EQ(4, ripe_jobs.size()); debug_print_jobs("ready_list", ripe_jobs); }
12,036
28.868486
80
cc
null
ceph-main/src/test/osd/test_scrubber_be.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "./scrubber_generators.h" #include "./scrubber_test_datasets.h" #include <gtest/gtest.h> #include <signal.h> #include <stdio.h> #include <fmt/ranges.h> #include "common/async/context_pool.h" #include "common/ceph_argparse.h" #include "global/global_context.h" #include "global/global_init.h" #include "mon/MonClient.h" #include "msg/Messenger.h" #include "os/ObjectStore.h" #include "osd/PG.h" #include "osd/PGBackend.h" #include "osd/PrimaryLogPG.h" #include "osd/osd_types.h" #include "osd/osd_types_fmt.h" #include "osd/scrubber/pg_scrubber.h" #include "osd/scrubber/scrub_backend.h" /// \file testing isolated parts of the Scrubber backend using namespace std::string_literals; int main(int argc, char** argv) { std::map<std::string, std::string> defaults = { // make sure we have 3 copies, or some tests won't work {"osd_pool_default_size", "3"}, // our map is flat, so just try and split across OSDs, not hosts or whatever {"osd_crush_chooseleaf_type", "0"}, }; std::vector<const char*> args(argv, argv + argc); auto cct = global_init(&defaults, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); common_init_finish(g_ceph_context); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } class TestScrubBackend : public ScrubBackend { public: TestScrubBackend(ScrubBeListener& scrubber, PgScrubBeListener& pg, pg_shard_t i_am, bool repair, scrub_level_t shallow_or_deep, const std::set<pg_shard_t>& acting) : ScrubBackend(scrubber, pg, i_am, repair, shallow_or_deep, acting) {} bool get_m_repair() const { return m_repair; } bool get_is_replicated() const { return m_is_replicated; } auto get_omap_stats() const { return m_omap_stats; } const std::vector<pg_shard_t>& all_but_me() const { return m_acting_but_me; } /// populate the scrub-maps set for the 'chunk' being scrubbed void insert_faked_smap(pg_shard_t shard, const 
ScrubMap& smap); }; // mocking the PG class TestPg : public PgScrubBeListener { public: ~TestPg() = default; TestPg(std::shared_ptr<PGPool> pool, pg_info_t& pginfo, pg_shard_t my_osd) : m_pool{pool} , m_info{pginfo} , m_pshard{my_osd} {} const PGPool& get_pgpool() const final { return *(m_pool.get()); } pg_shard_t get_primary() const final { return m_pshard; } void force_object_missing(ScrubberPasskey, const std::set<pg_shard_t>& peer, const hobject_t& oid, eversion_t version) final {} const pg_info_t& get_pg_info(ScrubberPasskey) const final { return m_info; } uint64_t logical_to_ondisk_size(uint64_t logical_size) const final { return logical_size; } bool is_waiting_for_unreadable_object() const final { return false; } std::shared_ptr<PGPool> m_pool; pg_info_t& m_info; pg_shard_t m_pshard; }; // /////////////////////////////////////////////////////////////////////////// // /////////////////////////////////////////////////////////////////////////// // and the scrubber class TestScrubber : public ScrubBeListener, public Scrub::SnapMapReaderI { using result_t = Scrub::SnapMapReaderI::result_t; public: ~TestScrubber() = default; TestScrubber(spg_t spg, OSDMapRef osdmap, LoggerSinkSet& logger) : m_spg{spg} , m_logger{logger} , m_osdmap{osdmap} {} std::ostream& gen_prefix(std::ostream& out) const final { return out; } CephContext* get_pg_cct() const final { return g_ceph_context; } LoggerSinkSet& get_logger() const final { return m_logger; } bool is_primary() const final { return m_primary; } spg_t get_pgid() const final { return m_info.pgid; } const OSDMapRef& get_osdmap() const final { return m_osdmap; } void add_to_stats(const object_stat_sum_t& stat) final { m_stats.add(stat); } // submit_digest_fixes() mock can be set to expect a specific set of // fixes to perform. /// \todo implement the mock. 
void submit_digest_fixes(const digests_fixes_t& fixes) final { std::cout << fmt::format("{} submit_digest_fixes({})", __func__, fmt::join(fixes, ",")) << std::endl; } int get_snaps(const hobject_t& hoid, std::set<snapid_t>* snaps_set) const; tl::expected<std::set<snapid_t>, result_t> get_snaps( const hobject_t& oid) const final; tl::expected<std::set<snapid_t>, result_t> get_snaps_check_consistency( const hobject_t& oid) const final { /// \todo for now return get_snaps(oid); } void set_snaps(const hobject_t& hoid, const std::vector<snapid_t>& snaps) { std::cout << fmt::format("{}: ({}) -> #{} {}", __func__, hoid, snaps.size(), snaps) << std::endl; std::set<snapid_t> snaps_set(snaps.begin(), snaps.end()); m_snaps[hoid] = snaps_set; } void set_snaps(const ScrubGenerator::all_clones_snaps_t& clones_snaps) { for (const auto& [clone, snaps] : clones_snaps) { std::cout << fmt::format("{}: ({}) -> #{} {}", __func__, clone, snaps.size(), snaps) << std::endl; std::set<snapid_t> snaps_set(snaps.begin(), snaps.end()); m_snaps[clone] = snaps_set; } } bool m_primary{true}; spg_t m_spg; LoggerSinkSet& m_logger; OSDMapRef m_osdmap; pg_info_t m_info; object_stat_sum_t m_stats; // the "snap-mapper" database (returned by get_snaps()) std::map<hobject_t, std::set<snapid_t>> m_snaps; }; int TestScrubber::get_snaps(const hobject_t& hoid, std::set<snapid_t>* snaps_set) const { auto it = m_snaps.find(hoid); if (it == m_snaps.end()) { std::cout << fmt::format("{}: ({}) no snaps", __func__, hoid) << std::endl; return -ENOENT; } *snaps_set = it->second; std::cout << fmt::format("{}: ({}) -> #{} {}", __func__, hoid, snaps_set->size(), *snaps_set) << std::endl; return 0; } tl::expected<std::set<snapid_t>, Scrub::SnapMapReaderI::result_t> TestScrubber::get_snaps(const hobject_t& oid) const { std::set<snapid_t> snapset; auto r = get_snaps(oid, &snapset); if (r >= 0) { return snapset; } return tl::make_unexpected(Scrub::SnapMapReaderI::result_t{ 
Scrub::SnapMapReaderI::result_t::code_t::not_found, r}); } // /////////////////////////////////////////////////////////////////////////// // /////////////////////////////////////////////////////////////////////////// /// parameters for TestTScrubberBe construction struct TestTScrubberBeParams { ScrubGenerator::pool_conf_t pool_conf; ScrubGenerator::RealObjsConf objs_conf; int num_osds; }; // /////////////////////////////////////////////////////////////////////////// // /////////////////////////////////////////////////////////////////////////// // the actual owner of the OSD "objects" that are used by // the mockers class TestTScrubberBe : public ::testing::Test { public: // the test data source virtual TestTScrubberBeParams inject_params() = 0; // initial test data ScrubGenerator::MockLog logger; ScrubGenerator::pool_conf_t pool_conf; ScrubGenerator::RealObjsConf real_objs; int num_osds{0}; // ctor & initialization TestTScrubberBe() = default; ~TestTScrubberBe() = default; void SetUp() override; void TearDown() override; /** * Create the set of scrub-maps supposedly sent by the replica (or * generated by the Primary). Then - create the snap-sets for all * the objects in the set. */ void fake_a_scrub_set(ScrubGenerator::RealObjsConfList& all_sets); std::unique_ptr<TestScrubBackend> sbe; spg_t spg; pg_shard_t i_am; // set to 'my osd and no shard' std::set<pg_shard_t> acting_shards; std::vector<int> acting_osds; int acting_primary; std::unique_ptr<TestScrubber> test_scrubber; int64_t pool_id; pg_pool_t pool_info; OSDMapRef osdmap; std::shared_ptr<PGPool> pool; pg_info_t info; std::unique_ptr<TestPg> test_pg; // generated sets of "objects" for the active OSDs ScrubGenerator::RealObjsConfList real_objs_list; protected: /** * Create the OSDmap and populate it with one pool, based on * the pool configuration. * For now - only replicated pools are supported. 
*/ OSDMapRef setup_map(int num_osds, const ScrubGenerator::pool_conf_t& pconf); /** * Create a PG in the one pool we have. Fake the PG info. * Use the primary of the PG to determine "who we are". * * \returns the PG info */ pg_info_t setup_pg_in_map(); }; // /////////////////////////////////////////////////////////////////////////// // /////////////////////////////////////////////////////////////////////////// void TestTScrubberBe::SetUp() { std::cout << "TestTScrubberBe::SetUp()" << std::endl; logger.err_count = 0; // fetch test configuration auto params = inject_params(); pool_conf = params.pool_conf; real_objs = params.objs_conf; num_osds = params.num_osds; // create the OSDMap osdmap = setup_map(num_osds, pool_conf); std::cout << "osdmap: " << *osdmap << std::endl; // extract the pool from the osdmap pool_id = osdmap->lookup_pg_pool_name(pool_conf.name); const pg_pool_t* ext_pool_info = osdmap->get_pg_pool(pool_id); pool = std::make_shared<PGPool>(osdmap, pool_id, *ext_pool_info, pool_conf.name); std::cout << "pool: " << pool->info << std::endl; // a PG in that pool? info = setup_pg_in_map(); std::cout << fmt::format("PG info: {}", info) << std::endl; real_objs_list = ScrubGenerator::make_real_objs_conf(pool_id, real_objs, acting_osds); // now we can create the main mockers // the "PgScrubber" test_scrubber = std::make_unique<TestScrubber>(spg, osdmap, logger); // the "PG" (and its backend) test_pg = std::make_unique<TestPg>(pool, info, i_am); std::cout << fmt::format("{}: acting: {}", __func__, acting_shards) << std::endl; sbe = std::make_unique<TestScrubBackend>(*test_scrubber, *test_pg, i_am, /* repair? */ false, scrub_level_t::deep, acting_shards); // create a osd-num only copy of the relevant OSDs acting_osds.reserve(acting_shards.size()); for (const auto& shard : acting_shards) { acting_osds.push_back(shard.osd); } sbe->new_chunk(); fake_a_scrub_set(real_objs_list); } // Note: based on TestOSDMap.cc. 
OSDMapRef TestTScrubberBe::setup_map(int num_osds, const ScrubGenerator::pool_conf_t& pconf) { auto osdmap = std::make_shared<OSDMap>(); uuid_d fsid; osdmap->build_simple(g_ceph_context, 0, fsid, num_osds); OSDMap::Incremental pending_inc(osdmap->get_epoch() + 1); pending_inc.fsid = osdmap->get_fsid(); entity_addrvec_t sample_addrs; sample_addrs.v.push_back(entity_addr_t()); uuid_d sample_uuid; for (int i = 0; i < num_osds; ++i) { sample_uuid.generate_random(); sample_addrs.v[0].nonce = i; pending_inc.new_state[i] = CEPH_OSD_EXISTS | CEPH_OSD_NEW; pending_inc.new_up_client[i] = sample_addrs; pending_inc.new_up_cluster[i] = sample_addrs; pending_inc.new_hb_back_up[i] = sample_addrs; pending_inc.new_hb_front_up[i] = sample_addrs; pending_inc.new_weight[i] = CEPH_OSD_IN; pending_inc.new_uuid[i] = sample_uuid; } osdmap->apply_incremental(pending_inc); // create a replicated pool OSDMap::Incremental new_pool_inc(osdmap->get_epoch() + 1); new_pool_inc.new_pool_max = osdmap->get_pool_max(); new_pool_inc.fsid = osdmap->get_fsid(); uint64_t pool_id = ++new_pool_inc.new_pool_max; pg_pool_t empty; auto p = new_pool_inc.get_new_pool(pool_id, &empty); p->size = pconf.size; p->set_pg_num(pconf.pg_num); p->set_pgp_num(pconf.pgp_num); p->type = pg_pool_t::TYPE_REPLICATED; p->crush_rule = 0; p->set_flag(pg_pool_t::FLAG_HASHPSPOOL); new_pool_inc.new_pool_names[pool_id] = pconf.name; osdmap->apply_incremental(new_pool_inc); return osdmap; } pg_info_t TestTScrubberBe::setup_pg_in_map() { pg_t rawpg(0, pool_id); pg_t pgid = osdmap->raw_pg_to_pg(rawpg); std::vector<int> up_osds; int up_primary; osdmap->pg_to_up_acting_osds(pgid, &up_osds, &up_primary, &acting_osds, &acting_primary); std::cout << fmt::format( "{}: pg: {} up_osds: {} up_primary: {} acting_osds: {} " "acting_primary: " "{}", __func__, pgid, up_osds, up_primary, acting_osds, acting_primary) << std::endl; spg = spg_t{pgid}; i_am = pg_shard_t{up_primary}; std::cout << fmt::format("{}: spg: {} and I am {}", __func__, spg, 
i_am) << std::endl; // the 'acting shards' set - the one actually used by the scrubber std::for_each(acting_osds.begin(), acting_osds.end(), [&](int osd) { acting_shards.insert(pg_shard_t{osd}); }); std::cout << fmt::format("{}: acting_shards: {}", __func__, acting_shards) << std::endl; pg_info_t info; info.pgid = spg; /// \todo: handle the epochs: // info.last_update = osdmap->get_epoch(); // info.last_complete = osdmap->get_epoch(); // info.last_osdmap_epoch = osdmap->get_epoch(); // info.history.last_epoch_marked_removed = osdmap->get_epoch(); info.last_user_version = 1; info.purged_snaps = {}; info.last_user_version = 1; info.history.last_epoch_clean = osdmap->get_epoch(); info.history.last_epoch_split = osdmap->get_epoch(); info.history.last_epoch_marked_full = osdmap->get_epoch(); info.last_backfill = hobject_t::get_max(); return info; } void TestTScrubberBe::TearDown() { EXPECT_EQ(logger.err_count, logger.expected_err_count); } void TestTScrubberBe::fake_a_scrub_set( ScrubGenerator::RealObjsConfList& all_sets) { for (int osd_num = 0; osd_num < pool_conf.size; ++osd_num) { ScrubMap smap; smap.valid_through = eversion_t{1, 1}; smap.incr_since = eversion_t{1, 1}; smap.has_omap_keys = true; // to force omap checks // fill the map with the objects relevant to this OSD for (auto& obj : all_sets[osd_num]->objs) { std::cout << fmt::format("{}: object: {}", __func__, obj.ghobj.hobj) << std::endl; ScrubGenerator::add_object(smap, obj, osd_num); } std::cout << fmt::format("{}: {} inserting smap {:D}", __func__, osd_num, smap) << std::endl; sbe->insert_faked_smap(pg_shard_t{osd_num}, smap); } // create the snap_mapper state for (const auto& robj : all_sets[i_am.osd]->objs) { std::cout << fmt::format("{}: object: {}", __func__, robj.ghobj.hobj) << std::endl; if (robj.ghobj.hobj.snap == CEPH_NOSNAP) { // head object auto objects_snapset = ScrubGenerator::all_clones(robj); test_scrubber->set_snaps(objects_snapset); } } } void TestScrubBackend::insert_faked_smap(pg_shard_t 
shard, const ScrubMap& smap) { ASSERT_TRUE(this_chunk.has_value()); std::cout << fmt::format("{}: inserting faked smap for osd {}", __func__, shard.osd) << std::endl; this_chunk->received_maps[shard] = smap; } // /////////////////////////////////////////////////////////////////////////// // /////////////////////////////////////////////////////////////////////////// using namespace ScrubGenerator; class TestTScrubberBe_data_1 : public TestTScrubberBe { public: TestTScrubberBe_data_1() : TestTScrubberBe() {} // test configuration pool_conf_t pl{3, 3, 3, 3, "rep_pool"}; TestTScrubberBeParams inject_params() override { std::cout << fmt::format("{}: injecting params (minimal snaps conf.)", __func__) << std::endl; return TestTScrubberBeParams{ /* pool_conf */ pl, /* real_objs_conf */ ScrubDatasets::minimal_snaps_configuration, /*num_osds */ 3}; } }; // some basic sanity checks // (mainly testing the constructor) TEST_F(TestTScrubberBe_data_1, creation_1) { /// \todo copy some osdmap tests from TestOSDMap.cc ASSERT_TRUE(sbe); ASSERT_TRUE(sbe->get_is_replicated()); ASSERT_FALSE(sbe->get_m_repair()); sbe->update_repair_status(true); ASSERT_TRUE(sbe->get_m_repair()); // make sure *I* do not appear in 'all_but_me' set of OSDs auto others = sbe->all_but_me(); auto in_others = std::find(others.begin(), others.end(), i_am); EXPECT_EQ(others.end(), in_others); } TEST_F(TestTScrubberBe_data_1, smaps_creation_1) { ASSERT_TRUE(sbe); ASSERT_EQ(sbe->get_omap_stats().omap_bytes, 0); // for test data 'minimal_snaps_configuration': // scrub_compare_maps() should not emmit any error, nor // return any snap-mapper fix auto [incons, fix_list] = sbe->scrub_compare_maps(true, *test_scrubber); EXPECT_EQ(fix_list.size(), 0); // snap-mapper fix should be empty EXPECT_EQ(incons.size(), 0); // no inconsistency // make sure the test did execute *something* EXPECT_TRUE(sbe->get_omap_stats().omap_bytes != 0); } // whitebox testing (OK if failing after a change to the backend internals) // blackbox 
testing - testing the published functionality // (should not depend on internals of the backend) /// corrupt the snap_mapper data TEST_F(TestTScrubberBe_data_1, snapmapper_1) { using snap_mapper_op_t = Scrub::snap_mapper_op_t; ASSERT_TRUE(sbe); // a bogus version of hobj_ms1_snp30 (a clone) snap_ids hobject_t hobj_ms1_snp30_inpool = hobject_t{ScrubDatasets::hobj_ms1_snp30}; hobj_ms1_snp30_inpool.pool = pool_id; all_clones_snaps_t bogus_30; bogus_30[hobj_ms1_snp30_inpool] = {0x333, 0x666}; test_scrubber->set_snaps(bogus_30); auto [incons, fix_list] = sbe->scrub_compare_maps(true, *test_scrubber); EXPECT_EQ(fix_list.size(), 1); // debug - print the fix-list: for (const auto& fix : fix_list) { std::cout << fmt::format("snapmapper_1: fix {}: {} {}->{}", fix.hoid, (fix.op == snap_mapper_op_t::add ? "add" : "upd"), fix.wrong_snaps, fix.snaps) << std::endl; } EXPECT_EQ(fix_list[0].hoid, hobj_ms1_snp30_inpool); EXPECT_EQ(fix_list[0].snaps, std::set<snapid_t>{0x30}); EXPECT_EQ(incons.size(), 0); // no inconsistency } // a dataset similar to 'minimal_snaps_configuration', // but with the hobj_ms1_snp30 clone being modified by a corruption // function class TestTScrubberBe_data_2 : public TestTScrubberBe { public: TestTScrubberBe_data_2() : TestTScrubberBe() {} // basic test configuration - 3 OSDs, all involved in the pool pool_conf_t pl{3, 3, 3, 3, "rep_pool"}; TestTScrubberBeParams inject_params() override { std::cout << fmt::format( "{}: injecting params (minimal-snaps + size change)", __func__) << std::endl; TestTScrubberBeParams params{ /* pool_conf */ pl, /* real_objs_conf */ ScrubDatasets::minimal_snaps_configuration, /*num_osds */ 3}; // inject a corruption function that will modify osd.0's version of // the object params.objs_conf.objs[0].corrupt_funcs = &ScrubDatasets::crpt_funcs_set1; return params; } }; TEST_F(TestTScrubberBe_data_2, smaps_clone_size) { ASSERT_TRUE(sbe); EXPECT_EQ(sbe->get_omap_stats().omap_bytes, 0); logger.set_expected_err_count(1); auto 
[incons, fix_list] = sbe->scrub_compare_maps(true, *test_scrubber); EXPECT_EQ(fix_list.size(), 0); // snap-mapper fix should be empty EXPECT_EQ(incons.size(), 1); // one inconsistency } // Local Variables: // compile-command: "cd ../.. ; make unittest_osdscrub ; ./unittest_osdscrub // --log-to-stderr=true --debug-osd=20 # --gtest_filter=*.* " End:
19,420
27.986567
80
cc
null
ceph-main/src/test/osd/types.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2011 New Dream Network * Copyright (C) 2013 Cloudwatt <libre.licensing@cloudwatt.com> * * Author: Loic Dachary <loic@dachary.org> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License version 2, as published by the Free Software * Foundation. See file COPYING. * */ #include "include/types.h" #include "osd/osd_types.h" #include "osd/OSDMap.h" #include "gtest/gtest.h" #include "include/coredumpctl.h" #include "common/Thread.h" #include "include/stringify.h" #include "osd/ReplicatedBackend.h" #include <sstream> using namespace std; TEST(hobject, prefixes0) { uint32_t mask = 0xE947FA20; uint32_t bits = 12; int64_t pool = 0; set<string> prefixes_correct; prefixes_correct.insert(string("0000000000000000.02A")); set<string> prefixes_out(hobject_t::get_prefixes(bits, mask, pool)); ASSERT_EQ(prefixes_out, prefixes_correct); } TEST(hobject, prefixes1) { uint32_t mask = 0x0000000F; uint32_t bits = 6; int64_t pool = 20; set<string> prefixes_correct; prefixes_correct.insert(string("0000000000000014.F0")); prefixes_correct.insert(string("0000000000000014.F4")); prefixes_correct.insert(string("0000000000000014.F8")); prefixes_correct.insert(string("0000000000000014.FC")); set<string> prefixes_out(hobject_t::get_prefixes(bits, mask, pool)); ASSERT_EQ(prefixes_out, prefixes_correct); } TEST(hobject, prefixes2) { uint32_t mask = 0xDEADBEAF; uint32_t bits = 25; int64_t pool = 0; set<string> prefixes_correct; prefixes_correct.insert(string("0000000000000000.FAEBDA0")); prefixes_correct.insert(string("0000000000000000.FAEBDA2")); prefixes_correct.insert(string("0000000000000000.FAEBDA4")); prefixes_correct.insert(string("0000000000000000.FAEBDA6")); prefixes_correct.insert(string("0000000000000000.FAEBDA8")); 
prefixes_correct.insert(string("0000000000000000.FAEBDAA")); prefixes_correct.insert(string("0000000000000000.FAEBDAC")); prefixes_correct.insert(string("0000000000000000.FAEBDAE")); set<string> prefixes_out(hobject_t::get_prefixes(bits, mask, pool)); ASSERT_EQ(prefixes_out, prefixes_correct); } TEST(hobject, prefixes3) { uint32_t mask = 0xE947FA20; uint32_t bits = 32; int64_t pool = 0x23; set<string> prefixes_correct; prefixes_correct.insert(string("0000000000000023.02AF749E")); set<string> prefixes_out(hobject_t::get_prefixes(bits, mask, pool)); ASSERT_EQ(prefixes_out, prefixes_correct); } TEST(hobject, prefixes4) { uint32_t mask = 0xE947FA20; uint32_t bits = 0; int64_t pool = 0x23; set<string> prefixes_correct; prefixes_correct.insert(string("0000000000000023.")); set<string> prefixes_out(hobject_t::get_prefixes(bits, mask, pool)); ASSERT_EQ(prefixes_out, prefixes_correct); } TEST(hobject, prefixes5) { uint32_t mask = 0xDEADBEAF; uint32_t bits = 1; int64_t pool = 0x34AC5D00; set<string> prefixes_correct; prefixes_correct.insert(string("0000000034AC5D00.1")); prefixes_correct.insert(string("0000000034AC5D00.3")); prefixes_correct.insert(string("0000000034AC5D00.5")); prefixes_correct.insert(string("0000000034AC5D00.7")); prefixes_correct.insert(string("0000000034AC5D00.9")); prefixes_correct.insert(string("0000000034AC5D00.B")); prefixes_correct.insert(string("0000000034AC5D00.D")); prefixes_correct.insert(string("0000000034AC5D00.F")); set<string> prefixes_out(hobject_t::get_prefixes(bits, mask, pool)); ASSERT_EQ(prefixes_out, prefixes_correct); } TEST(pg_interval_t, check_new_interval) { // iterate through all 4 combinations for (unsigned i = 0; i < 4; ++i) { // // Create a situation where osdmaps are the same so that // each test case can diverge from it using minimal code. 
// int osd_id = 1; epoch_t epoch = 40; std::shared_ptr<OSDMap> osdmap(new OSDMap()); osdmap->set_max_osd(10); osdmap->set_state(osd_id, CEPH_OSD_EXISTS); osdmap->set_epoch(epoch); std::shared_ptr<OSDMap> lastmap(new OSDMap()); lastmap->set_max_osd(10); lastmap->set_state(osd_id, CEPH_OSD_EXISTS); lastmap->set_epoch(epoch); epoch_t same_interval_since = epoch; epoch_t last_epoch_clean = same_interval_since; int64_t pool_id = 200; int pg_num = 4; __u8 min_size = 2; boost::scoped_ptr<IsPGRecoverablePredicate> recoverable(new ReplicatedBackend::RPCRecPred()); { OSDMap::Incremental inc(epoch + 1); inc.new_pools[pool_id].min_size = min_size; inc.new_pools[pool_id].set_pg_num(pg_num); inc.new_pools[pool_id].set_pg_num_pending(pg_num); inc.new_up_thru[osd_id] = epoch + 1; osdmap->apply_incremental(inc); lastmap->apply_incremental(inc); } vector<int> new_acting; new_acting.push_back(osd_id); new_acting.push_back(osd_id + 1); vector<int> old_acting = new_acting; int old_primary = osd_id; int new_primary = osd_id; vector<int> new_up; new_up.push_back(osd_id); int old_up_primary = osd_id; int new_up_primary = osd_id; vector<int> old_up = new_up; pg_t pgid; pgid.set_pool(pool_id); // // Do nothing if there are no modifications in // acting, up or pool size and that the pool is not // being split // { PastIntervals past_intervals; ASSERT_TRUE(past_intervals.empty()); ASSERT_FALSE(PastIntervals::check_new_interval(old_primary, new_primary, old_acting, new_acting, old_up_primary, new_up_primary, old_up, new_up, same_interval_since, last_epoch_clean, osdmap, lastmap, pgid, *recoverable, &past_intervals)); ASSERT_TRUE(past_intervals.empty()); } // // The acting set has changed // { vector<int> new_acting; int _new_primary = osd_id + 1; new_acting.push_back(_new_primary); PastIntervals past_intervals; ASSERT_TRUE(past_intervals.empty()); ASSERT_TRUE(PastIntervals::check_new_interval(old_primary, new_primary, old_acting, new_acting, old_up_primary, new_up_primary, old_up, new_up, 
same_interval_since, last_epoch_clean, osdmap, lastmap, pgid, *recoverable, &past_intervals)); old_primary = new_primary; } // // The up set has changed // { vector<int> new_up; int _new_primary = osd_id + 1; new_up.push_back(_new_primary); PastIntervals past_intervals; ASSERT_TRUE(past_intervals.empty()); ASSERT_TRUE(PastIntervals::check_new_interval(old_primary, new_primary, old_acting, new_acting, old_up_primary, new_up_primary, old_up, new_up, same_interval_since, last_epoch_clean, osdmap, lastmap, pgid, *recoverable, &past_intervals)); } // // The up primary has changed // { vector<int> new_up; int _new_up_primary = osd_id + 1; PastIntervals past_intervals; ASSERT_TRUE(past_intervals.empty()); ASSERT_TRUE(PastIntervals::check_new_interval(old_primary, new_primary, old_acting, new_acting, old_up_primary, _new_up_primary, old_up, new_up, same_interval_since, last_epoch_clean, osdmap, lastmap, pgid, *recoverable, &past_intervals)); } // // PG is splitting // { std::shared_ptr<OSDMap> osdmap(new OSDMap()); osdmap->set_max_osd(10); osdmap->set_state(osd_id, CEPH_OSD_EXISTS); osdmap->set_epoch(epoch); int new_pg_num = pg_num ^ 2; OSDMap::Incremental inc(epoch + 1); inc.new_pools[pool_id].min_size = min_size; inc.new_pools[pool_id].set_pg_num(new_pg_num); osdmap->apply_incremental(inc); PastIntervals past_intervals; ASSERT_TRUE(past_intervals.empty()); ASSERT_TRUE(PastIntervals::check_new_interval(old_primary, new_primary, old_acting, new_acting, old_up_primary, new_up_primary, old_up, new_up, same_interval_since, last_epoch_clean, osdmap, lastmap, pgid, *recoverable, &past_intervals)); } // // PG is pre-merge source // { std::shared_ptr<OSDMap> osdmap(new OSDMap()); osdmap->set_max_osd(10); osdmap->set_state(osd_id, CEPH_OSD_EXISTS); osdmap->set_epoch(epoch); OSDMap::Incremental inc(epoch + 1); inc.new_pools[pool_id].min_size = min_size; inc.new_pools[pool_id].set_pg_num(pg_num); inc.new_pools[pool_id].set_pg_num_pending(pg_num - 1); osdmap->apply_incremental(inc); 
cout << "pg_num " << pg_num << std::endl; PastIntervals past_intervals; ASSERT_TRUE(past_intervals.empty()); ASSERT_TRUE(PastIntervals::check_new_interval(old_primary, new_primary, old_acting, new_acting, old_up_primary, new_up_primary, old_up, new_up, same_interval_since, last_epoch_clean, osdmap, lastmap, pg_t(pg_num - 1, pool_id), *recoverable, &past_intervals)); } // // PG was pre-merge source // { std::shared_ptr<OSDMap> osdmap(new OSDMap()); osdmap->set_max_osd(10); osdmap->set_state(osd_id, CEPH_OSD_EXISTS); osdmap->set_epoch(epoch); OSDMap::Incremental inc(epoch + 1); inc.new_pools[pool_id].min_size = min_size; inc.new_pools[pool_id].set_pg_num(pg_num); inc.new_pools[pool_id].set_pg_num_pending(pg_num - 1); osdmap->apply_incremental(inc); cout << "pg_num " << pg_num << std::endl; PastIntervals past_intervals; ASSERT_TRUE(past_intervals.empty()); ASSERT_TRUE(PastIntervals::check_new_interval(old_primary, new_primary, old_acting, new_acting, old_up_primary, new_up_primary, old_up, new_up, same_interval_since, last_epoch_clean, lastmap, // reverse order! 
osdmap, pg_t(pg_num - 1, pool_id), *recoverable, &past_intervals)); } // // PG is merge source // { std::shared_ptr<OSDMap> osdmap(new OSDMap()); osdmap->set_max_osd(10); osdmap->set_state(osd_id, CEPH_OSD_EXISTS); osdmap->set_epoch(epoch); OSDMap::Incremental inc(epoch + 1); inc.new_pools[pool_id].min_size = min_size; inc.new_pools[pool_id].set_pg_num(pg_num - 1); osdmap->apply_incremental(inc); PastIntervals past_intervals; ASSERT_TRUE(past_intervals.empty()); ASSERT_TRUE(PastIntervals::check_new_interval(old_primary, new_primary, old_acting, new_acting, old_up_primary, new_up_primary, old_up, new_up, same_interval_since, last_epoch_clean, osdmap, lastmap, pg_t(pg_num - 1, pool_id), *recoverable, &past_intervals)); } // // PG is pre-merge target // { std::shared_ptr<OSDMap> osdmap(new OSDMap()); osdmap->set_max_osd(10); osdmap->set_state(osd_id, CEPH_OSD_EXISTS); osdmap->set_epoch(epoch); OSDMap::Incremental inc(epoch + 1); inc.new_pools[pool_id].min_size = min_size; inc.new_pools[pool_id].set_pg_num_pending(pg_num - 1); osdmap->apply_incremental(inc); PastIntervals past_intervals; ASSERT_TRUE(past_intervals.empty()); ASSERT_TRUE(PastIntervals::check_new_interval(old_primary, new_primary, old_acting, new_acting, old_up_primary, new_up_primary, old_up, new_up, same_interval_since, last_epoch_clean, osdmap, lastmap, pg_t(pg_num / 2 - 1, pool_id), *recoverable, &past_intervals)); } // // PG was pre-merge target // { std::shared_ptr<OSDMap> osdmap(new OSDMap()); osdmap->set_max_osd(10); osdmap->set_state(osd_id, CEPH_OSD_EXISTS); osdmap->set_epoch(epoch); OSDMap::Incremental inc(epoch + 1); inc.new_pools[pool_id].min_size = min_size; inc.new_pools[pool_id].set_pg_num_pending(pg_num - 1); osdmap->apply_incremental(inc); PastIntervals past_intervals; ASSERT_TRUE(past_intervals.empty()); ASSERT_TRUE(PastIntervals::check_new_interval(old_primary, new_primary, old_acting, new_acting, old_up_primary, new_up_primary, old_up, new_up, same_interval_since, last_epoch_clean, 
lastmap, // reverse order! osdmap, pg_t(pg_num / 2 - 1, pool_id), *recoverable, &past_intervals)); } // // PG is merge target // { std::shared_ptr<OSDMap> osdmap(new OSDMap()); osdmap->set_max_osd(10); osdmap->set_state(osd_id, CEPH_OSD_EXISTS); osdmap->set_epoch(epoch); OSDMap::Incremental inc(epoch + 1); inc.new_pools[pool_id].min_size = min_size; inc.new_pools[pool_id].set_pg_num(pg_num - 1); osdmap->apply_incremental(inc); PastIntervals past_intervals; ASSERT_TRUE(past_intervals.empty()); ASSERT_TRUE(PastIntervals::check_new_interval(old_primary, new_primary, old_acting, new_acting, old_up_primary, new_up_primary, old_up, new_up, same_interval_since, last_epoch_clean, osdmap, lastmap, pg_t(pg_num / 2 - 1, pool_id), *recoverable, &past_intervals)); } // // PG size has changed // { std::shared_ptr<OSDMap> osdmap(new OSDMap()); osdmap->set_max_osd(10); osdmap->set_state(osd_id, CEPH_OSD_EXISTS); osdmap->set_epoch(epoch); OSDMap::Incremental inc(epoch + 1); __u8 new_min_size = min_size + 1; inc.new_pools[pool_id].min_size = new_min_size; inc.new_pools[pool_id].set_pg_num(pg_num); osdmap->apply_incremental(inc); PastIntervals past_intervals; ASSERT_TRUE(past_intervals.empty()); ASSERT_TRUE(PastIntervals::check_new_interval(old_primary, new_primary, old_acting, new_acting, old_up_primary, new_up_primary, old_up, new_up, same_interval_since, last_epoch_clean, osdmap, lastmap, pgid, *recoverable, &past_intervals)); } // // The old acting set was empty : the previous interval could not // have been rw // { vector<int> old_acting; PastIntervals past_intervals; ostringstream out; ASSERT_TRUE(past_intervals.empty()); ASSERT_TRUE(PastIntervals::check_new_interval(old_primary, new_primary, old_acting, new_acting, old_up_primary, new_up_primary, old_up, new_up, same_interval_since, last_epoch_clean, osdmap, lastmap, pgid, *recoverable, &past_intervals, &out)); ASSERT_NE(string::npos, out.str().find("acting set is too small")); } // // The old acting set did not have enough 
osd : it could // not have been rw // { vector<int> old_acting; old_acting.push_back(osd_id); // // see http://tracker.ceph.com/issues/5780 // the size of the old acting set should be compared // with the min_size of the old osdmap // // The new osdmap is created so that it triggers the // bug. // std::shared_ptr<OSDMap> osdmap(new OSDMap()); osdmap->set_max_osd(10); osdmap->set_state(osd_id, CEPH_OSD_EXISTS); osdmap->set_epoch(epoch); OSDMap::Incremental inc(epoch + 1); __u8 new_min_size = old_acting.size(); inc.new_pools[pool_id].min_size = new_min_size; inc.new_pools[pool_id].set_pg_num(pg_num); osdmap->apply_incremental(inc); ostringstream out; PastIntervals past_intervals; ASSERT_TRUE(past_intervals.empty()); ASSERT_TRUE(PastIntervals::check_new_interval(old_primary, new_primary, old_acting, new_acting, old_up_primary, new_up_primary, old_up, new_up, same_interval_since, last_epoch_clean, osdmap, lastmap, pgid, *recoverable, &past_intervals, &out)); ASSERT_NE(string::npos, out.str().find("acting set is too small")); } // // The acting set changes. The old acting set primary was up during the // previous interval and may have been rw. // { vector<int> new_acting; new_acting.push_back(osd_id + 4); new_acting.push_back(osd_id + 5); ostringstream out; PastIntervals past_intervals; ASSERT_TRUE(past_intervals.empty()); ASSERT_TRUE(PastIntervals::check_new_interval(old_primary, new_primary, old_acting, new_acting, old_up_primary, new_up_primary, old_up, new_up, same_interval_since, last_epoch_clean, osdmap, lastmap, pgid, *recoverable, &past_intervals, &out)); ASSERT_NE(string::npos, out.str().find("includes interval")); } // // The acting set changes. The old acting set primary was not up // during the old interval but last_epoch_clean is in the // old interval and it may have been rw. 
// { vector<int> new_acting; new_acting.push_back(osd_id + 4); new_acting.push_back(osd_id + 5); std::shared_ptr<OSDMap> lastmap(new OSDMap()); lastmap->set_max_osd(10); lastmap->set_state(osd_id, CEPH_OSD_EXISTS); lastmap->set_epoch(epoch); OSDMap::Incremental inc(epoch + 1); inc.new_pools[pool_id].min_size = min_size; inc.new_pools[pool_id].set_pg_num(pg_num); inc.new_up_thru[osd_id] = epoch - 10; lastmap->apply_incremental(inc); ostringstream out; PastIntervals past_intervals; ASSERT_TRUE(past_intervals.empty()); ASSERT_TRUE(PastIntervals::check_new_interval(old_primary, new_primary, old_acting, new_acting, old_up_primary, new_up_primary, old_up, new_up, same_interval_since, last_epoch_clean, osdmap, lastmap, pgid, *recoverable, &past_intervals, &out)); ASSERT_NE(string::npos, out.str().find("presumed to have been rw")); } // // The acting set changes. The old acting set primary was not up // during the old interval and last_epoch_clean is before the // old interval : the previous interval could not possibly have // been rw. 
// { vector<int> new_acting; new_acting.push_back(osd_id + 4); new_acting.push_back(osd_id + 5); epoch_t last_epoch_clean = epoch - 10; std::shared_ptr<OSDMap> lastmap(new OSDMap()); lastmap->set_max_osd(10); lastmap->set_state(osd_id, CEPH_OSD_EXISTS); lastmap->set_epoch(epoch); OSDMap::Incremental inc(epoch + 1); inc.new_pools[pool_id].min_size = min_size; inc.new_pools[pool_id].set_pg_num(pg_num); inc.new_up_thru[osd_id] = last_epoch_clean; lastmap->apply_incremental(inc); ostringstream out; PastIntervals past_intervals; ASSERT_TRUE(past_intervals.empty()); ASSERT_TRUE(PastIntervals::check_new_interval(old_primary, new_primary, old_acting, new_acting, old_up_primary, new_up_primary, old_up, new_up, same_interval_since, last_epoch_clean, osdmap, lastmap, pgid, *recoverable, &past_intervals, &out)); ASSERT_NE(string::npos, out.str().find("does not include interval")); } } // end for, didn't want to reindent } TEST(pg_t, get_ancestor) { ASSERT_EQ(pg_t(0, 0), pg_t(16, 0).get_ancestor(16)); ASSERT_EQ(pg_t(1, 0), pg_t(17, 0).get_ancestor(16)); ASSERT_EQ(pg_t(0, 0), pg_t(16, 0).get_ancestor(8)); ASSERT_EQ(pg_t(16, 0), pg_t(16, 0).get_ancestor(80)); ASSERT_EQ(pg_t(16, 0), pg_t(16, 0).get_ancestor(83)); ASSERT_EQ(pg_t(1, 0), pg_t(1321, 0).get_ancestor(123).get_ancestor(8)); ASSERT_EQ(pg_t(3, 0), pg_t(1323, 0).get_ancestor(123).get_ancestor(8)); ASSERT_EQ(pg_t(3, 0), pg_t(1323, 0).get_ancestor(8)); } TEST(pg_t, split) { pg_t pgid(0, 0); set<pg_t> s; bool b; s.clear(); b = pgid.is_split(1, 1, &s); ASSERT_TRUE(!b); s.clear(); b = pgid.is_split(2, 4, NULL); ASSERT_TRUE(b); b = pgid.is_split(2, 4, &s); ASSERT_TRUE(b); ASSERT_EQ(1u, s.size()); ASSERT_TRUE(s.count(pg_t(2, 0))); s.clear(); b = pgid.is_split(2, 8, &s); ASSERT_TRUE(b); ASSERT_EQ(3u, s.size()); ASSERT_TRUE(s.count(pg_t(2, 0))); ASSERT_TRUE(s.count(pg_t(4, 0))); ASSERT_TRUE(s.count(pg_t(6, 0))); s.clear(); b = pgid.is_split(3, 8, &s); ASSERT_TRUE(b); ASSERT_EQ(1u, s.size()); ASSERT_TRUE(s.count(pg_t(4, 0))); 
s.clear();
  // a NULL children set is accepted; the answer must match the set variant
  b = pgid.is_split(6, 8, NULL);
  ASSERT_TRUE(!b);
  b = pgid.is_split(6, 8, &s);
  ASSERT_TRUE(!b);
  ASSERT_EQ(0u, s.size());

  // repeat the checks starting from a pg with a non-zero seed
  pgid = pg_t(1, 0);

  s.clear();
  b = pgid.is_split(2, 4, &s);
  ASSERT_TRUE(b);
  ASSERT_EQ(1u, s.size());
  ASSERT_TRUE(s.count(pg_t(3, 0)));
  s.clear();
  b = pgid.is_split(2, 6, &s);
  ASSERT_TRUE(b);
  ASSERT_EQ(2u, s.size());
  ASSERT_TRUE(s.count(pg_t(3, 0)));
  ASSERT_TRUE(s.count(pg_t(5, 0)));
  s.clear();
  b = pgid.is_split(2, 8, &s);
  ASSERT_TRUE(b);
  ASSERT_EQ(3u, s.size());
  ASSERT_TRUE(s.count(pg_t(3, 0)));
  ASSERT_TRUE(s.count(pg_t(5, 0)));
  ASSERT_TRUE(s.count(pg_t(7, 0)));
  s.clear();
  b = pgid.is_split(4, 8, &s);
  ASSERT_TRUE(b);
  ASSERT_EQ(1u, s.size());
  ASSERT_TRUE(s.count(pg_t(5, 0)));
  s.clear();
  b = pgid.is_split(3, 8, &s);
  ASSERT_TRUE(b);
  ASSERT_EQ(3u, s.size());
  ASSERT_TRUE(s.count(pg_t(3, 0)));
  ASSERT_TRUE(s.count(pg_t(5, 0)));
  ASSERT_TRUE(s.count(pg_t(7, 0)));
  s.clear();
  b = pgid.is_split(6, 8, &s);
  ASSERT_TRUE(!b);
  ASSERT_EQ(0u, s.size());

  // and from seed 3
  pgid = pg_t(3, 0);

  s.clear();
  b = pgid.is_split(7, 8, &s);
  ASSERT_TRUE(b);
  ASSERT_EQ(1u, s.size());
  ASSERT_TRUE(s.count(pg_t(7, 0)));
  s.clear();
  b = pgid.is_split(7, 12, &s);
  ASSERT_TRUE(b);
  ASSERT_EQ(2u, s.size());
  ASSERT_TRUE(s.count(pg_t(7, 0)));
  ASSERT_TRUE(s.count(pg_t(11, 0)));
  s.clear();
  b = pgid.is_split(7, 11, &s);
  ASSERT_TRUE(b);
  ASSERT_EQ(1u, s.size());
  ASSERT_TRUE(s.count(pg_t(7, 0)));
}

// Exercise pg_t::is_merge_source()/is_merge_target() for pg_num
// reductions: the source pg must report its merge-target parent, and
// that parent must agree it is a target for the same transition.
TEST(pg_t, merge) {
  pg_t pgid, parent;
  bool b;

  pgid = pg_t(7, 0);

  // 8 -> 7 merges pg 7 into pg 3
  b = pgid.is_merge_source(8, 7, &parent);
  ASSERT_TRUE(b);
  ASSERT_EQ(parent, pg_t(3, 0));
  ASSERT_TRUE(parent.is_merge_target(8, 7));

  b = pgid.is_merge_source(8, 5, &parent);
  ASSERT_TRUE(b);
  ASSERT_EQ(parent, pg_t(3, 0));
  ASSERT_TRUE(parent.is_merge_target(8, 5));

  b = pgid.is_merge_source(8, 4, &parent);
  ASSERT_TRUE(b);
  ASSERT_EQ(parent, pg_t(3, 0));
  ASSERT_TRUE(parent.is_merge_target(8, 4));

  // shrinking below 4 pgs collapses pg 7 further, onto pg 1
  b = pgid.is_merge_source(8, 3, &parent);
  ASSERT_TRUE(b);
  ASSERT_EQ(parent, pg_t(1, 0));
  // NOTE(review): unlike the neighboring cases this checks the target at
  // new_pg_num 4 rather than 3 -- confirm this asymmetry is intentional.
  ASSERT_TRUE(parent.is_merge_target(8, 4));

  // 9 -> 8 leaves pg 7 in place (see the ASSERT_FALSE that follows)
  b = pgid.is_merge_source(9, 8,
&parent);
  ASSERT_FALSE(b);
  ASSERT_FALSE(parent.is_merge_target(9, 8));
}

// mark_data_region_dirty() must accumulate exactly the extents it is
// given, starting from an empty dirty set.
TEST(ObjectCleanRegions, mark_data_region_dirty)
{
  ObjectCleanRegions clean_regions;
  uint64_t offset_1, len_1, offset_2, len_2;
  offset_1 = 4096;
  len_1 = 8192;
  offset_2 = 40960;
  len_2 = 4096;

  // a freshly constructed object reports no dirty data
  interval_set<uint64_t> expect_dirty_region;
  EXPECT_EQ(expect_dirty_region, clean_regions.get_dirty_regions());

  expect_dirty_region.insert(offset_1, len_1);
  expect_dirty_region.insert(offset_2, len_2);
  clean_regions.mark_data_region_dirty(offset_1, len_1);
  clean_regions.mark_data_region_dirty(offset_2, len_2);
  EXPECT_EQ(expect_dirty_region, clean_regions.get_dirty_regions());
}

// mark_omap_dirty() flips the (initially clean) omap-dirty flag.
TEST(ObjectCleanRegions, mark_omap_dirty)
{
  ObjectCleanRegions clean_regions;
  EXPECT_FALSE(clean_regions.omap_is_dirty());
  clean_regions.mark_omap_dirty();
  EXPECT_TRUE(clean_regions.omap_is_dirty());
}

// merge() must union dirty data regions and OR the omap-dirty flags.
// The subset_of() checks (rather than strict equality) allow the
// implementation to track a superset of the marked extents.
TEST(ObjectCleanRegions, merge)
{
  ObjectCleanRegions cr1, cr2;
  interval_set<uint64_t> cr1_expect;
  interval_set<uint64_t> cr2_expect;
  ASSERT_EQ(cr1_expect, cr1.get_dirty_regions());
  ASSERT_EQ(cr2_expect, cr2.get_dirty_regions());

  cr1.mark_data_region_dirty(4096, 4096);
  cr1_expect.insert(4096, 4096);
  ASSERT_EQ(cr1_expect, cr1.get_dirty_regions());
  cr1.mark_data_region_dirty(12288, 8192);
  cr1_expect.insert(12288, 8192);
  ASSERT_TRUE(cr1_expect.subset_of(cr1.get_dirty_regions()));
  cr1.mark_data_region_dirty(32768, 10240);
  cr1_expect.insert(32768, 10240);
  cr1_expect.erase(4096, 4096);
  ASSERT_TRUE(cr1_expect.subset_of(cr1.get_dirty_regions()));

  cr2.mark_data_region_dirty(20480, 12288);
  cr2_expect.insert(20480, 12288);
  ASSERT_EQ(cr2_expect, cr2.get_dirty_regions());
  cr2.mark_data_region_dirty(102400, 4096);
  cr2_expect.insert(102400, 4096);
  cr2.mark_data_region_dirty(204800, 8192);
  cr2_expect.insert(204800, 8192);
  cr2.mark_data_region_dirty(409600, 4096);
  cr2_expect.insert(409600, 4096);
  ASSERT_TRUE(cr2_expect.subset_of(cr2.get_dirty_regions()));

  // only cr2 has a dirty omap before the merge
  ASSERT_FALSE(cr2.omap_is_dirty());
  cr2.mark_omap_dirty();
  ASSERT_FALSE(cr1.omap_is_dirty());
ASSERT_TRUE(cr2.omap_is_dirty()); cr1.merge(cr2); cr1_expect.insert(204800, 8192); ASSERT_TRUE(cr1_expect.subset_of(cr1.get_dirty_regions())); ASSERT_TRUE(cr1.omap_is_dirty()); } TEST(pg_missing_t, constructor) { pg_missing_t missing; EXPECT_EQ((unsigned int)0, missing.num_missing()); EXPECT_FALSE(missing.have_missing()); } TEST(pg_missing_t, have_missing) { hobject_t oid(object_t("objname"), "key", 123, 456, 0, ""); pg_missing_t missing; EXPECT_FALSE(missing.have_missing()); missing.add(oid, eversion_t(), eversion_t(), false); EXPECT_TRUE(missing.have_missing()); } TEST(pg_missing_t, claim) { hobject_t oid(object_t("objname"), "key", 123, 456, 0, ""); pg_missing_t missing; EXPECT_FALSE(missing.have_missing()); missing.add(oid, eversion_t(), eversion_t(), false); EXPECT_TRUE(missing.have_missing()); pg_missing_t other; EXPECT_FALSE(other.have_missing()); other.claim(std::move(missing)); EXPECT_TRUE(other.have_missing()); } TEST(pg_missing_t, is_missing) { // pg_missing_t::is_missing(const hobject_t& oid) const { hobject_t oid(object_t("objname"), "key", 123, 456, 0, ""); pg_missing_t missing; EXPECT_FALSE(missing.is_missing(oid)); missing.add(oid, eversion_t(), eversion_t(), false); EXPECT_TRUE(missing.is_missing(oid)); } // bool pg_missing_t::is_missing(const hobject_t& oid, eversion_t v) const { hobject_t oid(object_t("objname"), "key", 123, 456, 0, ""); pg_missing_t missing; eversion_t need(10,5); EXPECT_FALSE(missing.is_missing(oid, eversion_t())); missing.add(oid, need, eversion_t(), false); EXPECT_TRUE(missing.is_missing(oid)); EXPECT_FALSE(missing.is_missing(oid, eversion_t())); EXPECT_TRUE(missing.is_missing(oid, need)); } } TEST(pg_missing_t, add_next_event) { hobject_t oid(object_t("objname"), "key", 123, 456, 0, ""); hobject_t oid_other(object_t("other"), "key", 9123, 9456, 0, ""); eversion_t version(10,5); eversion_t prior_version(3,4); pg_log_entry_t sample_e(pg_log_entry_t::DELETE, oid, version, prior_version, 0, 
osd_reqid_t(entity_name_t::CLIENT(777), 8, 999), utime_t(8,9), 0); // new object (MODIFY) { pg_missing_t missing; pg_log_entry_t e = sample_e; e.op = pg_log_entry_t::MODIFY; e.prior_version = eversion_t(); EXPECT_TRUE(e.is_update()); EXPECT_TRUE(e.object_is_indexed()); EXPECT_TRUE(e.reqid_is_indexed()); EXPECT_FALSE(missing.is_missing(oid)); missing.add_next_event(e); EXPECT_TRUE(missing.is_missing(oid)); EXPECT_EQ(eversion_t(), missing.get_items().at(oid).have); EXPECT_EQ(oid, missing.get_rmissing().at(e.version.version)); EXPECT_EQ(1U, missing.num_missing()); EXPECT_EQ(1U, missing.get_rmissing().size()); // adding the same object replaces the previous one missing.add_next_event(e); EXPECT_TRUE(missing.is_missing(oid)); EXPECT_EQ(1U, missing.num_missing()); EXPECT_EQ(1U, missing.get_rmissing().size()); } // new object (CLONE) { pg_missing_t missing; pg_log_entry_t e = sample_e; e.op = pg_log_entry_t::CLONE; e.prior_version = eversion_t(); EXPECT_TRUE(e.is_clone()); EXPECT_TRUE(e.object_is_indexed()); EXPECT_FALSE(e.reqid_is_indexed()); EXPECT_FALSE(missing.is_missing(oid)); missing.add_next_event(e); EXPECT_TRUE(missing.is_missing(oid)); EXPECT_EQ(eversion_t(), missing.get_items().at(oid).have); EXPECT_EQ(oid, missing.get_rmissing().at(e.version.version)); EXPECT_EQ(1U, missing.num_missing()); EXPECT_EQ(1U, missing.get_rmissing().size()); // adding the same object replaces the previous one missing.add_next_event(e); EXPECT_TRUE(missing.is_missing(oid)); EXPECT_EQ(1U, missing.num_missing()); EXPECT_EQ(1U, missing.get_rmissing().size()); } // existing object (MODIFY) { pg_missing_t missing; pg_log_entry_t e = sample_e; e.op = pg_log_entry_t::MODIFY; e.prior_version = eversion_t(); EXPECT_TRUE(e.is_update()); EXPECT_TRUE(e.object_is_indexed()); EXPECT_TRUE(e.reqid_is_indexed()); EXPECT_FALSE(missing.is_missing(oid)); missing.add_next_event(e); EXPECT_TRUE(missing.is_missing(oid)); EXPECT_EQ(eversion_t(), missing.get_items().at(oid).have); EXPECT_EQ(oid, 
missing.get_rmissing().at(e.version.version)); EXPECT_EQ(1U, missing.num_missing()); EXPECT_EQ(1U, missing.get_rmissing().size()); // adding the same object with a different version e.prior_version = prior_version; missing.add_next_event(e); EXPECT_EQ(eversion_t(), missing.get_items().at(oid).have); EXPECT_TRUE(missing.is_missing(oid)); EXPECT_EQ(1U, missing.num_missing()); EXPECT_EQ(1U, missing.get_rmissing().size()); } // object with prior version (MODIFY) { pg_missing_t missing; pg_log_entry_t e = sample_e; e.op = pg_log_entry_t::MODIFY; EXPECT_TRUE(e.is_update()); EXPECT_TRUE(e.object_is_indexed()); EXPECT_TRUE(e.reqid_is_indexed()); EXPECT_FALSE(missing.is_missing(oid)); missing.add_next_event(e); EXPECT_TRUE(missing.is_missing(oid)); EXPECT_EQ(prior_version, missing.get_items().at(oid).have); EXPECT_EQ(version, missing.get_items().at(oid).need); EXPECT_EQ(oid, missing.get_rmissing().at(e.version.version)); EXPECT_EQ(1U, missing.num_missing()); EXPECT_EQ(1U, missing.get_rmissing().size()); } // adding a DELETE matching an existing event { pg_missing_t missing; pg_log_entry_t e = sample_e; e.op = pg_log_entry_t::MODIFY; EXPECT_TRUE(e.is_update()); EXPECT_TRUE(e.object_is_indexed()); EXPECT_TRUE(e.reqid_is_indexed()); EXPECT_FALSE(missing.is_missing(oid)); missing.add_next_event(e); EXPECT_TRUE(missing.is_missing(oid)); e.op = pg_log_entry_t::DELETE; EXPECT_TRUE(e.is_delete()); missing.add_next_event(e); EXPECT_TRUE(missing.is_missing(oid)); EXPECT_TRUE(missing.get_items().at(oid).is_delete()); EXPECT_EQ(prior_version, missing.get_items().at(oid).have); EXPECT_EQ(version, missing.get_items().at(oid).need); EXPECT_EQ(oid, missing.get_rmissing().at(e.version.version)); EXPECT_EQ(1U, missing.num_missing()); EXPECT_EQ(1U, missing.get_rmissing().size()); } // adding a LOST_DELETE after an existing event { pg_missing_t missing; pg_log_entry_t e = sample_e; e.op = pg_log_entry_t::MODIFY; EXPECT_TRUE(e.is_update()); EXPECT_TRUE(e.object_is_indexed()); 
EXPECT_TRUE(e.reqid_is_indexed()); EXPECT_FALSE(missing.is_missing(oid)); missing.add_next_event(e); EXPECT_TRUE(missing.is_missing(oid)); EXPECT_FALSE(missing.get_items().at(oid).is_delete()); e.op = pg_log_entry_t::LOST_DELETE; e.version.version++; EXPECT_TRUE(e.is_delete()); missing.add_next_event(e); EXPECT_TRUE(missing.is_missing(oid)); EXPECT_TRUE(missing.get_items().at(oid).is_delete()); EXPECT_EQ(prior_version, missing.get_items().at(oid).have); EXPECT_EQ(e.version, missing.get_items().at(oid).need); EXPECT_EQ(oid, missing.get_rmissing().at(e.version.version)); EXPECT_EQ(1U, missing.num_missing()); EXPECT_EQ(1U, missing.get_rmissing().size()); } } TEST(pg_missing_t, revise_need) { hobject_t oid(object_t("objname"), "key", 123, 456, 0, ""); pg_missing_t missing; // create a new entry EXPECT_FALSE(missing.is_missing(oid)); eversion_t need(10,10); missing.revise_need(oid, need, false); EXPECT_TRUE(missing.is_missing(oid)); EXPECT_EQ(eversion_t(), missing.get_items().at(oid).have); EXPECT_EQ(need, missing.get_items().at(oid).need); // update an existing entry and preserve have eversion_t have(1,1); missing.revise_have(oid, have); eversion_t new_need(10,12); EXPECT_EQ(have, missing.get_items().at(oid).have); missing.revise_need(oid, new_need, false); EXPECT_EQ(have, missing.get_items().at(oid).have); EXPECT_EQ(new_need, missing.get_items().at(oid).need); } TEST(pg_missing_t, revise_have) { hobject_t oid(object_t("objname"), "key", 123, 456, 0, ""); pg_missing_t missing; // a non existing entry means noop EXPECT_FALSE(missing.is_missing(oid)); eversion_t have(1,1); missing.revise_have(oid, have); EXPECT_FALSE(missing.is_missing(oid)); // update an existing entry eversion_t need(10,12); missing.add(oid, need, have, false); EXPECT_TRUE(missing.is_missing(oid)); eversion_t new_have(2,2); EXPECT_EQ(have, missing.get_items().at(oid).have); missing.revise_have(oid, new_have); EXPECT_EQ(new_have, missing.get_items().at(oid).have); EXPECT_EQ(need, 
missing.get_items().at(oid).need);
}

// add() records an object with an explicit need/have version pair.
TEST(pg_missing_t, add)
{
  hobject_t oid(object_t("objname"), "key", 123, 456, 0, "");
  pg_missing_t missing;
  EXPECT_FALSE(missing.is_missing(oid));

  eversion_t have(1,1);
  eversion_t need(10,10);
  missing.add(oid, need, have, false);
  EXPECT_TRUE(missing.is_missing(oid));
  EXPECT_EQ(have, missing.get_items().at(oid).have);
  EXPECT_EQ(need, missing.get_items().at(oid).need);
}

// Exercise both rm() overloads: by (oid, version) and by iterator.
TEST(pg_missing_t, rm)
{
  // void pg_missing_t::rm(const hobject_t& oid, eversion_t v)
  {
    hobject_t oid(object_t("objname"), "key", 123, 456, 0, "");
    pg_missing_t missing;
    EXPECT_FALSE(missing.is_missing(oid));

    epoch_t epoch = 10;
    eversion_t need(epoch,10);
    missing.add(oid, need, eversion_t(), false);
    EXPECT_TRUE(missing.is_missing(oid));
    // rm of an older version is a noop
    missing.rm(oid, eversion_t(epoch / 2,20));
    EXPECT_TRUE(missing.is_missing(oid));
    // rm of a later version removes the object
    missing.rm(oid, eversion_t(epoch * 2,20));
    EXPECT_FALSE(missing.is_missing(oid));
  }

  // void pg_missing_t::rm(const std::map<hobject_t, pg_missing_item>::iterator &m)
  {
    hobject_t oid(object_t("objname"), "key", 123, 456, 0, "");
    pg_missing_t missing;
    EXPECT_FALSE(missing.is_missing(oid));
    missing.add(oid, eversion_t(), eversion_t(), false);
    EXPECT_TRUE(missing.is_missing(oid));
    auto m = missing.get_items().find(oid);
    missing.rm(m);
    EXPECT_FALSE(missing.is_missing(oid));
  }
}

// Exercise both got() overloads; got() asserts on misuse (hence the
// death tests wrapped in PrCtl to suppress core dumps).
TEST(pg_missing_t, got)
{
  // void pg_missing_t::got(const hobject_t& oid, eversion_t v)
  {
    hobject_t oid(object_t("objname"), "key", 123, 456, 0, "");
    pg_missing_t missing;

    // assert if the oid does not exist
    {
      PrCtl unset_dumpable;
      EXPECT_DEATH(missing.got(oid, eversion_t()), "");
    }

    EXPECT_FALSE(missing.is_missing(oid));

    epoch_t epoch = 10;
    eversion_t need(epoch,10);
    missing.add(oid, need, eversion_t(), false);
    EXPECT_TRUE(missing.is_missing(oid));

    // assert that the version to be removed is lower than the version of the object
    {
      PrCtl unset_dumpable;
      EXPECT_DEATH(missing.got(oid, eversion_t(epoch /
2,20)), "");
    }

    // remove of a later version removes the object
    missing.got(oid, eversion_t(epoch * 2,20));
    EXPECT_FALSE(missing.is_missing(oid));
  }

  // void pg_missing_t::got(const std::map<hobject_t, pg_missing_item>::iterator &m)
  {
    hobject_t oid(object_t("objname"), "key", 123, 456, 0, "");
    pg_missing_t missing;
    EXPECT_FALSE(missing.is_missing(oid));
    missing.add(oid, eversion_t(), eversion_t(), false);
    EXPECT_TRUE(missing.is_missing(oid));
    auto m = missing.get_items().find(oid);
    missing.got(m);
    EXPECT_FALSE(missing.is_missing(oid));
  }
}

// split_into() must move entries hashing into the child pg to the child
// missing set and drop them from the parent.
TEST(pg_missing_t, split_into)
{
  uint32_t hash1 = 1;
  hobject_t oid1(object_t("objname"), "key1", 123, hash1, 0, "");
  uint32_t hash2 = 2;
  hobject_t oid2(object_t("objname"), "key2", 123, hash2, 0, "");
  pg_missing_t missing;
  missing.add(oid1, eversion_t(), eversion_t(), false);
  missing.add(oid2, eversion_t(), eversion_t(), false);
  pg_t child_pgid;
  child_pgid.m_seed = 1;
  pg_missing_t child;
  unsigned split_bits = 1;
  missing.split_into(child_pgid, split_bits, &child);
  // with split_bits 1 and child seed 1, hash1 (=1) moves to the child
  // while hash2 (=2) stays in the parent
  EXPECT_TRUE(child.is_missing(oid1));
  EXPECT_FALSE(child.is_missing(oid2));
  EXPECT_FALSE(missing.is_missing(oid1));
  EXPECT_TRUE(missing.is_missing(oid2));
}

// get_pg_num_divisor() for a power-of-two pg_num is pg_num itself for
// every pg; for a non-power-of-two pg_num (12) the divisor alternates
// between the surrounding powers of two as asserted below.
TEST(pg_pool_t_test, get_pg_num_divisor)
{
  pg_pool_t p;
  p.set_pg_num(16);
  p.set_pgp_num(16);

  for (int i = 0; i < 16; ++i)
    ASSERT_EQ(16u, p.get_pg_num_divisor(pg_t(i, 1)));

  p.set_pg_num(12);
  p.set_pgp_num(12);

  ASSERT_EQ(16u, p.get_pg_num_divisor(pg_t(0, 1)));
  ASSERT_EQ(16u, p.get_pg_num_divisor(pg_t(1, 1)));
  ASSERT_EQ(16u, p.get_pg_num_divisor(pg_t(2, 1)));
  ASSERT_EQ(16u, p.get_pg_num_divisor(pg_t(3, 1)));
  ASSERT_EQ(8u, p.get_pg_num_divisor(pg_t(4, 1)));
  ASSERT_EQ(8u, p.get_pg_num_divisor(pg_t(5, 1)));
  ASSERT_EQ(8u, p.get_pg_num_divisor(pg_t(6, 1)));
  ASSERT_EQ(8u, p.get_pg_num_divisor(pg_t(7, 1)));
  ASSERT_EQ(16u, p.get_pg_num_divisor(pg_t(8, 1)));
  ASSERT_EQ(16u, p.get_pg_num_divisor(pg_t(9, 1)));
  ASSERT_EQ(16u, p.get_pg_num_divisor(pg_t(10, 1)));
  ASSERT_EQ(16u, p.get_pg_num_divisor(pg_t(11, 1)));
}

TEST(pg_pool_t_test,
get_random_pg_position) {
  srand(getpid());
  for (int i = 0; i < 100; ++i) {
    pg_pool_t p;
    p.set_pg_num(1 + (rand() % 1000));
    p.set_pgp_num(p.get_pg_num());
    pg_t pgid(rand() % p.get_pg_num(), 1);
    // a random position inside the pg must hash back to the same pg
    uint32_t h = p.get_random_pg_position(pgid, rand());
    uint32_t ps = p.raw_hash_to_pg(h);
    cout << p.get_pg_num() << " " << pgid << ": "
         << h << " -> " << pg_t(ps, 1) << std::endl;
    ASSERT_EQ(pgid.ps(), ps);
  }
}

// Shard sets print as comma-separated ids, and NO_SHARD orders before
// any real shard id.
TEST(shard_id_t, iostream) {
  set<shard_id_t> shards;
  shards.insert(shard_id_t(0));
  shards.insert(shard_id_t(1));
  shards.insert(shard_id_t(2));
  ostringstream out;
  out << shards;
  ASSERT_EQ(out.str(), "0,1,2");

  shard_id_t noshard = shard_id_t::NO_SHARD;
  shard_id_t zero(0);
  ASSERT_GT(zero, noshard);
}

// spg_t must round-trip through its string form, both with and without
// an explicit shard.
TEST(spg_t, parse) {
  spg_t a(pg_t(1,2), shard_id_t::NO_SHARD);
  spg_t aa, bb;
  spg_t b(pg_t(3,2), shard_id_t(2));
  std::string s = stringify(a);
  ASSERT_TRUE(aa.parse(s.c_str()));
  ASSERT_EQ(a, aa);
  s = stringify(b);
  ASSERT_TRUE(bb.parse(s.c_str()));
  ASSERT_EQ(b, bb);
}

// coll_t::parse() accepts well-formed collection names (which must then
// round-trip through to_str()) and rejects malformed ones.
TEST(coll_t, parse) {
  const char *ok[] = {
    "meta",
    "1.2_head",
    "1.2_TEMP",
    "1.2s3_head",
    "1.3s2_TEMP",
    "1.2s0_head",
    0
  };
  const char *bad[] = {
    "foo",
    "1.2_food",
    "1.2_head ",
    //" 1.2_head",   // hrm, this parses, which is not ideal.. pg_t's fault?
"1.2_temp", "1.2_HEAD", "1.xS3_HEAD", "1.2s_HEAD", "1.2sfoo_HEAD", 0 }; coll_t a; for (int i = 0; ok[i]; ++i) { cout << "check ok " << ok[i] << std::endl; ASSERT_TRUE(a.parse(ok[i])); ASSERT_EQ(string(ok[i]), a.to_str()); } for (int i = 0; bad[i]; ++i) { cout << "check bad " << bad[i] << std::endl; ASSERT_FALSE(a.parse(bad[i])); } } TEST(coll_t, temp) { spg_t pgid; coll_t foo(pgid); ASSERT_EQ(foo.to_str(), string("0.0_head")); coll_t temp = foo.get_temp(); ASSERT_EQ(temp.to_str(), string("0.0_TEMP")); spg_t pgid2; ASSERT_TRUE(temp.is_temp()); ASSERT_TRUE(temp.is_temp(&pgid2)); ASSERT_EQ(pgid, pgid2); } TEST(coll_t, assigment) { spg_t pgid; coll_t right(pgid); ASSERT_EQ(right.to_str(), string("0.0_head")); coll_t left, middle; ASSERT_EQ(left.to_str(), string("meta")); ASSERT_EQ(middle.to_str(), string("meta")); left = middle = right; ASSERT_EQ(left.to_str(), string("0.0_head")); ASSERT_EQ(middle.to_str(), string("0.0_head")); ASSERT_NE(middle.c_str(), right.c_str()); ASSERT_NE(left.c_str(), middle.c_str()); } TEST(hobject_t, parse) { const char *v[] = { "MIN", "MAX", "-1:60c2fa6d:::inc_osdmap.1:0", "-1:60c2fa6d:::inc_osdmap.1:333", "0:00000000::::head", "1:00000000:nspace:key:obj:head", "-40:00000000:nspace::obj:head", "20:00000000::key:obj:head", "20:00000000:::o%fdj:head", "20:00000000:::o%02fdj:head", "20:00000000:::_zero_%00_:head", NULL }; for (unsigned i=0; v[i]; ++i) { hobject_t o; bool b = o.parse(v[i]); if (!b) { cout << "failed to parse " << v[i] << std::endl; ASSERT_TRUE(false); } string s = stringify(o); if (s != v[i]) { cout << v[i] << " -> " << o << " -> " << s << std::endl; ASSERT_EQ(s, string(v[i])); } } } TEST(ghobject_t, cmp) { ghobject_t min; ghobject_t sep; sep.set_shard(shard_id_t(1)); sep.hobj.pool = -1; cout << min << " < " << sep << std::endl; ASSERT_TRUE(min < sep); sep.set_shard(shard_id_t::NO_SHARD); cout << "sep shard " << sep.shard_id << std::endl; ghobject_t o(hobject_t(object_t(), string(), CEPH_NOSNAP, 0x42, 1, string())); cout << "o 
" << o << std::endl; ASSERT_TRUE(o > sep); } TEST(ghobject_t, parse) { const char *v[] = { "GHMIN", "GHMAX", "13#0:00000000::::head#", "13#0:00000000::::head#deadbeef", "#-1:60c2fa6d:::inc_osdmap.1:333#deadbeef", "#-1:60c2fa6d:::inc%02osdmap.1:333#deadbeef", "#-1:60c2fa6d:::inc_osdmap.1:333#", "1#MIN#deadbeefff", "1#MAX#", "#MAX#123", "#-40:00000000:nspace::obj:head#", NULL }; for (unsigned i=0; v[i]; ++i) { ghobject_t o; bool b = o.parse(v[i]); if (!b) { cout << "failed to parse " << v[i] << std::endl; ASSERT_TRUE(false); } string s = stringify(o); if (s != v[i]) { cout << v[i] << " -> " << o << " -> " << s << std::endl; ASSERT_EQ(s, string(v[i])); } } } TEST(pool_opts_t, invalid_opt) { EXPECT_FALSE(pool_opts_t::is_opt_name("INVALID_OPT")); PrCtl unset_dumpable; EXPECT_DEATH(pool_opts_t::get_opt_desc("INVALID_OPT"), ""); } TEST(pool_opts_t, scrub_min_interval) { EXPECT_TRUE(pool_opts_t::is_opt_name("scrub_min_interval")); EXPECT_EQ(pool_opts_t::get_opt_desc("scrub_min_interval"), pool_opts_t::opt_desc_t(pool_opts_t::SCRUB_MIN_INTERVAL, pool_opts_t::DOUBLE)); pool_opts_t opts; EXPECT_FALSE(opts.is_set(pool_opts_t::SCRUB_MIN_INTERVAL)); { PrCtl unset_dumpable; EXPECT_DEATH(opts.get(pool_opts_t::SCRUB_MIN_INTERVAL), ""); } double val; EXPECT_FALSE(opts.get(pool_opts_t::SCRUB_MIN_INTERVAL, &val)); opts.set(pool_opts_t::SCRUB_MIN_INTERVAL, static_cast<double>(2015)); EXPECT_TRUE(opts.get(pool_opts_t::SCRUB_MIN_INTERVAL, &val)); EXPECT_EQ(val, 2015); opts.unset(pool_opts_t::SCRUB_MIN_INTERVAL); EXPECT_FALSE(opts.is_set(pool_opts_t::SCRUB_MIN_INTERVAL)); } TEST(pool_opts_t, scrub_max_interval) { EXPECT_TRUE(pool_opts_t::is_opt_name("scrub_max_interval")); EXPECT_EQ(pool_opts_t::get_opt_desc("scrub_max_interval"), pool_opts_t::opt_desc_t(pool_opts_t::SCRUB_MAX_INTERVAL, pool_opts_t::DOUBLE)); pool_opts_t opts; EXPECT_FALSE(opts.is_set(pool_opts_t::SCRUB_MAX_INTERVAL)); { PrCtl unset_dumpable; EXPECT_DEATH(opts.get(pool_opts_t::SCRUB_MAX_INTERVAL), ""); } double val; 
EXPECT_FALSE(opts.get(pool_opts_t::SCRUB_MAX_INTERVAL, &val)); opts.set(pool_opts_t::SCRUB_MAX_INTERVAL, static_cast<double>(2015)); EXPECT_TRUE(opts.get(pool_opts_t::SCRUB_MAX_INTERVAL, &val)); EXPECT_EQ(val, 2015); opts.unset(pool_opts_t::SCRUB_MAX_INTERVAL); EXPECT_FALSE(opts.is_set(pool_opts_t::SCRUB_MAX_INTERVAL)); } TEST(pool_opts_t, deep_scrub_interval) { EXPECT_TRUE(pool_opts_t::is_opt_name("deep_scrub_interval")); EXPECT_EQ(pool_opts_t::get_opt_desc("deep_scrub_interval"), pool_opts_t::opt_desc_t(pool_opts_t::DEEP_SCRUB_INTERVAL, pool_opts_t::DOUBLE)); pool_opts_t opts; EXPECT_FALSE(opts.is_set(pool_opts_t::DEEP_SCRUB_INTERVAL)); { PrCtl unset_dumpable; EXPECT_DEATH(opts.get(pool_opts_t::DEEP_SCRUB_INTERVAL), ""); } double val; EXPECT_FALSE(opts.get(pool_opts_t::DEEP_SCRUB_INTERVAL, &val)); opts.set(pool_opts_t::DEEP_SCRUB_INTERVAL, static_cast<double>(2015)); EXPECT_TRUE(opts.get(pool_opts_t::DEEP_SCRUB_INTERVAL, &val)); EXPECT_EQ(val, 2015); opts.unset(pool_opts_t::DEEP_SCRUB_INTERVAL); EXPECT_FALSE(opts.is_set(pool_opts_t::DEEP_SCRUB_INTERVAL)); } struct RequiredPredicate : IsPGRecoverablePredicate { unsigned required_size; explicit RequiredPredicate(unsigned required_size) : required_size(required_size) {} bool operator()(const set<pg_shard_t> &have) const override { return have.size() >= required_size; } }; using namespace std; struct MapPredicate { map<int, pair<PastIntervals::osd_state_t, epoch_t>> states; explicit MapPredicate( const vector<pair<int, pair<PastIntervals::osd_state_t, epoch_t>>> &_states) : states(_states.begin(), _states.end()) {} PastIntervals::osd_state_t operator()(epoch_t start, int osd, epoch_t *lost_at) { auto val = states.at(osd); if (lost_at) *lost_at = val.second; return val.first; } }; using sit = shard_id_t; using PI = PastIntervals; using pst = pg_shard_t; using ival = PastIntervals::pg_interval_t; using ivallst = std::list<ival>; const int N = 0x7fffffff /* CRUSH_ITEM_NONE, can't import crush.h here */; struct PITest : 
::testing::Test { PITest() {} void run( bool ec_pool, ivallst intervals, epoch_t last_epoch_started, unsigned min_to_peer, vector<pair<int, pair<PastIntervals::osd_state_t, epoch_t>>> osd_states, vector<int> up, vector<int> acting, set<pg_shard_t> probe, set<int> down, map<int, epoch_t> blocked_by, bool pg_down) { RequiredPredicate rec_pred(min_to_peer); MapPredicate map_pred(osd_states); PI::PriorSet correct( ec_pool, probe, down, blocked_by, pg_down, new RequiredPredicate(rec_pred)); PastIntervals compact; for (auto &&i: intervals) { compact.add_interval(ec_pool, i); } PI::PriorSet compact_ps = compact.get_prior_set( ec_pool, last_epoch_started, new RequiredPredicate(rec_pred), map_pred, up, acting, nullptr); ASSERT_EQ(correct, compact_ps); } }; TEST_F(PITest, past_intervals_rep) { run( /* ec_pool */ false, /* intervals */ { ival{{0, 1, 2}, {0, 1, 2}, 10, 20, true, 0, 0} , ival{{ 1, 2}, { 1, 2}, 21, 30, true, 1, 1} , ival{{ 2}, { 2}, 31, 35, false, 2, 2} , ival{{0, 2}, {0, 2}, 36, 50, true, 0, 0} }, /* les */ 5, /* min_peer */ 1, /* osd states at end */ { make_pair(0, make_pair(PI::UP , 0)) , make_pair(1, make_pair(PI::UP , 0)) , make_pair(2, make_pair(PI::DOWN , 0)) }, /* acting */ {0, 1 }, /* up */ {0, 1 }, /* probe */ {pst(0), pst(1)}, /* down */ {2}, /* blocked_by */ {}, /* pg_down */ false); } TEST_F(PITest, past_intervals_ec) { run( /* ec_pool */ true, /* intervals */ { ival{{0, 1, 2}, {0, 1, 2}, 10, 20, true, 0, 0} , ival{{N, 1, 2}, {N, 1, 2}, 21, 30, true, 1, 1} }, /* les */ 5, /* min_peer */ 2, /* osd states at end */ { make_pair(0, make_pair(PI::DOWN , 0)) , make_pair(1, make_pair(PI::UP , 0)) , make_pair(2, make_pair(PI::UP , 0)) }, /* acting */ {N, 1, 2}, /* up */ {N, 1, 2}, /* probe */ {pst(1, sit(1)), pst(2, sit(2))}, /* down */ {0}, /* blocked_by */ {}, /* pg_down */ false); } TEST_F(PITest, past_intervals_rep_down) { run( /* ec_pool */ false, /* intervals */ { ival{{0, 1, 2}, {0, 1, 2}, 10, 20, true, 0, 0} , ival{{ 1, 2}, { 1, 2}, 21, 30, true, 1, 
1} , ival{{ 2}, { 2}, 31, 35, true, 2, 2} , ival{{0, 2}, {0, 2}, 36, 50, true, 0, 0} }, /* les */ 5, /* min_peer */ 1, /* osd states at end */ { make_pair(0, make_pair(PI::UP , 0)) , make_pair(1, make_pair(PI::UP , 0)) , make_pair(2, make_pair(PI::DOWN , 0)) }, /* acting */ {0, 1 }, /* up */ {0, 1 }, /* probe */ {pst(0), pst(1)}, /* down */ {2}, /* blocked_by */ {{2, 0}}, /* pg_down */ true); } TEST_F(PITest, past_intervals_ec_down) { run( /* ec_pool */ true, /* intervals */ { ival{{0, 1, 2}, {0, 1, 2}, 10, 20, true, 0, 0} , ival{{N, 1, 2}, {N, 1, 2}, 21, 30, true, 1, 1} , ival{{N, N, 2}, {N, N, 2}, 31, 35, false, 2, 2} }, /* les */ 5, /* min_peer */ 2, /* osd states at end */ { make_pair(0, make_pair(PI::UP , 0)) , make_pair(1, make_pair(PI::DOWN , 0)) , make_pair(2, make_pair(PI::UP , 0)) }, /* acting */ {0, N, 2}, /* up */ {0, N, 2}, /* probe */ {pst(0, sit(0)), pst(2, sit(2))}, /* down */ {1}, /* blocked_by */ {{1, 0}}, /* pg_down */ true); } TEST_F(PITest, past_intervals_rep_no_subsets) { run( /* ec_pool */ false, /* intervals */ { ival{{0, 2}, {0, 2}, 10, 20, true, 0, 0} , ival{{ 1, 2}, { 1, 2}, 21, 30, true, 1, 1} , ival{{0, 1 }, {0, 1 }, 31, 35, true, 0, 0} }, /* les */ 5, /* min_peer */ 1, /* osd states at end */ { make_pair(0, make_pair(PI::UP , 0)) , make_pair(1, make_pair(PI::UP , 0)) , make_pair(2, make_pair(PI::DOWN , 0)) }, /* acting */ {0, 1 }, /* up */ {0, 1 }, /* probe */ {pst(0), pst(1)}, /* down */ {2}, /* blocked_by */ {}, /* pg_down */ false); } TEST_F(PITest, past_intervals_ec_no_subsets) { run( /* ec_pool */ true, /* intervals */ { ival{{0, N, 2}, {0, N, 2}, 10, 20, true, 0, 0} , ival{{N, 1, 2}, {N, 1, 2}, 21, 30, true, 1, 1} , ival{{0, 1, N}, {0, 1, N}, 31, 35, true, 0, 0} }, /* les */ 5, /* min_peer */ 2, /* osd states at end */ { make_pair(0, make_pair(PI::UP , 0)) , make_pair(1, make_pair(PI::DOWN , 0)) , make_pair(2, make_pair(PI::UP , 0)) }, /* acting */ {0, N, 2}, /* up */ {0, N, 2}, /* probe */ {pst(0, sit(0)), pst(2, sit(2))}, /* 
down */ {1}, /* blocked_by */ {{1, 0}}, /* pg_down */ true); } TEST_F(PITest, past_intervals_ec_no_subsets2) { run( /* ec_pool */ true, /* intervals */ { ival{{N, 1, 2}, {N, 1, 2}, 10, 20, true, 0, 0} , ival{{0, N, 2}, {0, N, 2}, 21, 30, true, 1, 1} , ival{{0, 3, N}, {0, 3, N}, 31, 35, true, 0, 0} }, /* les */ 31, /* min_peer */ 2, /* osd states at end */ { make_pair(0, make_pair(PI::UP , 0)) , make_pair(1, make_pair(PI::DOWN , 0)) , make_pair(2, make_pair(PI::UP , 0)) , make_pair(3, make_pair(PI::UP , 0)) }, /* acting */ {0, N, 2}, /* up */ {0, N, 2}, /* probe */ {pst(0, sit(0)), pst(2, sit(2)), pst(3, sit(1))}, /* down */ {1}, /* blocked_by */ {}, /* pg_down */ false); } TEST_F(PITest, past_intervals_rep_lost) { run( /* ec_pool */ false, /* intervals */ { ival{{0, 1, 2}, {0, 1, 2}, 10, 20, true, 0, 0} , ival{{ 1, 2}, { 1, 2}, 21, 30, true, 1, 1} , ival{{ 2}, { 2}, 31, 35, true, 2, 2} , ival{{0, 2}, {0, 2}, 36, 50, true, 0, 0} }, /* les */ 5, /* min_peer */ 1, /* osd states at end */ { make_pair(0, make_pair(PI::UP , 0)) , make_pair(1, make_pair(PI::UP , 0)) , make_pair(2, make_pair(PI::LOST , 55)) }, /* acting */ {0, 1 }, /* up */ {0, 1 }, /* probe */ {pst(0), pst(1)}, /* down */ {2}, /* blocked_by */ {}, /* pg_down */ false); } TEST_F(PITest, past_intervals_ec_lost) { run( /* ec_pool */ true, /* intervals */ { ival{{0, N, 2}, {0, N, 2}, 10, 20, true, 0, 0} , ival{{N, 1, 2}, {N, 1, 2}, 21, 30, true, 1, 1} , ival{{0, 1, N}, {0, 1, N}, 31, 35, true, 0, 0} }, /* les */ 5, /* min_peer */ 2, /* osd states at end */ { make_pair(0, make_pair(PI::UP , 0)) , make_pair(1, make_pair(PI::LOST , 36)) , make_pair(2, make_pair(PI::UP , 0)) }, /* acting */ {0, N, 2}, /* up */ {0, N, 2}, /* probe */ {pst(0, sit(0)), pst(2, sit(2))}, /* down */ {1}, /* blocked_by */ {}, /* pg_down */ false); } void ci_ref_test( object_manifest_t l, object_manifest_t to_remove, object_manifest_t g, object_ref_delta_t expected_delta) { { object_ref_delta_t delta; 
to_remove.calc_refs_to_drop_on_removal( &l, &g, delta); ASSERT_EQ( expected_delta, delta); } // calc_refs_to_drop specifically handles nullptr identically to empty // chunk_map if (l.chunk_map.empty() || g.chunk_map.empty()) { object_ref_delta_t delta; to_remove.calc_refs_to_drop_on_removal( l.chunk_map.empty() ? nullptr : &l, g.chunk_map.empty() ? nullptr : &g, delta); ASSERT_EQ( expected_delta, delta); } } void ci_ref_test_on_modify( object_manifest_t l, object_manifest_t to_remove, ObjectCleanRegions clean_regions, object_ref_delta_t expected_delta) { { object_ref_delta_t delta; to_remove.calc_refs_to_drop_on_modify( &l, clean_regions, delta); ASSERT_EQ( expected_delta, delta); } } void ci_ref_test_inc_on_set( object_manifest_t l, object_manifest_t added_set, object_manifest_t g, object_ref_delta_t expected_delta) { { object_ref_delta_t delta; added_set.calc_refs_to_inc_on_set( &l, &g, delta); ASSERT_EQ( expected_delta, delta); } } hobject_t mk_hobject(string name) { return hobject_t( std::move(name), string(), CEPH_NOSNAP, 0x42, 1, string()); } object_manifest_t mk_manifest( std::map<uint64_t, std::tuple<uint64_t, uint64_t, string>> m) { object_manifest_t ret; ret.type = object_manifest_t::TYPE_CHUNKED; for (auto &[offset, tgt] : m) { auto &[tgt_off, length, name] = tgt; auto &ci = ret.chunk_map[offset]; ci.offset = tgt_off; ci.length = length; ci.oid = mk_hobject(name); } return ret; } object_ref_delta_t mk_delta(std::map<string, int> _m) { std::map<hobject_t, int> m; for (auto &[name, delta] : _m) { m.insert( std::make_pair( mk_hobject(name), delta)); } return object_ref_delta_t(std::move(m)); } TEST(chunk_info_test, calc_refs_to_drop) { ci_ref_test( mk_manifest({}), mk_manifest({{0, {0, 1024, "foo"}}}), mk_manifest({}), mk_delta({{"foo", -1}})); } TEST(chunk_info_test, calc_refs_to_drop_match) { ci_ref_test( mk_manifest({{0, {0, 1024, "foo"}}}), mk_manifest({{0, {0, 1024, "foo"}}}), mk_manifest({{0, {0, 1024, "foo"}}}), mk_delta({})); } TEST(chunk_info_test, 
calc_refs_to_drop_head_match) { ci_ref_test( mk_manifest({}), mk_manifest({{0, {0, 1024, "foo"}}}), mk_manifest({{0, {0, 1024, "foo"}}}), mk_delta({})); } TEST(chunk_info_test, calc_refs_to_drop_tail_match) { ci_ref_test( mk_manifest({{0, {0, 1024, "foo"}}}), mk_manifest({{0, {0, 1024, "foo"}}}), mk_manifest({}), mk_delta({})); } TEST(chunk_info_test, calc_refs_to_drop_second_reference) { ci_ref_test( mk_manifest({{0, {0, 1024, "foo"}}}), mk_manifest({{0, {0, 1024, "foo"}}, {4<<10, {0, 1<<10, "foo"}}}), mk_manifest({}), mk_delta({{"foo", -1}})); } TEST(chunk_info_test, calc_refs_offsets_dont_match) { ci_ref_test( mk_manifest({{0, {0, 1024, "foo"}}}), mk_manifest({{512, {0, 1024, "foo"}}, {(4<<10) + 512, {0, 1<<10, "foo"}}}), mk_manifest({}), mk_delta({{"foo", -2}})); } TEST(chunk_info_test, calc_refs_g_l_match) { ci_ref_test( mk_manifest({{4096, {0, 1024, "foo"}}}), mk_manifest({{0, {0, 1024, "foo"}}, {4096, {0, 1024, "bar"}}}), mk_manifest({{4096, {0, 1024, "foo"}}}), mk_delta({{"foo", -2}, {"bar", -1}})); } TEST(chunk_info_test, calc_refs_g_l_match_no_this) { ci_ref_test( mk_manifest({{4096, {0, 1024, "foo"}}}), mk_manifest({{0, {0, 1024, "bar"}}}), mk_manifest({{4096, {0, 1024, "foo"}}}), mk_delta({{"foo", -1}, {"bar", -1}})); } TEST(chunk_info_test, calc_refs_modify_mismatch) { ObjectCleanRegions clean_regions(0, 8192, false); clean_regions.mark_data_region_dirty(0, 1024); clean_regions.mark_data_region_dirty(512, 1024); ci_ref_test_on_modify( mk_manifest({{512, {2048, 1024, "foo"}}, {4096, {0, 1024, "foo"}}}), mk_manifest({{0, {0, 1024, "bar"}}, {512, {2048, 1024, "ttt"}}}), clean_regions, mk_delta({{"bar", -1}, {"ttt", -1}})); } TEST(chunk_info_test, calc_refs_modify_match) { ObjectCleanRegions clean_regions(0, 8192, false); clean_regions.mark_data_region_dirty(0, 1024); clean_regions.mark_data_region_dirty(512, 1024); clean_regions.mark_data_region_dirty(4096, 1024); ci_ref_test_on_modify( mk_manifest({{512, {2048, 1024, "foo"}}, {4096, {0, 1024, "ttt"}}}), 
mk_manifest({{0, {0, 1024, "bar"}}, {512, {2048, 1024, "foo"}}, {4096, {0, 1024, "ttt"}}}), clean_regions, mk_delta({{"bar", -1}})); } TEST(chunk_info_test, calc_refs_modify_match_dirty_overlap) { ObjectCleanRegions clean_regions(0, 8192, false); clean_regions.mark_data_region_dirty(0, 256); clean_regions.mark_data_region_dirty(256, 4096); ci_ref_test_on_modify( mk_manifest({}), mk_manifest({{0, {0, 256, "bar"}}, {512, {2048, 1024, "foo"}}, {4096, {0, 1024, "ttt"}}}), clean_regions, mk_delta({{"bar", -1}, {"foo", -1}, {"ttt", -1}})); } TEST(chunk_info_test, calc_refs_modify_match_dirty_overlap2) { ObjectCleanRegions clean_regions(0, 8192, false); clean_regions.mark_data_region_dirty(0, 256); clean_regions.mark_data_region_dirty(256, 1024); clean_regions.mark_data_region_dirty(3584, 1024); ci_ref_test_on_modify( mk_manifest({{512, {2048, 1024, "foo"}}, {4096, {0, 1024, "ttt"}}}), mk_manifest({{0, {0, 256, "bar"}}, {512, {2048, 1024, "foo"}}, {4096, {0, 1024, "ttt"}}}), clean_regions, mk_delta({{"bar", -1}})); } TEST(chunk_info_test, calc_refs_modify_match_dirty_overlap3) { ObjectCleanRegions clean_regions(0, 8192, false); clean_regions.mark_data_region_dirty(0, 256); clean_regions.mark_data_region_dirty(256, 4096); ci_ref_test_on_modify( mk_manifest({{512, {2048, 1024, "foo"}}, {4096, {0, 1024, "ttt"}}}), mk_manifest({{0, {0, 256, "bar"}}, {512, {2048, 1024, "foo"}}, {4096, {0, 1024, "ttt"}}}), clean_regions, mk_delta({{"bar", -1}})); } TEST(chunk_info_test, calc_refs_modify_match_clone_overlap) { ObjectCleanRegions clean_regions(0, 8192, false); clean_regions.mark_data_region_dirty(0, 256); clean_regions.mark_data_region_dirty(256, 1024); clean_regions.mark_data_region_dirty(3584, 1024); ci_ref_test_on_modify( mk_manifest({{512, {2048, 1024, "foo"}}, {4096, {0, 1024, "ttt"}}}), mk_manifest({{0, {0, 256, "bar"}}, {256, {2048, 1024, "foo"}}, {3584, {0, 1024, "ttt"}}}), clean_regions, mk_delta({{"bar", -1}, {"foo", -1}, {"ttt", -1}})); } TEST(chunk_info_test, 
calc_refs_modify_no_snap) { ObjectCleanRegions clean_regions(0, 8192, false); clean_regions.mark_data_region_dirty(0, 1024); clean_regions.mark_data_region_dirty(512, 1024); ci_ref_test_on_modify( mk_manifest({}), mk_manifest({{0, {0, 1024, "bar"}}, {512, {2048, 1024, "ttt"}}}), clean_regions, mk_delta({{"bar", -1}, {"ttt", -1}})); } TEST(chunk_info_test, calc_refs_inc) { ci_ref_test_inc_on_set( mk_manifest({{256, {0, 256, "aaa"}}, {4096, {0, 1024, "foo"}}}), mk_manifest({{1024, {0, 1024, "bar"}}}), mk_manifest({{4096, {0, 1024, "foo"}}}), mk_delta({{"bar", 1}})); } TEST(chunk_info_test, calc_refs_inc2) { ci_ref_test_inc_on_set( mk_manifest({{512, {0, 1024, "aaa"}}, {4096, {0, 1024, "foo"}}}), mk_manifest({{1024, {0, 1024, "bar"}}, {4096, {0, 1024, "bbb"}}}), mk_manifest({{512, {0, 1024, "foo"}}}), mk_delta({{"bar", 1}, {"bbb", 1}})); } TEST(chunk_info_test, calc_refs_inc_no_l) { ci_ref_test_inc_on_set( mk_manifest({}), mk_manifest({{1024, {0, 1024, "bar"}}, {4096, {0, 1024, "bbb"}}}), mk_manifest({{512, {0, 1024, "foo"}}}), mk_delta({{"bar", 1}, {"bbb", 1}})); } TEST(chunk_info_test, calc_refs_inc_no_g) { ci_ref_test_inc_on_set( mk_manifest({{512, {0, 1024, "aaa"}}, {4096, {0, 1024, "foo"}}}), mk_manifest({{1024, {0, 1024, "bar"}}, {4096, {0, 1024, "foo"}}}), mk_manifest({}), mk_delta({{"bar", 1}})); } TEST(chunk_info_test, calc_refs_inc_match_g_l) { ci_ref_test_inc_on_set( mk_manifest({{256, {0, 256, "aaa"}}, {4096, {0, 1024, "foo"}}}), mk_manifest({{256, {0, 256, "aaa"}}, {4096, {0, 1024, "foo"}}}), mk_manifest({{256, {0, 256, "aaa"}}, {4096, {0, 1024, "foo"}}}), mk_delta({{"aaa", -1}, {"foo", -1}})); } TEST(chunk_info_test, calc_refs_inc_match) { ci_ref_test_inc_on_set( mk_manifest({{256, {0, 256, "bbb"}}, {4096, {0, 1024, "foo"}}}), mk_manifest({{256, {0, 256, "aaa"}}, {4096, {0, 1024, "foo"}}}), mk_manifest({{256, {0, 256, "aaa"}}, {4096, {0, 1024, "ccc"}}}), mk_delta({})); } /* * Local Variables: * compile-command: "cd ../.. 
; * make unittest_osd_types ; * ./unittest_osd_types # --gtest_filter=pg_missing_t.constructor * " * End: */
63,090
27.612698
95
cc
null
ceph-main/src/test/osdc/FakeWriteback.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include <errno.h> #include <time.h> #include <thread> #include "common/debug.h" #include "common/Cond.h" #include "common/Finisher.h" #include "common/ceph_mutex.h" #include "include/ceph_assert.h" #include "common/ceph_time.h" #include "FakeWriteback.h" #define dout_subsys ceph_subsys_objectcacher #undef dout_prefix #define dout_prefix *_dout << "FakeWriteback(" << this << ") " class C_Delay : public Context { CephContext *m_cct; Context *m_con; ceph::timespan m_delay; ceph::mutex *m_lock; bufferlist *m_bl; uint64_t m_off; public: C_Delay(CephContext *cct, Context *c, ceph::mutex *lock, uint64_t off, bufferlist *pbl, uint64_t delay_ns=0) : m_cct(cct), m_con(c), m_delay(delay_ns * std::chrono::nanoseconds(1)), m_lock(lock), m_bl(pbl), m_off(off) {} void finish(int r) override { std::this_thread::sleep_for(m_delay); if (m_bl) { buffer::ptr bp(r); bp.zero(); m_bl->append(bp); ldout(m_cct, 20) << "finished read " << m_off << "~" << r << dendl; } std::lock_guard locker{*m_lock}; m_con->complete(r); } }; FakeWriteback::FakeWriteback(CephContext *cct, ceph::mutex *lock, uint64_t delay_ns) : m_cct(cct), m_lock(lock), m_delay_ns(delay_ns) { m_finisher = new Finisher(cct); m_finisher->start(); } FakeWriteback::~FakeWriteback() { m_finisher->stop(); delete m_finisher; } void FakeWriteback::read(const object_t& oid, uint64_t object_no, const object_locator_t& oloc, uint64_t off, uint64_t len, snapid_t snapid, bufferlist *pbl, uint64_t trunc_size, __u32 trunc_seq, int op_flags, const ZTracer::Trace &parent_trace, Context *onfinish) { C_Delay *wrapper = new C_Delay(m_cct, onfinish, m_lock, off, pbl, m_delay_ns); m_finisher->queue(wrapper, len); } ceph_tid_t FakeWriteback::write(const object_t& oid, const object_locator_t& oloc, uint64_t off, uint64_t len, const SnapContext& snapc, const bufferlist &bl, ceph::real_time mtime, uint64_t trunc_size, __u32 trunc_seq, ceph_tid_t 
journal_tid, const ZTracer::Trace &parent_trace, Context *oncommit) { C_Delay *wrapper = new C_Delay(m_cct, oncommit, m_lock, off, NULL, m_delay_ns); m_finisher->queue(wrapper, 0); return ++m_tid; } bool FakeWriteback::may_copy_on_write(const object_t&, uint64_t, uint64_t, snapid_t) { return false; }
2,576
26.414894
84
cc
null
ceph-main/src/test/osdc/FakeWriteback.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_TEST_OSDC_FAKEWRITEBACK_H #define CEPH_TEST_OSDC_FAKEWRITEBACK_H #include "include/Context.h" #include "include/types.h" #include "osd/osd_types.h" #include "osdc/WritebackHandler.h" #include <atomic> class Finisher; class FakeWriteback : public WritebackHandler { public: FakeWriteback(CephContext *cct, ceph::mutex *lock, uint64_t delay_ns); ~FakeWriteback() override; void read(const object_t& oid, uint64_t object_no, const object_locator_t& oloc, uint64_t off, uint64_t len, snapid_t snapid, bufferlist *pbl, uint64_t trunc_size, __u32 trunc_seq, int op_flags, const ZTracer::Trace &parent_trace, Context *onfinish) override; ceph_tid_t write(const object_t& oid, const object_locator_t& oloc, uint64_t off, uint64_t len, const SnapContext& snapc, const bufferlist &bl, ceph::real_time mtime, uint64_t trunc_size, __u32 trunc_seq, ceph_tid_t journal_tid, const ZTracer::Trace &parent_trace, Context *oncommit) override; using WritebackHandler::write; bool may_copy_on_write(const object_t&, uint64_t, uint64_t, snapid_t) override; private: CephContext *m_cct; ceph::mutex *m_lock; uint64_t m_delay_ns; std::atomic<unsigned> m_tid = { 0 }; Finisher *m_finisher; }; #endif
1,424
28.6875
72
h
null
ceph-main/src/test/osdc/MemWriteback.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include <errno.h> #include <time.h> #include <thread> #include "common/debug.h" #include "common/Cond.h" #include "common/Finisher.h" #include "common/ceph_mutex.h" #include "include/ceph_assert.h" #include "common/ceph_time.h" #include "MemWriteback.h" #define dout_context g_ceph_context #define dout_subsys ceph_subsys_objectcacher #undef dout_prefix #define dout_prefix *_dout << "MemWriteback(" << this << ") " class C_DelayRead : public Context { MemWriteback *wb; CephContext *m_cct; Context *m_con; ceph::timespan m_delay; ceph::mutex *m_lock; object_t m_oid; uint64_t m_off; uint64_t m_len; bufferlist *m_bl; public: C_DelayRead(MemWriteback *mwb, CephContext *cct, Context *c, ceph::mutex *lock, const object_t& oid, uint64_t off, uint64_t len, bufferlist *pbl, uint64_t delay_ns=0) : wb(mwb), m_cct(cct), m_con(c), m_delay(delay_ns * std::chrono::nanoseconds(1)), m_lock(lock), m_oid(oid), m_off(off), m_len(len), m_bl(pbl) {} void finish(int r) override { std::this_thread::sleep_for(m_delay); std::lock_guard locker{*m_lock}; r = wb->read_object_data(m_oid, m_off, m_len, m_bl); if (m_con) m_con->complete(r); } }; class C_DelayWrite : public Context { MemWriteback *wb; CephContext *m_cct; Context *m_con; ceph::timespan m_delay; ceph::mutex *m_lock; object_t m_oid; uint64_t m_off; uint64_t m_len; const bufferlist& m_bl; public: C_DelayWrite(MemWriteback *mwb, CephContext *cct, Context *c, ceph::mutex *lock, const object_t& oid, uint64_t off, uint64_t len, const bufferlist& bl, uint64_t delay_ns=0) : wb(mwb), m_cct(cct), m_con(c), m_delay(delay_ns * std::chrono::nanoseconds(1)), m_lock(lock), m_oid(oid), m_off(off), m_len(len), m_bl(bl) {} void finish(int r) override { std::this_thread::sleep_for(m_delay); std::lock_guard locker{*m_lock}; wb->write_object_data(m_oid, m_off, m_len, m_bl); if (m_con) m_con->complete(r); } }; MemWriteback::MemWriteback(CephContext *cct, 
ceph::mutex *lock, uint64_t delay_ns) : m_cct(cct), m_lock(lock), m_delay_ns(delay_ns) { m_finisher = new Finisher(cct); m_finisher->start(); } MemWriteback::~MemWriteback() { m_finisher->stop(); delete m_finisher; } void MemWriteback::read(const object_t& oid, uint64_t object_no, const object_locator_t& oloc, uint64_t off, uint64_t len, snapid_t snapid, bufferlist *pbl, uint64_t trunc_size, __u32 trunc_seq, int op_flags, const ZTracer::Trace &parent_trace, Context *onfinish) { ceph_assert(snapid == CEPH_NOSNAP); C_DelayRead *wrapper = new C_DelayRead(this, m_cct, onfinish, m_lock, oid, off, len, pbl, m_delay_ns); m_finisher->queue(wrapper, len); } ceph_tid_t MemWriteback::write(const object_t& oid, const object_locator_t& oloc, uint64_t off, uint64_t len, const SnapContext& snapc, const bufferlist &bl, ceph::real_time mtime, uint64_t trunc_size, __u32 trunc_seq, ceph_tid_t journal_tid, const ZTracer::Trace &parent_trace, Context *oncommit) { ceph_assert(snapc.seq == 0); C_DelayWrite *wrapper = new C_DelayWrite(this, m_cct, oncommit, m_lock, oid, off, len, bl, m_delay_ns); m_finisher->queue(wrapper, 0); return ++m_tid; } void MemWriteback::write_object_data(const object_t& oid, uint64_t off, uint64_t len, const bufferlist& data_bl) { dout(1) << "writing " << oid << " " << off << "~" << len << dendl; ceph_assert(len == data_bl.length()); bufferlist& obj_bl = object_data[oid]; bufferlist new_obj_bl; // ensure size, or set it if new object if (off + len > obj_bl.length()) { obj_bl.append_zero(off + len - obj_bl.length()); } // beginning new_obj_bl.substr_of(obj_bl, 0, off); // overwritten bit new_obj_bl.append(data_bl); // tail bit bufferlist tmp; tmp.substr_of(obj_bl, off+len, obj_bl.length()-(off+len)); new_obj_bl.append(tmp); obj_bl.swap(new_obj_bl); dout(1) << oid << " final size " << obj_bl.length() << dendl; } int MemWriteback::read_object_data(const object_t& oid, uint64_t off, uint64_t len, bufferlist *data_bl) { dout(1) << "reading " << oid << " " << off << 
"~" << len << dendl; auto obj_i = object_data.find(oid); if (obj_i == object_data.end()) { dout(1) << oid << "DNE!" << dendl; return -ENOENT; } const bufferlist& obj_bl = obj_i->second; dout(1) << "reading " << oid << " from total size " << obj_bl.length() << dendl; uint64_t read_len = std::min(len, obj_bl.length()-off); data_bl->substr_of(obj_bl, off, read_len); return 0; } bool MemWriteback::may_copy_on_write(const object_t&, uint64_t, uint64_t, snapid_t) { return false; }
4,928
28.51497
85
cc
null
ceph-main/src/test/osdc/MemWriteback.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_TEST_OSDC_MEMWRITEBACK_H #define CEPH_TEST_OSDC_MEMWRITEBACK_H #include "include/Context.h" #include "include/types.h" #include "osd/osd_types.h" #include "osdc/WritebackHandler.h" #include <atomic> class Finisher; class MemWriteback : public WritebackHandler { public: MemWriteback(CephContext *cct, ceph::mutex *lock, uint64_t delay_ns); ~MemWriteback() override; void read(const object_t& oid, uint64_t object_no, const object_locator_t& oloc, uint64_t off, uint64_t len, snapid_t snapid, bufferlist *pbl, uint64_t trunc_size, __u32 trunc_seq, int op_flags, const ZTracer::Trace &parent_trace, Context *onfinish) override; ceph_tid_t write(const object_t& oid, const object_locator_t& oloc, uint64_t off, uint64_t len, const SnapContext& snapc, const bufferlist &bl, ceph::real_time mtime, uint64_t trunc_size, __u32 trunc_seq, ceph_tid_t journal_tid, const ZTracer::Trace &parent_trace, Context *oncommit) override; using WritebackHandler::write; bool may_copy_on_write(const object_t&, uint64_t, uint64_t, snapid_t) override; void write_object_data(const object_t& oid, uint64_t off, uint64_t len, const bufferlist& data_bl); int read_object_data(const object_t& oid, uint64_t off, uint64_t len, bufferlist *data_bl); private: std::map<object_t, bufferlist> object_data; CephContext *m_cct; ceph::mutex *m_lock; uint64_t m_delay_ns; std::atomic<unsigned> m_tid = { 0 }; Finisher *m_finisher; }; #endif
1,688
30.867925
73
h
null
ceph-main/src/test/osdc/object_cacher_stress.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include <cstdlib> #include <ctime> #include <sstream> #include <string> #include <vector> #include <boost/scoped_ptr.hpp> #include "common/ceph_argparse.h" #include "common/ceph_mutex.h" #include "common/common_init.h" #include "common/config.h" #include "common/snap_types.h" #include "global/global_init.h" #include "include/buffer.h" #include "include/Context.h" #include "include/stringify.h" #include "osdc/ObjectCacher.h" #include "FakeWriteback.h" #include "MemWriteback.h" #include <atomic> using namespace std; // XXX: Only tests default namespace struct op_data { op_data(const std::string &oid, uint64_t offset, uint64_t len, bool read) : extent(oid, 0, offset, len, 0), is_read(read) { extent.oloc.pool = 0; extent.buffer_extents.push_back(make_pair(0, len)); } ObjectExtent extent; bool is_read; ceph::bufferlist result; std::atomic<unsigned> done = { 0 }; }; class C_Count : public Context { op_data *m_op; std::atomic<unsigned> *m_outstanding = nullptr; public: C_Count(op_data *op, std::atomic<unsigned> *outstanding) : m_op(op), m_outstanding(outstanding) {} void finish(int r) override { m_op->done++; ceph_assert(*m_outstanding > 0); (*m_outstanding)--; } }; int stress_test(uint64_t num_ops, uint64_t num_objs, uint64_t max_obj_size, uint64_t delay_ns, uint64_t max_op_len, float percent_reads) { ceph::mutex lock = ceph::make_mutex("object_cacher_stress::object_cacher"); FakeWriteback writeback(g_ceph_context, &lock, delay_ns); ObjectCacher obc(g_ceph_context, "test", writeback, lock, NULL, NULL, g_conf()->client_oc_size, g_conf()->client_oc_max_objects, g_conf()->client_oc_max_dirty, g_conf()->client_oc_target_dirty, g_conf()->client_oc_max_dirty_age, true); obc.start(); std::atomic<unsigned> outstanding_reads = { 0 }; vector<std::shared_ptr<op_data> > ops; ObjectCacher::ObjectSet object_set(NULL, 0, 0); SnapContext snapc; ceph::buffer::ptr bp(max_op_len); 
ceph::bufferlist bl; uint64_t journal_tid = 0; bp.zero(); bl.append(bp); // schedule ops std::cout << "Test configuration:\n\n" << setw(10) << "ops: " << num_ops << "\n" << setw(10) << "objects: " << num_objs << "\n" << setw(10) << "obj size: " << max_obj_size << "\n" << setw(10) << "delay: " << delay_ns << "\n" << setw(10) << "max op len: " << max_op_len << "\n" << setw(10) << "percent reads: " << percent_reads << "\n\n"; for (uint64_t i = 0; i < num_ops; ++i) { uint64_t offset = random() % max_obj_size; uint64_t max_len = std::min(max_obj_size - offset, max_op_len); // no zero-length operations uint64_t length = random() % (std::max<uint64_t>(max_len - 1, 1)) + 1; std::string oid = "test" + stringify(random() % num_objs); bool is_read = random() < percent_reads * float(RAND_MAX); std::shared_ptr<op_data> op(new op_data(oid, offset, length, is_read)); ops.push_back(op); std::cout << "op " << i << " " << (is_read ? "read" : "write") << " " << op->extent << "\n"; if (op->is_read) { ObjectCacher::OSDRead *rd = obc.prepare_read(CEPH_NOSNAP, &op->result, 0); rd->extents.push_back(op->extent); outstanding_reads++; Context *completion = new C_Count(op.get(), &outstanding_reads); lock.lock(); int r = obc.readx(rd, &object_set, completion); lock.unlock(); ceph_assert(r >= 0); if ((uint64_t)r == length) completion->complete(r); else ceph_assert(r == 0); } else { ObjectCacher::OSDWrite *wr = obc.prepare_write(snapc, bl, ceph::real_time::min(), 0, ++journal_tid); wr->extents.push_back(op->extent); lock.lock(); obc.writex(wr, &object_set, NULL); lock.unlock(); } } // check that all reads completed for (uint64_t i = 0; i < num_ops; ++i) { if (!ops[i]->is_read) continue; std::cout << "waiting for read " << i << ops[i]->extent << std::endl; uint64_t done = 0; while (done == 0) { done = ops[i]->done; if (!done) { usleep(500); } } if (done > 1) { std::cout << "completion called more than once!\n" << std::endl; return EXIT_FAILURE; } } lock.lock(); obc.release_set(&object_set); 
lock.unlock(); int r = 0; ceph::mutex mylock = ceph::make_mutex("librbd::ImageCtx::flush_cache"); ceph::condition_variable cond; bool done; Context *onfinish = new C_SafeCond(mylock, cond, &done, &r); lock.lock(); bool already_flushed = obc.flush_set(&object_set, onfinish); std::cout << "already flushed = " << already_flushed << std::endl; lock.unlock(); { std::unique_lock locker{mylock}; cond.wait(locker, [&done] { return done; }); } lock.lock(); bool unclean = obc.release_set(&object_set); lock.unlock(); if (unclean) { std::cout << "unclean buffers left over!" << std::endl; return EXIT_FAILURE; } obc.stop(); std::cout << "Test completed successfully." << std::endl; return EXIT_SUCCESS; } int correctness_test(uint64_t delay_ns) { std::cerr << "starting correctness test" << std::endl; ceph::mutex lock = ceph::make_mutex("object_cacher_stress::object_cacher"); MemWriteback writeback(g_ceph_context, &lock, delay_ns); ObjectCacher obc(g_ceph_context, "test", writeback, lock, NULL, NULL, 1<<21, // max cache size, 2MB 1, // max objects, just one 1<<18, // max dirty, 256KB 1<<17, // target dirty, 128KB g_conf()->client_oc_max_dirty_age, true); obc.start(); std::cerr << "just start()ed ObjectCacher" << std::endl; SnapContext snapc; ceph_tid_t journal_tid = 0; std::string oid("correctness_test_obj"); ObjectCacher::ObjectSet object_set(NULL, 0, 0); ceph::bufferlist zeroes_bl; zeroes_bl.append_zero(1<<20); // set up a 4MB all-zero object std::cerr << "writing 4x1MB object" << std::endl; std::map<int, C_SaferCond> create_finishers; for (int i = 0; i < 4; ++i) { ObjectCacher::OSDWrite *wr = obc.prepare_write(snapc, zeroes_bl, ceph::real_time::min(), 0, ++journal_tid); ObjectExtent extent(oid, 0, zeroes_bl.length()*i, zeroes_bl.length(), 0); extent.oloc.pool = 0; extent.buffer_extents.push_back(make_pair(0, 1<<20)); wr->extents.push_back(extent); lock.lock(); obc.writex(wr, &object_set, &create_finishers[i]); lock.unlock(); } // write some 1-valued bits at 256-KB intervals for 
checking consistency std::cerr << "Writing some 0xff values" << std::endl; ceph::buffer::ptr ones(1<<16); memset(ones.c_str(), 0xff, ones.length()); ceph::bufferlist ones_bl; ones_bl.append(ones); for (int i = 1<<18; i < 1<<22; i+=1<<18) { ObjectCacher::OSDWrite *wr = obc.prepare_write(snapc, ones_bl, ceph::real_time::min(), 0, ++journal_tid); ObjectExtent extent(oid, 0, i, ones_bl.length(), 0); extent.oloc.pool = 0; extent.buffer_extents.push_back(make_pair(0, 1<<16)); wr->extents.push_back(extent); lock.lock(); obc.writex(wr, &object_set, &create_finishers[i]); lock.unlock(); } for (auto i = create_finishers.begin(); i != create_finishers.end(); ++i) { i->second.wait(); } std::cout << "Finished setting up object" << std::endl; lock.lock(); C_SaferCond flushcond; bool done = obc.flush_all(&flushcond); if (!done) { std::cout << "Waiting for flush" << std::endl; lock.unlock(); flushcond.wait(); lock.lock(); } lock.unlock(); /* now read the back half of the object in, check consistency, */ std::cout << "Reading back half of object (1<<21~1<<21)" << std::endl; bufferlist readbl; C_SaferCond backreadcond; ObjectCacher::OSDRead *back_half_rd = obc.prepare_read(CEPH_NOSNAP, &readbl, 0); ObjectExtent back_half_extent(oid, 0, 1<<21, 1<<21, 0); back_half_extent.oloc.pool = 0; back_half_extent.buffer_extents.push_back(make_pair(0, 1<<21)); back_half_rd->extents.push_back(back_half_extent); lock.lock(); int r = obc.readx(back_half_rd, &object_set, &backreadcond); lock.unlock(); ceph_assert(r >= 0); if (r == 0) { std::cout << "Waiting to read data into cache" << std::endl; r = backreadcond.wait(); } ceph_assert(r == 1<<21); /* Read the whole object in, * verify we have to wait for it to complete, * overwrite a small piece, (http://tracker.ceph.com/issues/16002), * and check consistency */ readbl.clear(); std::cout<< "Reading whole object (0~1<<22)" << std::endl; C_SaferCond frontreadcond; ObjectCacher::OSDRead *whole_rd = obc.prepare_read(CEPH_NOSNAP, &readbl, 0); ObjectExtent 
whole_extent(oid, 0, 0, 1<<22, 0); whole_extent.oloc.pool = 0; whole_extent.buffer_extents.push_back(make_pair(0, 1<<22)); whole_rd->extents.push_back(whole_extent); lock.lock(); r = obc.readx(whole_rd, &object_set, &frontreadcond); // we cleared out the cache by reading back half, it shouldn't pass immediately! ceph_assert(r == 0); std::cout << "Data (correctly) not available without fetching" << std::endl; ObjectCacher::OSDWrite *verify_wr = obc.prepare_write(snapc, ones_bl, ceph::real_time::min(), 0, ++journal_tid); ObjectExtent verify_extent(oid, 0, (1<<18)+(1<<16), ones_bl.length(), 0); verify_extent.oloc.pool = 0; verify_extent.buffer_extents.push_back(make_pair(0, 1<<16)); verify_wr->extents.push_back(verify_extent); C_SaferCond verify_finisher; obc.writex(verify_wr, &object_set, &verify_finisher); lock.unlock(); std::cout << "wrote dirtying data" << std::endl; std::cout << "Waiting to read data into cache" << std::endl; frontreadcond.wait(); verify_finisher.wait(); std::cout << "Validating data" << std::endl; for (int i = 1<<18; i < 1<<22; i+=1<<18) { bufferlist ones_maybe; ones_maybe.substr_of(readbl, i, ones_bl.length()); ceph_assert(0 == memcmp(ones_maybe.c_str(), ones_bl.c_str(), ones_bl.length())); } bufferlist ones_maybe; ones_maybe.substr_of(readbl, (1<<18)+(1<<16), ones_bl.length()); ceph_assert(0 == memcmp(ones_maybe.c_str(), ones_bl.c_str(), ones_bl.length())); std::cout << "validated that data is 0xff where it should be" << std::endl; lock.lock(); C_SaferCond flushcond2; done = obc.flush_all(&flushcond2); if (!done) { std::cout << "Waiting for final write flush" << std::endl; lock.unlock(); flushcond2.wait(); lock.lock(); } bool unclean = obc.release_set(&object_set); if (unclean) { std::cout << "unclean buffers left over!" 
<< std::endl; vector<ObjectExtent> discard_extents; int i = 0; for (auto oi = object_set.objects.begin(); !oi.end(); ++oi) { discard_extents.emplace_back(oid, i++, 0, 1<<22, 0); } obc.discard_set(&object_set, discard_extents); lock.unlock(); obc.stop(); goto fail; } lock.unlock(); obc.stop(); std::cout << "Testing ObjectCacher correctness complete" << std::endl; return EXIT_SUCCESS; fail: return EXIT_FAILURE; } int main(int argc, const char **argv) { auto args = argv_to_vec(argc, argv); auto cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); long long delay_ns = 0; long long num_ops = 1000; long long obj_bytes = 4 << 20; long long max_len = 128 << 10; long long num_objs = 10; float percent_reads = 0.90; int seed = time(0) % 100000; bool stress = false; bool correctness = false; std::ostringstream err; std::vector<const char*>::iterator i; for (i = args.begin(); i != args.end();) { if (ceph_argparse_witharg(args, i, &delay_ns, err, "--delay-ns", (char*)NULL)) { if (!err.str().empty()) { cerr << argv[0] << ": " << err.str() << std::endl; return EXIT_FAILURE; } } else if (ceph_argparse_witharg(args, i, &num_ops, err, "--ops", (char*)NULL)) { if (!err.str().empty()) { cerr << argv[0] << ": " << err.str() << std::endl; return EXIT_FAILURE; } } else if (ceph_argparse_witharg(args, i, &num_objs, err, "--objects", (char*)NULL)) { if (!err.str().empty()) { cerr << argv[0] << ": " << err.str() << std::endl; return EXIT_FAILURE; } } else if (ceph_argparse_witharg(args, i, &obj_bytes, err, "--obj-size", (char*)NULL)) { if (!err.str().empty()) { cerr << argv[0] << ": " << err.str() << std::endl; return EXIT_FAILURE; } } else if (ceph_argparse_witharg(args, i, &max_len, err, "--max-op-size", (char*)NULL)) { if (!err.str().empty()) { cerr << argv[0] << ": " << err.str() << std::endl; return EXIT_FAILURE; } } else if (ceph_argparse_witharg(args, i, &percent_reads, err, "--percent-read", (char*)NULL)) { if 
(!err.str().empty()) { cerr << argv[0] << ": " << err.str() << std::endl; return EXIT_FAILURE; } } else if (ceph_argparse_witharg(args, i, &seed, err, "--seed", (char*)NULL)) { if (!err.str().empty()) { cerr << argv[0] << ": " << err.str() << std::endl; return EXIT_FAILURE; } } else if (ceph_argparse_flag(args, i, "--stress-test", NULL)) { stress = true; } else if (ceph_argparse_flag(args, i, "--correctness-test", NULL)) { correctness = true; } else { cerr << "unknown option " << *i << std::endl; return EXIT_FAILURE; } } if (stress) { srandom(seed); return stress_test(num_ops, num_objs, obj_bytes, delay_ns, max_len, percent_reads); } if (correctness) { return correctness_test(delay_ns); } }
13,613
30.957746
100
cc
null
ceph-main/src/test/pybind/test_ceph_argparse.py
#!/usr/bin/env python3 # -*- mode:python; tab-width:4; indent-tabs-mode:nil; coding:utf-8 -*- # vim: ts=4 sw=4 smarttab expandtab fileencoding=utf-8 # # Ceph - scalable distributed file system # # Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com> # Copyright (C) 2014 Red Hat <contact@redhat.com> # # Author: Loic Dachary <loic@dachary.org> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # from ceph_argparse import validate_command, parse_json_funcsigs, validate, \ parse_funcsig, ArgumentError, ArgumentTooFew, ArgumentMissing, \ ArgumentNumber, ArgumentValid import os import random import re import string import sys import unittest try: from StringIO import StringIO except ImportError: from io import StringIO def get_command_descriptions(what): print ("get_command_descriptions --" + what) CEPH_BIN = os.environ.get('CEPH_BIN', ".") with os.popen(CEPH_BIN + "/get_command_descriptions " + "--" + what) as output_file: output_contents = output_file.read() return output_contents class ParseJsonFuncsigs(unittest.TestCase): def test_parse_json_funcsigs(self): commands = get_command_descriptions("all") cmd_json = parse_json_funcsigs(commands, 'cli') # syntax error https://github.com/ceph/ceph/pull/585 commands = get_command_descriptions("pull585") self.assertRaises(TypeError, parse_json_funcsigs, commands, 'cli') sigdict = parse_json_funcsigs(get_command_descriptions("all"), 'cli') class TestArgparse(unittest.TestCase): def _assert_valid_command(self, args): result = validate_command(sigdict, args) self.assertNotIn(result, [{}, None]) def check_1_natural_arg(self, prefix, command): self._assert_valid_command([prefix, command, '1']) self.assertEqual({}, validate_command(sigdict, [prefix, command])) self.assertEqual({}, validate_command(sigdict, [prefix, 
command, '-1'])) self.assertEqual({}, validate_command(sigdict, [prefix, command, '1', '1'])) def check_0_or_1_natural_arg(self, prefix, command): self._assert_valid_command([prefix, command, '1']) self._assert_valid_command([prefix, command]) self.assertEqual({}, validate_command(sigdict, [prefix, command, '-1'])) self.assertEqual({}, validate_command(sigdict, [prefix, command, '1', '1'])) def check_1_string_arg(self, prefix, command): self.assertEqual({}, validate_command(sigdict, [prefix, command])) self._assert_valid_command([prefix, command, 'string']) self.assertEqual({}, validate_command(sigdict, [prefix, command, 'string', 'toomany'])) def check_0_or_1_string_arg(self, prefix, command): self._assert_valid_command([prefix, command, 'string']) self._assert_valid_command([prefix, command]) self.assertEqual({}, validate_command(sigdict, [prefix, command, 'string', 'toomany'])) def check_1_or_more_string_args(self, prefix, command): self.assertEqual({}, validate_command(sigdict, [prefix, command])) self._assert_valid_command([prefix, command, 'string']) self._assert_valid_command([prefix, command, 'string', 'more string']) def check_no_arg(self, prefix, command): self._assert_valid_command([prefix, command]) self.assertEqual({}, validate_command(sigdict, [prefix, command, 'toomany'])) def _capture_output(self, args, stdout=None, stderr=None): if stdout: stdout = StringIO() sys.stdout = stdout if stderr: stderr = StringIO() sys.stderr = stderr ret = validate_command(sigdict, args) if stdout: stdout = stdout.getvalue().strip() if stderr: stderr = stderr.getvalue().strip() return ret, stdout, stderr class TestBasic(unittest.TestCase): def test_non_ascii_in_non_options(self): # ArgumentPrefix("no match for {0}".format(s)) is not able to convert # unicode str parameter into str. and validate_command() should not # choke on it. 
self.assertEqual({}, validate_command(sigdict, [u'章鱼和鱿鱼'])) self.assertEqual({}, validate_command(sigdict, [u'–w'])) # actually we always pass unicode strings to validate_command() in "ceph" # CLI, but we also use bytestrings in our tests, so make sure it does not # break. self.assertEqual({}, validate_command(sigdict, ['章鱼和鱿鱼'])) self.assertEqual({}, validate_command(sigdict, ['–w'])) class TestPG(TestArgparse): def test_stat(self): self._assert_valid_command(['pg', 'stat']) def test_getmap(self): self._assert_valid_command(['pg', 'getmap']) def test_dump(self): valid_commands = { 'pg dump': {'prefix': 'pg dump'}, 'pg dump all summary sum delta pools osds pgs pgs_brief': {'prefix': 'pg dump', 'dumpcontents': 'all summary sum delta pools osds pgs pgs_brief'.split() }, 'pg dump --dumpcontents summary,sum': {'prefix': 'pg dump', 'dumpcontents': 'summary,sum'.split(',') } } for command, expected_result in valid_commands.items(): actual_result = validate_command(sigdict, command.split()) expected_result['target'] = ('mon-mgr', '') self.assertEqual(expected_result, actual_result) invalid_commands = ['pg dump invalid'] for command in invalid_commands: actual_result = validate_command(sigdict, command.split()) self.assertEqual({}, actual_result) def test_dump_json(self): self._assert_valid_command(['pg', 'dump_json']) self._assert_valid_command(['pg', 'dump_json', 'all', 'summary', 'sum', 'pools', 'osds', 'pgs']) self.assertEqual({}, validate_command(sigdict, ['pg', 'dump_json', 'invalid'])) def test_dump_pools_json(self): self._assert_valid_command(['pg', 'dump_pools_json']) def test_dump_pools_stuck(self): self._assert_valid_command(['pg', 'dump_stuck']) self._assert_valid_command(['pg', 'dump_stuck', 'inactive', 'unclean', 'stale']) self.assertEqual({}, validate_command(sigdict, ['pg', 'dump_stuck', 'invalid'])) self._assert_valid_command(['pg', 'dump_stuck', 'inactive', '1234']) def one_pgid(self, command): self._assert_valid_command(['pg', command, '1.1']) 
self.assertEqual({}, validate_command(sigdict, ['pg', command])) self.assertEqual({}, validate_command(sigdict, ['pg', command, '1'])) def test_map(self): self.one_pgid('map') def test_scrub(self): self.one_pgid('scrub') def test_deep_scrub(self): self.one_pgid('deep-scrub') def test_repair(self): self.one_pgid('repair') def test_debug(self): self._assert_valid_command(['pg', 'debug', 'unfound_objects_exist']) self._assert_valid_command(['pg', 'debug', 'degraded_pgs_exist']) self.assertEqual({}, validate_command(sigdict, ['pg', 'debug'])) self.assertEqual({}, validate_command(sigdict, ['pg', 'debug', 'invalid'])) def test_pg_missing_args_output(self): ret, _, stderr = self._capture_output(['pg'], stderr=True) self.assertEqual({}, ret) self.assertRegexpMatches(stderr, re.compile('no valid command found.* closest matches')) def test_pg_wrong_arg_output(self): ret, _, stderr = self._capture_output(['pg', 'map', 'bad-pgid'], stderr=True) self.assertEqual({}, ret) self.assertIn("Invalid command", stderr) class TestAuth(TestArgparse): def test_export(self): self._assert_valid_command(['auth', 'export']) self._assert_valid_command(['auth', 'export', 'string']) self.assertEqual({}, validate_command(sigdict, ['auth', 'export', 'string', 'toomany'])) def test_get(self): self.check_1_string_arg('auth', 'get') def test_get_key(self): self.check_1_string_arg('auth', 'get-key') def test_print_key(self): self.check_1_string_arg('auth', 'print-key') self.check_1_string_arg('auth', 'print_key') def test_list(self): self.check_no_arg('auth', 'list') def test_import(self): self.check_no_arg('auth', 'import') def test_add(self): self.check_1_or_more_string_args('auth', 'add') def test_get_or_create_key(self): self.check_1_or_more_string_args('auth', 'get-or-create-key') prefix = 'auth get-or-create-key' entity = 'client.test' caps = ['mon', 'allow r', 'osd', 'allow rw pool=nfs-ganesha namespace=test, allow rw tag cephfs data=user_test_fs', 'mds', 'allow rw path=/'] cmd = 
prefix.split() + [entity] + caps self.assertEqual( { 'prefix': prefix, 'entity': entity, 'caps': caps }, validate_command(sigdict, cmd)) def test_get_or_create(self): self.check_1_or_more_string_args('auth', 'get-or-create') def test_caps(self): self.assertEqual({}, validate_command(sigdict, ['auth', 'caps'])) self.assertEqual({}, validate_command(sigdict, ['auth', 'caps', 'string'])) self._assert_valid_command(['auth', 'caps', 'string', 'more string']) def test_del(self): self.check_1_string_arg('auth', 'del') class TestMonitor(TestArgparse): def test_compact(self): self._assert_valid_command(['compact']) def test_fsid(self): self._assert_valid_command(['fsid']) def test_log(self): self.assertEqual({}, validate_command(sigdict, ['log'])) self._assert_valid_command(['log', 'a logtext']) self._assert_valid_command(['log', 'a logtext', 'and another']) def test_injectargs(self): self.assertEqual({}, validate_command(sigdict, ['injectargs'])) self._assert_valid_command(['injectargs', 'one']) self._assert_valid_command(['injectargs', 'one', 'two']) def test_status(self): self._assert_valid_command(['status']) def test_health(self): self._assert_valid_command(['health']) self._assert_valid_command(['health', 'detail']) self.assertEqual({}, validate_command(sigdict, ['health', 'invalid'])) self.assertEqual({}, validate_command(sigdict, ['health', 'detail', 'toomany'])) def test_df(self): self._assert_valid_command(['df']) self._assert_valid_command(['df', 'detail']) self.assertEqual({}, validate_command(sigdict, ['df', 'invalid'])) self.assertEqual({}, validate_command(sigdict, ['df', 'detail', 'toomany'])) def test_report(self): self._assert_valid_command(['report']) self._assert_valid_command(['report', 'tag1']) self._assert_valid_command(['report', 'tag1', 'tag2']) def test_quorum_status(self): self._assert_valid_command(['quorum_status']) def test_tell(self): self.assertEqual({}, validate_command(sigdict, ['tell'])) self.assertEqual({}, validate_command(sigdict, 
['tell', 'invalid'])) for name in ('osd', 'mon', 'client', 'mds'): self.assertEqual({}, validate_command(sigdict, ['tell', name])) self.assertEqual({}, validate_command(sigdict, ['tell', name + ".42"])) self._assert_valid_command(['tell', name + ".42", 'something']) self._assert_valid_command(['tell', name + ".42", 'something', 'something else']) class TestMDS(TestArgparse): def test_stat(self): self.check_no_arg('mds', 'stat') def test_compat_show(self): self._assert_valid_command(['mds', 'compat', 'show']) self.assertEqual({}, validate_command(sigdict, ['mds', 'compat'])) self.assertEqual({}, validate_command(sigdict, ['mds', 'compat', 'show', 'toomany'])) def test_set_state(self): self._assert_valid_command(['mds', 'set_state', '1', '2']) self.assertEqual({}, validate_command(sigdict, ['mds', 'set_state'])) self.assertEqual({}, validate_command(sigdict, ['mds', 'set_state', '-1'])) self.assertEqual({}, validate_command(sigdict, ['mds', 'set_state', '1', '-1'])) self.assertEqual({}, validate_command(sigdict, ['mds', 'set_state', '1', '21'])) def test_fail(self): self.check_1_string_arg('mds', 'fail') def test_rm(self): # Valid: single GID argument present self._assert_valid_command(['mds', 'rm', '1']) # Missing GID arg: invalid self.assertEqual({}, validate_command(sigdict, ['mds', 'rm'])) # Extra arg: invalid self.assertEqual({}, validate_command(sigdict, ['mds', 'rm', '1', 'mds.42'])) def test_rmfailed(self): self._assert_valid_command(['mds', 'rmfailed', '0']) self._assert_valid_command(['mds', 'rmfailed', '0', '--yes-i-really-mean-it']) self.assertEqual({}, validate_command(sigdict, ['mds', 'rmfailed', '0', '--yes-i-really-mean-it', 'toomany'])) def test_compat_rm_compat(self): self._assert_valid_command(['mds', 'compat', 'rm_compat', '1']) self.assertEqual({}, validate_command(sigdict, ['mds', 'compat', 'rm_compat'])) self.assertEqual({}, validate_command(sigdict, ['mds', 'compat', 'rm_compat', '-1'])) self.assertEqual({}, validate_command(sigdict, ['mds', 
'compat', 'rm_compat', '1', '1'])) def test_incompat_rm_incompat(self): self._assert_valid_command(['mds', 'compat', 'rm_incompat', '1']) self.assertEqual({}, validate_command(sigdict, ['mds', 'compat', 'rm_incompat'])) self.assertEqual({}, validate_command(sigdict, ['mds', 'compat', 'rm_incompat', '-1'])) self.assertEqual({}, validate_command(sigdict, ['mds', 'compat', 'rm_incompat', '1', '1'])) class TestFS(TestArgparse): def test_dump(self): self.check_0_or_1_natural_arg('fs', 'dump') def test_fs_new(self): self._assert_valid_command(['fs', 'new', 'default', 'metadata', 'data']) def test_fs_set_max_mds(self): self._assert_valid_command(['fs', 'set', 'default', 'max_mds', '1']) self._assert_valid_command(['fs', 'set', 'default', 'max_mds', '2']) def test_fs_set_cluster_down(self): self._assert_valid_command(['fs', 'set', 'default', 'down', 'true']) def test_fs_set_cluster_up(self): self._assert_valid_command(['fs', 'set', 'default', 'down', 'false']) def test_fs_set_cluster_joinable(self): self._assert_valid_command(['fs', 'set', 'default', 'joinable', 'true']) def test_fs_set_cluster_not_joinable(self): self._assert_valid_command(['fs', 'set', 'default', 'joinable', 'false']) def test_fs_set(self): self._assert_valid_command(['fs', 'set', 'default', 'max_file_size', '2']) self._assert_valid_command(['fs', 'set', 'default', 'allow_new_snaps', 'no']) self.assertEqual({}, validate_command(sigdict, ['fs', 'set', 'invalid'])) def test_fs_add_data_pool(self): self._assert_valid_command(['fs', 'add_data_pool', 'default', '1']) self._assert_valid_command(['fs', 'add_data_pool', 'default', 'foo']) def test_fs_remove_data_pool(self): self._assert_valid_command(['fs', 'rm_data_pool', 'default', '1']) self._assert_valid_command(['fs', 'rm_data_pool', 'default', 'foo']) def test_fs_rm(self): self._assert_valid_command(['fs', 'rm', 'default']) self._assert_valid_command(['fs', 'rm', 'default', '--yes-i-really-mean-it']) self.assertEqual({}, validate_command(sigdict, ['fs', 
'rm', 'default', '--yes-i-really-mean-it', 'toomany'])) def test_fs_ls(self): self._assert_valid_command(['fs', 'ls']) self.assertEqual({}, validate_command(sigdict, ['fs', 'ls', 'toomany'])) def test_fs_set_default(self): self._assert_valid_command(['fs', 'set-default', 'cephfs']) self.assertEqual({}, validate_command(sigdict, ['fs', 'set-default'])) self.assertEqual({}, validate_command(sigdict, ['fs', 'set-default', 'cephfs', 'toomany'])) class TestMon(TestArgparse): def test_dump(self): self.check_0_or_1_natural_arg('mon', 'dump') def test_stat(self): self.check_no_arg('mon', 'stat') def test_getmap(self): self.check_0_or_1_natural_arg('mon', 'getmap') def test_add(self): self._assert_valid_command(['mon', 'add', 'name', '1.2.3.4:1234']) self.assertEqual({}, validate_command(sigdict, ['mon', 'add'])) self.assertEqual({}, validate_command(sigdict, ['mon', 'add', 'name'])) self.assertEqual({}, validate_command(sigdict, ['mon', 'add', 'name', '400.500.600.700'])) def test_remove(self): self._assert_valid_command(['mon', 'remove', 'name']) self.assertEqual({}, validate_command(sigdict, ['mon', 'remove'])) self.assertEqual({}, validate_command(sigdict, ['mon', 'remove', 'name', 'toomany'])) class TestOSD(TestArgparse): def test_stat(self): self.check_no_arg('osd', 'stat') def test_dump(self): self.check_0_or_1_natural_arg('osd', 'dump') def test_osd_tree(self): self.check_0_or_1_natural_arg('osd', 'tree') cmd = 'osd tree down,out' self.assertEqual( { 'prefix': 'osd tree', 'states': ['down', 'out'] }, validate_command(sigdict, cmd.split())) def test_osd_ls(self): self.check_0_or_1_natural_arg('osd', 'ls') def test_osd_getmap(self): self.check_0_or_1_natural_arg('osd', 'getmap') def test_osd_getcrushmap(self): self.check_0_or_1_natural_arg('osd', 'getcrushmap') def test_perf(self): self.check_no_arg('osd', 'perf') def test_getmaxosd(self): self.check_no_arg('osd', 'getmaxosd') def test_find(self): self.check_1_natural_arg('osd', 'find') def test_map(self): 
self._assert_valid_command(['osd', 'map', 'poolname', 'objectname']) self._assert_valid_command(['osd', 'map', 'poolname', 'objectname', 'nspace']) self.assertEqual({}, validate_command(sigdict, ['osd', 'map'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'map', 'poolname'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'map', 'poolname', 'objectname', 'nspace', 'toomany'])) def test_metadata(self): self.check_0_or_1_natural_arg('osd', 'metadata') def test_scrub(self): self.check_1_string_arg('osd', 'scrub') def test_deep_scrub(self): self.check_1_string_arg('osd', 'deep-scrub') def test_repair(self): self.check_1_string_arg('osd', 'repair') def test_lspools(self): self._assert_valid_command(['osd', 'lspools']) self.assertEqual({}, validate_command(sigdict, ['osd', 'lspools', 'toomany'])) def test_blocklist_ls(self): self._assert_valid_command(['osd', 'blocklist', 'ls']) self.assertEqual({}, validate_command(sigdict, ['osd', 'blocklist'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'blocklist', 'ls', 'toomany'])) def test_crush_rule(self): self.assertEqual({}, validate_command(sigdict, ['osd', 'crush'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule'])) for subcommand in ('list', 'ls'): self._assert_valid_command(['osd', 'crush', 'rule', subcommand]) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule', subcommand, 'toomany'])) def test_crush_rule_dump(self): self._assert_valid_command(['osd', 'crush', 'rule', 'dump']) self._assert_valid_command(['osd', 'crush', 'rule', 'dump', 'RULE']) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule', 'dump', 'RULE', 'toomany'])) def test_crush_dump(self): self._assert_valid_command(['osd', 'crush', 'dump']) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'dump', 'toomany'])) def test_setcrushmap(self): self.check_no_arg('osd', 'setcrushmap') def 
test_crush_add_bucket(self): self._assert_valid_command(['osd', 'crush', 'add-bucket', 'name', 'type']) self._assert_valid_command(['osd', 'crush', 'add-bucket', 'name', 'type', 'root=foo-root', 'host=foo-host']) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'add-bucket'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'add-bucket', '^^^', 'type'])) def test_crush_rename_bucket(self): self._assert_valid_command(['osd', 'crush', 'rename-bucket', 'srcname', 'dstname']) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rename-bucket'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rename-bucket', 'srcname'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rename-bucket', 'srcname', 'dstname', 'toomany'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rename-bucket', '^^^', 'dstname'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rename-bucket', 'srcname', '^^^^'])) def _check_crush_setter(self, setter): self._assert_valid_command(['osd', 'crush', setter, '*', '2.3', 'AZaz09-_.=']) self._assert_valid_command(['osd', 'crush', setter, 'osd.0', '2.3', 'AZaz09-_.=']) self._assert_valid_command(['osd', 'crush', setter, '0', '2.3', 'AZaz09-_.=']) self._assert_valid_command(['osd', 'crush', setter, '0', '2.3', 'AZaz09-_.=', 'AZaz09-_.=']) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', setter, 'osd.0'])) ret = validate_command(sigdict, ['osd', 'crush', setter, 'osd.0', '-1.0']) assert ret in [None, {}] self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', setter, 'osd.0', '1.0', '^^^'])) def test_crush_set(self): self.assertEqual({}, validate_command(sigdict, ['osd', 'crush'])) self._check_crush_setter('set') def test_crush_add(self): self.assertEqual({}, validate_command(sigdict, ['osd', 
'crush'])) self._check_crush_setter('add') def test_crush_create_or_move(self): self.assertEqual({}, validate_command(sigdict, ['osd', 'crush'])) self._check_crush_setter('create-or-move') def test_crush_move(self): self._assert_valid_command(['osd', 'crush', 'move', 'AZaz09-_.', 'AZaz09-_.=']) self._assert_valid_command(['osd', 'crush', 'move', '0', 'AZaz09-_.=', 'AZaz09-_.=']) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'move'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'move', 'AZaz09-_.'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'move', '^^^', 'AZaz09-_.='])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'move', 'AZaz09-_.', '^^^'])) def test_crush_link(self): self._assert_valid_command(['osd', 'crush', 'link', 'name', 'AZaz09-_.=']) self._assert_valid_command(['osd', 'crush', 'link', 'name', 'AZaz09-_.=', 'AZaz09-_.=']) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'link'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'link', 'name'])) def test_crush_rm(self): for alias in ('rm', 'remove', 'unlink'): self._assert_valid_command(['osd', 'crush', alias, 'AZaz09-_.']) self._assert_valid_command(['osd', 'crush', alias, 'AZaz09-_.', 'AZaz09-_.']) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', alias])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', alias, 'AZaz09-_.', 'AZaz09-_.', 'toomany'])) def test_crush_reweight(self): self._assert_valid_command(['osd', 'crush', 'reweight', 'AZaz09-_.', '2.3']) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'reweight'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'reweight', 'AZaz09-_.'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'reweight', 'AZaz09-_.', '-1.0'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'reweight', '^^^', '2.3'])) def test_crush_tunables(self): for tunable in ('legacy', 
'argonaut', 'bobtail', 'firefly', 'optimal', 'default'): self._assert_valid_command(['osd', 'crush', 'tunables', tunable]) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'tunables'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'tunables', 'default', 'toomany'])) def test_crush_rule_create_simple(self): self._assert_valid_command(['osd', 'crush', 'rule', 'create-simple', 'AZaz09-_.', 'AZaz09-_.', 'AZaz09-_.']) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule', 'create-simple'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule', 'create-simple', 'AZaz09-_.'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule', 'create-simple', 'AZaz09-_.', 'AZaz09-_.'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule', 'create-simple', '^^^', 'AZaz09-_.', 'AZaz09-_.'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule', 'create-simple', 'AZaz09-_.', '|||', 'AZaz09-_.'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule', 'create-simple', 'AZaz09-_.', 'AZaz09-_.', '+++'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule', 'create-simple', 'AZaz09-_.', 'AZaz09-_.', 'AZaz09-_.', 'toomany'])) def test_crush_rule_create_erasure(self): self._assert_valid_command(['osd', 'crush', 'rule', 'create-erasure', 'AZaz09-_.']) self._assert_valid_command(['osd', 'crush', 'rule', 'create-erasure', 'AZaz09-_.', 'whatever']) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule', 'create-erasure'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule', 'create-erasure', '^^^'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule', 'create-erasure', 'name', '^^^'])) def test_crush_rule_rm(self): self._assert_valid_command(['osd', 'crush', 'rule', 'rm', 'AZaz09-_.']) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule', 'rm'])) self.assertEqual({}, 
validate_command(sigdict, ['osd', 'crush', 'rule', 'rm', '^^^^'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule', 'rm', 'AZaz09-_.', 'toomany'])) def test_setmaxosd(self): self.check_1_natural_arg('osd', 'setmaxosd') def test_pause(self): self.check_no_arg('osd', 'pause') def test_unpause(self): self.check_no_arg('osd', 'unpause') def test_erasure_code_profile_set(self): self._assert_valid_command(['osd', 'erasure-code-profile', 'set', 'name']) self._assert_valid_command(['osd', 'erasure-code-profile', 'set', 'name', 'A=B']) self._assert_valid_command(['osd', 'erasure-code-profile', 'set', 'name', 'A=B', 'C=D']) self.assertEqual({}, validate_command(sigdict, ['osd', 'erasure-code-profile', 'set'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'erasure-code-profile', 'set', '^^^^'])) def test_erasure_code_profile_get(self): self._assert_valid_command(['osd', 'erasure-code-profile', 'get', 'name']) self.assertEqual({}, validate_command(sigdict, ['osd', 'erasure-code-profile', 'get'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'erasure-code-profile', 'get', '^^^^'])) def test_erasure_code_profile_rm(self): self._assert_valid_command(['osd', 'erasure-code-profile', 'rm', 'name']) self.assertEqual({}, validate_command(sigdict, ['osd', 'erasure-code-profile', 'rm'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'erasure-code-profile', 'rm', '^^^^'])) def test_erasure_code_profile_ls(self): self._assert_valid_command(['osd', 'erasure-code-profile', 'ls']) self.assertEqual({}, validate_command(sigdict, ['osd', 'erasure-code-profile', 'ls', 'toomany'])) def test_set_unset(self): for action in ('set', 'unset'): for flag in ('pause', 'noup', 'nodown', 'noout', 'noin', 'nobackfill', 'norecover', 'noscrub', 'nodeep-scrub'): self._assert_valid_command(['osd', action, flag]) self.assertEqual({}, validate_command(sigdict, ['osd', action])) self.assertEqual({}, validate_command(sigdict, ['osd', action, 'invalid'])) 
self.assertEqual({}, validate_command(sigdict, ['osd', action, 'pause', 'toomany'])) def test_down(self): self.check_1_or_more_string_args('osd', 'down') def test_out(self): self.check_1_or_more_string_args('osd', 'out') def test_in(self): self.check_1_or_more_string_args('osd', 'in') def test_rm(self): self.check_1_or_more_string_args('osd', 'rm') def test_reweight(self): self._assert_valid_command(['osd', 'reweight', '1', '0.1']) self.assertEqual({}, validate_command(sigdict, ['osd', 'reweight'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'reweight', '1'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'reweight', '1', '2.0'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'reweight', '-1', '0.1'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'reweight', '1', '0.1', 'toomany'])) def test_lost(self): self._assert_valid_command(['osd', 'lost', '1', '--yes-i-really-mean-it']) self._assert_valid_command(['osd', 'lost', '1']) self.assertEqual({}, validate_command(sigdict, ['osd', 'lost'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'lost', '1', 'what?'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'lost', '-1', '--yes-i-really-mean-it'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'lost', '1', '--yes-i-really-mean-it', 'toomany'])) def test_create(self): uuid = '12345678123456781234567812345678' self._assert_valid_command(['osd', 'create']) self._assert_valid_command(['osd', 'create', uuid]) self.assertEqual({}, validate_command(sigdict, ['osd', 'create', 'invalid'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'create', uuid, 'toomany'])) def test_blocklist(self): for action in ('add', 'rm'): self._assert_valid_command(['osd', 'blocklist', action, '1.2.3.4/567']) self._assert_valid_command(['osd', 'blocklist', action, '1.2.3.4']) self._assert_valid_command(['osd', 'blocklist', action, '1.2.3.4/567', '600.40']) self._assert_valid_command(['osd', 'blocklist', action, '1.2.3.4', 
'600.40']) self._assert_valid_command(['osd', 'blocklist', action, 'v1:1.2.3.4', '600.40']) self._assert_valid_command(['osd', 'blocklist', action, 'v1:1.2.3.4/0', '600.40']) self._assert_valid_command(['osd', 'blocklist', action, 'v2:2001:0db8:85a3:0000:0000:8a2e:0370:7334', '600.40']) self._assert_valid_command(['osd', 'blocklist', action, 'v2:fe80::1/0', '600.40']) self._assert_valid_command(['osd', 'blocklist', action, 'v2:[2607:f298:4:2243::5522]:0/0', '600.40']) self._assert_valid_command(['osd', 'blocklist', action, '[2001:0db8::85a3:0000:8a2e:0370:7334]:0/0', '600.40']) self.assertEqual({}, validate_command(sigdict, ['osd', 'blocklist', action, 'invalid', '600.40'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'blocklist', action, '1.2.3.4/567', '-1.0'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'blocklist', action, '1.2.3.4/567', '600.40', 'toomany'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'blocklist', action, 'v2:1.2.3.4/567', '600.40'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'blocklist', action, 'v1:1.2.3.4:65536/567', '600.40'])) def test_pool_mksnap(self): self._assert_valid_command(['osd', 'pool', 'mksnap', 'poolname', 'snapname']) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'mksnap'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'mksnap', 'poolname'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'mksnap', 'poolname', 'snapname', 'toomany'])) def test_pool_rmsnap(self): self._assert_valid_command(['osd', 'pool', 'rmsnap', 'poolname', 'snapname']) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'rmsnap'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'rmsnap', 'poolname'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'rmsnap', 'poolname', 'snapname', 'toomany'])) def test_pool_kwargs(self): """ Use the pool creation command to exercise keyword-style arguments since it has lots of 
parameters """ # Simply use a keyword arg instead of a positional arg, in its # normal order (pgp_num after pg_num) self.assertEqual( { "prefix": "osd pool create", "pool": "foo", "pg_num": 8, "pgp_num": 16 }, validate_command(sigdict, [ 'osd', 'pool', 'create', "foo", "8", "--pgp_num", "16"])) # Again, but using the "--foo=bar" style self.assertEqual( { "prefix": "osd pool create", "pool": "foo", "pg_num": 8, "pgp_num": 16 }, validate_command(sigdict, [ 'osd', 'pool', 'create', "foo", "8", "--pgp_num=16"])) # Specify keyword args in a different order than their definitions # (pgp_num after pool_type) self.assertEqual( { "prefix": "osd pool create", "pool": "foo", "pg_num": 8, "pgp_num": 16, "pool_type": "replicated" }, validate_command(sigdict, [ 'osd', 'pool', 'create', "foo", "8", "--pool_type", "replicated", "--pgp_num", "16"])) # Use a keyword argument that doesn't exist, should fail validation self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'create', "foo", "8", "--foo=bar"])) def test_foo(self): # Long form of a boolean argument (--foo=true) self.assertEqual( { "prefix": "osd pool delete", "pool": "foo", "pool2": "foo", "yes_i_really_really_mean_it": True }, validate_command(sigdict, [ 'osd', 'pool', 'delete', "foo", "foo", "--yes-i-really-really-mean-it=true"])) def test_pool_bool_args(self): """ Use pool deletion to exercise boolean arguments since it has the --yes-i-really-really-mean-it flags """ # Short form of a boolean argument (--foo) self.assertEqual( { "prefix": "osd pool delete", "pool": "foo", "pool2": "foo", "yes_i_really_really_mean_it": True }, validate_command(sigdict, [ 'osd', 'pool', 'delete', "foo", "foo", "--yes-i-really-really-mean-it"])) # Long form of a boolean argument (--foo=true) self.assertEqual( { "prefix": "osd pool delete", "pool": "foo", "pool2": "foo", "yes_i_really_really_mean_it": True }, validate_command(sigdict, [ 'osd', 'pool', 'delete', "foo", "foo", "--yes-i-really-really-mean-it=true"])) # Negative form 
of a boolean argument (--foo=false) self.assertEqual( { "prefix": "osd pool delete", "pool": "foo", "pool2": "foo", "yes_i_really_really_mean_it": False }, validate_command(sigdict, [ 'osd', 'pool', 'delete', "foo", "foo", "--yes-i-really-really-mean-it=false"])) # Invalid value boolean argument (--foo=somethingelse) self.assertEqual({}, validate_command(sigdict, [ 'osd', 'pool', 'delete', "foo", "foo", "--yes-i-really-really-mean-it=rhubarb"])) def test_pool_create(self): self._assert_valid_command(['osd', 'pool', 'create', 'poolname', '128']) self._assert_valid_command(['osd', 'pool', 'create', 'poolname', '128', '128']) self._assert_valid_command(['osd', 'pool', 'create', 'poolname', '128', '128', 'replicated']) self._assert_valid_command(['osd', 'pool', 'create', 'poolname', '128', '128', 'erasure', 'A-Za-z0-9-_.', 'rule^^']) self._assert_valid_command(['osd', 'pool', 'create', 'poolname']) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'create'])) # invalid pg_num and pgp_num, like "-1", could spill over to # erasure_code_profile and rule as they are valid profile and rule # names, so validate_commands() cannot identify such cases. # but if they are matched by profile and rule, the "rule" argument # won't get a chance to be matched anymore. 
self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'create', 'poolname', '-1', '-1', 'rule'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'create', 'poolname', '128', '128', 'erasure', '^^^', 'rule'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'create', 'poolname', '128', '128', 'erasure', 'profile', 'rule', 'toomany'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'create', 'poolname', '128', '128', 'INVALID', 'profile', 'rule'])) def test_pool_delete(self): self._assert_valid_command(['osd', 'pool', 'delete', 'poolname', 'poolname', '--yes-i-really-really-mean-it']) self._assert_valid_command(['osd', 'pool', 'delete', 'poolname', 'poolname']) self._assert_valid_command(['osd', 'pool', 'delete', 'poolname']) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'delete'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'delete', 'poolname', 'poolname', '--yes-i-really-really-mean-it', 'toomany'])) def test_pool_rename(self): self._assert_valid_command(['osd', 'pool', 'rename', 'poolname', 'othername']) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'rename'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'rename', 'poolname'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'rename', 'poolname', 'othername', 'toomany'])) def test_pool_get(self): for var in ('size', 'min_size', 'pg_num', 'pgp_num', 'crush_rule', 'fast_read', 'scrub_min_interval', 'scrub_max_interval', 'deep_scrub_interval', 'recovery_priority', 'recovery_op_priority'): self._assert_valid_command(['osd', 'pool', 'get', 'poolname', var]) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'get'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'get', 'poolname'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'get', 'poolname', 'size', 'toomany'])) 
self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'get', 'poolname', 'invalid'])) def test_pool_set(self): for var in ('size', 'min_size', 'pg_num', 'pgp_num', 'crush_rule', 'hashpspool', 'fast_read', 'scrub_min_interval', 'scrub_max_interval', 'deep_scrub_interval', 'recovery_priority', 'recovery_op_priority'): self._assert_valid_command(['osd', 'pool', 'set', 'poolname', var, 'value']) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'set'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'set', 'poolname'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'set', 'poolname', 'size', 'value', 'toomany'])) def test_pool_set_quota(self): for field in ('max_objects', 'max_bytes'): self._assert_valid_command(['osd', 'pool', 'set-quota', 'poolname', field, '10K']) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'set-quota'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'set-quota', 'poolname'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'set-quota', 'poolname', 'max_objects'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'set-quota', 'poolname', 'invalid', '10K'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'set-quota', 'poolname', 'max_objects', '10K', 'toomany'])) def test_reweight_by_utilization(self): self._assert_valid_command(['osd', 'reweight-by-utilization']) self._assert_valid_command(['osd', 'reweight-by-utilization', '100']) self._assert_valid_command(['osd', 'reweight-by-utilization', '100', '.1']) self.assertEqual({}, validate_command(sigdict, ['osd', 'reweight-by-utilization', '100', 'toomany'])) def test_tier_op(self): for op in ('add', 'remove', 'set-overlay'): self._assert_valid_command(['osd', 'tier', op, 'poolname', 'othername']) self.assertEqual({}, validate_command(sigdict, ['osd', 'tier', op])) self.assertEqual({}, validate_command(sigdict, ['osd', 'tier', op, 'poolname'])) self.assertEqual({}, 
validate_command(sigdict, ['osd', 'tier', op, 'poolname', 'othername', 'toomany'])) def test_tier_cache_mode(self): for mode in ('none', 'writeback', 'readonly', 'readproxy'): self._assert_valid_command(['osd', 'tier', 'cache-mode', 'poolname', mode]) self.assertEqual({}, validate_command(sigdict, ['osd', 'tier', 'cache-mode'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'tier', 'cache-mode', 'invalid'])) def test_tier_remove_overlay(self): self._assert_valid_command(['osd', 'tier', 'remove-overlay', 'poolname']) self.assertEqual({}, validate_command(sigdict, ['osd', 'tier', 'remove-overlay'])) self.assertEqual({}, validate_command(sigdict, ['osd', 'tier', 'remove-overlay', 'poolname', 'toomany'])) def _set_ratio(self, command): self._assert_valid_command(['osd', command, '0.0']) self.assertEqual({}, validate_command(sigdict, ['osd', command])) self.assertEqual({}, validate_command(sigdict, ['osd', command, '2.0'])) def test_set_full_ratio(self): self._set_ratio('set-full-ratio') def test_set_backfillfull_ratio(self): self._set_ratio('set-backfillfull-ratio') def test_set_nearfull_ratio(self): self._set_ratio('set-nearfull-ratio') class TestConfigKey(TestArgparse): def test_get(self): self.check_1_string_arg('config-key', 'get') def test_put(self): self._assert_valid_command(['config-key', 'put', 'key']) self._assert_valid_command(['config-key', 'put', 'key', 'value']) self.assertEqual({}, validate_command(sigdict, ['config-key', 'put'])) self.assertEqual({}, validate_command(sigdict, ['config-key', 'put', 'key', 'value', 'toomany'])) def test_del(self): self.check_1_string_arg('config-key', 'del') def test_exists(self): self.check_1_string_arg('config-key', 'exists') def test_dump(self): self.check_0_or_1_string_arg('config-key', 'dump') def test_list(self): self.check_no_arg('config-key', 'list') class TestValidate(unittest.TestCase): ARGS = 0 KWARGS = 1 KWARGS_EQ = 2 MIXED = 3 def setUp(self): self.prefix = ['some', 'random', 'cmd'] self.args_dict = 
[ {'name': 'variable_one', 'type': 'CephString'}, {'name': 'variable_two', 'type': 'CephString'}, {'name': 'variable_three', 'type': 'CephString'}, {'name': 'variable_four', 'type': 'CephInt'}, {'name': 'variable_five', 'type': 'CephString'}] self.args = [] for d in self.args_dict: if d['type'] == 'CephInt': val = "{}".format(random.randint(0, 100)) elif d['type'] == 'CephString': letters = string.ascii_letters str_len = random.randint(5, 10) val = ''.join(random.choice(letters) for _ in range(str_len)) else: raise skipTest() self.args.append((d['name'], val)) self.sig = parse_funcsig(self.prefix + self.args_dict) def _arg_kwarg_test(self, prefix, args, sig, arg_type=0): """ Runs validate in different arg/kargs ways. :param prefix: List of prefix commands (that can't be kwarged) :param args: a list of kwarg, arg pairs: [(k1, v1), (k2, v2), ...] :param sig: The sig to match :param arg_type: how to build the args to send. As positional args (ARGS), as long kwargs (KWARGS [--k v]), other style long kwargs (KWARGS_EQ (--k=v]), and mixed (MIXED) where there will be a random mix of the above. :return: None, the method will assert. 
""" final_args = list(prefix) for k, v in args: a_type = arg_type if a_type == self.MIXED: a_type = random.choice((self.ARGS, self.KWARGS, self.KWARGS_EQ)) if a_type == self.ARGS: final_args.append(v) elif a_type == self.KWARGS: final_args.extend(["--{}".format(k), v]) else: final_args.append("--{}={}".format(k, v)) try: validate(final_args, sig) except (ArgumentError, ArgumentMissing, ArgumentNumber, ArgumentTooFew, ArgumentValid) as ex: self.fail("Validation failed: {}".format(str(ex))) def test_args_and_kwargs_validate(self): for arg_type in (self.ARGS, self.KWARGS, self.KWARGS_EQ, self.MIXED): self._arg_kwarg_test(self.prefix, self.args, self.sig, arg_type) if __name__ == '__main__': unittest.main() # Local Variables: # compile-command: "cd ../../..; cmake --build build --target get_command_descriptions -j4 && # CEPH_BIN=build/bin \ # PYTHONPATH=src/pybind python3 \ # src/test/pybind/test_ceph_argparse.py" # End:
65,765
47.392936
117
py
null
ceph-main/src/test/pybind/test_ceph_daemon.py
#!/usr/bin/env python3 # -*- mode:python; tab-width:4; indent-tabs-mode:t -*- # vim: ts=4 sw=4 smarttab expandtab # """ Copyright (C) 2015 Red Hat This is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2, as published by the Free Software Foundation. See file COPYING. """ import unittest from ceph_daemon import DaemonWatcher try: from StringIO import StringIO except ImportError: from io import StringIO class TestDaemonWatcher(unittest.TestCase): def test_format(self): dw = DaemonWatcher(None) self.assertEqual(dw.format_dimless(1, 4), " 1 ") self.assertEqual(dw.format_dimless(1000, 4), "1.0k") self.assertEqual(dw.format_dimless(3.14159, 4), " 3 ") self.assertEqual(dw.format_dimless(1400000, 4), "1.4M") def test_col_width(self): dw = DaemonWatcher(None) self.assertEqual(dw.col_width("foo"), 4) self.assertEqual(dw.col_width("foobar"), 6) def test_supports_color(self): dw = DaemonWatcher(None) # Can't count on having a tty available during tests, so only test the false case self.assertFalse(dw.supports_color(StringIO())) if __name__ == '__main__': unittest.main() # Local Variables: # compile-command: "cd ../../..; # PYTHONPATH=src/pybind python3 src/test/pybind/test_ceph_daemon.py" # End:
1,401
25.45283
89
py
null
ceph-main/src/test/pybind/test_cephfs.py
# vim: expandtab smarttab shiftwidth=4 softtabstop=4 import collections collections.Callable = collections.abc.Callable from nose.tools import assert_raises, assert_equal, assert_not_equal, assert_greater, with_setup import cephfs as libcephfs import fcntl import os import random import time import stat import uuid from datetime import datetime cephfs = None def setup_module(): global cephfs cephfs = libcephfs.LibCephFS(conffile='') cephfs.mount() def teardown_module(): global cephfs cephfs.shutdown() def purge_dir(path, is_snap = False): print(b"Purge " + path) d = cephfs.opendir(path) if (not path.endswith(b"/")): path = path + b"/" dent = cephfs.readdir(d) while dent: if (dent.d_name not in [b".", b".."]): print(path + dent.d_name) if dent.is_dir(): if (not is_snap): try: snappath = path + dent.d_name + b"/.snap" cephfs.stat(snappath) purge_dir(snappath, True) except: pass purge_dir(path + dent.d_name, False) cephfs.rmdir(path + dent.d_name) else: print("rmsnap on {} snap {}".format(path, dent.d_name)) cephfs.rmsnap(path, dent.d_name); else: cephfs.unlink(path + dent.d_name) dent = cephfs.readdir(d) cephfs.closedir(d) def setup_test(): purge_dir(b"/") cephfs.chdir(b"/") _, ret_buf = cephfs.listxattr("/") print(f'ret_buf={ret_buf}') xattrs = ret_buf.decode('utf-8').split('\x00') for xattr in xattrs[:-1]: cephfs.removexattr("/", xattr) @with_setup(setup_test) def test_conf_get(): fsid = cephfs.conf_get("fsid") assert(len(fsid) > 0) @with_setup(setup_test) def test_version(): cephfs.version() @with_setup(setup_test) def test_fstat(): fd = cephfs.open(b'file-1', 'w', 0o755) stat = cephfs.fstat(fd) assert(len(stat) == 13) cephfs.close(fd) @with_setup(setup_test) def test_statfs(): stat = cephfs.statfs(b'/') assert(len(stat) == 11) @with_setup(setup_test) def test_statx(): stat = cephfs.statx(b'/', libcephfs.CEPH_STATX_MODE, 0) assert('mode' in stat.keys()) stat = cephfs.statx(b'/', libcephfs.CEPH_STATX_BTIME, 0) assert('btime' in stat.keys()) fd = 
cephfs.open(b'file-1', 'w', 0o755) cephfs.write(fd, b"1111", 0) cephfs.close(fd) cephfs.symlink(b'file-1', b'file-2') stat = cephfs.statx(b'file-2', libcephfs.CEPH_STATX_MODE | libcephfs.CEPH_STATX_BTIME, libcephfs.AT_SYMLINK_NOFOLLOW) assert('mode' in stat.keys()) assert('btime' in stat.keys()) cephfs.unlink(b'file-2') cephfs.unlink(b'file-1') @with_setup(setup_test) def test_syncfs(): stat = cephfs.sync_fs() @with_setup(setup_test) def test_fsync(): fd = cephfs.open(b'file-1', 'w', 0o755) cephfs.write(fd, b"asdf", 0) stat = cephfs.fsync(fd, 0) cephfs.write(fd, b"qwer", 0) stat = cephfs.fsync(fd, 1) cephfs.close(fd) #sync on non-existing fd (assume fd 12345 is not exists) assert_raises(libcephfs.Error, cephfs.fsync, 12345, 0) @with_setup(setup_test) def test_directory(): cephfs.mkdir(b"/temp-directory", 0o755) cephfs.mkdirs(b"/temp-directory/foo/bar", 0o755) cephfs.chdir(b"/temp-directory") assert_equal(cephfs.getcwd(), b"/temp-directory") cephfs.rmdir(b"/temp-directory/foo/bar") cephfs.rmdir(b"/temp-directory/foo") cephfs.rmdir(b"/temp-directory") assert_raises(libcephfs.ObjectNotFound, cephfs.chdir, b"/temp-directory") @with_setup(setup_test) def test_walk_dir(): cephfs.chdir(b"/") dirs = [b"dir-1", b"dir-2", b"dir-3"] for i in dirs: cephfs.mkdir(i, 0o755) handler = cephfs.opendir(b"/") d = cephfs.readdir(handler) dirs += [b".", b".."] while d: assert(d.d_name in dirs) dirs.remove(d.d_name) d = cephfs.readdir(handler) assert(len(dirs) == 0) dirs = [b"/dir-1", b"/dir-2", b"/dir-3"] for i in dirs: cephfs.rmdir(i) cephfs.closedir(handler) @with_setup(setup_test) def test_xattr(): assert_raises(libcephfs.OperationNotSupported, cephfs.setxattr, "/", "key", b"value", 0) cephfs.setxattr("/", "user.key", b"value", 0) assert_equal(b"value", cephfs.getxattr("/", "user.key")) cephfs.setxattr("/", "user.big", b"x" * 300, 0) # Default size is 255, get ERANGE assert_raises(libcephfs.OutOfRange, cephfs.getxattr, "/", "user.big") # Pass explicit size, and we'll get the value 
assert_equal(300, len(cephfs.getxattr("/", "user.big", 300))) cephfs.removexattr("/", "user.key") # user.key is already removed assert_raises(libcephfs.NoData, cephfs.getxattr, "/", "user.key") # user.big is only listed ret_val, ret_buff = cephfs.listxattr("/") assert_equal(9, ret_val) assert_equal("user.big\x00", ret_buff.decode('utf-8')) @with_setup(setup_test) def test_ceph_mirror_xattr(): def gen_mirror_xattr(): cluster_id = str(uuid.uuid4()) fs_id = random.randint(1, 10) mirror_xattr = f'cluster_id={cluster_id} fs_id={fs_id}' return mirror_xattr.encode('utf-8') mirror_xattr_enc_1 = gen_mirror_xattr() # mirror xattr is only allowed on root cephfs.mkdir('/d0', 0o755) assert_raises(libcephfs.InvalidValue, cephfs.setxattr, '/d0', 'ceph.mirror.info', mirror_xattr_enc_1, os.XATTR_CREATE) cephfs.rmdir('/d0') cephfs.setxattr('/', 'ceph.mirror.info', mirror_xattr_enc_1, os.XATTR_CREATE) assert_equal(mirror_xattr_enc_1, cephfs.getxattr('/', 'ceph.mirror.info')) # setting again with XATTR_CREATE should fail assert_raises(libcephfs.ObjectExists, cephfs.setxattr, '/', 'ceph.mirror.info', mirror_xattr_enc_1, os.XATTR_CREATE) # ceph.mirror.info should not show up in listing ret_val, _ = cephfs.listxattr("/") assert_equal(0, ret_val) mirror_xattr_enc_2 = gen_mirror_xattr() cephfs.setxattr('/', 'ceph.mirror.info', mirror_xattr_enc_2, os.XATTR_REPLACE) assert_equal(mirror_xattr_enc_2, cephfs.getxattr('/', 'ceph.mirror.info')) cephfs.removexattr('/', 'ceph.mirror.info') # ceph.mirror.info is already removed assert_raises(libcephfs.NoData, cephfs.getxattr, '/', 'ceph.mirror.info') # removing again should throw error assert_raises(libcephfs.NoData, cephfs.removexattr, "/", "ceph.mirror.info") # check mirror info xattr format assert_raises(libcephfs.InvalidValue, cephfs.setxattr, '/', 'ceph.mirror.info', b"unknown", 0) @with_setup(setup_test) def test_fxattr(): fd = cephfs.open(b'/file-fxattr', 'w', 0o755) assert_raises(libcephfs.OperationNotSupported, cephfs.fsetxattr, fd, "key", 
b"value", 0) assert_raises(TypeError, cephfs.fsetxattr, "fd", "user.key", b"value", 0) assert_raises(TypeError, cephfs.fsetxattr, fd, "user.key", "value", 0) assert_raises(TypeError, cephfs.fsetxattr, fd, "user.key", b"value", "0") cephfs.fsetxattr(fd, "user.key", b"value", 0) assert_equal(b"value", cephfs.fgetxattr(fd, "user.key")) cephfs.fsetxattr(fd, "user.big", b"x" * 300, 0) # Default size is 255, get ERANGE assert_raises(libcephfs.OutOfRange, cephfs.fgetxattr, fd, "user.big") # Pass explicit size, and we'll get the value assert_equal(300, len(cephfs.fgetxattr(fd, "user.big", 300))) cephfs.fremovexattr(fd, "user.key") # user.key is already removed assert_raises(libcephfs.NoData, cephfs.fgetxattr, fd, "user.key") # user.big is only listed ret_val, ret_buff = cephfs.flistxattr(fd) assert_equal(9, ret_val) assert_equal("user.big\x00", ret_buff.decode('utf-8')) cephfs.close(fd) cephfs.unlink(b'/file-fxattr') @with_setup(setup_test) def test_rename(): cephfs.mkdir(b"/a", 0o755) cephfs.mkdir(b"/a/b", 0o755) cephfs.rename(b"/a", b"/b") cephfs.stat(b"/b/b") cephfs.rmdir(b"/b/b") cephfs.rmdir(b"/b") @with_setup(setup_test) def test_open(): assert_raises(libcephfs.ObjectNotFound, cephfs.open, b'file-1', 'r') assert_raises(libcephfs.ObjectNotFound, cephfs.open, b'file-1', 'r+') fd = cephfs.open(b'file-1', 'w', 0o755) cephfs.write(fd, b"asdf", 0) cephfs.close(fd) fd = cephfs.open(b'file-1', 'r', 0o755) assert_equal(cephfs.read(fd, 0, 4), b"asdf") cephfs.close(fd) fd = cephfs.open(b'file-1', 'r+', 0o755) cephfs.write(fd, b"zxcv", 4) assert_equal(cephfs.read(fd, 4, 8), b"zxcv") cephfs.close(fd) fd = cephfs.open(b'file-1', 'w+', 0o755) assert_equal(cephfs.read(fd, 0, 4), b"") cephfs.write(fd, b"zxcv", 4) assert_equal(cephfs.read(fd, 4, 8), b"zxcv") cephfs.close(fd) fd = cephfs.open(b'file-1', os.O_RDWR, 0o755) cephfs.write(fd, b"asdf", 0) assert_equal(cephfs.read(fd, 0, 4), b"asdf") cephfs.close(fd) assert_raises(libcephfs.OperationNotSupported, cephfs.open, b'file-1', 'a') 
cephfs.unlink(b'file-1') @with_setup(setup_test) def test_link(): fd = cephfs.open(b'file-1', 'w', 0o755) cephfs.write(fd, b"1111", 0) cephfs.close(fd) cephfs.link(b'file-1', b'file-2') fd = cephfs.open(b'file-2', 'r', 0o755) assert_equal(cephfs.read(fd, 0, 4), b"1111") cephfs.close(fd) fd = cephfs.open(b'file-2', 'r+', 0o755) cephfs.write(fd, b"2222", 4) cephfs.close(fd) fd = cephfs.open(b'file-1', 'r', 0o755) assert_equal(cephfs.read(fd, 0, 8), b"11112222") cephfs.close(fd) cephfs.unlink(b'file-2') @with_setup(setup_test) def test_symlink(): fd = cephfs.open(b'file-1', 'w', 0o755) cephfs.write(fd, b"1111", 0) cephfs.close(fd) cephfs.symlink(b'file-1', b'file-2') fd = cephfs.open(b'file-2', 'r', 0o755) assert_equal(cephfs.read(fd, 0, 4), b"1111") cephfs.close(fd) fd = cephfs.open(b'file-2', 'r+', 0o755) cephfs.write(fd, b"2222", 4) cephfs.close(fd) fd = cephfs.open(b'file-1', 'r', 0o755) assert_equal(cephfs.read(fd, 0, 8), b"11112222") cephfs.close(fd) cephfs.unlink(b'file-2') @with_setup(setup_test) def test_readlink(): fd = cephfs.open(b'/file-1', 'w', 0o755) cephfs.write(fd, b"1111", 0) cephfs.close(fd) cephfs.symlink(b'/file-1', b'/file-2') d = cephfs.readlink(b"/file-2",100) assert_equal(d, b"/file-1") cephfs.unlink(b'/file-2') cephfs.unlink(b'/file-1') @with_setup(setup_test) def test_delete_cwd(): assert_equal(b"/", cephfs.getcwd()) cephfs.mkdir(b"/temp-directory", 0o755) cephfs.chdir(b"/temp-directory") cephfs.rmdir(b"/temp-directory") # getcwd gives you something stale here: it remembers the path string # even when things are unlinked. 
It's up to the caller to find out # whether it really still exists assert_equal(b"/temp-directory", cephfs.getcwd()) @with_setup(setup_test) def test_flock(): fd = cephfs.open(b'file-1', 'w', 0o755) cephfs.flock(fd, fcntl.LOCK_EX, 123); fd2 = cephfs.open(b'file-1', 'w', 0o755) assert_raises(libcephfs.WouldBlock, cephfs.flock, fd2, fcntl.LOCK_EX | fcntl.LOCK_NB, 456); cephfs.close(fd2) cephfs.close(fd) @with_setup(setup_test) def test_mount_unmount(): test_directory() cephfs.unmount() cephfs.mount() test_open() @with_setup(setup_test) def test_lxattr(): fd = cephfs.open(b'/file-lxattr', 'w', 0o755) cephfs.close(fd) cephfs.setxattr(b"/file-lxattr", "user.key", b"value", 0) cephfs.symlink(b"/file-lxattr", b"/file-sym-lxattr") assert_equal(b"value", cephfs.getxattr(b"/file-sym-lxattr", "user.key")) assert_raises(libcephfs.NoData, cephfs.lgetxattr, b"/file-sym-lxattr", "user.key") cephfs.lsetxattr(b"/file-sym-lxattr", "trusted.key-sym", b"value-sym", 0) assert_equal(b"value-sym", cephfs.lgetxattr(b"/file-sym-lxattr", "trusted.key-sym")) cephfs.lsetxattr(b"/file-sym-lxattr", "trusted.big", b"x" * 300, 0) # Default size is 255, get ERANGE assert_raises(libcephfs.OutOfRange, cephfs.lgetxattr, b"/file-sym-lxattr", "trusted.big") # Pass explicit size, and we'll get the value assert_equal(300, len(cephfs.lgetxattr(b"/file-sym-lxattr", "trusted.big", 300))) cephfs.lremovexattr(b"/file-sym-lxattr", "trusted.key-sym") # trusted.key-sym is already removed assert_raises(libcephfs.NoData, cephfs.lgetxattr, b"/file-sym-lxattr", "trusted.key-sym") # trusted.big is only listed ret_val, ret_buff = cephfs.llistxattr(b"/file-sym-lxattr") assert_equal(12, ret_val) assert_equal("trusted.big\x00", ret_buff.decode('utf-8')) cephfs.unlink(b'/file-lxattr') cephfs.unlink(b'/file-sym-lxattr') @with_setup(setup_test) def test_mount_root(): cephfs.mkdir(b"/mount-directory", 0o755) cephfs.unmount() cephfs.mount(mount_root = b"/mount-directory") assert_raises(libcephfs.Error, cephfs.mount, 
mount_root = b"/nowhere") cephfs.unmount() cephfs.mount() @with_setup(setup_test) def test_utime(): fd = cephfs.open(b'/file-1', 'w', 0o755) cephfs.write(fd, b'0000', 0) cephfs.close(fd) stx_pre = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) time.sleep(1) cephfs.utime(b'/file-1') stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) assert_greater(stx_post['atime'], stx_pre['atime']) assert_greater(stx_post['mtime'], stx_pre['mtime']) atime_pre = int(time.mktime(stx_pre['atime'].timetuple())) mtime_pre = int(time.mktime(stx_pre['mtime'].timetuple())) cephfs.utime(b'/file-1', (atime_pre, mtime_pre)) stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) assert_equal(stx_post['atime'], stx_pre['atime']) assert_equal(stx_post['mtime'], stx_pre['mtime']) cephfs.unlink(b'/file-1') @with_setup(setup_test) def test_futime(): fd = cephfs.open(b'/file-1', 'w', 0o755) cephfs.write(fd, b'0000', 0) stx_pre = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) time.sleep(1) cephfs.futime(fd) stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) assert_greater(stx_post['atime'], stx_pre['atime']) assert_greater(stx_post['mtime'], stx_pre['mtime']) atime_pre = int(time.mktime(stx_pre['atime'].timetuple())) mtime_pre = int(time.mktime(stx_pre['mtime'].timetuple())) cephfs.futime(fd, (atime_pre, mtime_pre)) stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) assert_equal(stx_post['atime'], stx_pre['atime']) assert_equal(stx_post['mtime'], stx_pre['mtime']) cephfs.close(fd) cephfs.unlink(b'/file-1') @with_setup(setup_test) def test_utimes(): fd = cephfs.open(b'/file-1', 'w', 0o755) cephfs.write(fd, b'0000', 0) cephfs.close(fd) stx_pre = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) time.sleep(1) 
cephfs.utimes(b'/file-1') stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) assert_greater(stx_post['atime'], stx_pre['atime']) assert_greater(stx_post['mtime'], stx_pre['mtime']) atime_pre = time.mktime(stx_pre['atime'].timetuple()) mtime_pre = time.mktime(stx_pre['mtime'].timetuple()) cephfs.utimes(b'/file-1', (atime_pre, mtime_pre)) stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) assert_equal(stx_post['atime'], stx_pre['atime']) assert_equal(stx_post['mtime'], stx_pre['mtime']) cephfs.unlink(b'/file-1') @with_setup(setup_test) def test_lutimes(): fd = cephfs.open(b'/file-1', 'w', 0o755) cephfs.write(fd, b'0000', 0) cephfs.close(fd) cephfs.symlink(b'/file-1', b'/file-2') stx_pre_t = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) stx_pre_s = cephfs.statx(b'/file-2', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, libcephfs.AT_SYMLINK_NOFOLLOW) time.sleep(1) cephfs.lutimes(b'/file-2') stx_post_t = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) stx_post_s = cephfs.statx(b'/file-2', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, libcephfs.AT_SYMLINK_NOFOLLOW) assert_equal(stx_post_t['atime'], stx_pre_t['atime']) assert_equal(stx_post_t['mtime'], stx_pre_t['mtime']) assert_greater(stx_post_s['atime'], stx_pre_s['atime']) assert_greater(stx_post_s['mtime'], stx_pre_s['mtime']) atime_pre = time.mktime(stx_pre_s['atime'].timetuple()) mtime_pre = time.mktime(stx_pre_s['mtime'].timetuple()) cephfs.lutimes(b'/file-2', (atime_pre, mtime_pre)) stx_post_s = cephfs.statx(b'/file-2', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, libcephfs.AT_SYMLINK_NOFOLLOW) assert_equal(stx_post_s['atime'], stx_pre_s['atime']) assert_equal(stx_post_s['mtime'], stx_pre_s['mtime']) cephfs.unlink(b'/file-2') cephfs.unlink(b'/file-1') @with_setup(setup_test) def test_futimes(): fd = 
cephfs.open(b'/file-1', 'w', 0o755) cephfs.write(fd, b'0000', 0) stx_pre = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) time.sleep(1) cephfs.futimes(fd) stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) assert_greater(stx_post['atime'], stx_pre['atime']) assert_greater(stx_post['mtime'], stx_pre['mtime']) atime_pre = time.mktime(stx_pre['atime'].timetuple()) mtime_pre = time.mktime(stx_pre['mtime'].timetuple()) cephfs.futimes(fd, (atime_pre, mtime_pre)) stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) assert_equal(stx_post['atime'], stx_pre['atime']) assert_equal(stx_post['mtime'], stx_pre['mtime']) cephfs.close(fd) cephfs.unlink(b'/file-1') @with_setup(setup_test) def test_futimens(): fd = cephfs.open(b'/file-1', 'w', 0o755) cephfs.write(fd, b'0000', 0) stx_pre = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) time.sleep(1) cephfs.futimens(fd) stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) assert_greater(stx_post['atime'], stx_pre['atime']) assert_greater(stx_post['mtime'], stx_pre['mtime']) atime_pre = time.mktime(stx_pre['atime'].timetuple()) mtime_pre = time.mktime(stx_pre['mtime'].timetuple()) cephfs.futimens(fd, (atime_pre, mtime_pre)) stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) assert_equal(stx_post['atime'], stx_pre['atime']) assert_equal(stx_post['mtime'], stx_pre['mtime']) cephfs.close(fd) cephfs.unlink(b'/file-1') @with_setup(setup_test) def test_lchmod(): fd = cephfs.open(b'/file-1', 'w', 0o755) cephfs.write(fd, b'0000', 0) cephfs.close(fd) cephfs.symlink(b'/file-1', b'/file-2') stx_pre_t = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_MODE, 0) stx_pre_s = cephfs.statx(b'/file-2', libcephfs.CEPH_STATX_MODE, libcephfs.AT_SYMLINK_NOFOLLOW) time.sleep(1) cephfs.lchmod(b'/file-2', 0o400) 
stx_post_t = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_MODE, 0) stx_post_s = cephfs.statx(b'/file-2', libcephfs.CEPH_STATX_MODE, libcephfs.AT_SYMLINK_NOFOLLOW) assert_equal(stx_post_t['mode'], stx_pre_t['mode']) assert_not_equal(stx_post_s['mode'], stx_pre_s['mode']) stx_post_s_perm_bits = stx_post_s['mode'] & ~stat.S_IFMT(stx_post_s["mode"]) assert_equal(stx_post_s_perm_bits, 0o400) cephfs.unlink(b'/file-2') cephfs.unlink(b'/file-1') @with_setup(setup_test) def test_fchmod(): fd = cephfs.open(b'/file-fchmod', 'w', 0o655) st = cephfs.statx(b'/file-fchmod', libcephfs.CEPH_STATX_MODE, 0) mode = st["mode"] | stat.S_IXUSR cephfs.fchmod(fd, mode) st = cephfs.statx(b'/file-fchmod', libcephfs.CEPH_STATX_MODE, 0) assert_equal(st["mode"] & stat.S_IRWXU, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) assert_raises(TypeError, cephfs.fchmod, "/file-fchmod", stat.S_IXUSR) assert_raises(TypeError, cephfs.fchmod, fd, "stat.S_IXUSR") cephfs.close(fd) cephfs.unlink(b'/file-fchmod') @with_setup(setup_test) def test_fchown(): fd = cephfs.open(b'/file-fchown', 'w', 0o655) uid = os.getuid() gid = os.getgid() assert_raises(TypeError, cephfs.fchown, b'/file-fchown', uid, gid) assert_raises(TypeError, cephfs.fchown, fd, "uid", "gid") cephfs.fchown(fd, uid, gid) st = cephfs.statx(b'/file-fchown', libcephfs.CEPH_STATX_UID | libcephfs.CEPH_STATX_GID, 0) assert_equal(st["uid"], uid) assert_equal(st["gid"], gid) cephfs.fchown(fd, 9999, 9999) st = cephfs.statx(b'/file-fchown', libcephfs.CEPH_STATX_UID | libcephfs.CEPH_STATX_GID, 0) assert_equal(st["uid"], 9999) assert_equal(st["gid"], 9999) cephfs.close(fd) cephfs.unlink(b'/file-fchown') @with_setup(setup_test) def test_truncate(): fd = cephfs.open(b'/file-truncate', 'w', 0o755) cephfs.write(fd, b"1111", 0) cephfs.truncate(b'/file-truncate', 0) stat = cephfs.fsync(fd, 0) st = cephfs.statx(b'/file-truncate', libcephfs.CEPH_STATX_SIZE, 0) assert_equal(st["size"], 0) cephfs.close(fd) cephfs.unlink(b'/file-truncate') @with_setup(setup_test) def 
test_ftruncate(): fd = cephfs.open(b'/file-ftruncate', 'w', 0o755) cephfs.write(fd, b"1111", 0) assert_raises(TypeError, cephfs.ftruncate, b'/file-ftruncate', 0) cephfs.ftruncate(fd, 0) stat = cephfs.fsync(fd, 0) st = cephfs.fstat(fd) assert_equal(st.st_size, 0) cephfs.close(fd) cephfs.unlink(b'/file-ftruncate') @with_setup(setup_test) def test_fallocate(): fd = cephfs.open(b'/file-fallocate', 'w', 0o755) assert_raises(TypeError, cephfs.fallocate, b'/file-fallocate', 0, 10) cephfs.fallocate(fd, 0, 10) stat = cephfs.fsync(fd, 0) st = cephfs.fstat(fd) assert_equal(st.st_size, 10) cephfs.close(fd) cephfs.unlink(b'/file-fallocate') @with_setup(setup_test) def test_mknod(): mode = stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR cephfs.mknod(b'/file-fifo', mode) st = cephfs.statx(b'/file-fifo', libcephfs.CEPH_STATX_MODE, 0) assert_equal(st["mode"] & mode, mode) cephfs.unlink(b'/file-fifo') @with_setup(setup_test) def test_lazyio(): fd = cephfs.open(b'/file-lazyio', 'w', 0o755) assert_raises(TypeError, cephfs.lazyio, "fd", 1) assert_raises(TypeError, cephfs.lazyio, fd, "1") cephfs.lazyio(fd, 1) cephfs.write(fd, b"1111", 0) assert_raises(TypeError, cephfs.lazyio_propagate, "fd", 0, 4) assert_raises(TypeError, cephfs.lazyio_propagate, fd, "0", 4) assert_raises(TypeError, cephfs.lazyio_propagate, fd, 0, "4") cephfs.lazyio_propagate(fd, 0, 4) st = cephfs.fstat(fd) assert_equal(st.st_size, 4) cephfs.write(fd, b"2222", 4) assert_raises(TypeError, cephfs.lazyio_synchronize, "fd", 0, 8) assert_raises(TypeError, cephfs.lazyio_synchronize, fd, "0", 8) assert_raises(TypeError, cephfs.lazyio_synchronize, fd, 0, "8") cephfs.lazyio_synchronize(fd, 0, 8) st = cephfs.fstat(fd) assert_equal(st.st_size, 8) cephfs.close(fd) cephfs.unlink(b'/file-lazyio') @with_setup(setup_test) def test_replication(): fd = cephfs.open(b'/file-rep', 'w', 0o755) assert_raises(TypeError, cephfs.get_file_replication, "fd") l_dict = cephfs.get_layout(fd) assert('pool_name' in l_dict.keys()) cnt = 
cephfs.get_file_replication(fd) get_rep_cnt_cmd = "ceph osd pool get " + l_dict["pool_name"] + " size" s=os.popen(get_rep_cnt_cmd).read().strip('\n') size=int(s.split(" ")[-1]) assert_equal(cnt, size) cnt = cephfs.get_path_replication(b'/file-rep') assert_equal(cnt, size) cephfs.close(fd) cephfs.unlink(b'/file-rep') @with_setup(setup_test) def test_caps(): fd = cephfs.open(b'/file-caps', 'w', 0o755) timeout = cephfs.get_cap_return_timeout() assert_equal(timeout, 300) fd_caps = cephfs.debug_get_fd_caps(fd) file_caps = cephfs.debug_get_file_caps(b'/file-caps') assert_equal(fd_caps, file_caps) cephfs.close(fd) cephfs.unlink(b'/file-caps') @with_setup(setup_test) def test_setuuid(): ses_id_uid = uuid.uuid1() ses_id_str = str(ses_id_uid) cephfs.set_uuid(ses_id_str) @with_setup(setup_test) def test_session_timeout(): assert_raises(TypeError, cephfs.set_session_timeout, "300") cephfs.set_session_timeout(300) @with_setup(setup_test) def test_readdirops(): cephfs.chdir(b"/") dirs = [b"dir-1", b"dir-2", b"dir-3"] for i in dirs: cephfs.mkdir(i, 0o755) handler = cephfs.opendir(b"/") d1 = cephfs.readdir(handler) d2 = cephfs.readdir(handler) d3 = cephfs.readdir(handler) offset_d4 = cephfs.telldir(handler) d4 = cephfs.readdir(handler) cephfs.rewinddir(handler) d = cephfs.readdir(handler) assert_equal(d.d_name, d1.d_name) cephfs.seekdir(handler, offset_d4) d = cephfs.readdir(handler) assert_equal(d.d_name, d4.d_name) dirs += [b".", b".."] cephfs.rewinddir(handler) d = cephfs.readdir(handler) while d: assert(d.d_name in dirs) dirs.remove(d.d_name) d = cephfs.readdir(handler) assert(len(dirs) == 0) dirs = [b"/dir-1", b"/dir-2", b"/dir-3"] for i in dirs: cephfs.rmdir(i) cephfs.closedir(handler) def test_preadv_pwritev(): fd = cephfs.open(b'file-1', 'w', 0o755) cephfs.pwritev(fd, [b"asdf", b"zxcvb"], 0) cephfs.close(fd) fd = cephfs.open(b'file-1', 'r', 0o755) buf = [bytearray(i) for i in [4, 5]] cephfs.preadv(fd, buf, 0) assert_equal([b"asdf", b"zxcvb"], list(buf)) cephfs.close(fd) 
cephfs.unlink(b'file-1') @with_setup(setup_test) def test_setattrx(): fd = cephfs.open(b'file-setattrx', 'w', 0o655) cephfs.write(fd, b"1111", 0) cephfs.close(fd) st = cephfs.statx(b'file-setattrx', libcephfs.CEPH_STATX_MODE, 0) mode = st["mode"] | stat.S_IXUSR assert_raises(TypeError, cephfs.setattrx, b'file-setattrx', "dict", 0, 0) time.sleep(1) statx_dict = dict() statx_dict["mode"] = mode statx_dict["uid"] = 9999 statx_dict["gid"] = 9999 dt = datetime.now() statx_dict["mtime"] = dt statx_dict["atime"] = dt statx_dict["ctime"] = dt statx_dict["size"] = 10 statx_dict["btime"] = dt cephfs.setattrx(b'file-setattrx', statx_dict, libcephfs.CEPH_SETATTR_MODE | libcephfs.CEPH_SETATTR_UID | libcephfs.CEPH_SETATTR_GID | libcephfs.CEPH_SETATTR_MTIME | libcephfs.CEPH_SETATTR_ATIME | libcephfs.CEPH_SETATTR_CTIME | libcephfs.CEPH_SETATTR_SIZE | libcephfs.CEPH_SETATTR_BTIME, 0) st1 = cephfs.statx(b'file-setattrx', libcephfs.CEPH_STATX_MODE | libcephfs.CEPH_STATX_UID | libcephfs.CEPH_STATX_GID | libcephfs.CEPH_STATX_MTIME | libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_CTIME | libcephfs.CEPH_STATX_SIZE | libcephfs.CEPH_STATX_BTIME, 0) assert_equal(mode, st1["mode"]) assert_equal(9999, st1["uid"]) assert_equal(9999, st1["gid"]) assert_equal(int(dt.timestamp()), int(st1["mtime"].timestamp())) assert_equal(int(dt.timestamp()), int(st1["atime"].timestamp())) assert_equal(int(dt.timestamp()), int(st1["ctime"].timestamp())) assert_equal(int(dt.timestamp()), int(st1["btime"].timestamp())) assert_equal(10, st1["size"]) cephfs.unlink(b'file-setattrx') @with_setup(setup_test) def test_fsetattrx(): fd = cephfs.open(b'file-fsetattrx', 'w', 0o655) cephfs.write(fd, b"1111", 0) st = cephfs.statx(b'file-fsetattrx', libcephfs.CEPH_STATX_MODE, 0) mode = st["mode"] | stat.S_IXUSR assert_raises(TypeError, cephfs.fsetattrx, fd, "dict", 0, 0) time.sleep(1) statx_dict = dict() statx_dict["mode"] = mode statx_dict["uid"] = 9999 statx_dict["gid"] = 9999 dt = datetime.now() statx_dict["mtime"] = dt 
statx_dict["atime"] = dt statx_dict["ctime"] = dt statx_dict["size"] = 10 statx_dict["btime"] = dt cephfs.fsetattrx(fd, statx_dict, libcephfs.CEPH_SETATTR_MODE | libcephfs.CEPH_SETATTR_UID | libcephfs.CEPH_SETATTR_GID | libcephfs.CEPH_SETATTR_MTIME | libcephfs.CEPH_SETATTR_ATIME | libcephfs.CEPH_SETATTR_CTIME | libcephfs.CEPH_SETATTR_SIZE | libcephfs.CEPH_SETATTR_BTIME) st1 = cephfs.statx(b'file-fsetattrx', libcephfs.CEPH_STATX_MODE | libcephfs.CEPH_STATX_UID | libcephfs.CEPH_STATX_GID | libcephfs.CEPH_STATX_MTIME | libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_CTIME | libcephfs.CEPH_STATX_SIZE | libcephfs.CEPH_STATX_BTIME, 0) assert_equal(mode, st1["mode"]) assert_equal(9999, st1["uid"]) assert_equal(9999, st1["gid"]) assert_equal(int(dt.timestamp()), int(st1["mtime"].timestamp())) assert_equal(int(dt.timestamp()), int(st1["atime"].timestamp())) assert_equal(int(dt.timestamp()), int(st1["ctime"].timestamp())) assert_equal(int(dt.timestamp()), int(st1["btime"].timestamp())) assert_equal(10, st1["size"]) cephfs.close(fd) cephfs.unlink(b'file-fsetattrx') @with_setup(setup_test) def test_get_layout(): fd = cephfs.open(b'file-get-layout', 'w', 0o755) cephfs.write(fd, b"1111", 0) assert_raises(TypeError, cephfs.get_layout, "fd") l_dict = cephfs.get_layout(fd) assert('stripe_unit' in l_dict.keys()) assert('stripe_count' in l_dict.keys()) assert('object_size' in l_dict.keys()) assert('pool_id' in l_dict.keys()) assert('pool_name' in l_dict.keys()) cephfs.close(fd) cephfs.unlink(b'file-get-layout') @with_setup(setup_test) def test_get_default_pool(): dp_dict = cephfs.get_default_pool() assert('pool_id' in dp_dict.keys()) assert('pool_name' in dp_dict.keys()) @with_setup(setup_test) def test_get_pool(): dp_dict = cephfs.get_default_pool() assert('pool_id' in dp_dict.keys()) assert('pool_name' in dp_dict.keys()) assert_equal(cephfs.get_pool_id(dp_dict["pool_name"]), dp_dict["pool_id"]) get_rep_cnt_cmd = "ceph osd pool get " + dp_dict["pool_name"] + " size" 
s=os.popen(get_rep_cnt_cmd).read().strip('\n') size=int(s.split(" ")[-1]) assert_equal(cephfs.get_pool_replication(dp_dict["pool_id"]), size) @with_setup(setup_test) def test_disk_quota_exceeeded_error(): cephfs.mkdir("/dir-1", 0o755) cephfs.setxattr("/dir-1", "ceph.quota.max_bytes", b"4096", 0) fd = cephfs.open(b'/dir-1/file-1', 'w', 0o755) cephfs.ftruncate(fd, 4092) cephfs.lseek(fd, 4090, os.SEEK_SET) assert_raises(libcephfs.DiskQuotaExceeded, cephfs.write, fd, b"abcdeghiklmnopqrstuvwxyz1234567890qwertyuioddd", -1) cephfs.close(fd) cephfs.unlink(b"/dir-1/file-1") @with_setup(setup_test) def test_empty_snapshot_info(): cephfs.mkdir("/dir-1", 0o755) # snap without metadata cephfs.mkdir("/dir-1/.snap/snap0", 0o755) snap_info = cephfs.snap_info("/dir-1/.snap/snap0") assert_equal(snap_info["metadata"], {}) assert_greater(snap_info["id"], 0) cephfs.rmdir("/dir-1/.snap/snap0") # remove directory cephfs.rmdir("/dir-1") @with_setup(setup_test) def test_snapshot_info(): cephfs.mkdir("/dir-1", 0o755) # snap with custom metadata md = {"foo": "bar", "zig": "zag", "abcdefg": "12345"} cephfs.mksnap("/dir-1", "snap0", 0o755, metadata=md) snap_info = cephfs.snap_info("/dir-1/.snap/snap0") assert_equal(snap_info["metadata"]["foo"], md["foo"]) assert_equal(snap_info["metadata"]["zig"], md["zig"]) assert_equal(snap_info["metadata"]["abcdefg"], md["abcdefg"]) assert_greater(snap_info["id"], 0) cephfs.rmsnap("/dir-1", "snap0") # remove directory cephfs.rmdir("/dir-1") @with_setup(setup_test) def test_set_mount_timeout_post_mount(): assert_raises(libcephfs.LibCephFSStateError, cephfs.set_mount_timeout, 5) @with_setup(setup_test) def test_set_mount_timeout(): cephfs.unmount() cephfs.set_mount_timeout(5) cephfs.mount() @with_setup(setup_test) def test_set_mount_timeout_lt0(): cephfs.unmount() assert_raises(libcephfs.InvalidValue, cephfs.set_mount_timeout, -5) cephfs.mount() @with_setup(setup_test) def test_snapdiff(): cephfs.mkdir("/snapdiff_test", 0o755) fd = 
cephfs.open('/snapdiff_test/file-1', 'w', 0o755) cephfs.write(fd, b"1111", 0) cephfs.close(fd) fd = cephfs.open('/snapdiff_test/file-2', 'w', 0o755) cephfs.write(fd, b"2222", 0) cephfs.close(fd) cephfs.mksnap("/snapdiff_test", "snap1", 0o755) fd = cephfs.open('/snapdiff_test/file-1', 'w', 0o755) cephfs.write(fd, b"1222", 0) cephfs.close(fd) cephfs.unlink('/snapdiff_test/file-2') cephfs.mksnap("/snapdiff_test", "snap2", 0o755) snap1id = cephfs.snap_info(b"/snapdiff_test/.snap/snap1")['id'] snap2id = cephfs.snap_info(b"/snapdiff_test/.snap/snap2")['id'] diff = cephfs.opensnapdiff(b"/snapdiff_test", b"/", b"snap2", b"snap1") cnt = 0 e = diff.readdir() while e is not None: if (e.d_name == b"file-1"): cnt = cnt + 1 assert_equal(snap2id, e.d_snapid) elif (e.d_name == b"file-2"): cnt = cnt + 1 assert_equal(snap1id, e.d_snapid) elif (e.d_name != b"." and e.d_name != b".."): cnt = cnt + 1 e = diff.readdir() assert_equal(cnt, 2) diff.close() # remove directory purge_dir(b"/snapdiff_test");
34,223
34.538941
129
py
null
ceph-main/src/test/pybind/test_rados.py
from __future__ import print_function from nose import SkipTest from nose.plugins.attrib import attr from nose.tools import eq_ as eq, ok_ as ok, assert_raises from rados import (Rados, Error, RadosStateError, Object, ObjectExists, ObjectNotFound, ObjectBusy, NotConnected, LIBRADOS_ALL_NSPACES, WriteOpCtx, ReadOpCtx, LIBRADOS_CREATE_EXCLUSIVE, LIBRADOS_CMPXATTR_OP_EQ, LIBRADOS_CMPXATTR_OP_GT, LIBRADOS_CMPXATTR_OP_LT, OSError, LIBRADOS_SNAP_HEAD, LIBRADOS_OPERATION_BALANCE_READS, LIBRADOS_OPERATION_SKIPRWLOCKS, MonitorLog, MAX_ERRNO, NoData, ExtendMismatch) from datetime import timedelta import time import threading import json import errno import os import re import sys def test_rados_init_error(): assert_raises(Error, Rados, conffile='', rados_id='admin', name='client.admin') assert_raises(Error, Rados, conffile='', name='invalid') assert_raises(Error, Rados, conffile='', name='bad.invalid') def test_rados_init(): with Rados(conffile='', rados_id='admin'): pass with Rados(conffile='', name='client.admin'): pass with Rados(conffile='', name='client.admin'): pass with Rados(conffile='', name='client.admin'): pass def test_ioctx_context_manager(): with Rados(conffile='', rados_id='admin') as conn: with conn.open_ioctx('rbd') as ioctx: pass def test_parse_argv(): args = ['osd', 'pool', 'delete', 'foobar', 'foobar', '--yes-i-really-really-mean-it'] r = Rados() eq(args, r.conf_parse_argv(args)) def test_parse_argv_empty_str(): args = [''] r = Rados() eq(args, r.conf_parse_argv(args)) class TestRadosStateError(object): def _requires_configuring(self, rados): assert_raises(RadosStateError, rados.connect) def _requires_configuring_or_connected(self, rados): assert_raises(RadosStateError, rados.conf_read_file) assert_raises(RadosStateError, rados.conf_parse_argv, None) assert_raises(RadosStateError, rados.conf_parse_env) assert_raises(RadosStateError, rados.conf_get, 'opt') assert_raises(RadosStateError, rados.conf_set, 'opt', 'val') assert_raises(RadosStateError, 
rados.ping_monitor, '0') def _requires_connected(self, rados): assert_raises(RadosStateError, rados.pool_exists, 'foo') assert_raises(RadosStateError, rados.pool_lookup, 'foo') assert_raises(RadosStateError, rados.pool_reverse_lookup, 0) assert_raises(RadosStateError, rados.create_pool, 'foo') assert_raises(RadosStateError, rados.get_pool_base_tier, 0) assert_raises(RadosStateError, rados.delete_pool, 'foo') assert_raises(RadosStateError, rados.list_pools) assert_raises(RadosStateError, rados.get_fsid) assert_raises(RadosStateError, rados.open_ioctx, 'foo') assert_raises(RadosStateError, rados.mon_command, '', b'') assert_raises(RadosStateError, rados.osd_command, 0, '', b'') assert_raises(RadosStateError, rados.pg_command, '', '', b'') assert_raises(RadosStateError, rados.wait_for_latest_osdmap) assert_raises(RadosStateError, rados.blocklist_add, '127.0.0.1/123', 0) def test_configuring(self): rados = Rados(conffile='') eq('configuring', rados.state) self._requires_connected(rados) def test_connected(self): rados = Rados(conffile='') with rados: eq('connected', rados.state) self._requires_configuring(rados) def test_shutdown(self): rados = Rados(conffile='') with rados: pass eq('shutdown', rados.state) self._requires_configuring(rados) self._requires_configuring_or_connected(rados) self._requires_connected(rados) class TestRados(object): def setUp(self): self.rados = Rados(conffile='') self.rados.conf_parse_env('FOO_DOES_NOT_EXIST_BLAHBLAH') self.rados.conf_parse_env() self.rados.connect() # Assume any pre-existing pools are the cluster's defaults self.default_pools = self.rados.list_pools() def tearDown(self): self.rados.shutdown() def test_ping_monitor(self): assert_raises(ObjectNotFound, self.rados.ping_monitor, 'not_exists_monitor') cmd = {'prefix': 'mon dump', 'format':'json'} ret, buf, out = self.rados.mon_command(json.dumps(cmd), b'') for mon in json.loads(buf.decode('utf8'))['mons']: while True: output = self.rados.ping_monitor(mon['name']) if output is 
None: continue buf = json.loads(output) if buf.get('health'): break def test_annotations(self): with assert_raises(TypeError): self.rados.create_pool(0xf00) def test_create(self): self.rados.create_pool('foo') self.rados.delete_pool('foo') def test_create_utf8(self): poolname = "\u9ec4" self.rados.create_pool(poolname) assert self.rados.pool_exists(u"\u9ec4") self.rados.delete_pool(poolname) def test_pool_lookup_utf8(self): poolname = '\u9ec4' self.rados.create_pool(poolname) try: poolid = self.rados.pool_lookup(poolname) eq(poolname, self.rados.pool_reverse_lookup(poolid)) finally: self.rados.delete_pool(poolname) def test_eexist(self): self.rados.create_pool('foo') assert_raises(ObjectExists, self.rados.create_pool, 'foo') self.rados.delete_pool('foo') def list_non_default_pools(self): pools = self.rados.list_pools() for p in self.default_pools: pools.remove(p) return set(pools) def test_list_pools(self): eq(set(), self.list_non_default_pools()) self.rados.create_pool('foo') eq(set(['foo']), self.list_non_default_pools()) self.rados.create_pool('bar') eq(set(['foo', 'bar']), self.list_non_default_pools()) self.rados.create_pool('baz') eq(set(['foo', 'bar', 'baz']), self.list_non_default_pools()) self.rados.delete_pool('foo') eq(set(['bar', 'baz']), self.list_non_default_pools()) self.rados.delete_pool('baz') eq(set(['bar']), self.list_non_default_pools()) self.rados.delete_pool('bar') eq(set(), self.list_non_default_pools()) self.rados.create_pool('a' * 500) eq(set(['a' * 500]), self.list_non_default_pools()) self.rados.delete_pool('a' * 500) @attr('tier') def test_get_pool_base_tier(self): self.rados.create_pool('foo') try: self.rados.create_pool('foo-cache') try: pool_id = self.rados.pool_lookup('foo') tier_pool_id = self.rados.pool_lookup('foo-cache') cmd = {"prefix":"osd tier add", "pool":"foo", "tierpool":"foo-cache", "force_nonempty":""} ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30) eq(ret, 0) try: cmd = {"prefix":"osd tier 
cache-mode", "pool":"foo-cache", "tierpool":"foo-cache", "mode":"readonly", "yes_i_really_mean_it": True} ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30) eq(ret, 0) eq(self.rados.wait_for_latest_osdmap(), 0) eq(pool_id, self.rados.get_pool_base_tier(pool_id)) eq(pool_id, self.rados.get_pool_base_tier(tier_pool_id)) finally: cmd = {"prefix":"osd tier remove", "pool":"foo", "tierpool":"foo-cache"} ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30) eq(ret, 0) finally: self.rados.delete_pool('foo-cache') finally: self.rados.delete_pool('foo') def test_get_fsid(self): fsid = self.rados.get_fsid() assert re.match('[0-9a-f\-]{36}', fsid, re.I) def test_blocklist_add(self): self.rados.blocklist_add("1.2.3.4/123", 1) @attr('stats') def test_get_cluster_stats(self): stats = self.rados.get_cluster_stats() assert stats['kb'] > 0 assert stats['kb_avail'] > 0 assert stats['kb_used'] > 0 assert stats['num_objects'] >= 0 def test_monitor_log(self): lock = threading.Condition() def cb(arg, line, who, sec, nsec, seq, level, msg): # NOTE(sileht): the old pyrados API was received the pointer as int # instead of the value of arg eq(arg, "arg") with lock: lock.notify() return 0 # NOTE(sileht): force don't save the monitor into local var # to ensure all references are correctly tracked into the lib MonitorLog(self.rados, "debug", cb, "arg") with lock: lock.wait() MonitorLog(self.rados, "debug", None, None) eq(None, self.rados.monitor_callback) class TestIoctx(object): def setUp(self): self.rados = Rados(conffile='') self.rados.connect() self.rados.create_pool('test_pool') assert self.rados.pool_exists('test_pool') self.ioctx = self.rados.open_ioctx('test_pool') def tearDown(self): cmd = {"prefix":"osd unset", "key":"noup"} self.rados.mon_command(json.dumps(cmd), b'') self.ioctx.close() self.rados.delete_pool('test_pool') self.rados.shutdown() def test_get_last_version(self): version = self.ioctx.get_last_version() assert version >= 0 def 
test_get_stats(self): stats = self.ioctx.get_stats() eq(stats, {'num_objects_unfound': 0, 'num_objects_missing_on_primary': 0, 'num_object_clones': 0, 'num_objects': 0, 'num_object_copies': 0, 'num_bytes': 0, 'num_rd_kb': 0, 'num_wr_kb': 0, 'num_kb': 0, 'num_wr': 0, 'num_objects_degraded': 0, 'num_rd': 0}) def test_write(self): self.ioctx.write('abc', b'abc') eq(self.ioctx.read('abc'), b'abc') def test_write_full(self): self.ioctx.write('abc', b'abc') eq(self.ioctx.read('abc'), b'abc') self.ioctx.write_full('abc', b'd') eq(self.ioctx.read('abc'), b'd') def test_writesame(self): self.ioctx.writesame('ob', b'rzx', 9) eq(self.ioctx.read('ob'), b'rzxrzxrzx') def test_append(self): self.ioctx.write('abc', b'a') self.ioctx.append('abc', b'b') self.ioctx.append('abc', b'c') eq(self.ioctx.read('abc'), b'abc') def test_write_zeros(self): self.ioctx.write('abc', b'a\0b\0c') eq(self.ioctx.read('abc'), b'a\0b\0c') def test_trunc(self): self.ioctx.write('abc', b'abc') self.ioctx.trunc('abc', 2) eq(self.ioctx.read('abc'), b'ab') size = self.ioctx.stat('abc')[0] eq(size, 2) def test_cmpext(self): self.ioctx.write('test_object', b'abcdefghi') eq(0, self.ioctx.cmpext('test_object', b'abcdefghi', 0)) eq(-MAX_ERRNO - 4, self.ioctx.cmpext('test_object', b'abcdxxxxx', 0)) def test_list_objects_empty(self): eq(list(self.ioctx.list_objects()), []) def test_list_objects(self): self.ioctx.write('a', b'') self.ioctx.write('b', b'foo') self.ioctx.write_full('c', b'bar') self.ioctx.append('d', b'jazz') object_names = [obj.key for obj in self.ioctx.list_objects()] eq(sorted(object_names), ['a', 'b', 'c', 'd']) def test_list_ns_objects(self): self.ioctx.write('a', b'') self.ioctx.write('b', b'foo') self.ioctx.write_full('c', b'bar') self.ioctx.append('d', b'jazz') self.ioctx.set_namespace("ns1") self.ioctx.write('ns1-a', b'') self.ioctx.write('ns1-b', b'foo') self.ioctx.write_full('ns1-c', b'bar') self.ioctx.append('ns1-d', b'jazz') self.ioctx.append('d', b'jazz') 
self.ioctx.set_namespace(LIBRADOS_ALL_NSPACES) object_names = [(obj.nspace, obj.key) for obj in self.ioctx.list_objects()] eq(sorted(object_names), [('', 'a'), ('','b'), ('','c'), ('','d'),\ ('ns1', 'd'), ('ns1', 'ns1-a'), ('ns1', 'ns1-b'),\ ('ns1', 'ns1-c'), ('ns1', 'ns1-d')]) def test_xattrs(self): xattrs = dict(a=b'1', b=b'2', c=b'3', d=b'a\0b', e=b'\0', f=b'') self.ioctx.write('abc', b'') for key, value in xattrs.items(): self.ioctx.set_xattr('abc', key, value) eq(self.ioctx.get_xattr('abc', key), value) stored_xattrs = {} for key, value in self.ioctx.get_xattrs('abc'): stored_xattrs[key] = value eq(stored_xattrs, xattrs) def test_obj_xattrs(self): xattrs = dict(a=b'1', b=b'2', c=b'3', d=b'a\0b', e=b'\0', f=b'') self.ioctx.write('abc', b'') obj = list(self.ioctx.list_objects())[0] for key, value in xattrs.items(): obj.set_xattr(key, value) eq(obj.get_xattr(key), value) stored_xattrs = {} for key, value in obj.get_xattrs(): stored_xattrs[key] = value eq(stored_xattrs, xattrs) def test_get_pool_id(self): eq(self.ioctx.get_pool_id(), self.rados.pool_lookup('test_pool')) def test_get_pool_name(self): eq(self.ioctx.get_pool_name(), 'test_pool') def test_create_snap(self): assert_raises(ObjectNotFound, self.ioctx.remove_snap, 'foo') self.ioctx.create_snap('foo') self.ioctx.remove_snap('foo') def test_list_snaps_empty(self): eq(list(self.ioctx.list_snaps()), []) def test_list_snaps(self): snaps = ['snap1', 'snap2', 'snap3'] for snap in snaps: self.ioctx.create_snap(snap) listed_snaps = [snap.name for snap in self.ioctx.list_snaps()] eq(snaps, listed_snaps) def test_lookup_snap(self): self.ioctx.create_snap('foo') snap = self.ioctx.lookup_snap('foo') eq(snap.name, 'foo') def test_snap_timestamp(self): self.ioctx.create_snap('foo') snap = self.ioctx.lookup_snap('foo') snap.get_timestamp() def test_remove_snap(self): self.ioctx.create_snap('foo') (snap,) = self.ioctx.list_snaps() eq(snap.name, 'foo') self.ioctx.remove_snap('foo') eq(list(self.ioctx.list_snaps()), []) 
@attr('rollback') def test_snap_rollback(self): self.ioctx.write("insnap", b"contents1") self.ioctx.create_snap("snap1") self.ioctx.remove_object("insnap") self.ioctx.snap_rollback("insnap", "snap1") eq(self.ioctx.read("insnap"), b"contents1") self.ioctx.remove_snap("snap1") self.ioctx.remove_object("insnap") @attr('rollback') def test_snap_rollback_removed(self): self.ioctx.write("insnap", b"contents1") self.ioctx.create_snap("snap1") self.ioctx.write("insnap", b"contents2") self.ioctx.snap_rollback("insnap", "snap1") eq(self.ioctx.read("insnap"), b"contents1") self.ioctx.remove_snap("snap1") self.ioctx.remove_object("insnap") def test_snap_read(self): self.ioctx.write("insnap", b"contents1") self.ioctx.create_snap("snap1") self.ioctx.remove_object("insnap") snap = self.ioctx.lookup_snap("snap1") self.ioctx.set_read(snap.snap_id) eq(self.ioctx.read("insnap"), b"contents1") self.ioctx.set_read(LIBRADOS_SNAP_HEAD) self.ioctx.write("inhead", b"contents2") eq(self.ioctx.read("inhead"), b"contents2") self.ioctx.remove_snap("snap1") self.ioctx.remove_object("inhead") def test_set_omap(self): keys = ("1", "2", "3", "4", b"\xff") values = (b"aaa", b"bbb", b"ccc", b"\x04\x04\x04\x04", b"5") with WriteOpCtx() as write_op: self.ioctx.set_omap(write_op, keys, values) write_op.set_flags(LIBRADOS_OPERATION_SKIPRWLOCKS) self.ioctx.operate_write_op(write_op, "hw") with ReadOpCtx() as read_op: iter, ret = self.ioctx.get_omap_vals(read_op, "", "", 5, omap_key_type=bytes) eq(ret, 0) self.ioctx.operate_read_op(read_op, "hw") next(iter) eq(list(iter), [(b"2", b"bbb"), (b"3", b"ccc"), (b"4", b"\x04\x04\x04\x04"), (b"\xff", b"5")]) with ReadOpCtx() as read_op: iter, ret = self.ioctx.get_omap_vals(read_op, b"2", "", 4, omap_key_type=bytes) eq(ret, 0) self.ioctx.operate_read_op(read_op, "hw") eq((b"3", b"ccc"), next(iter)) eq(list(iter), [(b"4", b"\x04\x04\x04\x04"), (b"\xff", b"5")]) with ReadOpCtx() as read_op: iter, ret = self.ioctx.get_omap_vals(read_op, "", "2", 4, 
omap_key_type=bytes) eq(ret, 0) read_op.set_flags(LIBRADOS_OPERATION_BALANCE_READS) self.ioctx.operate_read_op(read_op, "hw") eq(list(iter), [(b"2", b"bbb")]) def test_set_omap_aio(self): lock = threading.Condition() count = [0] def cb(blah): with lock: count[0] += 1 lock.notify() return 0 keys = ("1", "2", "3", "4") values = (b"aaa", b"bbb", b"ccc", b"\x04\x04\x04\x04") with WriteOpCtx() as write_op: self.ioctx.set_omap(write_op, keys, values) comp = self.ioctx.operate_aio_write_op(write_op, "hw", cb, cb) comp.wait_for_complete() with lock: while count[0] < 2: lock.wait() eq(comp.get_return_value(), 0) with ReadOpCtx() as read_op: iter, ret = self.ioctx.get_omap_vals(read_op, "", "", 4) eq(ret, 0) comp = self.ioctx.operate_aio_read_op(read_op, "hw", cb, cb) comp.wait_for_complete() with lock: while count[0] < 4: lock.wait() eq(comp.get_return_value(), 0) next(iter) eq(list(iter), [("2", b"bbb"), ("3", b"ccc"), ("4", b"\x04\x04\x04\x04")]) def test_write_ops(self): with WriteOpCtx() as write_op: write_op.new(0) self.ioctx.operate_write_op(write_op, "write_ops") eq(self.ioctx.read('write_ops'), b'') write_op.write_full(b'1') write_op.append(b'2') self.ioctx.operate_write_op(write_op, "write_ops") eq(self.ioctx.read('write_ops'), b'12') write_op.write_full(b'12345') write_op.write(b'x', 2) self.ioctx.operate_write_op(write_op, "write_ops") eq(self.ioctx.read('write_ops'), b'12x45') write_op.write_full(b'12345') write_op.zero(2, 2) self.ioctx.operate_write_op(write_op, "write_ops") eq(self.ioctx.read('write_ops'), b'12\x00\x005') write_op.write_full(b'12345') write_op.truncate(2) self.ioctx.operate_write_op(write_op, "write_ops") eq(self.ioctx.read('write_ops'), b'12') write_op.remove() self.ioctx.operate_write_op(write_op, "write_ops") with assert_raises(ObjectNotFound): self.ioctx.read('write_ops') def test_execute_op(self): with WriteOpCtx() as write_op: write_op.execute("hello", "record_hello", b"ebs") self.ioctx.operate_write_op(write_op, "object") 
eq(self.ioctx.read('object'), b"Hello, ebs!") def test_writesame_op(self): with WriteOpCtx() as write_op: write_op.writesame(b'rzx', 9) self.ioctx.operate_write_op(write_op, 'abc') eq(self.ioctx.read('abc'), b'rzxrzxrzx') def test_get_omap_vals_by_keys(self): keys = ("1", "2", "3", "4", b"\xff") values = (b"aaa", b"bbb", b"ccc", b"\x04\x04\x04\x04", b"5") with WriteOpCtx() as write_op: self.ioctx.set_omap(write_op, keys, values) self.ioctx.operate_write_op(write_op, "hw") with ReadOpCtx() as read_op: iter, ret = self.ioctx.get_omap_vals_by_keys(read_op,("3","4",b"\xff"), omap_key_type=bytes) eq(ret, 0) self.ioctx.operate_read_op(read_op, "hw") eq(list(iter), [(b"3", b"ccc"), (b"4", b"\x04\x04\x04\x04"), (b"\xff", b"5")]) with ReadOpCtx() as read_op: iter, ret = self.ioctx.get_omap_vals_by_keys(read_op,("3","4",), omap_key_type=bytes) eq(ret, 0) with assert_raises(ObjectNotFound): self.ioctx.operate_read_op(read_op, "no_such") def test_get_omap_keys(self): keys = ("1", "2", "3") values = (b"aaa", b"bbb", b"ccc") with WriteOpCtx() as write_op: self.ioctx.set_omap(write_op, keys, values) self.ioctx.operate_write_op(write_op, "hw") with ReadOpCtx() as read_op: iter, ret = self.ioctx.get_omap_keys(read_op,"",2) eq(ret, 0) self.ioctx.operate_read_op(read_op, "hw") eq(list(iter), [("1", None), ("2", None)]) with ReadOpCtx() as read_op: iter, ret = self.ioctx.get_omap_keys(read_op,"",2) eq(ret, 0) with assert_raises(ObjectNotFound): self.ioctx.operate_read_op(read_op, "no_such") def test_clear_omap(self): keys = ("1", "2", "3") values = (b"aaa", b"bbb", b"ccc") with WriteOpCtx() as write_op: self.ioctx.set_omap(write_op, keys, values) self.ioctx.operate_write_op(write_op, "hw") with WriteOpCtx() as write_op_1: self.ioctx.clear_omap(write_op_1) self.ioctx.operate_write_op(write_op_1, "hw") with ReadOpCtx() as read_op: iter, ret = self.ioctx.get_omap_vals_by_keys(read_op,("1",)) eq(ret, 0) self.ioctx.operate_read_op(read_op, "hw") eq(list(iter), []) def 
test_remove_omap_range2(self): keys = ("1", "2", "3", "4") values = (b"a", b"bb", b"ccc", b"dddd") with WriteOpCtx() as write_op: self.ioctx.set_omap(write_op, keys, values) self.ioctx.operate_write_op(write_op, "test_obj") with ReadOpCtx() as read_op: iter, ret = self.ioctx.get_omap_vals_by_keys(read_op, keys) eq(ret, 0) self.ioctx.operate_read_op(read_op, "test_obj") eq(list(iter), list(zip(keys, values))) with WriteOpCtx() as write_op: self.ioctx.remove_omap_range2(write_op, "1", "4") self.ioctx.operate_write_op(write_op, "test_obj") with ReadOpCtx() as read_op: iter, ret = self.ioctx.get_omap_vals_by_keys(read_op, keys) eq(ret, 0) self.ioctx.operate_read_op(read_op, "test_obj") eq(list(iter), [("4", b"dddd")]) def test_omap_cmp(self): object_id = 'test' self.ioctx.write(object_id, b'omap_cmp') with WriteOpCtx() as write_op: self.ioctx.set_omap(write_op, ('key1',), ('1',)) self.ioctx.operate_write_op(write_op, object_id) with WriteOpCtx() as write_op: write_op.omap_cmp('key1', '1', LIBRADOS_CMPXATTR_OP_EQ) self.ioctx.set_omap(write_op, ('key1',), ('2',)) self.ioctx.operate_write_op(write_op, object_id) with ReadOpCtx() as read_op: iter, ret = self.ioctx.get_omap_vals_by_keys(read_op, ('key1',)) eq(ret, 0) self.ioctx.operate_read_op(read_op, object_id) eq(list(iter), [('key1', b'2')]) with WriteOpCtx() as write_op: write_op.omap_cmp('key1', '1', LIBRADOS_CMPXATTR_OP_GT) self.ioctx.set_omap(write_op, ('key1',), ('3',)) self.ioctx.operate_write_op(write_op, object_id) with ReadOpCtx() as read_op: iter, ret = self.ioctx.get_omap_vals_by_keys(read_op, ('key1',)) eq(ret, 0) self.ioctx.operate_read_op(read_op, object_id) eq(list(iter), [('key1', b'3')]) with WriteOpCtx() as write_op: write_op.omap_cmp('key1', '4', LIBRADOS_CMPXATTR_OP_LT) self.ioctx.set_omap(write_op, ('key1',), ('4',)) self.ioctx.operate_write_op(write_op, object_id) with ReadOpCtx() as read_op: iter, ret = self.ioctx.get_omap_vals_by_keys(read_op, ('key1',)) eq(ret, 0) 
self.ioctx.operate_read_op(read_op, object_id) eq(list(iter), [('key1', b'4')]) with WriteOpCtx() as write_op: write_op.omap_cmp('key1', '1', LIBRADOS_CMPXATTR_OP_EQ) self.ioctx.set_omap(write_op, ('key1',), ('5',)) try: self.ioctx.operate_write_op(write_op, object_id) except (OSError, ExtendMismatch) as e: eq(e.errno, 125) else: message = "omap_cmp did not raise Exception when omap content does not match" raise AssertionError(message) def test_cmpext_op(self): object_id = 'test' with WriteOpCtx() as write_op: write_op.write(b'12345', 0) self.ioctx.operate_write_op(write_op, object_id) with WriteOpCtx() as write_op: write_op.cmpext(b'12345', 0) write_op.write(b'54321', 0) self.ioctx.operate_write_op(write_op, object_id) eq(self.ioctx.read(object_id), b'54321') with WriteOpCtx() as write_op: write_op.cmpext(b'56789', 0) write_op.write(b'12345', 0) try: self.ioctx.operate_write_op(write_op, object_id) except ExtendMismatch as e: # the cmpext_result compare with expected error number, it should be (-MAX_ERRNO - 1) # where "1" is the offset of the first unmatched byte eq(-e.errno, -MAX_ERRNO - 1) eq(e.offset, 1) else: message = "cmpext did not raise Exception when object content does not match" raise AssertionError(message) with ReadOpCtx() as read_op: read_op.cmpext(b'54321', 0) self.ioctx.operate_read_op(read_op, object_id) with ReadOpCtx() as read_op: read_op.cmpext(b'54789', 0) try: self.ioctx.operate_read_op(read_op, object_id) except ExtendMismatch as e: # the cmpext_result compare with expected error number, it should be (-MAX_ERRNO - 2) # where "2" is the offset of the first unmatched byte eq(-e.errno, -MAX_ERRNO - 2) eq(e.offset, 2) else: message = "cmpext did not raise Exception when object content does not match" raise AssertionError(message) def test_xattrs_op(self): xattrs = dict(a=b'1', b=b'2', c=b'3', d=b'a\0b', e=b'\0') with WriteOpCtx() as write_op: write_op.new(LIBRADOS_CREATE_EXCLUSIVE) for key, value in xattrs.items(): write_op.set_xattr(key, value) 
self.ioctx.operate_write_op(write_op, 'abc') eq(self.ioctx.get_xattr('abc', key), value) stored_xattrs_1 = {} for key, value in self.ioctx.get_xattrs('abc'): stored_xattrs_1[key] = value eq(stored_xattrs_1, xattrs) for key in xattrs.keys(): write_op.rm_xattr(key) self.ioctx.operate_write_op(write_op, 'abc') stored_xattrs_2 = {} for key, value in self.ioctx.get_xattrs('abc'): stored_xattrs_2[key] = value eq(stored_xattrs_2, {}) write_op.remove() self.ioctx.operate_write_op(write_op, 'abc') def test_locator(self): self.ioctx.set_locator_key("bar") self.ioctx.write('foo', b'contents1') objects = [i for i in self.ioctx.list_objects()] eq(len(objects), 1) eq(self.ioctx.get_locator_key(), "bar") self.ioctx.set_locator_key("") objects[0].seek(0) objects[0].write(b"contents2") eq(self.ioctx.get_locator_key(), "") self.ioctx.set_locator_key("bar") contents = self.ioctx.read("foo") eq(contents, b"contents2") eq(self.ioctx.get_locator_key(), "bar") objects[0].remove() objects = [i for i in self.ioctx.list_objects()] eq(objects, []) self.ioctx.set_locator_key("") def test_operate_aio_write_op(self): lock = threading.Condition() count = [0] def cb(blah): with lock: count[0] += 1 lock.notify() return 0 with WriteOpCtx() as write_op: write_op.write(b'rzx') comp = self.ioctx.operate_aio_write_op(write_op, "object", cb, cb) comp.wait_for_complete() with lock: while count[0] < 2: lock.wait() eq(comp.get_return_value(), 0) eq(self.ioctx.read('object'), b'rzx') def test_aio_write(self): lock = threading.Condition() count = [0] def cb(blah): with lock: count[0] += 1 lock.notify() return 0 comp = self.ioctx.aio_write("foo", b"bar", 0, cb, cb) comp.wait_for_complete() with lock: while count[0] < 2: lock.wait() eq(comp.get_return_value(), 0) contents = self.ioctx.read("foo") eq(contents, b"bar") [i.remove() for i in self.ioctx.list_objects()] def test_aio_cmpext(self): lock = threading.Condition() count = [0] def cb(blah): with lock: count[0] += 1 lock.notify() return 0 
self.ioctx.write('test_object', b'abcdefghi') comp = self.ioctx.aio_cmpext('test_object', b'abcdefghi', 0, cb) comp.wait_for_complete() with lock: while count[0] < 1: lock.wait() eq(comp.get_return_value(), 0) def test_aio_rmxattr(self): lock = threading.Condition() count = [0] def cb(blah): with lock: count[0] += 1 lock.notify() return 0 self.ioctx.set_xattr("xyz", "key", b'value') eq(self.ioctx.get_xattr("xyz", "key"), b'value') comp = self.ioctx.aio_rmxattr("xyz", "key", cb) comp.wait_for_complete() with lock: while count[0] < 1: lock.wait() eq(comp.get_return_value(), 0) with assert_raises(NoData): self.ioctx.get_xattr("xyz", "key") def test_aio_write_no_comp_ref(self): lock = threading.Condition() count = [0] def cb(blah): with lock: count[0] += 1 lock.notify() return 0 # NOTE(sileht): force don't save the comp into local var # to ensure all references are correctly tracked into the lib self.ioctx.aio_write("foo", b"bar", 0, cb, cb) with lock: while count[0] < 2: lock.wait() contents = self.ioctx.read("foo") eq(contents, b"bar") [i.remove() for i in self.ioctx.list_objects()] def test_aio_append(self): lock = threading.Condition() count = [0] def cb(blah): with lock: count[0] += 1 lock.notify() return 0 comp = self.ioctx.aio_write("foo", b"bar", 0, cb, cb) comp2 = self.ioctx.aio_append("foo", b"baz", cb, cb) comp.wait_for_complete() contents = self.ioctx.read("foo") eq(contents, b"barbaz") with lock: while count[0] < 4: lock.wait() eq(comp.get_return_value(), 0) eq(comp2.get_return_value(), 0) [i.remove() for i in self.ioctx.list_objects()] def test_aio_write_full(self): lock = threading.Condition() count = [0] def cb(blah): with lock: count[0] += 1 lock.notify() return 0 self.ioctx.aio_write("foo", b"barbaz", 0, cb, cb) comp = self.ioctx.aio_write_full("foo", b"bar", cb, cb) comp.wait_for_complete() with lock: while count[0] < 2: lock.wait() eq(comp.get_return_value(), 0) contents = self.ioctx.read("foo") eq(contents, b"bar") [i.remove() for i in 
self.ioctx.list_objects()] def test_aio_writesame(self): lock = threading.Condition() count = [0] def cb(blah): with lock: count[0] += 1 lock.notify() return 0 comp = self.ioctx.aio_writesame("abc", b"rzx", 9, 0, cb) comp.wait_for_complete() with lock: while count[0] < 1: lock.wait() eq(comp.get_return_value(), 0) eq(self.ioctx.read("abc"), b"rzxrzxrzx") [i.remove() for i in self.ioctx.list_objects()] def test_aio_stat(self): lock = threading.Condition() count = [0] def cb(_, size, mtime): with lock: count[0] += 1 lock.notify() comp = self.ioctx.aio_stat("foo", cb) comp.wait_for_complete() with lock: while count[0] < 1: lock.wait() eq(comp.get_return_value(), -2) self.ioctx.write("foo", b"bar") comp = self.ioctx.aio_stat("foo", cb) comp.wait_for_complete() with lock: while count[0] < 2: lock.wait() eq(comp.get_return_value(), 0) [i.remove() for i in self.ioctx.list_objects()] def test_aio_remove(self): lock = threading.Condition() count = [0] def cb(blah): with lock: count[0] += 1 lock.notify() return 0 self.ioctx.write('foo', b'wrx') eq(self.ioctx.read('foo'), b'wrx') comp = self.ioctx.aio_remove('foo', cb, cb) comp.wait_for_complete() with lock: while count[0] < 2: lock.wait() eq(comp.get_return_value(), 0) eq(list(self.ioctx.list_objects()), []) def _take_down_acting_set(self, pool, objectname): # find acting_set for pool:objectname and take it down; used to # verify that async reads don't complete while acting set is missing cmd = { "prefix":"osd map", "pool":pool, "object":objectname, "format":"json", } r, jsonout, _ = self.rados.mon_command(json.dumps(cmd), b'') objmap = json.loads(jsonout.decode("utf-8")) acting_set = objmap['acting'] cmd = {"prefix":"osd set", "key":"noup"} r, _, _ = self.rados.mon_command(json.dumps(cmd), b'') eq(r, 0) cmd = {"prefix":"osd down", "ids":[str(i) for i in acting_set]} r, _, _ = self.rados.mon_command(json.dumps(cmd), b'') eq(r, 0) # wait for OSDs to acknowledge the down eq(self.rados.wait_for_latest_osdmap(), 0) def 
_let_osds_back_up(self): cmd = {"prefix":"osd unset", "key":"noup"} r, _, _ = self.rados.mon_command(json.dumps(cmd), b'') eq(r, 0) @attr('wait') def test_aio_read_wait_for_complete(self): # use wait_for_complete() and wait for cb by # watching retval[0] # this is a list so that the local cb() can modify it payload = b"bar\000frob" self.ioctx.write("foo", payload) self._take_down_acting_set('test_pool', 'foo') retval = [None] lock = threading.Condition() def cb(_, buf): with lock: retval[0] = buf lock.notify() comp = self.ioctx.aio_read("foo", len(payload), 0, cb) eq(False, comp.is_complete()) time.sleep(3) eq(False, comp.is_complete()) with lock: eq(None, retval[0]) self._let_osds_back_up() comp.wait_for_complete() loops = 0 with lock: while retval[0] is None and loops <= 10: lock.wait(timeout=5) loops += 1 assert(loops <= 10) eq(retval[0], payload) eq(sys.getrefcount(comp), 2) @attr('wait') def test_aio_read_wait_for_complete_and_cb(self): # use wait_for_complete_and_cb(), verify retval[0] is # set by the time we regain control payload = b"bar\000frob" self.ioctx.write("foo", payload) self._take_down_acting_set('test_pool', 'foo') # this is a list so that the local cb() can modify it retval = [None] lock = threading.Condition() def cb(_, buf): with lock: retval[0] = buf lock.notify() comp = self.ioctx.aio_read("foo", len(payload), 0, cb) eq(False, comp.is_complete()) time.sleep(3) eq(False, comp.is_complete()) with lock: eq(None, retval[0]) self._let_osds_back_up() comp.wait_for_complete_and_cb() assert(retval[0] is not None) eq(retval[0], payload) eq(sys.getrefcount(comp), 2) @attr('wait') def test_aio_read_wait_for_complete_and_cb_error(self): # error case, use wait_for_complete_and_cb(), verify retval[0] is # set by the time we regain control self._take_down_acting_set('test_pool', 'bar') # this is a list so that the local cb() can modify it retval = [1] lock = threading.Condition() def cb(_, buf): with lock: retval[0] = buf lock.notify() # read from a DNE 
object comp = self.ioctx.aio_read("bar", 3, 0, cb) eq(False, comp.is_complete()) time.sleep(3) eq(False, comp.is_complete()) with lock: eq(1, retval[0]) self._let_osds_back_up() comp.wait_for_complete_and_cb() eq(None, retval[0]) assert(comp.get_return_value() < 0) eq(sys.getrefcount(comp), 2) def test_lock(self): self.ioctx.lock_exclusive("foo", "lock", "locker", "desc_lock", 10000, 0) assert_raises(ObjectExists, self.ioctx.lock_exclusive, "foo", "lock", "locker", "desc_lock", 10000, 0) self.ioctx.unlock("foo", "lock", "locker") assert_raises(ObjectNotFound, self.ioctx.unlock, "foo", "lock", "locker") self.ioctx.lock_shared("foo", "lock", "locker1", "tag", "desc_lock", 10000, 0) self.ioctx.lock_shared("foo", "lock", "locker2", "tag", "desc_lock", 10000, 0) assert_raises(ObjectBusy, self.ioctx.lock_exclusive, "foo", "lock", "locker3", "desc_lock", 10000, 0) self.ioctx.unlock("foo", "lock", "locker1") self.ioctx.unlock("foo", "lock", "locker2") assert_raises(ObjectNotFound, self.ioctx.unlock, "foo", "lock", "locker1") assert_raises(ObjectNotFound, self.ioctx.unlock, "foo", "lock", "locker2") def test_execute(self): self.ioctx.write("foo", b"") # ensure object exists ret, buf = self.ioctx.execute("foo", "hello", "say_hello", b"") eq(buf, b"Hello, world!") ret, buf = self.ioctx.execute("foo", "hello", "say_hello", b"nose") eq(buf, b"Hello, nose!") def test_aio_execute(self): count = [0] retval = [None] lock = threading.Condition() def cb(_, buf): with lock: if retval[0] is None: retval[0] = buf count[0] += 1 lock.notify() self.ioctx.write("foo", b"") # ensure object exists comp = self.ioctx.aio_execute("foo", "hello", "say_hello", b"", 32, cb, cb) comp.wait_for_complete() with lock: while count[0] < 2: lock.wait() eq(comp.get_return_value(), 13) eq(retval[0], b"Hello, world!") retval[0] = None comp = self.ioctx.aio_execute("foo", "hello", "say_hello", b"nose", 32, cb, cb) comp.wait_for_complete() with lock: while count[0] < 4: lock.wait() eq(comp.get_return_value(), 
12) eq(retval[0], b"Hello, nose!") [i.remove() for i in self.ioctx.list_objects()] def test_aio_setxattr(self): lock = threading.Condition() count = [0] def cb(blah): with lock: count[0] += 1 lock.notify() return 0 comp = self.ioctx.aio_setxattr("obj", "key", b'value', cb) comp.wait_for_complete() with lock: while count[0] < 1: lock.wait() eq(comp.get_return_value(), 0) eq(self.ioctx.get_xattr("obj", "key"), b'value') def test_applications(self): cmd = {"prefix":"osd dump", "format":"json"} ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'') eq(ret, 0) assert len(buf) > 0 release = json.loads(buf.decode("utf-8")).get("require_osd_release", None) if not release or release[0] < 'l': raise SkipTest eq([], self.ioctx.application_list()) self.ioctx.application_enable("app1") assert_raises(Error, self.ioctx.application_enable, "app2") self.ioctx.application_enable("app2", True) assert_raises(Error, self.ioctx.application_metadata_list, "dne") eq([], self.ioctx.application_metadata_list("app1")) assert_raises(Error, self.ioctx.application_metadata_set, "dne", "key", "key") self.ioctx.application_metadata_set("app1", "key1", "val1") eq("val1", self.ioctx.application_metadata_get("app1", "key1")) self.ioctx.application_metadata_set("app1", "key2", "val2") eq("val2", self.ioctx.application_metadata_get("app1", "key2")) self.ioctx.application_metadata_set("app2", "key1", "val1") eq("val1", self.ioctx.application_metadata_get("app2", "key1")) eq([("key1", "val1"), ("key2", "val2")], self.ioctx.application_metadata_list("app1")) self.ioctx.application_metadata_remove("app1", "key1") eq([("key2", "val2")], self.ioctx.application_metadata_list("app1")) def test_service_daemon(self): name = "pid-" + str(os.getpid()) metadata = {'version': '3.14', 'memory': '42'} self.rados.service_daemon_register("laundry", name, metadata) status = {'result': 'unknown', 'test': 'running'} self.rados.service_daemon_update(status) def test_alignment(self): eq(self.ioctx.alignment(), None) 
@attr('ec') class TestIoctxEc(object): def setUp(self): self.rados = Rados(conffile='') self.rados.connect() self.pool = 'test-ec' self.profile = 'testprofile-%s' % self.pool cmd = {"prefix": "osd erasure-code-profile set", "name": self.profile, "profile": ["k=2", "m=1", "crush-failure-domain=osd"]} ret, buf, out = self.rados.mon_command(json.dumps(cmd), b'', timeout=30) eq(ret, 0, msg=out) # create ec pool with profile created above cmd = {'prefix': 'osd pool create', 'pg_num': 8, 'pgp_num': 8, 'pool': self.pool, 'pool_type': 'erasure', 'erasure_code_profile': self.profile} ret, buf, out = self.rados.mon_command(json.dumps(cmd), b'', timeout=30) eq(ret, 0, msg=out) assert self.rados.pool_exists(self.pool) self.ioctx = self.rados.open_ioctx(self.pool) def tearDown(self): cmd = {"prefix": "osd unset", "key": "noup"} self.rados.mon_command(json.dumps(cmd), b'') self.ioctx.close() self.rados.delete_pool(self.pool) self.rados.shutdown() def test_alignment(self): eq(self.ioctx.alignment(), 8192) class TestIoctx2(object): def setUp(self): self.rados = Rados(conffile='') self.rados.connect() self.rados.create_pool('test_pool') assert self.rados.pool_exists('test_pool') pool_id = self.rados.pool_lookup('test_pool') assert pool_id > 0 self.ioctx2 = self.rados.open_ioctx2(pool_id) def tearDown(self): cmd = {"prefix": "osd unset", "key": "noup"} self.rados.mon_command(json.dumps(cmd), b'') self.ioctx2.close() self.rados.delete_pool('test_pool') self.rados.shutdown() def test_get_last_version(self): version = self.ioctx2.get_last_version() assert version >= 0 def test_get_stats(self): stats = self.ioctx2.get_stats() eq(stats, {'num_objects_unfound': 0, 'num_objects_missing_on_primary': 0, 'num_object_clones': 0, 'num_objects': 0, 'num_object_copies': 0, 'num_bytes': 0, 'num_rd_kb': 0, 'num_wr_kb': 0, 'num_kb': 0, 'num_wr': 0, 'num_objects_degraded': 0, 'num_rd': 0}) class TestObject(object): def setUp(self): self.rados = Rados(conffile='') self.rados.connect() 
self.rados.create_pool('test_pool') assert self.rados.pool_exists('test_pool') self.ioctx = self.rados.open_ioctx('test_pool') self.ioctx.write('foo', b'bar') self.object = Object(self.ioctx, 'foo') def tearDown(self): self.ioctx.close() self.ioctx = None self.rados.delete_pool('test_pool') self.rados.shutdown() self.rados = None def test_read(self): eq(self.object.read(3), b'bar') eq(self.object.read(100), b'') def test_seek(self): self.object.write(b'blah') self.object.seek(0) eq(self.object.read(4), b'blah') self.object.seek(1) eq(self.object.read(3), b'lah') def test_write(self): self.object.write(b'barbaz') self.object.seek(0) eq(self.object.read(3), b'bar') eq(self.object.read(3), b'baz') class TestIoCtxSelfManagedSnaps(object): def setUp(self): self.rados = Rados(conffile='') self.rados.connect() self.rados.create_pool('test_pool') assert self.rados.pool_exists('test_pool') self.ioctx = self.rados.open_ioctx('test_pool') def tearDown(self): cmd = {"prefix":"osd unset", "key":"noup"} self.rados.mon_command(json.dumps(cmd), b'') self.ioctx.close() self.rados.delete_pool('test_pool') self.rados.shutdown() @attr('rollback') def test(self): # cannot mix-and-match pool and self-managed snapshot mode self.ioctx.set_self_managed_snap_write([]) self.ioctx.write('abc', b'abc') snap_id_1 = self.ioctx.create_self_managed_snap() self.ioctx.set_self_managed_snap_write([snap_id_1]) self.ioctx.write('abc', b'def') snap_id_2 = self.ioctx.create_self_managed_snap() self.ioctx.set_self_managed_snap_write([snap_id_1, snap_id_2]) self.ioctx.write('abc', b'ghi') self.ioctx.rollback_self_managed_snap('abc', snap_id_1) eq(self.ioctx.read('abc'), b'abc') self.ioctx.rollback_self_managed_snap('abc', snap_id_2) eq(self.ioctx.read('abc'), b'def') self.ioctx.remove_self_managed_snap(snap_id_1) self.ioctx.remove_self_managed_snap(snap_id_2) class TestCommand(object): def setUp(self): self.rados = Rados(conffile='') self.rados.connect() def tearDown(self): self.rados.shutdown() def 
test_monmap_dump(self): # check for success and some plain output with epoch in it cmd = {"prefix":"mon dump"} ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30) eq(ret, 0) assert len(buf) > 0 assert(b'epoch' in buf) # JSON, and grab current epoch cmd['format'] = 'json' ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30) eq(ret, 0) assert len(buf) > 0 d = json.loads(buf.decode("utf-8")) assert('epoch' in d) epoch = d['epoch'] # assume epoch + 1000 does not exist; test for ENOENT cmd['epoch'] = epoch + 1000 ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30) eq(ret, -errno.ENOENT) eq(len(buf), 0) del cmd['epoch'] # send to specific target by name, rank cmd = {"prefix": "version"} target = d['mons'][0]['name'] print(target) ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30, target=target) eq(ret, 0) assert len(buf) > 0 e = json.loads(buf.decode("utf-8")) assert('release' in e) target = d['mons'][0]['rank'] print(target) ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30, target=target) eq(ret, 0) assert len(buf) > 0 e = json.loads(buf.decode("utf-8")) assert('release' in e) @attr('bench') def test_osd_bench(self): cmd = dict(prefix='bench', size=4096, count=8192) ret, buf, err = self.rados.osd_command(0, json.dumps(cmd), b'', timeout=30) eq(ret, 0) assert len(buf) > 0 out = json.loads(buf.decode('utf-8')) eq(out['blocksize'], cmd['size']) eq(out['bytes_written'], cmd['count']) def test_ceph_osd_pool_create_utf8(self): poolname = "\u9ec5" cmd = {"prefix": "osd pool create", "pg_num": 16, "pool": poolname} ret, buf, out = self.rados.mon_command(json.dumps(cmd), b'') eq(ret, 0) assert len(out) > 0 eq(u"pool '\u9ec5' created", out) @attr('watch') class TestWatchNotify(object): OID = "test_watch_notify" def setUp(self): self.rados = Rados(conffile='') self.rados.connect() self.rados.create_pool('test_pool') assert self.rados.pool_exists('test_pool') self.ioctx = 
self.rados.open_ioctx('test_pool') self.ioctx.write(self.OID, b'test watch notify') self.lock = threading.Condition() self.notify_cnt = {} self.notify_data = {} self.notify_error = {} # aio related self.ack_cnt = {} self.ack_data = {} self.instance_id = self.rados.get_instance_id() def tearDown(self): self.ioctx.close() self.rados.delete_pool('test_pool') self.rados.shutdown() def make_callback(self): def callback(notify_id, notifier_id, watch_id, data): with self.lock: if watch_id not in self.notify_cnt: self.notify_cnt[watch_id] = 1 elif self.notify_data[watch_id] != data: self.notify_cnt[watch_id] += 1 self.notify_data[watch_id] = data return callback def make_error_callback(self): def callback(watch_id, error): with self.lock: self.notify_error[watch_id] = error return callback def test(self): with self.ioctx.watch(self.OID, self.make_callback(), self.make_error_callback()) as watch1: watch_id1 = watch1.get_id() assert(watch_id1 > 0) with self.rados.open_ioctx('test_pool') as ioctx: watch2 = ioctx.watch(self.OID, self.make_callback(), self.make_error_callback()) watch_id2 = watch2.get_id() assert(watch_id2 > 0) assert(self.ioctx.notify(self.OID, 'test')) with self.lock: assert(watch_id1 in self.notify_cnt) assert(watch_id2 in self.notify_cnt) eq(self.notify_cnt[watch_id1], 1) eq(self.notify_cnt[watch_id2], 1) eq(self.notify_data[watch_id1], b'test') eq(self.notify_data[watch_id2], b'test') assert(watch1.check() >= timedelta()) assert(watch2.check() >= timedelta()) assert(self.ioctx.notify(self.OID, 'best')) with self.lock: eq(self.notify_cnt[watch_id1], 2) eq(self.notify_cnt[watch_id2], 2) eq(self.notify_data[watch_id1], b'best') eq(self.notify_data[watch_id2], b'best') watch2.close() assert(self.ioctx.notify(self.OID, 'rest')) with self.lock: eq(self.notify_cnt[watch_id1], 3) eq(self.notify_cnt[watch_id2], 2) eq(self.notify_data[watch_id1], b'rest') eq(self.notify_data[watch_id2], b'best') assert(watch1.check() >= timedelta()) 
self.ioctx.remove_object(self.OID) for i in range(10): with self.lock: if watch_id1 in self.notify_error: break time.sleep(1) eq(self.notify_error[watch_id1], -errno.ENOTCONN) assert_raises(NotConnected, watch1.check) assert_raises(ObjectNotFound, self.ioctx.notify, self.OID, 'test') def make_callback_reply(self): def callback(notify_id, notifier_id, watch_id, data): with self.lock: return data return callback def notify_callback(self, _, r, ack_list, timeout_list): eq(r, 0) with self.lock: for notifier_id, _, notifier_data in ack_list: if notifier_id not in self.ack_cnt: self.ack_cnt[notifier_id] = 0 self.ack_cnt[notifier_id] += 1 self.ack_data[notifier_id] = notifier_data def notify_callback_err(self, _, r, ack_list, timeout_list): eq(r, -errno.ENOENT) def test_aio_notify(self): with self.ioctx.watch(self.OID, self.make_callback_reply(), self.make_error_callback()) as watch1: watch_id1 = watch1.get_id() ok(watch_id1 > 0) with self.rados.open_ioctx('test_pool') as ioctx: watch2 = ioctx.watch(self.OID, self.make_callback_reply(), self.make_error_callback()) watch_id2 = watch2.get_id() ok(watch_id2 > 0) comp = self.ioctx.aio_notify(self.OID, self.notify_callback, msg='test') comp.wait_for_complete_and_cb() with self.lock: ok(self.instance_id in self.ack_cnt) eq(self.ack_cnt[self.instance_id], 2) eq(self.ack_data[self.instance_id], b'test') ok(watch1.check() >= timedelta()) ok(watch2.check() >= timedelta()) comp = self.ioctx.aio_notify(self.OID, self.notify_callback, msg='best') comp.wait_for_complete_and_cb() with self.lock: eq(self.ack_cnt[self.instance_id], 4) eq(self.ack_data[self.instance_id], b'best') watch2.close() comp = self.ioctx.aio_notify(self.OID, self.notify_callback, msg='rest') comp.wait_for_complete_and_cb() with self.lock: eq(self.ack_cnt[self.instance_id], 5) eq(self.ack_data[self.instance_id], b'rest') assert(watch1.check() >= timedelta()) self.ioctx.remove_object(self.OID) for i in range(10): with self.lock: if watch_id1 in self.notify_error: 
break time.sleep(1) eq(self.notify_error[watch_id1], -errno.ENOTCONN) assert_raises(NotConnected, watch1.check) comp = self.ioctx.aio_notify(self.OID, self.notify_callback_err, msg='test') comp.wait_for_complete_and_cb()
57,273
36.07055
151
py
null
ceph-main/src/test/pybind/test_rbd.py
# vim: expandtab smarttab shiftwidth=4 softtabstop=4 import base64 import copy import errno import functools import json import socket import os import platform import time import sys from datetime import datetime, timedelta from nose import with_setup, SkipTest from nose.plugins.attrib import attr from nose.tools import (eq_ as eq, assert_raises, assert_not_equal, assert_greater_equal) from rados import (Rados, LIBRADOS_OP_FLAG_FADVISE_DONTNEED, LIBRADOS_OP_FLAG_FADVISE_NOCACHE, LIBRADOS_OP_FLAG_FADVISE_RANDOM) from rbd import (RBD, Group, Image, ImageNotFound, InvalidArgument, ImageExists, ImageBusy, ImageHasSnapshots, ReadOnlyImage, FunctionNotSupported, ArgumentOutOfRange, ECANCELED, OperationCanceled, DiskQuotaExceeded, ConnectionShutdown, PermissionError, RBD_FEATURE_LAYERING, RBD_FEATURE_STRIPINGV2, RBD_FEATURE_EXCLUSIVE_LOCK, RBD_FEATURE_JOURNALING, RBD_FEATURE_DEEP_FLATTEN, RBD_FEATURE_FAST_DIFF, RBD_FEATURE_OBJECT_MAP, RBD_MIRROR_MODE_DISABLED, RBD_MIRROR_MODE_IMAGE, RBD_MIRROR_MODE_POOL, RBD_MIRROR_IMAGE_ENABLED, RBD_MIRROR_IMAGE_DISABLED, MIRROR_IMAGE_STATUS_STATE_UNKNOWN, RBD_MIRROR_IMAGE_MODE_JOURNAL, RBD_MIRROR_IMAGE_MODE_SNAPSHOT, RBD_LOCK_MODE_EXCLUSIVE, RBD_OPERATION_FEATURE_GROUP, RBD_SNAP_NAMESPACE_TYPE_TRASH, RBD_SNAP_NAMESPACE_TYPE_MIRROR, RBD_IMAGE_MIGRATION_STATE_PREPARED, RBD_CONFIG_SOURCE_CONFIG, RBD_CONFIG_SOURCE_POOL, RBD_CONFIG_SOURCE_IMAGE, RBD_MIRROR_PEER_ATTRIBUTE_NAME_MON_HOST, RBD_MIRROR_PEER_ATTRIBUTE_NAME_KEY, RBD_MIRROR_PEER_DIRECTION_RX, RBD_MIRROR_PEER_DIRECTION_RX_TX, RBD_SNAP_REMOVE_UNPROTECT, RBD_SNAP_MIRROR_STATE_PRIMARY, RBD_SNAP_MIRROR_STATE_PRIMARY_DEMOTED, RBD_SNAP_CREATE_SKIP_QUIESCE, RBD_SNAP_CREATE_IGNORE_QUIESCE_ERROR, RBD_WRITE_ZEROES_FLAG_THICK_PROVISION, RBD_ENCRYPTION_FORMAT_LUKS1, RBD_ENCRYPTION_FORMAT_LUKS2, RBD_ENCRYPTION_FORMAT_LUKS) rados = None ioctx = None features = None image_idx = 0 group_idx = 0 snap_idx = 0 image_name = None group_name = None snap_name = None pool_idx = 0 pool_name = None IMG_SIZE = 
8 << 20 # 8 MiB IMG_ORDER = 22 # 4 MiB objects os.environ["RBD_FORCE_ALLOW_V1"] = "1" def setup_module(): global rados rados = Rados(conffile='') rados.connect() global pool_name pool_name = get_temp_pool_name() rados.create_pool(pool_name) global ioctx ioctx = rados.open_ioctx(pool_name) RBD().pool_init(ioctx, True) global features features = os.getenv("RBD_FEATURES") if features is not None: features = int(features) def teardown_module(): global ioctx ioctx.close() global rados rados.delete_pool(pool_name) rados.shutdown() def get_temp_pool_name(): global pool_idx pool_idx += 1 return "test-rbd-api-" + socket.gethostname() + '-' + str(os.getpid()) + \ '-' + str(pool_idx) def get_temp_image_name(): global image_idx image_idx += 1 return "image" + str(image_idx) def get_temp_group_name(): global group_idx group_idx += 1 return "group" + str(group_idx) def get_temp_snap_name(): global snap_idx snap_idx += 1 return "snap" + str(snap_idx) def create_image(): global image_name image_name = get_temp_image_name() if features is not None: RBD().create(ioctx, image_name, IMG_SIZE, IMG_ORDER, old_format=False, features=int(features)) else: RBD().create(ioctx, image_name, IMG_SIZE, IMG_ORDER, old_format=True) return image_name def remove_image(): if image_name is not None: RBD().remove(ioctx, image_name) def create_group(): global group_name group_name = get_temp_group_name() RBD().group_create(ioctx, group_name) def remove_group(): if group_name is not None: RBD().group_remove(ioctx, group_name) def rename_group(): new_group_name = "new" + group_name RBD().group_rename(ioctx, group_name, new_group_name) def require_new_format(): def wrapper(fn): def _require_new_format(*args, **kwargs): global features if features is None: raise SkipTest return fn(*args, **kwargs) return functools.wraps(fn)(_require_new_format) return wrapper def require_features(required_features): def wrapper(fn): def _require_features(*args, **kwargs): global features if features is None: raise SkipTest 
for feature in required_features: if feature & features != feature: raise SkipTest return fn(*args, **kwargs) return functools.wraps(fn)(_require_features) return wrapper def require_linux(): def wrapper(fn): def _require_linux(*args, **kwargs): if platform.system() != "Linux": raise SkipTest return fn(*args, **kwargs) return functools.wraps(fn)(_require_linux) return wrapper def blocklist_features(blocklisted_features): def wrapper(fn): def _blocklist_features(*args, **kwargs): global features for feature in blocklisted_features: if features is not None and feature & features == feature: raise SkipTest return fn(*args, **kwargs) return functools.wraps(fn)(_blocklist_features) return wrapper def test_version(): RBD().version() def test_create(): create_image() remove_image() def check_default_params(format, order=None, features=None, stripe_count=None, stripe_unit=None, exception=None): global rados global ioctx orig_vals = {} for k in ['rbd_default_format', 'rbd_default_order', 'rbd_default_features', 'rbd_default_stripe_count', 'rbd_default_stripe_unit']: orig_vals[k] = rados.conf_get(k) try: rados.conf_set('rbd_default_format', str(format)) if order is not None: rados.conf_set('rbd_default_order', str(order or 0)) if features is not None: rados.conf_set('rbd_default_features', str(features or 0)) if stripe_count is not None: rados.conf_set('rbd_default_stripe_count', str(stripe_count or 0)) if stripe_unit is not None: rados.conf_set('rbd_default_stripe_unit', str(stripe_unit or 0)) feature_data_pool = 0 datapool = rados.conf_get('rbd_default_data_pool') if not len(datapool) == 0: feature_data_pool = 128 image_name = get_temp_image_name() if exception is None: RBD().create(ioctx, image_name, IMG_SIZE, old_format=(format == 1)) try: with Image(ioctx, image_name) as image: eq(format == 1, image.old_format()) expected_order = int(rados.conf_get('rbd_default_order')) actual_order = image.stat()['order'] eq(expected_order, actual_order) expected_features = features if 
format == 1: expected_features = 0 elif expected_features is None: expected_features = 61 | feature_data_pool else: expected_features |= feature_data_pool eq(expected_features, image.features()) expected_stripe_count = stripe_count if not expected_stripe_count or format == 1 or \ features & RBD_FEATURE_STRIPINGV2 == 0: expected_stripe_count = 1 eq(expected_stripe_count, image.stripe_count()) expected_stripe_unit = stripe_unit if not expected_stripe_unit or format == 1 or \ features & RBD_FEATURE_STRIPINGV2 == 0: expected_stripe_unit = 1 << actual_order eq(expected_stripe_unit, image.stripe_unit()) finally: RBD().remove(ioctx, image_name) else: assert_raises(exception, RBD().create, ioctx, image_name, IMG_SIZE) finally: for k, v in orig_vals.items(): rados.conf_set(k, v) def test_create_defaults(): # basic format 1 and 2 check_default_params(1) check_default_params(2) # invalid order check_default_params(1, 0, exception=ArgumentOutOfRange) check_default_params(2, 0, exception=ArgumentOutOfRange) check_default_params(1, 11, exception=ArgumentOutOfRange) check_default_params(2, 11, exception=ArgumentOutOfRange) check_default_params(1, 65, exception=ArgumentOutOfRange) check_default_params(2, 65, exception=ArgumentOutOfRange) # striping and features are ignored for format 1 check_default_params(1, 20, 0, 1, 1) check_default_params(1, 20, 3, 1, 1) check_default_params(1, 20, 0, 0, 0) # striping is ignored if stripingv2 is not set check_default_params(2, 20, 0, 1, 1 << 20) check_default_params(2, 20, RBD_FEATURE_LAYERING, 1, 1 << 20) check_default_params(2, 20, 0, 0, 0) # striping with stripingv2 is fine check_default_params(2, 20, RBD_FEATURE_STRIPINGV2, 1, 1 << 16) check_default_params(2, 20, RBD_FEATURE_STRIPINGV2, 10, 1 << 20) check_default_params(2, 20, RBD_FEATURE_STRIPINGV2, 10, 1 << 16) check_default_params(2, 20, 0, 0, 0) # make sure invalid combinations of stripe unit and order are still invalid check_default_params(2, 22, RBD_FEATURE_STRIPINGV2, 10, 1 << 50, 
exception=InvalidArgument) check_default_params(2, 22, RBD_FEATURE_STRIPINGV2, 10, 100, exception=InvalidArgument) check_default_params(2, 22, RBD_FEATURE_STRIPINGV2, 0, 1, exception=InvalidArgument) check_default_params(2, 22, RBD_FEATURE_STRIPINGV2, 1, 0, exception=InvalidArgument) # 0 stripe unit and count are still ignored check_default_params(2, 22, 0, 0, 0) def test_context_manager(): with Rados(conffile='') as cluster: with cluster.open_ioctx(pool_name) as ioctx: image_name = get_temp_image_name() RBD().create(ioctx, image_name, IMG_SIZE) with Image(ioctx, image_name) as image: data = rand_data(256) image.write(data, 0) read = image.read(0, 256) RBD().remove(ioctx, image_name) eq(data, read) def test_open_read_only(): with Rados(conffile='') as cluster: with cluster.open_ioctx(pool_name) as ioctx: image_name = get_temp_image_name() RBD().create(ioctx, image_name, IMG_SIZE) data = rand_data(256) with Image(ioctx, image_name) as image: image.write(data, 0) image.create_snap('snap') with Image(ioctx, image_name, read_only=True) as image: read = image.read(0, 256) eq(data, read) assert_raises(ReadOnlyImage, image.write, data, 0) assert_raises(ReadOnlyImage, image.create_snap, 'test') assert_raises(ReadOnlyImage, image.remove_snap, 'snap') assert_raises(ReadOnlyImage, image.rollback_to_snap, 'snap') assert_raises(ReadOnlyImage, image.protect_snap, 'snap') assert_raises(ReadOnlyImage, image.unprotect_snap, 'snap') assert_raises(ReadOnlyImage, image.unprotect_snap, 'snap') assert_raises(ReadOnlyImage, image.flatten) with Image(ioctx, image_name) as image: image.remove_snap('snap') RBD().remove(ioctx, image_name) eq(data, read) def test_open_dne(): for i in range(100): image_name = get_temp_image_name() assert_raises(ImageNotFound, Image, ioctx, image_name + 'dne') assert_raises(ImageNotFound, Image, ioctx, image_name, 'snap') def test_open_readonly_dne(): for i in range(100): image_name = get_temp_image_name() assert_raises(ImageNotFound, Image, ioctx, image_name + 
'dne', read_only=True) assert_raises(ImageNotFound, Image, ioctx, image_name, 'snap', read_only=True) @require_new_format() def test_open_by_id(): with Rados(conffile='') as cluster: with cluster.open_ioctx(pool_name) as ioctx: image_name = get_temp_image_name() RBD().create(ioctx, image_name, IMG_SIZE) with Image(ioctx, image_name) as image: image_id = image.id() with Image(ioctx, image_id=image_id) as image: eq(image.get_name(), image_name) RBD().remove(ioctx, image_name) def test_aio_open(): with Rados(conffile='') as cluster: with cluster.open_ioctx(pool_name) as ioctx: image_name = get_temp_image_name() order = 20 RBD().create(ioctx, image_name, IMG_SIZE, order) # this is a list so that the open_cb() can modify it image = [None] def open_cb(_, image_): image[0] = image_ comp = RBD().aio_open_image(open_cb, ioctx, image_name) comp.wait_for_complete_and_cb() eq(comp.get_return_value(), 0) eq(sys.getrefcount(comp), 2) assert_not_equal(image[0], None) image = image[0] eq(image.get_name(), image_name) check_stat(image.stat(), IMG_SIZE, order) closed = [False] def close_cb(_): closed[0] = True comp = image.aio_close(close_cb) comp.wait_for_complete_and_cb() eq(comp.get_return_value(), 0) eq(sys.getrefcount(comp), 2) eq(closed[0], True) RBD().remove(ioctx, image_name) def test_remove_dne(): assert_raises(ImageNotFound, remove_image) def test_list_empty(): eq([], RBD().list(ioctx)) @with_setup(create_image, remove_image) def test_list(): eq([image_name], RBD().list(ioctx)) with Image(ioctx, image_name) as image: image_id = image.id() eq([{'id': image_id, 'name': image_name}], list(RBD().list2(ioctx))) @with_setup(create_image) def test_remove_with_progress(): d = {'received_callback': False} def progress_cb(current, total): d['received_callback'] = True return 0 RBD().remove(ioctx, image_name, on_progress=progress_cb) eq(True, d['received_callback']) @with_setup(create_image) def test_remove_canceled(): def progress_cb(current, total): return -ECANCELED 
assert_raises(OperationCanceled, RBD().remove, ioctx, image_name, on_progress=progress_cb) @with_setup(create_image, remove_image) def test_rename(): rbd = RBD() image_name2 = get_temp_image_name() rbd.rename(ioctx, image_name, image_name2) eq([image_name2], rbd.list(ioctx)) rbd.rename(ioctx, image_name2, image_name) eq([image_name], rbd.list(ioctx)) def test_pool_metadata(): rbd = RBD() metadata = list(rbd.pool_metadata_list(ioctx)) eq(len(metadata), 0) assert_raises(KeyError, rbd.pool_metadata_get, ioctx, "key1") rbd.pool_metadata_set(ioctx, "key1", "value1") rbd.pool_metadata_set(ioctx, "key2", "value2") value = rbd.pool_metadata_get(ioctx, "key1") eq(value, "value1") value = rbd.pool_metadata_get(ioctx, "key2") eq(value, "value2") metadata = list(rbd.pool_metadata_list(ioctx)) eq(len(metadata), 2) rbd.pool_metadata_remove(ioctx, "key1") metadata = list(rbd.pool_metadata_list(ioctx)) eq(len(metadata), 1) eq(metadata[0], ("key2", "value2")) rbd.pool_metadata_remove(ioctx, "key2") assert_raises(KeyError, rbd.pool_metadata_remove, ioctx, "key2") metadata = list(rbd.pool_metadata_list(ioctx)) eq(len(metadata), 0) N = 65 for i in range(N): rbd.pool_metadata_set(ioctx, "key" + str(i), "X" * 1025) metadata = list(rbd.pool_metadata_list(ioctx)) eq(len(metadata), N) for i in range(N): rbd.pool_metadata_remove(ioctx, "key" + str(i)) metadata = list(rbd.pool_metadata_list(ioctx)) eq(len(metadata), N - i - 1) def test_config_list(): rbd = RBD() for option in rbd.config_list(ioctx): eq(option['source'], RBD_CONFIG_SOURCE_CONFIG) rbd.pool_metadata_set(ioctx, "conf_rbd_cache", "true") for option in rbd.config_list(ioctx): if option['name'] == "rbd_cache": eq(option['source'], RBD_CONFIG_SOURCE_POOL) else: eq(option['source'], RBD_CONFIG_SOURCE_CONFIG) rbd.pool_metadata_remove(ioctx, "conf_rbd_cache") for option in rbd.config_list(ioctx): eq(option['source'], RBD_CONFIG_SOURCE_CONFIG) def test_pool_config_set_and_get_and_remove(): rbd = RBD() for option in 
rbd.config_list(ioctx): eq(option['source'], RBD_CONFIG_SOURCE_CONFIG) rbd.config_set(ioctx, "rbd_request_timed_out_seconds", "100") new_value = rbd.config_get(ioctx, "rbd_request_timed_out_seconds") eq(new_value, "100") rbd.config_remove(ioctx, "rbd_request_timed_out_seconds") for option in rbd.config_list(ioctx): eq(option['source'], RBD_CONFIG_SOURCE_CONFIG) def test_namespaces(): rbd = RBD() eq(False, rbd.namespace_exists(ioctx, 'ns1')) eq([], rbd.namespace_list(ioctx)) assert_raises(ImageNotFound, rbd.namespace_remove, ioctx, 'ns1') rbd.namespace_create(ioctx, 'ns1') eq(True, rbd.namespace_exists(ioctx, 'ns1')) assert_raises(ImageExists, rbd.namespace_create, ioctx, 'ns1') eq(['ns1'], rbd.namespace_list(ioctx)) rbd.namespace_remove(ioctx, 'ns1') eq([], rbd.namespace_list(ioctx)) @require_new_format() def test_pool_stats(): rbd = RBD() try: image1 = create_image() image2 = create_image() image3 = create_image() image4 = create_image() with Image(ioctx, image4) as image: image.create_snap('snap') image.resize(0) stats = rbd.pool_stats_get(ioctx) eq(stats['image_count'], 4) eq(stats['image_provisioned_bytes'], 3 * IMG_SIZE) eq(stats['image_max_provisioned_bytes'], 4 * IMG_SIZE) eq(stats['image_snap_count'], 1) eq(stats['trash_count'], 0) eq(stats['trash_provisioned_bytes'], 0) eq(stats['trash_max_provisioned_bytes'], 0) eq(stats['trash_snap_count'], 0) finally: rbd.remove(ioctx, image1) rbd.remove(ioctx, image2) rbd.remove(ioctx, image3) with Image(ioctx, image4) as image: image.remove_snap('snap') rbd.remove(ioctx, image4) def rand_data(size): return os.urandom(size) def check_stat(info, size, order): assert 'block_name_prefix' in info eq(info['size'], size) eq(info['order'], order) eq(info['num_objs'], size // (1 << order)) eq(info['obj_size'], 1 << order) @require_new_format() def test_features_to_string(): rbd = RBD() features = RBD_FEATURE_DEEP_FLATTEN | RBD_FEATURE_EXCLUSIVE_LOCK | RBD_FEATURE_FAST_DIFF \ | RBD_FEATURE_LAYERING | RBD_FEATURE_OBJECT_MAP 
expected_features_string = "deep-flatten,exclusive-lock,fast-diff,layering,object-map" features_string = rbd.features_to_string(features) eq(expected_features_string, features_string) features = RBD_FEATURE_LAYERING features_string = rbd.features_to_string(features) eq(features_string, "layering") features = 16777216 assert_raises(InvalidArgument, rbd.features_to_string, features) @require_new_format() def test_features_from_string(): rbd = RBD() features_string = "deep-flatten,exclusive-lock,fast-diff,layering,object-map" expected_features_bitmask = RBD_FEATURE_DEEP_FLATTEN | RBD_FEATURE_EXCLUSIVE_LOCK | RBD_FEATURE_FAST_DIFF \ | RBD_FEATURE_LAYERING | RBD_FEATURE_OBJECT_MAP features = rbd.features_from_string(features_string) eq(expected_features_bitmask, features) features_string = "layering" features = rbd.features_from_string(features_string) eq(features, RBD_FEATURE_LAYERING) class TestImage(object): def setUp(self): self.rbd = RBD() create_image() self.image = Image(ioctx, image_name) def tearDown(self): self.image.close() remove_image() self.image = None @require_new_format() @blocklist_features([RBD_FEATURE_EXCLUSIVE_LOCK]) def test_update_features(self): features = self.image.features() self.image.update_features(RBD_FEATURE_EXCLUSIVE_LOCK, True) eq(features | RBD_FEATURE_EXCLUSIVE_LOCK, self.image.features()) @require_features([RBD_FEATURE_STRIPINGV2]) def test_create_with_params(self): global features image_name = get_temp_image_name() order = 20 stripe_unit = 1 << 20 stripe_count = 10 self.rbd.create(ioctx, image_name, IMG_SIZE, order, False, features, stripe_unit, stripe_count) image = Image(ioctx, image_name) info = image.stat() check_stat(info, IMG_SIZE, order) eq(image.features(), features) eq(image.stripe_unit(), stripe_unit) eq(image.stripe_count(), stripe_count) image.close() RBD().remove(ioctx, image_name) @require_new_format() def test_id(self): assert_not_equal(b'', self.image.id()) def test_block_name_prefix(self): assert_not_equal(b'', 
self.image.block_name_prefix()) def test_data_pool_id(self): assert_greater_equal(self.image.data_pool_id(), 0) def test_create_timestamp(self): timestamp = self.image.create_timestamp() assert_not_equal(0, timestamp.year) assert_not_equal(1970, timestamp.year) def test_access_timestamp(self): timestamp = self.image.access_timestamp() assert_not_equal(0, timestamp.year) assert_not_equal(1970, timestamp.year) def test_modify_timestamp(self): timestamp = self.image.modify_timestamp() assert_not_equal(0, timestamp.year) assert_not_equal(1970, timestamp.year) def test_invalidate_cache(self): self.image.write(b'abc', 0) eq(b'abc', self.image.read(0, 3)) self.image.invalidate_cache() eq(b'abc', self.image.read(0, 3)) def test_stat(self): info = self.image.stat() check_stat(info, IMG_SIZE, IMG_ORDER) def test_flags(self): flags = self.image.flags() eq(0, flags) def test_image_auto_close(self): image = Image(ioctx, image_name) def test_use_after_close(self): self.image.close() assert_raises(InvalidArgument, self.image.stat) def test_write(self): data = rand_data(256) self.image.write(data, 0) def test_write_with_fadvise_flags(self): data = rand_data(256) self.image.write(data, 0, LIBRADOS_OP_FLAG_FADVISE_DONTNEED) self.image.write(data, 0, LIBRADOS_OP_FLAG_FADVISE_NOCACHE) def test_write_zeroes(self): data = rand_data(256) self.image.write(data, 0) self.image.write_zeroes(0, 256) eq(self.image.read(256, 256), b'\0' * 256) check_diff(self.image, 0, IMG_SIZE, None, []) def test_write_zeroes_thick_provision(self): data = rand_data(256) self.image.write(data, 0) self.image.write_zeroes(0, 256, RBD_WRITE_ZEROES_FLAG_THICK_PROVISION) eq(self.image.read(256, 256), b'\0' * 256) check_diff(self.image, 0, IMG_SIZE, None, [(0, 256, True)]) def test_read(self): data = self.image.read(0, 20) eq(data, b'\0' * 20) def test_read_with_fadvise_flags(self): data = self.image.read(0, 20, LIBRADOS_OP_FLAG_FADVISE_DONTNEED) eq(data, b'\0' * 20) data = self.image.read(0, 20, 
LIBRADOS_OP_FLAG_FADVISE_RANDOM) eq(data, b'\0' * 20) def test_large_write(self): data = rand_data(IMG_SIZE) self.image.write(data, 0) def test_large_read(self): data = self.image.read(0, IMG_SIZE) eq(data, b'\0' * IMG_SIZE) def test_write_read(self): data = rand_data(256) offset = 50 self.image.write(data, offset) read = self.image.read(offset, 256) eq(data, read) def test_read_bad_offset(self): assert_raises(InvalidArgument, self.image.read, IMG_SIZE + 1, IMG_SIZE) def test_resize(self): new_size = IMG_SIZE * 2 self.image.resize(new_size) info = self.image.stat() check_stat(info, new_size, IMG_ORDER) def test_resize_allow_shrink_False(self): new_size = IMG_SIZE * 2 self.image.resize(new_size) info = self.image.stat() check_stat(info, new_size, IMG_ORDER) assert_raises(InvalidArgument, self.image.resize, IMG_SIZE, False) def test_size(self): eq(IMG_SIZE, self.image.size()) self.image.create_snap('snap1') new_size = IMG_SIZE * 2 self.image.resize(new_size) eq(new_size, self.image.size()) self.image.create_snap('snap2') self.image.set_snap('snap2') eq(new_size, self.image.size()) self.image.set_snap('snap1') eq(IMG_SIZE, self.image.size()) self.image.set_snap(None) eq(new_size, self.image.size()) self.image.remove_snap('snap1') self.image.remove_snap('snap2') def test_resize_down(self): new_size = IMG_SIZE // 2 data = rand_data(256) self.image.write(data, IMG_SIZE // 2); self.image.resize(new_size) self.image.resize(IMG_SIZE) read = self.image.read(IMG_SIZE // 2, 256) eq(b'\0' * 256, read) def test_resize_bytes(self): new_size = IMG_SIZE // 2 - 5 data = rand_data(256) self.image.write(data, IMG_SIZE // 2 - 10); self.image.resize(new_size) self.image.resize(IMG_SIZE) read = self.image.read(IMG_SIZE // 2 - 10, 5) eq(data[:5], read) read = self.image.read(IMG_SIZE // 2 - 5, 251) eq(b'\0' * 251, read) def _test_copy(self, features=None, order=None, stripe_unit=None, stripe_count=None): global ioctx data = rand_data(256) self.image.write(data, 256) image_name = 
get_temp_image_name() if features is None: self.image.copy(ioctx, image_name) elif order is None: self.image.copy(ioctx, image_name, features) elif stripe_unit is None: self.image.copy(ioctx, image_name, features, order) elif stripe_count is None: self.image.copy(ioctx, image_name, features, order, stripe_unit) else: self.image.copy(ioctx, image_name, features, order, stripe_unit, stripe_count) assert_raises(ImageExists, self.image.copy, ioctx, image_name) copy = Image(ioctx, image_name) copy_data = copy.read(256, 256) copy.close() self.rbd.remove(ioctx, image_name) eq(data, copy_data) def test_copy(self): self._test_copy() def test_copy2(self): self._test_copy(self.image.features(), self.image.stat()['order']) @require_features([RBD_FEATURE_STRIPINGV2]) def test_copy3(self): global features self._test_copy(features, self.image.stat()['order'], self.image.stripe_unit(), self.image.stripe_count()) @attr('SKIP_IF_CRIMSON') def test_deep_copy(self): global ioctx global features self.image.write(b'a' * 256, 0) self.image.create_snap('snap1') self.image.write(b'b' * 256, 0) dst_name = get_temp_image_name() self.image.deep_copy(ioctx, dst_name, features=features, order=self.image.stat()['order'], stripe_unit=self.image.stripe_unit(), stripe_count=self.image.stripe_count(), data_pool=None) self.image.remove_snap('snap1') with Image(ioctx, dst_name, 'snap1') as copy: copy_data = copy.read(0, 256) eq(b'a' * 256, copy_data) with Image(ioctx, dst_name) as copy: copy_data = copy.read(0, 256) eq(b'b' * 256, copy_data) copy.remove_snap('snap1') self.rbd.remove(ioctx, dst_name) @require_features([RBD_FEATURE_LAYERING]) def test_deep_copy_clone(self): global ioctx global features self.image.write(b'a' * 256, 0) self.image.create_snap('snap1') self.image.write(b'b' * 256, 0) self.image.protect_snap('snap1') clone_name = get_temp_image_name() dst_name = get_temp_image_name() self.rbd.clone(ioctx, image_name, 'snap1', ioctx, clone_name) with Image(ioctx, clone_name) as child: 
child.create_snap('snap1') child.deep_copy(ioctx, dst_name, features=features, order=self.image.stat()['order'], stripe_unit=self.image.stripe_unit(), stripe_count=self.image.stripe_count(), data_pool=None) child.remove_snap('snap1') with Image(ioctx, dst_name) as copy: copy_data = copy.read(0, 256) eq(b'a' * 256, copy_data) copy.remove_snap('snap1') self.rbd.remove(ioctx, dst_name) self.rbd.remove(ioctx, clone_name) self.image.unprotect_snap('snap1') self.image.remove_snap('snap1') def test_create_snap(self): global ioctx self.image.create_snap('snap1') read = self.image.read(0, 256) eq(read, b'\0' * 256) data = rand_data(256) self.image.write(data, 0) read = self.image.read(0, 256) eq(read, data) at_snapshot = Image(ioctx, image_name, 'snap1') snap_data = at_snapshot.read(0, 256) at_snapshot.close() eq(snap_data, b'\0' * 256) self.image.remove_snap('snap1') def test_create_snap_exists(self): self.image.create_snap('snap1') assert_raises(ImageExists, self.image.create_snap, 'snap1') self.image.remove_snap('snap1') def test_create_snap_flags(self): self.image.create_snap('snap1', 0) self.image.remove_snap('snap1') self.image.create_snap('snap1', RBD_SNAP_CREATE_SKIP_QUIESCE) self.image.remove_snap('snap1') self.image.create_snap('snap1', RBD_SNAP_CREATE_IGNORE_QUIESCE_ERROR) self.image.remove_snap('snap1') def test_list_snaps(self): eq([], list(self.image.list_snaps())) self.image.create_snap('snap1') eq(['snap1'], [snap['name'] for snap in self.image.list_snaps()]) self.image.create_snap('snap2') eq(['snap1', 'snap2'], [snap['name'] for snap in self.image.list_snaps()]) self.image.remove_snap('snap1') self.image.remove_snap('snap2') def test_list_snaps_iterator_auto_close(self): self.image.create_snap('snap1') self.image.list_snaps() self.image.remove_snap('snap1') def test_remove_snap(self): eq([], list(self.image.list_snaps())) self.image.create_snap('snap1') eq(['snap1'], [snap['name'] for snap in self.image.list_snaps()]) self.image.remove_snap('snap1') eq([], 
list(self.image.list_snaps())) def test_remove_snap_not_found(self): assert_raises(ImageNotFound, self.image.remove_snap, 'snap1') @require_features([RBD_FEATURE_LAYERING]) def test_remove_snap2(self): self.image.create_snap('snap1') self.image.protect_snap('snap1') assert(self.image.is_protected_snap('snap1')) self.image.remove_snap2('snap1', RBD_SNAP_REMOVE_UNPROTECT) eq([], list(self.image.list_snaps())) def test_remove_snap_by_id(self): eq([], list(self.image.list_snaps())) self.image.create_snap('snap1') eq(['snap1'], [snap['name'] for snap in self.image.list_snaps()]) for snap in self.image.list_snaps(): snap_id = snap["id"] self.image.remove_snap_by_id(snap_id) eq([], list(self.image.list_snaps())) def test_rename_snap(self): eq([], list(self.image.list_snaps())) self.image.create_snap('snap1') eq(['snap1'], [snap['name'] for snap in self.image.list_snaps()]) self.image.rename_snap("snap1", "snap1-rename") eq(['snap1-rename'], [snap['name'] for snap in self.image.list_snaps()]) self.image.remove_snap('snap1-rename') eq([], list(self.image.list_snaps())) @require_features([RBD_FEATURE_LAYERING]) def test_protect_snap(self): self.image.create_snap('snap1') assert(not self.image.is_protected_snap('snap1')) self.image.protect_snap('snap1') assert(self.image.is_protected_snap('snap1')) assert_raises(ImageBusy, self.image.remove_snap, 'snap1') self.image.unprotect_snap('snap1') assert(not self.image.is_protected_snap('snap1')) self.image.remove_snap('snap1') assert_raises(ImageNotFound, self.image.unprotect_snap, 'snap1') assert_raises(ImageNotFound, self.image.is_protected_snap, 'snap1') def test_snap_exists(self): self.image.create_snap('snap1') eq(self.image.snap_exists('snap1'), True) self.image.remove_snap('snap1') eq(self.image.snap_exists('snap1'), False) def test_snap_timestamp(self): self.image.create_snap('snap1') eq(['snap1'], [snap['name'] for snap in self.image.list_snaps()]) for snap in self.image.list_snaps(): snap_id = snap["id"] time = 
self.image.get_snap_timestamp(snap_id) assert_not_equal(b'', time.year) assert_not_equal(0, time.year) assert_not_equal(time.year, '1970') self.image.remove_snap('snap1') def test_limit_snaps(self): self.image.set_snap_limit(2) eq(2, self.image.get_snap_limit()) self.image.create_snap('snap1') self.image.create_snap('snap2') assert_raises(DiskQuotaExceeded, self.image.create_snap, 'snap3') self.image.remove_snap_limit() self.image.create_snap('snap3') self.image.remove_snap('snap1') self.image.remove_snap('snap2') self.image.remove_snap('snap3') @require_features([RBD_FEATURE_EXCLUSIVE_LOCK]) def test_remove_with_exclusive_lock(self): assert_raises(ImageBusy, remove_image) @blocklist_features([RBD_FEATURE_EXCLUSIVE_LOCK]) def test_remove_with_snap(self): self.image.create_snap('snap1') assert_raises(ImageHasSnapshots, remove_image) self.image.remove_snap('snap1') @blocklist_features([RBD_FEATURE_EXCLUSIVE_LOCK]) def test_remove_with_watcher(self): data = rand_data(256) self.image.write(data, 0) assert_raises(ImageBusy, remove_image) read = self.image.read(0, 256) eq(read, data) def test_rollback_to_snap(self): self.image.write(b'\0' * 256, 0) self.image.create_snap('snap1') read = self.image.read(0, 256) eq(read, b'\0' * 256) data = rand_data(256) self.image.write(data, 0) read = self.image.read(0, 256) eq(read, data) self.image.rollback_to_snap('snap1') read = self.image.read(0, 256) eq(read, b'\0' * 256) self.image.remove_snap('snap1') def test_rollback_to_snap_sparse(self): self.image.create_snap('snap1') read = self.image.read(0, 256) eq(read, b'\0' * 256) data = rand_data(256) self.image.write(data, 0) read = self.image.read(0, 256) eq(read, data) self.image.rollback_to_snap('snap1') read = self.image.read(0, 256) eq(read, b'\0' * 256) self.image.remove_snap('snap1') def test_rollback_with_resize(self): read = self.image.read(0, 256) eq(read, b'\0' * 256) data = rand_data(256) self.image.write(data, 0) self.image.create_snap('snap1') read = self.image.read(0, 
256) eq(read, data) new_size = IMG_SIZE * 2 self.image.resize(new_size) check_stat(self.image.stat(), new_size, IMG_ORDER) self.image.write(data, new_size - 256) self.image.create_snap('snap2') read = self.image.read(new_size - 256, 256) eq(read, data) self.image.rollback_to_snap('snap1') check_stat(self.image.stat(), IMG_SIZE, IMG_ORDER) assert_raises(InvalidArgument, self.image.read, new_size - 256, 256) self.image.rollback_to_snap('snap2') check_stat(self.image.stat(), new_size, IMG_ORDER) read = self.image.read(new_size - 256, 256) eq(read, data) self.image.remove_snap('snap1') self.image.remove_snap('snap2') def test_set_snap(self): self.image.write(b'\0' * 256, 0) self.image.create_snap('snap1') read = self.image.read(0, 256) eq(read, b'\0' * 256) data = rand_data(256) self.image.write(data, 0) read = self.image.read(0, 256) eq(read, data) self.image.set_snap('snap1') read = self.image.read(0, 256) eq(read, b'\0' * 256) assert_raises(ReadOnlyImage, self.image.write, data, 0) self.image.remove_snap('snap1') def test_set_no_snap(self): self.image.write(b'\0' * 256, 0) self.image.create_snap('snap1') read = self.image.read(0, 256) eq(read, b'\0' * 256) data = rand_data(256) self.image.write(data, 0) read = self.image.read(0, 256) eq(read, data) self.image.set_snap('snap1') read = self.image.read(0, 256) eq(read, b'\0' * 256) assert_raises(ReadOnlyImage, self.image.write, data, 0) self.image.set_snap(None) read = self.image.read(0, 256) eq(read, data) self.image.remove_snap('snap1') def test_set_snap_by_id(self): self.image.write(b'\0' * 256, 0) self.image.create_snap('snap1') read = self.image.read(0, 256) eq(read, b'\0' * 256) data = rand_data(256) self.image.write(data, 0) read = self.image.read(0, 256) eq(read, data) snaps = list(self.image.list_snaps()) self.image.set_snap_by_id(snaps[0]['id']) read = self.image.read(0, 256) eq(read, b'\0' * 256) assert_raises(ReadOnlyImage, self.image.write, data, 0) self.image.set_snap_by_id(None) read = self.image.read(0, 
256) eq(read, data) self.image.remove_snap('snap1') def test_snap_get_name(self): eq([], list(self.image.list_snaps())) self.image.create_snap('snap1') self.image.create_snap('snap2') self.image.create_snap('snap3') for snap in self.image.list_snaps(): expected_snap_name = self.image.snap_get_name(snap['id']) eq(expected_snap_name, snap['name']) self.image.remove_snap('snap1') self.image.remove_snap('snap2') self.image.remove_snap('snap3') eq([], list(self.image.list_snaps())) assert_raises(ImageNotFound, self.image.snap_get_name, 1) def test_snap_get_id(self): eq([], list(self.image.list_snaps())) self.image.create_snap('snap1') self.image.create_snap('snap2') self.image.create_snap('snap3') for snap in self.image.list_snaps(): expected_snap_id = self.image.snap_get_id(snap['name']) eq(expected_snap_id, snap['id']) self.image.remove_snap('snap1') self.image.remove_snap('snap2') self.image.remove_snap('snap3') eq([], list(self.image.list_snaps())) assert_raises(ImageNotFound, self.image.snap_get_id, 'snap1') def test_set_snap_sparse(self): self.image.create_snap('snap1') read = self.image.read(0, 256) eq(read, b'\0' * 256) data = rand_data(256) self.image.write(data, 0) read = self.image.read(0, 256) eq(read, data) self.image.set_snap('snap1') read = self.image.read(0, 256) eq(read, b'\0' * 256) assert_raises(ReadOnlyImage, self.image.write, data, 0) self.image.remove_snap('snap1') def test_many_snaps(self): num_snaps = 200 for i in range(num_snaps): self.image.create_snap(str(i)) snaps = sorted(self.image.list_snaps(), key=lambda snap: int(snap['name'])) eq(len(snaps), num_snaps) for i, snap in enumerate(snaps): eq(snap['size'], IMG_SIZE) eq(snap['name'], str(i)) for i in range(num_snaps): self.image.remove_snap(str(i)) def test_set_snap_deleted(self): self.image.write(b'\0' * 256, 0) self.image.create_snap('snap1') read = self.image.read(0, 256) eq(read, b'\0' * 256) data = rand_data(256) self.image.write(data, 0) read = self.image.read(0, 256) eq(read, data) 
self.image.set_snap('snap1') self.image.remove_snap('snap1') assert_raises(ImageNotFound, self.image.read, 0, 256) self.image.set_snap(None) read = self.image.read(0, 256) eq(read, data) def test_set_snap_recreated(self): self.image.write(b'\0' * 256, 0) self.image.create_snap('snap1') read = self.image.read(0, 256) eq(read, b'\0' * 256) data = rand_data(256) self.image.write(data, 0) read = self.image.read(0, 256) eq(read, data) self.image.set_snap('snap1') self.image.remove_snap('snap1') self.image.create_snap('snap1') assert_raises(ImageNotFound, self.image.read, 0, 256) self.image.set_snap(None) read = self.image.read(0, 256) eq(read, data) self.image.remove_snap('snap1') def test_lock_unlock(self): assert_raises(ImageNotFound, self.image.unlock, '') self.image.lock_exclusive('') assert_raises(ImageExists, self.image.lock_exclusive, '') assert_raises(ImageBusy, self.image.lock_exclusive, 'test') assert_raises(ImageExists, self.image.lock_shared, '', '') assert_raises(ImageBusy, self.image.lock_shared, 'foo', '') self.image.unlock('') def test_list_lockers(self): eq([], self.image.list_lockers()) self.image.lock_exclusive('test') lockers = self.image.list_lockers() eq(1, len(lockers['lockers'])) _, cookie, _ = lockers['lockers'][0] eq(cookie, 'test') eq('', lockers['tag']) assert lockers['exclusive'] self.image.unlock('test') eq([], self.image.list_lockers()) num_shared = 10 for i in range(num_shared): self.image.lock_shared(str(i), 'tag') lockers = self.image.list_lockers() eq('tag', lockers['tag']) assert not lockers['exclusive'] eq(num_shared, len(lockers['lockers'])) cookies = sorted(map(lambda x: x[1], lockers['lockers'])) for i in range(num_shared): eq(str(i), cookies[i]) self.image.unlock(str(i)) eq([], self.image.list_lockers()) def test_diff_iterate(self): check_diff(self.image, 0, IMG_SIZE, None, []) self.image.write(b'a' * 256, 0) check_diff(self.image, 0, IMG_SIZE, None, [(0, 256, True)]) self.image.write(b'b' * 256, 256) check_diff(self.image, 0, 
IMG_SIZE, None, [(0, 512, True)]) self.image.discard(128, 256) check_diff(self.image, 0, IMG_SIZE, None, [(0, 512, True)]) self.image.create_snap('snap1') self.image.discard(0, 1 << IMG_ORDER) self.image.create_snap('snap2') self.image.set_snap('snap2') check_diff(self.image, 0, IMG_SIZE, 'snap1', [(0, 512, False)]) self.image.remove_snap('snap1') self.image.remove_snap('snap2') def test_aio_read(self): # this is a list so that the local cb() can modify it retval = [None] def cb(_, buf): retval[0] = buf # test1: success case comp = self.image.aio_read(0, 20, cb) comp.wait_for_complete_and_cb() eq(retval[0], b'\0' * 20) eq(comp.get_return_value(), 20) eq(sys.getrefcount(comp), 2) # test2: error case retval[0] = 1 comp = self.image.aio_read(IMG_SIZE, 20, cb) comp.wait_for_complete_and_cb() eq(None, retval[0]) assert(comp.get_return_value() < 0) eq(sys.getrefcount(comp), 2) def test_aio_write(self): retval = [None] def cb(comp): retval[0] = comp.get_return_value() data = rand_data(256) comp = self.image.aio_write(data, 256, cb) comp.wait_for_complete_and_cb() eq(retval[0], 0) eq(comp.get_return_value(), 0) eq(sys.getrefcount(comp), 2) eq(self.image.read(256, 256), data) def test_aio_discard(self): retval = [None] def cb(comp): retval[0] = comp.get_return_value() data = rand_data(256) self.image.write(data, 0) comp = self.image.aio_discard(0, 256, cb) comp.wait_for_complete_and_cb() eq(retval[0], 0) eq(comp.get_return_value(), 0) eq(sys.getrefcount(comp), 2) eq(self.image.read(256, 256), b'\0' * 256) def test_aio_write_zeroes(self): retval = [None] def cb(comp): retval[0] = comp.get_return_value() data = rand_data(256) self.image.write(data, 0) comp = self.image.aio_write_zeroes(0, 256, cb) comp.wait_for_complete_and_cb() eq(retval[0], 0) eq(comp.get_return_value(), 0) eq(sys.getrefcount(comp), 2) eq(self.image.read(256, 256), b'\0' * 256) def test_aio_flush(self): retval = [None] def cb(comp): retval[0] = comp.get_return_value() comp = self.image.aio_flush(cb) 
comp.wait_for_complete_and_cb() eq(retval[0], 0) eq(sys.getrefcount(comp), 2) def test_metadata(self): metadata = list(self.image.metadata_list()) eq(len(metadata), 0) assert_raises(KeyError, self.image.metadata_get, "key1") self.image.metadata_set("key1", "value1") self.image.metadata_set("key2", "value2") value = self.image.metadata_get("key1") eq(value, "value1") value = self.image.metadata_get("key2") eq(value, "value2") metadata = list(self.image.metadata_list()) eq(len(metadata), 2) self.image.metadata_remove("key1") metadata = list(self.image.metadata_list()) eq(len(metadata), 1) eq(metadata[0], ("key2", "value2")) self.image.metadata_remove("key2") assert_raises(KeyError, self.image.metadata_remove, "key2") metadata = list(self.image.metadata_list()) eq(len(metadata), 0) N = 65 for i in range(N): self.image.metadata_set("key" + str(i), "X" * 1025) metadata = list(self.image.metadata_list()) eq(len(metadata), N) for i in range(N): self.image.metadata_remove("key" + str(i)) metadata = list(self.image.metadata_list()) eq(len(metadata), N - i - 1) def test_watchers_list(self): watchers = list(self.image.watchers_list()) # The image is open (in r/w mode) from setup, so expect there to be one # watcher. 
eq(len(watchers), 1) def test_config_list(self): with Image(ioctx, image_name) as image: for option in image.config_list(): eq(option['source'], RBD_CONFIG_SOURCE_CONFIG) image.metadata_set("conf_rbd_cache", "true") for option in image.config_list(): if option['name'] == "rbd_cache": eq(option['source'], RBD_CONFIG_SOURCE_IMAGE) else: eq(option['source'], RBD_CONFIG_SOURCE_CONFIG) image.metadata_remove("conf_rbd_cache") for option in image.config_list(): eq(option['source'], RBD_CONFIG_SOURCE_CONFIG) def test_image_config_set_and_get_and_remove(self): with Image(ioctx, image_name) as image: for option in image.config_list(): eq(option['source'], RBD_CONFIG_SOURCE_CONFIG) image.config_set("rbd_request_timed_out_seconds", "100") modify_value = image.config_get("rbd_request_timed_out_seconds") eq(modify_value, '100') image.config_remove("rbd_request_timed_out_seconds") for option in image.config_list(): eq(option['source'], RBD_CONFIG_SOURCE_CONFIG) def test_sparsify(self): assert_raises(InvalidArgument, self.image.sparsify, 16) self.image.sparsify(4096) @require_linux() @blocklist_features([RBD_FEATURE_JOURNALING]) def test_encryption_luks1(self): data = b'hello world' offset = 16<<20 image_size = 32<<20 with Image(ioctx, image_name) as image: image.resize(image_size) image.write(data, offset) image.encryption_format(RBD_ENCRYPTION_FORMAT_LUKS1, "password") assert_not_equal(data, image.read(offset, len(data))) with Image(ioctx, image_name) as image: image.encryption_load(RBD_ENCRYPTION_FORMAT_LUKS1, "password") assert_not_equal(data, image.read(offset, len(data))) image.write(data, offset) with Image(ioctx, image_name) as image: image.encryption_load(RBD_ENCRYPTION_FORMAT_LUKS, "password") eq(data, image.read(offset, len(data))) @require_linux() @blocklist_features([RBD_FEATURE_JOURNALING]) def test_encryption_luks2(self): data = b'hello world' offset = 16<<20 image_size = 256<<20 with Image(ioctx, image_name) as image: image.resize(image_size) image.write(data, 
offset)
            image.encryption_format(RBD_ENCRYPTION_FORMAT_LUKS2, "password")
            # after formatting, the plaintext must no longer be readable raw
            assert_not_equal(data, image.read(offset, len(data)))
        with Image(ioctx, image_name) as image:
            image.encryption_load(RBD_ENCRYPTION_FORMAT_LUKS2, "password")
            assert_not_equal(data, image.read(offset, len(data)))
            image.write(data, offset)
        with Image(ioctx, image_name) as image:
            # generic LUKS constant must also load a LUKS2-formatted image
            image.encryption_load(RBD_ENCRYPTION_FORMAT_LUKS, "password")
            eq(data, image.read(offset, len(data)))

class TestImageId(object):
    # Exercises opening an image by id (Image(..., image_id=...)) instead of
    # by name: both handles must observe the same underlying image.

    def setUp(self):
        self.rbd = RBD()
        create_image()
        # self.image is opened by name; self.image2 by the image's id
        self.image = Image(ioctx, image_name)
        self.image2 = Image(ioctx, None, None, False, self.image.id())

    def tearDown(self):
        self.image.close()
        self.image2.close()
        remove_image()
        self.image = None
        self.image2 = None

    def test_read(self):
        data = self.image2.read(0, 20)
        eq(data, b'\0' * 20)

    def test_write(self):
        data = rand_data(256)
        self.image2.write(data, 0)

    def test_resize(self):
        new_size = IMG_SIZE * 2
        self.image2.resize(new_size)
        info = self.image2.stat()
        check_stat(info, new_size, IMG_ORDER)

def check_diff(image, offset, length, from_snapshot, expected):
    # Collect (offset, length, exists) extents reported by diff_iterate and
    # compare them to the expected list.
    # NOTE(review): the offset/length/from_snapshot parameters are currently
    # ignored — the call below always scans [0, IMG_SIZE) with no snapshot.
    extents = []
    def cb(offset, length, exists):
        extents.append((offset, length, exists))
    image.diff_iterate(0, IMG_SIZE, None, cb)
    eq(extents, expected)

class TestClone(object):
    # Clone (copy-on-write child) tests: setUp creates a parent image with a
    # protected snapshot 'snap1' and one clone of it.

    @require_features([RBD_FEATURE_LAYERING])
    def setUp(self):
        global ioctx
        global features
        self.rbd = RBD()
        create_image()
        self.image = Image(ioctx, image_name)
        data = rand_data(256)
        self.image.write(data, IMG_SIZE // 2)
        self.image.create_snap('snap1')
        global features  # NOTE(review): redundant, already declared above
        self.image.protect_snap('snap1')
        self.clone_name = get_temp_image_name()
        self.rbd.clone(ioctx, image_name, 'snap1', ioctx, self.clone_name,
                       features)
        self.clone = Image(ioctx, self.clone_name)

    def tearDown(self):
        global ioctx
        self.clone.close()
        self.rbd.remove(ioctx, self.clone_name)
        self.image.unprotect_snap('snap1')
        self.image.remove_snap('snap1')
        self.image.close()
        remove_image()

    def _test_with_params(self, features=None, order=None, stripe_unit=None,
stripe_count=None): self.image.create_snap('snap2') self.image.protect_snap('snap2') clone_name2 = get_temp_image_name() if features is None: self.rbd.clone(ioctx, image_name, 'snap2', ioctx, clone_name2) elif order is None: self.rbd.clone(ioctx, image_name, 'snap2', ioctx, clone_name2, features) elif stripe_unit is None: self.rbd.clone(ioctx, image_name, 'snap2', ioctx, clone_name2, features, order) elif stripe_count is None: self.rbd.clone(ioctx, image_name, 'snap2', ioctx, clone_name2, features, order, stripe_unit) else: self.rbd.clone(ioctx, image_name, 'snap2', ioctx, clone_name2, features, order, stripe_unit, stripe_count) self.rbd.remove(ioctx, clone_name2) self.image.unprotect_snap('snap2') self.image.remove_snap('snap2') def test_with_params(self): self._test_with_params() def test_with_params2(self): global features self._test_with_params(features, self.image.stat()['order']) @require_features([RBD_FEATURE_STRIPINGV2]) def test_with_params3(self): global features self._test_with_params(features, self.image.stat()['order'], self.image.stripe_unit(), self.image.stripe_count()) def test_stripe_unit_and_count(self): global features global ioctx image_name = get_temp_image_name() RBD().create(ioctx, image_name, IMG_SIZE, IMG_ORDER, old_format=False, features=int(features), stripe_unit=1048576, stripe_count=8) image = Image(ioctx, image_name) image.create_snap('snap1') image.protect_snap('snap1') clone_name = get_temp_image_name() RBD().clone(ioctx, image_name, 'snap1', ioctx, clone_name) clone = Image(ioctx, clone_name) eq(1048576, clone.stripe_unit()) eq(8, clone.stripe_count()) clone.close() RBD().remove(ioctx, clone_name) image.unprotect_snap('snap1') image.remove_snap('snap1') image.close() RBD().remove(ioctx, image_name) def test_unprotected(self): self.image.create_snap('snap2') global features clone_name2 = get_temp_image_name() rados.conf_set("rbd_default_clone_format", "1") assert_raises(InvalidArgument, self.rbd.clone, ioctx, image_name, 'snap2', 
ioctx, clone_name2, features) rados.conf_set("rbd_default_clone_format", "auto") self.image.remove_snap('snap2') def test_unprotect_with_children(self): global features # can't remove a snapshot that has dependent clones assert_raises(ImageBusy, self.image.remove_snap, 'snap1') # validate parent info of clone created by TestClone.setUp (pool, image, snap) = self.clone.parent_info() eq(pool, pool_name) eq(image, image_name) eq(snap, 'snap1') eq(self.image.id(), self.clone.parent_id()) # create a new pool... pool_name2 = get_temp_pool_name() rados.create_pool(pool_name2) other_ioctx = rados.open_ioctx(pool_name2) other_ioctx.application_enable('rbd') # ...with a clone of the same parent other_clone_name = get_temp_image_name() rados.conf_set("rbd_default_clone_format", "1") self.rbd.clone(ioctx, image_name, 'snap1', other_ioctx, other_clone_name, features) rados.conf_set("rbd_default_clone_format", "auto") self.other_clone = Image(other_ioctx, other_clone_name) # validate its parent info (pool, image, snap) = self.other_clone.parent_info() eq(pool, pool_name) eq(image, image_name) eq(snap, 'snap1') eq(self.image.id(), self.other_clone.parent_id()) # can't unprotect snap with children assert_raises(ImageBusy, self.image.unprotect_snap, 'snap1') # 2 children, check that cannot remove the parent snap assert_raises(ImageBusy, self.image.remove_snap, 'snap1') # close and remove other pool's clone self.other_clone.close() self.rbd.remove(other_ioctx, other_clone_name) # check that we cannot yet remove the parent snap assert_raises(ImageBusy, self.image.remove_snap, 'snap1') other_ioctx.close() rados.delete_pool(pool_name2) # unprotect, remove parent snap happen in cleanup, and should succeed def test_stat(self): image_info = self.image.stat() clone_info = self.clone.stat() eq(clone_info['size'], image_info['size']) eq(clone_info['size'], self.clone.overlap()) def test_resize_stat(self): self.clone.resize(IMG_SIZE // 2) image_info = self.image.stat() clone_info = 
self.clone.stat() eq(clone_info['size'], IMG_SIZE // 2) eq(image_info['size'], IMG_SIZE) eq(self.clone.overlap(), IMG_SIZE // 2) self.clone.resize(IMG_SIZE * 2) image_info = self.image.stat() clone_info = self.clone.stat() eq(clone_info['size'], IMG_SIZE * 2) eq(image_info['size'], IMG_SIZE) eq(self.clone.overlap(), IMG_SIZE // 2) def test_resize_io(self): parent_data = self.image.read(IMG_SIZE // 2, 256) self.image.resize(0) self.clone.resize(IMG_SIZE // 2 + 128) child_data = self.clone.read(IMG_SIZE // 2, 128) eq(child_data, parent_data[:128]) self.clone.resize(IMG_SIZE) child_data = self.clone.read(IMG_SIZE // 2, 256) eq(child_data, parent_data[:128] + (b'\0' * 128)) self.clone.resize(IMG_SIZE // 2 + 1) child_data = self.clone.read(IMG_SIZE // 2, 1) eq(child_data, parent_data[0:1]) self.clone.resize(0) self.clone.resize(IMG_SIZE) child_data = self.clone.read(IMG_SIZE // 2, 256) eq(child_data, b'\0' * 256) def test_read(self): parent_data = self.image.read(IMG_SIZE // 2, 256) child_data = self.clone.read(IMG_SIZE // 2, 256) eq(child_data, parent_data) def test_write(self): parent_data = self.image.read(IMG_SIZE // 2, 256) new_data = rand_data(256) self.clone.write(new_data, IMG_SIZE // 2 + 256) child_data = self.clone.read(IMG_SIZE // 2 + 256, 256) eq(child_data, new_data) child_data = self.clone.read(IMG_SIZE // 2, 256) eq(child_data, parent_data) parent_data = self.image.read(IMG_SIZE // 2 + 256, 256) eq(parent_data, b'\0' * 256) def check_children(self, expected): actual = self.image.list_children() # dedup for cache pools until # http://tracker.ceph.com/issues/8187 is fixed deduped = set([(pool_name, image[1]) for image in actual]) eq(deduped, set(expected)) def check_children2(self, expected): actual = [{k:v for k,v in x.items() if k in expected[0]} \ for x in self.image.list_children2()] eq(actual, expected) def check_descendants(self, expected): eq(list(self.image.list_descendants()), expected) def get_image_id(self, ioctx, name): with Image(ioctx, name) 
as image: return image.id() def test_list_children(self): global ioctx global features self.image.set_snap('snap1') self.check_children([(pool_name, self.clone_name)]) self.check_children2( [{'pool': pool_name, 'pool_namespace': '', 'image': self.clone_name, 'trash': False, 'id': self.get_image_id(ioctx, self.clone_name)}]) self.check_descendants( [{'pool': pool_name, 'pool_namespace': '', 'image': self.clone_name, 'trash': False, 'id': self.get_image_id(ioctx, self.clone_name)}]) self.clone.close() self.rbd.remove(ioctx, self.clone_name) eq(self.image.list_children(), []) eq(list(self.image.list_children2()), []) eq(list(self.image.list_descendants()), []) clone_name = get_temp_image_name() + '_' expected_children = [] expected_children2 = [] for i in range(10): self.rbd.clone(ioctx, image_name, 'snap1', ioctx, clone_name + str(i), features) expected_children.append((pool_name, clone_name + str(i))) expected_children2.append( {'pool': pool_name, 'pool_namespace': '', 'image': clone_name + str(i), 'trash': False, 'id': self.get_image_id(ioctx, clone_name + str(i))}) self.check_children(expected_children) self.check_children2(expected_children2) self.check_descendants(expected_children2) image6_id = self.get_image_id(ioctx, clone_name + str(5)) RBD().trash_move(ioctx, clone_name + str(5), 0) expected_children.remove((pool_name, clone_name + str(5))) for item in expected_children2: for k, v in item.items(): if v == image6_id: item["trash"] = True self.check_children(expected_children) self.check_children2(expected_children2) self.check_descendants(expected_children2) RBD().trash_restore(ioctx, image6_id, clone_name + str(5)) expected_children.append((pool_name, clone_name + str(5))) for item in expected_children2: for k, v in item.items(): if v == image6_id: item["trash"] = False self.check_children(expected_children) self.check_children2(expected_children2) self.check_descendants(expected_children2) for i in range(10): self.rbd.remove(ioctx, clone_name + str(i)) 
expected_children.remove((pool_name, clone_name + str(i))) expected_children2.pop(0) self.check_children(expected_children) self.check_children2(expected_children2) self.check_descendants(expected_children2) eq(self.image.list_children(), []) eq(list(self.image.list_children2()), []) self.rbd.clone(ioctx, image_name, 'snap1', ioctx, self.clone_name, features) self.check_children([(pool_name, self.clone_name)]) self.check_children2( [{'pool': pool_name, 'pool_namespace': '', 'image': self.clone_name, 'trash': False, 'id': self.get_image_id(ioctx, self.clone_name)}]) self.check_descendants( [{'pool': pool_name, 'pool_namespace': '', 'image': self.clone_name, 'trash': False, 'id': self.get_image_id(ioctx, self.clone_name)}]) self.clone = Image(ioctx, self.clone_name) def test_flatten_errors(self): # test that we can't flatten a non-clone assert_raises(InvalidArgument, self.image.flatten) # test that we can't flatten a snapshot self.clone.create_snap('snap2') self.clone.set_snap('snap2') assert_raises(ReadOnlyImage, self.clone.flatten) self.clone.remove_snap('snap2') def check_flatten_with_order(self, new_order, stripe_unit=None, stripe_count=None): global ioctx global features clone_name2 = get_temp_image_name() self.rbd.clone(ioctx, image_name, 'snap1', ioctx, clone_name2, features, new_order, stripe_unit, stripe_count) #with Image(ioctx, 'clone2') as clone: clone2 = Image(ioctx, clone_name2) clone2.flatten() eq(clone2.overlap(), 0) clone2.close() self.rbd.remove(ioctx, clone_name2) # flatten after resizing to non-block size self.rbd.clone(ioctx, image_name, 'snap1', ioctx, clone_name2, features, new_order, stripe_unit, stripe_count) with Image(ioctx, clone_name2) as clone: clone.resize(IMG_SIZE // 2 - 1) clone.flatten() eq(0, clone.overlap()) self.rbd.remove(ioctx, clone_name2) # flatten after resizing to non-block size self.rbd.clone(ioctx, image_name, 'snap1', ioctx, clone_name2, features, new_order, stripe_unit, stripe_count) with Image(ioctx, clone_name2) as 
clone: clone.resize(IMG_SIZE // 2 + 1) clone.flatten() eq(clone.overlap(), 0) self.rbd.remove(ioctx, clone_name2) def test_flatten_basic(self): self.check_flatten_with_order(IMG_ORDER) def test_flatten_smaller_order(self): self.check_flatten_with_order(IMG_ORDER - 2, 1048576, 1) def test_flatten_larger_order(self): self.check_flatten_with_order(IMG_ORDER + 2) def test_flatten_drops_cache(self): global ioctx global features clone_name2 = get_temp_image_name() self.rbd.clone(ioctx, image_name, 'snap1', ioctx, clone_name2, features, IMG_ORDER) with Image(ioctx, clone_name2) as clone: with Image(ioctx, clone_name2) as clone2: # cache object non-existence data = clone.read(IMG_SIZE // 2, 256) clone2_data = clone2.read(IMG_SIZE // 2, 256) eq(data, clone2_data) clone.flatten() assert_raises(ImageNotFound, clone.parent_info) assert_raises(ImageNotFound, clone2.parent_info) assert_raises(ImageNotFound, clone.parent_id) assert_raises(ImageNotFound, clone2.parent_id) after_flatten = clone.read(IMG_SIZE // 2, 256) eq(data, after_flatten) after_flatten = clone2.read(IMG_SIZE // 2, 256) eq(data, after_flatten) self.rbd.remove(ioctx, clone_name2) def test_flatten_multi_level(self): self.clone.create_snap('snap2') self.clone.protect_snap('snap2') clone_name3 = get_temp_image_name() self.rbd.clone(ioctx, self.clone_name, 'snap2', ioctx, clone_name3, features) self.clone.flatten() with Image(ioctx, clone_name3) as clone3: clone3.flatten() self.clone.unprotect_snap('snap2') self.clone.remove_snap('snap2') self.rbd.remove(ioctx, clone_name3) def test_flatten_with_progress(self): d = {'received_callback': False} def progress_cb(current, total): d['received_callback'] = True return 0 global ioctx global features clone_name = get_temp_image_name() self.rbd.clone(ioctx, image_name, 'snap1', ioctx, clone_name, features, 0) with Image(ioctx, clone_name) as clone: clone.flatten(on_progress=progress_cb) self.rbd.remove(ioctx, clone_name) eq(True, d['received_callback']) def 
test_resize_flatten_multi_level(self): self.clone.create_snap('snap2') self.clone.protect_snap('snap2') clone_name3 = get_temp_image_name() self.rbd.clone(ioctx, self.clone_name, 'snap2', ioctx, clone_name3, features) self.clone.resize(1) orig_data = self.image.read(0, 256) with Image(ioctx, clone_name3) as clone3: clone3_data = clone3.read(0, 256) eq(orig_data, clone3_data) self.clone.flatten() with Image(ioctx, clone_name3) as clone3: clone3_data = clone3.read(0, 256) eq(orig_data, clone3_data) self.rbd.remove(ioctx, clone_name3) self.clone.unprotect_snap('snap2') self.clone.remove_snap('snap2') def test_trash_snapshot(self): self.image.create_snap('snap2') global features clone_name = get_temp_image_name() rados.conf_set("rbd_default_clone_format", "2") self.rbd.clone(ioctx, image_name, 'snap2', ioctx, clone_name, features) rados.conf_set("rbd_default_clone_format", "auto") self.image.remove_snap('snap2') snaps = [s for s in self.image.list_snaps() if s['name'] != 'snap1'] eq([RBD_SNAP_NAMESPACE_TYPE_TRASH], [s['namespace'] for s in snaps]) eq([{'original_name' : 'snap2'}], [s['trash'] for s in snaps]) self.rbd.remove(ioctx, clone_name) eq([], [s for s in self.image.list_snaps() if s['name'] != 'snap1']) @require_linux() @blocklist_features([RBD_FEATURE_JOURNALING]) def test_encryption_luks1(self): data = b'hello world' offset = 16<<20 image_size = 32<<20 self.clone.resize(image_size) self.clone.encryption_format(RBD_ENCRYPTION_FORMAT_LUKS1, "password") self.clone.encryption_load2( ((RBD_ENCRYPTION_FORMAT_LUKS1, "password"),)) self.clone.write(data, offset) eq(self.clone.read(0, 16), self.image.read(0, 16)) @require_linux() @blocklist_features([RBD_FEATURE_JOURNALING]) def test_encryption_luks2(self): data = b'hello world' offset = 16<<20 image_size = 64<<20 self.clone.resize(image_size) self.clone.encryption_format(RBD_ENCRYPTION_FORMAT_LUKS2, "password") self.clone.encryption_load2( ((RBD_ENCRYPTION_FORMAT_LUKS2, "password"),)) self.clone.write(data, offset) 
eq(self.clone.read(0, 16), self.image.read(0, 16)) class TestExclusiveLock(object): @require_features([RBD_FEATURE_EXCLUSIVE_LOCK]) def setUp(self): global rados2 rados2 = Rados(conffile='') rados2.connect() global ioctx2 ioctx2 = rados2.open_ioctx(pool_name) create_image() def tearDown(self): remove_image() global ioctx2 ioctx2.close() global rados2 rados2.shutdown() def test_ownership(self): with Image(ioctx, image_name) as image1, Image(ioctx2, image_name) as image2: image1.write(b'0'*256, 0) eq(image1.is_exclusive_lock_owner(), True) eq(image2.is_exclusive_lock_owner(), False) def test_snapshot_leadership(self): with Image(ioctx, image_name) as image: image.create_snap('snap') eq(image.is_exclusive_lock_owner(), True) try: with Image(ioctx, image_name) as image: image.write(b'0'*256, 0) eq(image.is_exclusive_lock_owner(), True) image.set_snap('snap') eq(image.is_exclusive_lock_owner(), False) with Image(ioctx, image_name, snapshot='snap') as image: eq(image.is_exclusive_lock_owner(), False) finally: with Image(ioctx, image_name) as image: image.remove_snap('snap') def test_read_only_leadership(self): with Image(ioctx, image_name, read_only=True) as image: eq(image.is_exclusive_lock_owner(), False) def test_follower_flatten(self): with Image(ioctx, image_name) as image: image.create_snap('snap') image.protect_snap('snap') try: RBD().clone(ioctx, image_name, 'snap', ioctx, 'clone', features) with Image(ioctx, 'clone') as image1, Image(ioctx2, 'clone') as image2: data = rand_data(256) image1.write(data, 0) image2.flatten() assert_raises(ImageNotFound, image1.parent_info) assert_raises(ImageNotFound, image1.parent_id) parent = True for x in range(30): try: image2.parent_info() except ImageNotFound: parent = False break eq(False, parent) finally: RBD().remove(ioctx, 'clone') with Image(ioctx, image_name) as image: image.unprotect_snap('snap') image.remove_snap('snap') def test_follower_resize(self): with Image(ioctx, image_name) as image1, Image(ioctx2, image_name) 
as image2: image1.write(b'0'*256, 0) for new_size in [IMG_SIZE * 2, IMG_SIZE // 2]: image2.resize(new_size); eq(new_size, image1.size()) for x in range(30): if new_size == image2.size(): break time.sleep(1) eq(new_size, image2.size()) def test_follower_snap_create(self): with Image(ioctx, image_name) as image1, Image(ioctx2, image_name) as image2: image2.create_snap('snap1') image1.remove_snap('snap1') def test_follower_snap_rollback(self): with Image(ioctx, image_name) as image1, Image(ioctx2, image_name) as image2: image1.create_snap('snap') try: assert_raises(ReadOnlyImage, image2.rollback_to_snap, 'snap') image1.rollback_to_snap('snap') finally: image1.remove_snap('snap') def test_follower_discard(self): global rados with Image(ioctx, image_name) as image1, Image(ioctx2, image_name) as image2: data = rand_data(256) image1.write(data, 0) image2.discard(0, 256) eq(image1.is_exclusive_lock_owner(), False) eq(image2.is_exclusive_lock_owner(), True) read = image2.read(0, 256) if rados.conf_get('rbd_skip_partial_discard') == 'false': eq(256 * b'\0', read) else: eq(data, read) def test_follower_write(self): with Image(ioctx, image_name) as image1, Image(ioctx2, image_name) as image2: data = rand_data(256) image1.write(data, 0) image2.write(data, IMG_SIZE // 2) eq(image1.is_exclusive_lock_owner(), False) eq(image2.is_exclusive_lock_owner(), True) for offset in [0, IMG_SIZE // 2]: read = image2.read(offset, 256) eq(data, read) def test_acquire_release_lock(self): with Image(ioctx, image_name) as image: image.lock_acquire(RBD_LOCK_MODE_EXCLUSIVE) image.lock_release() @attr('SKIP_IF_CRIMSON') def test_break_lock(self): blocklist_rados = Rados(conffile='') blocklist_rados.connect() try: blocklist_ioctx = blocklist_rados.open_ioctx(pool_name) try: rados2.conf_set('rbd_blocklist_on_break_lock', 'true') with Image(ioctx2, image_name) as image, \ Image(blocklist_ioctx, image_name) as blocklist_image: lock_owners = list(image.lock_get_owners()) eq(0, len(lock_owners)) 
blocklist_image.lock_acquire(RBD_LOCK_MODE_EXCLUSIVE) assert_raises(ReadOnlyImage, image.lock_acquire, RBD_LOCK_MODE_EXCLUSIVE) lock_owners = list(image.lock_get_owners()) eq(1, len(lock_owners)) eq(RBD_LOCK_MODE_EXCLUSIVE, lock_owners[0]['mode']) image.lock_break(RBD_LOCK_MODE_EXCLUSIVE, lock_owners[0]['owner']) assert_raises(ConnectionShutdown, blocklist_image.is_exclusive_lock_owner) blocklist_rados.wait_for_latest_osdmap() data = rand_data(256) assert_raises(ConnectionShutdown, blocklist_image.write, data, 0) image.lock_acquire(RBD_LOCK_MODE_EXCLUSIVE) try: blocklist_image.close() except ConnectionShutdown: pass finally: blocklist_ioctx.close() finally: blocklist_rados.shutdown() class TestMirroring(object): @staticmethod def check_info(info, global_id, state, primary=None): eq(global_id, info['global_id']) eq(state, info['state']) if primary is not None: eq(primary, info['primary']) def setUp(self): self.rbd = RBD() self.initial_mirror_mode = self.rbd.mirror_mode_get(ioctx) self.rbd.mirror_mode_set(ioctx, RBD_MIRROR_MODE_POOL) create_image() self.image = Image(ioctx, image_name) def tearDown(self): self.image.close() remove_image() self.rbd.mirror_mode_set(ioctx, self.initial_mirror_mode) def test_uuid(self): mirror_uuid = self.rbd.mirror_uuid_get(ioctx) assert(mirror_uuid) def test_site_name(self): site_name = "us-west-1" self.rbd.mirror_site_name_set(rados, site_name) eq(site_name, self.rbd.mirror_site_name_get(rados)) self.rbd.mirror_site_name_set(rados, "") eq(rados.get_fsid(), self.rbd.mirror_site_name_get(rados)) def test_mirror_peer_bootstrap(self): eq([], list(self.rbd.mirror_peer_list(ioctx))) self.rbd.mirror_mode_set(ioctx, RBD_MIRROR_MODE_DISABLED) assert_raises(InvalidArgument, self.rbd.mirror_peer_bootstrap_create, ioctx); self.rbd.mirror_mode_set(ioctx, RBD_MIRROR_MODE_POOL) token_b64 = self.rbd.mirror_peer_bootstrap_create(ioctx) token = base64.b64decode(token_b64) token_dict = json.loads(token) eq(sorted(['fsid', 'client_id', 'key', 
'mon_host']), sorted(list(token_dict.keys()))) # requires different cluster assert_raises(InvalidArgument, self.rbd.mirror_peer_bootstrap_import, ioctx, RBD_MIRROR_PEER_DIRECTION_RX, token_b64) def test_mirror_peer(self): eq([], list(self.rbd.mirror_peer_list(ioctx))) site_name = "test_site" client_name = "test_client" uuid = self.rbd.mirror_peer_add(ioctx, site_name, client_name, direction=RBD_MIRROR_PEER_DIRECTION_RX_TX) assert(uuid) peer = { 'uuid' : uuid, 'direction': RBD_MIRROR_PEER_DIRECTION_RX_TX, 'site_name' : site_name, 'cluster_name' : site_name, 'mirror_uuid': '', 'client_name' : client_name, } eq([peer], list(self.rbd.mirror_peer_list(ioctx))) cluster_name = "test_cluster1" self.rbd.mirror_peer_set_cluster(ioctx, uuid, cluster_name) client_name = "test_client1" self.rbd.mirror_peer_set_client(ioctx, uuid, client_name) peer = { 'uuid' : uuid, 'direction': RBD_MIRROR_PEER_DIRECTION_RX_TX, 'site_name' : cluster_name, 'cluster_name' : cluster_name, 'mirror_uuid': '', 'client_name' : client_name, } eq([peer], list(self.rbd.mirror_peer_list(ioctx))) attribs = { RBD_MIRROR_PEER_ATTRIBUTE_NAME_MON_HOST: 'host1', RBD_MIRROR_PEER_ATTRIBUTE_NAME_KEY: 'abc' } self.rbd.mirror_peer_set_attributes(ioctx, uuid, attribs) eq(attribs, self.rbd.mirror_peer_get_attributes(ioctx, uuid)) self.rbd.mirror_peer_remove(ioctx, uuid) eq([], list(self.rbd.mirror_peer_list(ioctx))) @require_features([RBD_FEATURE_EXCLUSIVE_LOCK, RBD_FEATURE_JOURNALING]) def test_mirror_image(self): self.rbd.mirror_mode_set(ioctx, RBD_MIRROR_MODE_IMAGE) self.image.mirror_image_disable(True) info = self.image.mirror_image_get_info() self.check_info(info, '', RBD_MIRROR_IMAGE_DISABLED, False) self.image.mirror_image_enable() info = self.image.mirror_image_get_info() global_id = info['global_id'] self.check_info(info, global_id, RBD_MIRROR_IMAGE_ENABLED, True) self.rbd.mirror_mode_set(ioctx, RBD_MIRROR_MODE_POOL) fail = False try: self.image.mirror_image_disable(True) except InvalidArgument: fail = True 
eq(True, fail) # Fails because of mirror mode pool self.image.mirror_image_demote() info = self.image.mirror_image_get_info() self.check_info(info, global_id, RBD_MIRROR_IMAGE_ENABLED, False) entries = dict(self.rbd.mirror_image_info_list(ioctx)) info['mode'] = RBD_MIRROR_IMAGE_MODE_JOURNAL; eq(info, entries[self.image.id()]) self.image.mirror_image_resync() self.image.mirror_image_promote(True) info = self.image.mirror_image_get_info() self.check_info(info, global_id, RBD_MIRROR_IMAGE_ENABLED, True) entries = dict(self.rbd.mirror_image_info_list(ioctx)) info['mode'] = RBD_MIRROR_IMAGE_MODE_JOURNAL; eq(info, entries[self.image.id()]) fail = False try: self.image.mirror_image_resync() except InvalidArgument: fail = True eq(True, fail) # Fails because it is primary status = self.image.mirror_image_get_status() eq(image_name, status['name']) eq(False, status['up']) eq(MIRROR_IMAGE_STATUS_STATE_UNKNOWN, status['state']) info = status['info'] self.check_info(info, global_id, RBD_MIRROR_IMAGE_ENABLED, True) @require_features([RBD_FEATURE_EXCLUSIVE_LOCK, RBD_FEATURE_JOURNALING]) def test_mirror_image_status(self): info = self.image.mirror_image_get_info() global_id = info['global_id'] state = info['state'] primary = info['primary'] status = self.image.mirror_image_get_status() eq(image_name, status['name']) eq(False, status['up']) eq(MIRROR_IMAGE_STATUS_STATE_UNKNOWN, status['state']) eq([], status['remote_statuses']) info = status['info'] self.check_info(info, global_id, state, primary) images = list(self.rbd.mirror_image_status_list(ioctx)) eq(1, len(images)) status = images[0] eq(image_name, status['name']) eq(False, status['up']) eq(MIRROR_IMAGE_STATUS_STATE_UNKNOWN, status['state']) info = status['info'] self.check_info(info, global_id, state) states = self.rbd.mirror_image_status_summary(ioctx) eq([(MIRROR_IMAGE_STATUS_STATE_UNKNOWN, 1)], states) assert_raises(ImageNotFound, self.image.mirror_image_get_instance_id) instance_ids = 
list(self.rbd.mirror_image_instance_id_list(ioctx)) eq(0, len(instance_ids)) N = 65 for i in range(N): self.rbd.create(ioctx, image_name + str(i), IMG_SIZE, IMG_ORDER, old_format=False, features=int(features)) images = list(self.rbd.mirror_image_status_list(ioctx)) eq(N + 1, len(images)) for i in range(N): self.rbd.remove(ioctx, image_name + str(i)) def test_mirror_image_create_snapshot(self): assert_raises(InvalidArgument, self.image.mirror_image_create_snapshot) peer1_uuid = self.rbd.mirror_peer_add(ioctx, "cluster1", "client") peer2_uuid = self.rbd.mirror_peer_add(ioctx, "cluster2", "client") self.rbd.mirror_mode_set(ioctx, RBD_MIRROR_MODE_IMAGE) self.image.mirror_image_disable(False) self.image.mirror_image_enable(RBD_MIRROR_IMAGE_MODE_SNAPSHOT) mode = self.image.mirror_image_get_mode() eq(RBD_MIRROR_IMAGE_MODE_SNAPSHOT, mode) snaps = list(self.image.list_snaps()) eq(1, len(snaps)) snap = snaps[0] eq(snap['namespace'], RBD_SNAP_NAMESPACE_TYPE_MIRROR) eq(RBD_SNAP_MIRROR_STATE_PRIMARY, snap['mirror']['state']) info = self.image.mirror_image_get_info() eq(True, info['primary']) entries = dict( self.rbd.mirror_image_info_list(ioctx, RBD_MIRROR_IMAGE_MODE_SNAPSHOT)) info['mode'] = RBD_MIRROR_IMAGE_MODE_SNAPSHOT; eq(info, entries[self.image.id()]) snap_id = self.image.mirror_image_create_snapshot( RBD_SNAP_CREATE_SKIP_QUIESCE) snaps = list(self.image.list_snaps()) eq(2, len(snaps)) snap = snaps[0] eq(snap['namespace'], RBD_SNAP_NAMESPACE_TYPE_MIRROR) eq(RBD_SNAP_MIRROR_STATE_PRIMARY, snap['mirror']['state']) snap = snaps[1] eq(snap['id'], snap_id) eq(snap['namespace'], RBD_SNAP_NAMESPACE_TYPE_MIRROR) eq(RBD_SNAP_MIRROR_STATE_PRIMARY, snap['mirror']['state']) eq(sorted([peer1_uuid, peer2_uuid]), sorted(snap['mirror']['mirror_peer_uuids'])) eq(RBD_SNAP_NAMESPACE_TYPE_MIRROR, self.image.snap_get_namespace_type(snap_id)) mirror_snap = self.image.snap_get_mirror_namespace(snap_id) eq(mirror_snap, snap['mirror']) self.image.mirror_image_demote() 
assert_raises(InvalidArgument, self.image.mirror_image_create_snapshot) snaps = list(self.image.list_snaps()) eq(3, len(snaps)) snap = snaps[0] eq(snap['namespace'], RBD_SNAP_NAMESPACE_TYPE_MIRROR) snap = snaps[1] eq(snap['id'], snap_id) eq(snap['namespace'], RBD_SNAP_NAMESPACE_TYPE_MIRROR) snap = snaps[2] eq(snap['namespace'], RBD_SNAP_NAMESPACE_TYPE_MIRROR) eq(RBD_SNAP_MIRROR_STATE_PRIMARY_DEMOTED, snap['mirror']['state']) eq(sorted([peer1_uuid, peer2_uuid]), sorted(snap['mirror']['mirror_peer_uuids'])) self.rbd.mirror_peer_remove(ioctx, peer1_uuid) self.rbd.mirror_peer_remove(ioctx, peer2_uuid) self.image.mirror_image_promote(False) def test_aio_mirror_image_create_snapshot(self): peer_uuid = self.rbd.mirror_peer_add(ioctx, "cluster", "client") self.rbd.mirror_mode_set(ioctx, RBD_MIRROR_MODE_IMAGE) self.image.mirror_image_disable(False) self.image.mirror_image_enable(RBD_MIRROR_IMAGE_MODE_SNAPSHOT) snaps = list(self.image.list_snaps()) eq(1, len(snaps)) snap = snaps[0] eq(snap['namespace'], RBD_SNAP_NAMESPACE_TYPE_MIRROR) eq(RBD_SNAP_MIRROR_STATE_PRIMARY, snap['mirror']['state']) # this is a list so that the local cb() can modify it info = [None] def cb(_, _info): info[0] = _info comp = self.image.aio_mirror_image_get_info(cb) comp.wait_for_complete_and_cb() assert_not_equal(info[0], None) eq(comp.get_return_value(), 0) eq(sys.getrefcount(comp), 2) info = info[0] global_id = info['global_id'] self.check_info(info, global_id, RBD_MIRROR_IMAGE_ENABLED, True) mode = [None] def cb(_, _mode): mode[0] = _mode comp = self.image.aio_mirror_image_get_mode(cb) comp.wait_for_complete_and_cb() eq(comp.get_return_value(), 0) eq(sys.getrefcount(comp), 2) eq(mode[0], RBD_MIRROR_IMAGE_MODE_SNAPSHOT) snap_id = [None] def cb(_, _snap_id): snap_id[0] = _snap_id comp = self.image.aio_mirror_image_create_snapshot(0, cb) comp.wait_for_complete_and_cb() assert_not_equal(snap_id[0], None) eq(comp.get_return_value(), 0) eq(sys.getrefcount(comp), 2) snaps = list(self.image.list_snaps()) 
eq(2, len(snaps)) snap = snaps[1] eq(snap['id'], snap_id[0]) eq(snap['namespace'], RBD_SNAP_NAMESPACE_TYPE_MIRROR) eq(RBD_SNAP_MIRROR_STATE_PRIMARY, snap['mirror']['state']) eq([peer_uuid], snap['mirror']['mirror_peer_uuids']) self.rbd.mirror_peer_remove(ioctx, peer_uuid) class TestTrash(object): def setUp(self): global rados2 rados2 = Rados(conffile='') rados2.connect() global ioctx2 ioctx2 = rados2.open_ioctx(pool_name) def tearDown(self): global ioctx2 ioctx2.close() global rados2 rados2.shutdown() def test_move(self): create_image() with Image(ioctx, image_name) as image: image_id = image.id() RBD().trash_move(ioctx, image_name, 1000) RBD().trash_remove(ioctx, image_id, True) def test_purge(self): create_image() with Image(ioctx, image_name) as image: image_name1 = image_name image_id1 = image.id() create_image() with Image(ioctx, image_name) as image: image_name2 = image_name image_id2 = image.id() RBD().trash_move(ioctx, image_name1, 0) RBD().trash_move(ioctx, image_name2, 1000) RBD().trash_purge(ioctx, datetime.now()) entries = list(RBD().trash_list(ioctx)) eq([image_id2], [x['id'] for x in entries]) RBD().trash_remove(ioctx, image_id2, True) def test_remove_denied(self): create_image() with Image(ioctx, image_name) as image: image_id = image.id() RBD().trash_move(ioctx, image_name, 1000) assert_raises(PermissionError, RBD().trash_remove, ioctx, image_id) RBD().trash_remove(ioctx, image_id, True) def test_remove(self): create_image() with Image(ioctx, image_name) as image: image_id = image.id() RBD().trash_move(ioctx, image_name, 0) RBD().trash_remove(ioctx, image_id) def test_remove_with_progress(self): d = {'received_callback': False} def progress_cb(current, total): d['received_callback'] = True return 0 create_image() with Image(ioctx, image_name) as image: image_id = image.id() RBD().trash_move(ioctx, image_name, 0) RBD().trash_remove(ioctx, image_id, on_progress=progress_cb) eq(True, d['received_callback']) def test_get(self): create_image() with 
Image(ioctx, image_name) as image: image_id = image.id() RBD().trash_move(ioctx, image_name, 1000) info = RBD().trash_get(ioctx, image_id) eq(image_id, info['id']) eq(image_name, info['name']) eq('USER', info['source']) assert(info['deferment_end_time'] > info['deletion_time']) RBD().trash_remove(ioctx, image_id, True) def test_list(self): create_image() with Image(ioctx, image_name) as image: image_id1 = image.id() image_name1 = image_name RBD().trash_move(ioctx, image_name, 1000) create_image() with Image(ioctx, image_name) as image: image_id2 = image.id() image_name2 = image_name RBD().trash_move(ioctx, image_name, 1000) entries = list(RBD().trash_list(ioctx)) for e in entries: if e['id'] == image_id1: eq(e['name'], image_name1) elif e['id'] == image_id2: eq(e['name'], image_name2) else: assert False eq(e['source'], 'USER') assert e['deferment_end_time'] > e['deletion_time'] RBD().trash_remove(ioctx, image_id1, True) RBD().trash_remove(ioctx, image_id2, True) def test_restore(self): create_image() with Image(ioctx, image_name) as image: image_id = image.id() RBD().trash_move(ioctx, image_name, 1000) RBD().trash_restore(ioctx, image_id, image_name) remove_image() def test_create_group(): create_group() remove_group() def test_rename_group(): create_group() if group_name is not None: rename_group() eq(["new" + group_name], RBD().group_list(ioctx)) RBD().group_remove(ioctx, "new" + group_name) else: remove_group() def test_list_groups_empty(): eq([], RBD().group_list(ioctx)) @with_setup(create_group, remove_group) def test_list_groups(): eq([group_name], RBD().group_list(ioctx)) @with_setup(create_group) def test_list_groups_after_removed(): remove_group() eq([], RBD().group_list(ioctx)) class TestGroups(object): def setUp(self): global snap_name self.rbd = RBD() create_image() self.image_names = [image_name] self.image = Image(ioctx, image_name) create_group() snap_name = get_temp_snap_name() self.group = Group(ioctx, group_name) def tearDown(self): remove_group() 
self.image = None for name in self.image_names: RBD().remove(ioctx, name) def test_group_image_add(self): self.group.add_image(ioctx, image_name) def test_group_image_list_empty(self): eq([], list(self.group.list_images())) def test_group_image_list(self): eq([], list(self.group.list_images())) self.group.add_image(ioctx, image_name) eq([image_name], [img['name'] for img in self.group.list_images()]) def test_group_image_list_move_to_trash(self): eq([], list(self.group.list_images())) with Image(ioctx, image_name) as image: image_id = image.id() self.group.add_image(ioctx, image_name) eq([image_name], [img['name'] for img in self.group.list_images()]) RBD().trash_move(ioctx, image_name, 0) eq([], list(self.group.list_images())) RBD().trash_restore(ioctx, image_id, image_name) def test_group_image_many_images(self): eq([], list(self.group.list_images())) self.group.add_image(ioctx, image_name) for x in range(0, 20): create_image() self.image_names.append(image_name) self.group.add_image(ioctx, image_name) self.image_names.sort() answer = [img['name'] for img in self.group.list_images()] answer.sort() eq(self.image_names, answer) def test_group_image_remove(self): eq([], list(self.group.list_images())) self.group.add_image(ioctx, image_name) with Image(ioctx, image_name) as image: eq(RBD_OPERATION_FEATURE_GROUP, image.op_features() & RBD_OPERATION_FEATURE_GROUP) group = image.group() eq(group_name, group['name']) eq([image_name], [img['name'] for img in self.group.list_images()]) self.group.remove_image(ioctx, image_name) eq([], list(self.group.list_images())) with Image(ioctx, image_name) as image: eq(0, image.op_features() & RBD_OPERATION_FEATURE_GROUP) def test_group_snap(self): global snap_name eq([], list(self.group.list_snaps())) self.group.create_snap(snap_name) eq([snap_name], [snap['name'] for snap in self.group.list_snaps()]) for snap in self.image.list_snaps(): eq(rbd.RBD_SNAP_NAMESPACE_TYPE_GROUP, snap['namespace']) info = snap['group'] eq(group_name, 
info['group_name']) eq(snap_name, info['group_snap_name']) self.group.remove_snap(snap_name) eq([], list(self.group.list_snaps())) def test_group_snap_flags(self): global snap_name eq([], list(self.group.list_snaps())) self.group.create_snap(snap_name, 0) eq([snap_name], [snap['name'] for snap in self.group.list_snaps()]) self.group.remove_snap(snap_name) self.group.create_snap(snap_name, RBD_SNAP_CREATE_SKIP_QUIESCE) eq([snap_name], [snap['name'] for snap in self.group.list_snaps()]) self.group.remove_snap(snap_name) self.group.create_snap(snap_name, RBD_SNAP_CREATE_IGNORE_QUIESCE_ERROR) eq([snap_name], [snap['name'] for snap in self.group.list_snaps()]) self.group.remove_snap(snap_name) assert_raises(InvalidArgument, self.group.create_snap, snap_name, RBD_SNAP_CREATE_SKIP_QUIESCE | RBD_SNAP_CREATE_IGNORE_QUIESCE_ERROR) eq([], list(self.group.list_snaps())) def test_group_snap_list_many(self): global snap_name eq([], list(self.group.list_snaps())) snap_names = [] for x in range(0, 20): snap_names.append(snap_name) self.group.create_snap(snap_name) snap_name = get_temp_snap_name() snap_names.sort() answer = [snap['name'] for snap in self.group.list_snaps()] answer.sort() eq(snap_names, answer) def test_group_snap_namespace(self): global snap_name eq([], list(self.group.list_snaps())) self.group.add_image(ioctx, image_name) self.group.create_snap(snap_name) eq(1, len([snap['name'] for snap in self.image.list_snaps()])) self.group.remove_image(ioctx, image_name) self.group.remove_snap(snap_name) eq([], list(self.group.list_snaps())) def test_group_snap_rename(self): global snap_name new_snap_name = "new" + snap_name eq([], list(self.group.list_snaps())) self.group.create_snap(snap_name) eq([snap_name], [snap['name'] for snap in self.group.list_snaps()]) self.group.rename_snap(snap_name, new_snap_name) eq([new_snap_name], [snap['name'] for snap in self.group.list_snaps()]) self.group.remove_snap(new_snap_name) eq([], list(self.group.list_snaps())) def 
test_group_snap_rollback(self): eq([], list(self.group.list_images())) self.group.add_image(ioctx, image_name) with Image(ioctx, image_name) as image: image.write(b'\0' * 256, 0) read = image.read(0, 256) eq(read, b'\0' * 256) global snap_name eq([], list(self.group.list_snaps())) self.group.create_snap(snap_name) eq([snap_name], [snap['name'] for snap in self.group.list_snaps()]) with Image(ioctx, image_name) as image: data = rand_data(256) image.write(data, 0) read = image.read(0, 256) eq(read, data) self.group.rollback_to_snap(snap_name) with Image(ioctx, image_name) as image: read = image.read(0, 256) eq(read, b'\0' * 256) self.group.remove_image(ioctx, image_name) eq([], list(self.group.list_images())) self.group.remove_snap(snap_name) eq([], list(self.group.list_snaps())) @with_setup(create_image, remove_image) def test_rename(): rbd = RBD() image_name2 = get_temp_image_name() class TestMigration(object): def test_migration(self): create_image() RBD().migration_prepare(ioctx, image_name, ioctx, image_name, features=63, order=23, stripe_unit=1<<23, stripe_count=1, data_pool=None) status = RBD().migration_status(ioctx, image_name) eq(image_name, status['source_image_name']) eq(image_name, status['dest_image_name']) eq(RBD_IMAGE_MIGRATION_STATE_PREPARED, status['state']) with Image(ioctx, image_name) as image: source_spec = image.migration_source_spec() eq("native", source_spec["type"]) RBD().migration_execute(ioctx, image_name) RBD().migration_commit(ioctx, image_name) remove_image() def test_migration_import(self): create_image() with Image(ioctx, image_name) as image: image_id = image.id() image.create_snap('snap') source_spec = json.dumps( {'type': 'native', 'pool_id': ioctx.get_pool_id(), 'pool_namespace': '', 'image_name': image_name, 'image_id': image_id, 'snap_name': 'snap'}) dst_image_name = get_temp_image_name() RBD().migration_prepare_import(source_spec, ioctx, dst_image_name, features=63, order=23, stripe_unit=1<<23, stripe_count=1, data_pool=None) 
status = RBD().migration_status(ioctx, dst_image_name) eq('', status['source_image_name']) eq(dst_image_name, status['dest_image_name']) eq(RBD_IMAGE_MIGRATION_STATE_PREPARED, status['state']) with Image(ioctx, dst_image_name) as image: source_spec = image.migration_source_spec() eq("native", source_spec["type"]) RBD().migration_execute(ioctx, dst_image_name) RBD().migration_commit(ioctx, dst_image_name) with Image(ioctx, image_name) as image: image.remove_snap('snap') with Image(ioctx, dst_image_name) as image: image.remove_snap('snap') RBD().remove(ioctx, dst_image_name) RBD().remove(ioctx, image_name) def test_migration_with_progress(self): d = {'received_callback': False} def progress_cb(current, total): d['received_callback'] = True return 0 create_image() RBD().migration_prepare(ioctx, image_name, ioctx, image_name, features=63, order=23, stripe_unit=1<<23, stripe_count=1, data_pool=None) RBD().migration_execute(ioctx, image_name, on_progress=progress_cb) eq(True, d['received_callback']) d['received_callback'] = False RBD().migration_commit(ioctx, image_name, on_progress=progress_cb) eq(True, d['received_callback']) remove_image() def test_migrate_abort(self): create_image() RBD().migration_prepare(ioctx, image_name, ioctx, image_name, features=63, order=23, stripe_unit=1<<23, stripe_count=1, data_pool=None) RBD().migration_abort(ioctx, image_name) remove_image() def test_migrate_abort_with_progress(self): d = {'received_callback': False} def progress_cb(current, total): d['received_callback'] = True return 0 create_image() RBD().migration_prepare(ioctx, image_name, ioctx, image_name, features=63, order=23, stripe_unit=1<<23, stripe_count=1, data_pool=None) RBD().migration_abort(ioctx, image_name, on_progress=progress_cb) eq(True, d['received_callback']) remove_image()
104,274
36.121752
111
py
null
ceph-main/src/test/pybind/test_rgwfs.py
# vim: expandtab smarttab shiftwidth=4 softtabstop=4 from nose.tools import assert_raises, assert_equal, with_setup import rgw as librgwfs rgwfs = None root_handler = None root_dir_handler = None def setup_module(): global rgwfs global root_handler rgwfs = librgwfs.LibRGWFS("testid", "", "") root_handler = rgwfs.mount() def teardown_module(): global rgwfs rgwfs.shutdown() def setup_test(): global root_dir_handler names = [] try: root_dir_handler = rgwfs.opendir(root_handler, b"bucket", 0) except Exception: root_dir_handler = rgwfs.mkdir(root_handler, b"bucket", 0) def cb(name, offset, flags): names.append(name) rgwfs.readdir(root_dir_handler, cb, 0, 0) for name in names: rgwfs.unlink(root_dir_handler, name, 0) @with_setup(setup_test) def test_version(): rgwfs.version() @with_setup(setup_test) def test_fstat(): stat = rgwfs.fstat(root_dir_handler) assert(len(stat) == 13) file_handler = rgwfs.create(root_dir_handler, b'file-1', 0) stat = rgwfs.fstat(file_handler) assert(len(stat) == 13) rgwfs.close(file_handler) @with_setup(setup_test) def test_statfs(): stat = rgwfs.statfs() assert(len(stat) == 11) @with_setup(setup_test) def test_fsync(): fd = rgwfs.create(root_dir_handler, b'file-1', 0) rgwfs.write(fd, 0, b"asdf") rgwfs.fsync(fd, 0) rgwfs.write(fd, 4, b"qwer") rgwfs.fsync(fd, 1) rgwfs.close(fd) @with_setup(setup_test) def test_directory(): dir_handler = rgwfs.mkdir(root_dir_handler, b"temp-directory", 0) rgwfs.close(dir_handler) rgwfs.unlink(root_dir_handler, b"temp-directory") @with_setup(setup_test) def test_walk_dir(): dirs = [b"dir-1", b"dir-2", b"dir-3"] handles = [] for i in dirs: d = rgwfs.mkdir(root_dir_handler, i, 0) handles.append(d) entries = [] def cb(name, offset): entries.append((name, offset)) offset, eof = rgwfs.readdir(root_dir_handler, cb, 0) for i in handles: rgwfs.close(i) for name, _ in entries: assert(name in dirs) rgwfs.unlink(root_dir_handler, name) @with_setup(setup_test) def test_rename(): file_handler = rgwfs.create(root_dir_handler, 
b"a", 0) rgwfs.close(file_handler) rgwfs.rename(root_dir_handler, b"a", root_dir_handler, b"b") file_handler = rgwfs.open(root_dir_handler, b"b", 0) rgwfs.fstat(file_handler) rgwfs.close(file_handler) rgwfs.unlink(root_dir_handler, b"b") @with_setup(setup_test) def test_open(): assert_raises(librgwfs.ObjectNotFound, rgwfs.open, root_dir_handler, b'file-1', 0) assert_raises(librgwfs.ObjectNotFound, rgwfs.open, root_dir_handler, b'file-1', 0) fd = rgwfs.create(root_dir_handler, b'file-1', 0) rgwfs.write(fd, 0, b"asdf") rgwfs.close(fd) fd = rgwfs.open(root_dir_handler, b'file-1', 0) assert_equal(rgwfs.read(fd, 0, 4), b"asdf") rgwfs.close(fd) fd = rgwfs.open(root_dir_handler, b'file-1', 0) rgwfs.write(fd, 0, b"aaaazxcv") rgwfs.close(fd) fd = rgwfs.open(root_dir_handler, b'file-1', 0) assert_equal(rgwfs.read(fd, 4, 4), b"zxcv") rgwfs.close(fd) fd = rgwfs.open(root_dir_handler, b'file-1', 0) assert_equal(rgwfs.read(fd, 0, 4), b"aaaa") rgwfs.close(fd) rgwfs.unlink(root_dir_handler, b"file-1") @with_setup(setup_test) def test_mount_unmount(): global root_handler global root_dir_handler test_directory() rgwfs.close(root_dir_handler) rgwfs.close(root_handler) rgwfs.unmount() root_handler = rgwfs.mount() root_dir_handler = rgwfs.opendir(root_handler, b"bucket", 0) test_open()
3,722
24.675862
69
py
null
ceph-main/src/test/rbd_mirror/random_write.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "common/ceph_argparse.h" #include "common/config.h" #include "common/debug.h" #include "common/errno.h" #include "common/Cond.h" #include "include/rados/librados.hpp" #include "include/rbd/librbd.hpp" #include "global/global_init.h" #include <string> #include <vector> #define dout_context g_ceph_context #define dout_subsys ceph_subsys_rbd_mirror #undef dout_prefix #define dout_prefix *_dout << "random-write: " namespace { const uint32_t NUM_THREADS = 8; const uint32_t MAX_IO_SIZE = 24576; const uint32_t MIN_IO_SIZE = 4; void usage() { std::cout << "usage: ceph_test_rbd_mirror_random_write [options...] \\" << std::endl; std::cout << " <pool> <image>" << std::endl; std::cout << std::endl; std::cout << " pool image pool" << std::endl; std::cout << " image image to write" << std::endl; std::cout << std::endl; std::cout << "options:\n"; std::cout << " -m monaddress[:port] connect to specified monitor\n"; std::cout << " --keyring=<path> path to keyring for local cluster\n"; std::cout << " --log-file=<logfile> file to log debug output\n"; std::cout << " --debug-rbd-mirror=<log-level>/<memory-level> set rbd-mirror debug level\n"; generic_server_usage(); } void rbd_bencher_completion(void *c, void *pc); struct rbd_bencher { librbd::Image *image; ceph::mutex lock = ceph::make_mutex("rbd_bencher::lock"); ceph::condition_variable cond; int in_flight; explicit rbd_bencher(librbd::Image *i) : image(i), in_flight(0) { } bool start_write(int max, uint64_t off, uint64_t len, bufferlist& bl, int op_flags) { { std::lock_guard l{lock}; if (in_flight >= max) return false; in_flight++; } librbd::RBD::AioCompletion *c = new librbd::RBD::AioCompletion((void *)this, rbd_bencher_completion); image->aio_write2(off, len, bl, c, op_flags); //cout << "start " << c << " at " << off << "~" << len << std::endl; return true; } void wait_for(int max) { using namespace std::chrono_literals; 
std::unique_lock l{lock}; while (in_flight > max) { cond.wait_for(l, 200ms); } } }; void rbd_bencher_completion(void *vc, void *pc) { librbd::RBD::AioCompletion *c = (librbd::RBD::AioCompletion *)vc; rbd_bencher *b = static_cast<rbd_bencher *>(pc); //cout << "complete " << c << std::endl; int ret = c->get_return_value(); if (ret != 0) { std::cout << "write error: " << cpp_strerror(ret) << std::endl; exit(ret < 0 ? -ret : ret); } b->lock.lock(); b->in_flight--; b->cond.notify_all(); b->lock.unlock(); c->release(); } void write_image(librbd::Image &image) { srand(time(NULL) % (unsigned long) -1); uint64_t max_io_bytes = MAX_IO_SIZE * 1024; bufferptr bp(max_io_bytes); memset(bp.c_str(), rand() & 0xff, bp.length()); bufferlist bl; bl.push_back(bp); uint64_t size = 0; image.size(&size); ceph_assert(size != 0); std::vector<uint64_t> thread_offset; uint64_t i; uint64_t start_pos; // disturb all thread's offset, used by seq write for (i = 0; i < NUM_THREADS; i++) { start_pos = (rand() % (size / max_io_bytes)) * max_io_bytes; thread_offset.push_back(start_pos); } uint64_t total_ios = 0; uint64_t total_bytes = 0; rbd_bencher b(&image); while (true) { b.wait_for(NUM_THREADS - 1); for (uint32_t i = 0; i < NUM_THREADS; ++i) { // mostly small writes with a small chance of large writes uint32_t io_modulo = MIN_IO_SIZE + 1; if (rand() % 30 == 0) { io_modulo += MAX_IO_SIZE; } uint32_t io_size = (((rand() % io_modulo) + MIN_IO_SIZE) * 1024); thread_offset[i] = (rand() % (size / io_size)) * io_size; if (!b.start_write(NUM_THREADS, thread_offset[i], io_size, bl, LIBRADOS_OP_FLAG_FADVISE_RANDOM)) { break; } ++i; ++total_ios; total_bytes += io_size; if (total_ios % 100 == 0) { std::cout << total_ios << " IOs, " << total_bytes << " bytes" << std::endl; } } } b.wait_for(0); } } // anonymous namespace int main(int argc, const char **argv) { auto args = argv_to_vec(argc, argv); if (args.empty()) { std::cerr << argv[0] << ": -h or --help for usage" << std::endl; exit(1); } if 
(ceph_argparse_need_usage(args)) { usage(); exit(0); } auto cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, CINIT_FLAG_NO_MON_CONFIG); if (args.size() < 2) { usage(); return EXIT_FAILURE; } std::string pool_name = args[0]; std::string image_name = args[1]; common_init_finish(g_ceph_context); dout(5) << "connecting to cluster" << dendl; librados::Rados rados; librados::IoCtx io_ctx; librbd::RBD rbd; librbd::Image image; int r = rados.init_with_context(g_ceph_context); if (r < 0) { derr << "could not initialize RADOS handle" << dendl; return EXIT_FAILURE; } r = rados.connect(); if (r < 0) { derr << "error connecting to local cluster" << dendl; return EXIT_FAILURE; } r = rados.ioctx_create(pool_name.c_str(), io_ctx); if (r < 0) { derr << "error finding local pool " << pool_name << ": " << cpp_strerror(r) << dendl; return EXIT_FAILURE; } r = rbd.open(io_ctx, image, image_name.c_str()); if (r < 0) { derr << "error opening image " << image_name << ": " << cpp_strerror(r) << dendl; return EXIT_FAILURE; } write_image(image); return EXIT_SUCCESS; }
5,727
26.146919
95
cc
null
ceph-main/src/test/rbd_mirror/test_ClusterWatcher.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "include/rados/librados.hpp" #include "common/Cond.h" #include "common/errno.h" #include "common/ceph_mutex.h" #include "librbd/internal.h" #include "librbd/api/Mirror.h" #include "tools/rbd_mirror/ClusterWatcher.h" #include "tools/rbd_mirror/ServiceDaemon.h" #include "tools/rbd_mirror/Types.h" #include "test/rbd_mirror/test_fixture.h" #include "test/librados/test_cxx.h" #include "test/librbd/test_support.h" #include "gtest/gtest.h" #include <boost/scope_exit.hpp> #include <iostream> #include <map> #include <memory> #include <set> using rbd::mirror::ClusterWatcher; using rbd::mirror::PeerSpec; using rbd::mirror::RadosRef; using std::map; using std::set; using std::string; void register_test_cluster_watcher() { } class TestClusterWatcher : public ::rbd::mirror::TestFixture { public: TestClusterWatcher() { m_cluster = std::make_shared<librados::Rados>(); EXPECT_EQ("", connect_cluster_pp(*m_cluster)); } ~TestClusterWatcher() override { m_cluster->wait_for_latest_osdmap(); for (auto& pool : m_pools) { EXPECT_EQ(0, m_cluster->pool_delete(pool.c_str())); } } void SetUp() override { TestFixture::SetUp(); m_service_daemon.reset(new rbd::mirror::ServiceDaemon<>(g_ceph_context, m_cluster, m_threads)); m_cluster_watcher.reset(new ClusterWatcher(m_cluster, m_lock, m_service_daemon.get())); } void TearDown() override { m_service_daemon.reset(); m_cluster_watcher.reset(); TestFixture::TearDown(); } void create_pool(bool enable_mirroring, const PeerSpec &peer, string *uuid = nullptr, string *name=nullptr) { string pool_name = get_temp_pool_name("test-rbd-mirror-"); ASSERT_EQ(0, m_cluster->pool_create(pool_name.c_str())); int64_t pool_id = m_cluster->pool_lookup(pool_name.c_str()); ASSERT_GE(pool_id, 0); librados::IoCtx ioctx; ASSERT_EQ(0, m_cluster->ioctx_create2(pool_id, ioctx)); ioctx.application_enable("rbd", true); m_pools.insert(pool_name); if (enable_mirroring) { 
ASSERT_EQ(0, librbd::api::Mirror<>::mode_set(ioctx, RBD_MIRROR_MODE_POOL)); std::string gen_uuid; ASSERT_EQ(0, librbd::api::Mirror<>::peer_site_add( ioctx, uuid != nullptr ? uuid : &gen_uuid, RBD_MIRROR_PEER_DIRECTION_RX_TX, peer.cluster_name, peer.client_name)); m_pool_peers[pool_id].insert(peer); } if (name != nullptr) { *name = pool_name; } } void delete_pool(const string &name, const PeerSpec &peer) { int64_t pool_id = m_cluster->pool_lookup(name.c_str()); ASSERT_GE(pool_id, 0); if (m_pool_peers.find(pool_id) != m_pool_peers.end()) { m_pool_peers[pool_id].erase(peer); if (m_pool_peers[pool_id].empty()) { m_pool_peers.erase(pool_id); } } m_pools.erase(name); ASSERT_EQ(0, m_cluster->pool_delete(name.c_str())); } void set_peer_config_key(const std::string& pool_name, const PeerSpec &peer) { int64_t pool_id = m_cluster->pool_lookup(pool_name.c_str()); ASSERT_GE(pool_id, 0); std::string json = "{" "\\\"mon_host\\\": \\\"" + peer.mon_host + "\\\", " "\\\"key\\\": \\\"" + peer.key + "\\\"" "}"; bufferlist in_bl; ASSERT_EQ(0, m_cluster->mon_command( "{" "\"prefix\": \"config-key set\"," "\"key\": \"" RBD_MIRROR_PEER_CONFIG_KEY_PREFIX + stringify(pool_id) + "/" + peer.uuid + "\"," "\"val\": \"" + json + "\"" + "}", in_bl, nullptr, nullptr)); } void create_cache_pool(const string &base_pool, string *cache_pool_name) { bufferlist inbl; *cache_pool_name = get_temp_pool_name("test-rbd-mirror-"); ASSERT_EQ(0, m_cluster->pool_create(cache_pool_name->c_str())); ASSERT_EQ(0, m_cluster->mon_command( "{\"prefix\": \"osd tier add\", \"pool\": \"" + base_pool + "\", \"tierpool\": \"" + *cache_pool_name + "\", \"force_nonempty\": \"--force-nonempty\" }", inbl, NULL, NULL)); ASSERT_EQ(0, m_cluster->mon_command( "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + base_pool + "\", \"overlaypool\": \"" + *cache_pool_name + "\"}", inbl, NULL, NULL)); ASSERT_EQ(0, m_cluster->mon_command( "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + *cache_pool_name + "\", \"mode\": 
\"writeback\"}", inbl, NULL, NULL)); m_cluster->wait_for_latest_osdmap(); } void remove_cache_pool(const string &base_pool, const string &cache_pool) { bufferlist inbl; // tear down tiers ASSERT_EQ(0, m_cluster->mon_command( "{\"prefix\": \"osd tier remove-overlay\", \"pool\": \"" + base_pool + "\"}", inbl, NULL, NULL)); ASSERT_EQ(0, m_cluster->mon_command( "{\"prefix\": \"osd tier remove\", \"pool\": \"" + base_pool + "\", \"tierpool\": \"" + cache_pool + "\"}", inbl, NULL, NULL)); m_cluster->wait_for_latest_osdmap(); m_cluster->pool_delete(cache_pool.c_str()); } void check_peers() { m_cluster_watcher->refresh_pools(); std::lock_guard l{m_lock}; ASSERT_EQ(m_pool_peers, m_cluster_watcher->get_pool_peers()); } RadosRef m_cluster; ceph::mutex m_lock = ceph::make_mutex("TestClusterWatcherLock"); std::unique_ptr<rbd::mirror::ServiceDaemon<>> m_service_daemon; std::unique_ptr<ClusterWatcher> m_cluster_watcher; set<string> m_pools; ClusterWatcher::PoolPeers m_pool_peers; }; TEST_F(TestClusterWatcher, NoPools) { check_peers(); } TEST_F(TestClusterWatcher, NoMirroredPools) { check_peers(); create_pool(false, PeerSpec()); check_peers(); create_pool(false, PeerSpec()); check_peers(); create_pool(false, PeerSpec()); check_peers(); } TEST_F(TestClusterWatcher, ReplicatedPools) { PeerSpec site1("", "site1", "mirror1"); PeerSpec site2("", "site2", "mirror2"); string first_pool, last_pool; check_peers(); create_pool(true, site1, &site1.uuid, &first_pool); check_peers(); create_pool(false, PeerSpec()); check_peers(); create_pool(false, PeerSpec()); check_peers(); create_pool(false, PeerSpec()); check_peers(); create_pool(true, site2, &site2.uuid); check_peers(); create_pool(true, site2, &site2.uuid); check_peers(); create_pool(true, site2, &site2.uuid, &last_pool); check_peers(); delete_pool(first_pool, site1); check_peers(); delete_pool(last_pool, site2); check_peers(); } TEST_F(TestClusterWatcher, CachePools) { PeerSpec site1("", "site1", "mirror1"); string base1, base2, cache1, 
cache2; create_pool(true, site1, &site1.uuid, &base1); check_peers(); create_cache_pool(base1, &cache1); BOOST_SCOPE_EXIT( base1, cache1, this_ ) { this_->remove_cache_pool(base1, cache1); } BOOST_SCOPE_EXIT_END; check_peers(); create_pool(false, PeerSpec(), nullptr, &base2); create_cache_pool(base2, &cache2); BOOST_SCOPE_EXIT( base2, cache2, this_ ) { this_->remove_cache_pool(base2, cache2); } BOOST_SCOPE_EXIT_END; check_peers(); } TEST_F(TestClusterWatcher, ConfigKey) { REQUIRE(!is_librados_test_stub(*m_cluster)); std::string pool_name; check_peers(); PeerSpec site1("", "site1", "mirror1"); create_pool(true, site1, &site1.uuid, &pool_name); check_peers(); PeerSpec site2("", "site2", "mirror2"); site2.mon_host = "abc"; site2.key = "xyz"; create_pool(false, site2, &site2.uuid); set_peer_config_key(pool_name, site2); check_peers(); } TEST_F(TestClusterWatcher, SiteName) { REQUIRE(!is_librados_test_stub(*m_cluster)); std::string site_name; librbd::RBD rbd; ASSERT_EQ(0, rbd.mirror_site_name_get(*m_cluster, &site_name)); m_cluster_watcher->refresh_pools(); std::lock_guard l{m_lock}; ASSERT_EQ(site_name, m_cluster_watcher->get_site_name()); }
8,113
29.503759
79
cc
null
ceph-main/src/test/rbd_mirror/test_ImageDeleter.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2016 SUSE LINUX GmbH
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */

#include "include/rados/librados.hpp"
#include "include/rbd/librbd.hpp"
#include "include/stringify.h"
#include "cls/rbd/cls_rbd_types.h"
#include "cls/rbd/cls_rbd_client.h"
#include "tools/rbd_mirror/ImageDeleter.h"
#include "tools/rbd_mirror/ServiceDaemon.h"
#include "tools/rbd_mirror/Threads.h"
#include "tools/rbd_mirror/Throttler.h"
#include "tools/rbd_mirror/Types.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Operations.h"
#include "librbd/Journal.h"
#include "librbd/internal.h"
#include "librbd/Utils.h"
#include "librbd/api/Image.h"
#include "librbd/api/Mirror.h"
#include "librbd/journal/DisabledPolicy.h"
#include "test/rbd_mirror/test_fixture.h"
#include "test/librados/test.h"
#include "gtest/gtest.h"

// Fixed global image ids used by the tests for the primary image and its
// clone; trash_move() and the deleter key off these ids.
#define GLOBAL_IMAGE_ID "global_image_id"
#define GLOBAL_CLONE_IMAGE_ID "global_image_id_clone"

#define dout_subsys ceph_subsys_rbd_mirror

using rbd::mirror::RadosRef;
using rbd::mirror::TestFixture;
using namespace librbd;
using cls::rbd::MirrorImageMode;
using cls::rbd::MirrorImageState;

// Registration hook for the test harness; intentionally empty -- linking this
// translation unit is what registers the gtest cases.
void register_test_rbd_mirror_image_deleter() {
}

// Fixture that creates a mirrored local image and an ImageDeleter instance so
// individual tests can exercise trash-move / deferred-deletion behaviour.
class TestImageDeleter : public TestFixture {
public:
  const std::string m_local_mirror_uuid = "local mirror uuid";
  const std::string m_remote_mirror_uuid = "remote mirror uuid";

  // Creates the throttler, service daemon and deleter, then creates a local
  // journal-mode mirrored image registered under GLOBAL_IMAGE_ID.
  void SetUp() override {
    TestFixture::SetUp();
    m_image_deletion_throttler.reset(
      new rbd::mirror::Throttler<>(g_ceph_context,
                                   "rbd_mirror_concurrent_image_deletions"));
    m_service_daemon.reset(new rbd::mirror::ServiceDaemon<>(g_ceph_context,
                                                            _rados,
                                                            m_threads));
    librbd::api::Mirror<>::mode_set(m_local_io_ctx, RBD_MIRROR_MODE_IMAGE);
    m_deleter = new rbd::mirror::ImageDeleter<>(
      m_local_io_ctx, m_threads, m_image_deletion_throttler.get(),
      m_service_daemon.get());

    m_local_image_id = librbd::util::generate_image_id(m_local_io_ctx);
    librbd::ImageOptions image_opts;
    image_opts.set(RBD_IMAGE_OPTION_FEATURES, RBD_FEATURES_ALL);
    EXPECT_EQ(0, librbd::create(m_local_io_ctx, m_image_name,
                                m_local_image_id, 1 << 20, image_opts,
                                GLOBAL_IMAGE_ID, m_remote_mirror_uuid, true));

    // Mark the image as mirror-enabled so the deleter treats it as managed.
    cls::rbd::MirrorImage mirror_image(
      MirrorImageMode::MIRROR_IMAGE_MODE_JOURNAL, GLOBAL_IMAGE_ID,
      MirrorImageState::MIRROR_IMAGE_STATE_ENABLED);
    EXPECT_EQ(0, cls_client::mirror_image_set(&m_local_io_ctx,
                                              m_local_image_id,
                                              mirror_image));
  }

  // Removes the test image (if a test has not already deleted it) and shuts
  // the deleter down before tearing down the base fixture.
  void TearDown() override {
    remove_image();

    C_SaferCond ctx;
    m_deleter->shut_down(&ctx);
    ctx.wait();

    delete m_deleter;
    m_service_daemon.reset();

    TestFixture::TearDown();
  }

  // Starts the deleter's background processing; tests that exercise "live"
  // behaviour call this explicitly.
  void init_image_deleter() {
    C_SaferCond ctx;
    m_deleter->init(&ctx);
    ASSERT_EQ(0, ctx.wait());
  }

  // Best-effort cleanup: re-enable mirroring, promote, then remove the local
  // image. -ENOENT is tolerated everywhere because a test may already have
  // deleted the image (EXPECT_EQ(1, ...) asserts the disjunction is true).
  void remove_image() {
    cls::rbd::MirrorImage mirror_image;
    int r = cls_client::mirror_image_get(&m_local_io_ctx, m_local_image_id,
                                         &mirror_image);
    EXPECT_EQ(1, r == 0 || r == -ENOENT);
    if (r != -ENOENT) {
      mirror_image.state = MirrorImageState::MIRROR_IMAGE_STATE_ENABLED;
      EXPECT_EQ(0, cls_client::mirror_image_set(&m_local_io_ctx,
                                                m_local_image_id,
                                                mirror_image));
    }
    promote_image();
    NoOpProgressContext ctx;
    r = librbd::api::Image<>::remove(m_local_io_ctx, m_image_name, ctx);
    EXPECT_EQ(1, r == 0 || r == -ENOENT);
  }

  // Force-promotes the image to primary. If no ImageCtx is supplied one is
  // opened (and closed) locally; missing image (-ENOENT) and already-primary
  // (-EINVAL) are tolerated.
  void promote_image(ImageCtx *ictx=nullptr) {
    bool close = false;
    int r = 0;
    if (!ictx) {
      ictx = new ImageCtx("", m_local_image_id, "", m_local_io_ctx, false);
      r = ictx->state->open(0);
      close = (r == 0);
    }

    EXPECT_EQ(1, r == 0 || r == -ENOENT);
    if (r == 0) {
      int r2 = librbd::api::Mirror<>::image_promote(ictx, true);
      EXPECT_EQ(1, r2 == 0 || r2 == -EINVAL);
    }

    if (close) {
      EXPECT_EQ(0, ictx->state->close());
    }
  }

  // Demotes the image to non-primary; unlike promote_image() the open and the
  // demotion are both required to succeed.
  void demote_image(ImageCtx *ictx=nullptr) {
    bool close = false;
    if (!ictx) {
      ictx = new ImageCtx("", m_local_image_id, "", m_local_io_ctx, false);
      EXPECT_EQ(0, ictx->state->open(0));
      close = true;
    }

    EXPECT_EQ(0, librbd::api::Mirror<>::image_demote(ictx));

    if (close) {
      EXPECT_EQ(0, ictx->state->close());
    }
  }

  // Creates a (optionally protected) user snapshot on the local image.
  // Journaling is disabled via DisabledPolicy so the snap ops don't require a
  // journal replay.
  void create_snapshot(std::string snap_name="snap1", bool protect=false) {
    ImageCtx *ictx = new ImageCtx("", m_local_image_id, "", m_local_io_ctx,
                                  false);
    EXPECT_EQ(0, ictx->state->open(0));
    {
      std::unique_lock image_locker{ictx->image_lock};
      ictx->set_journal_policy(new librbd::journal::DisabledPolicy());
    }

    librbd::NoOpProgressContext prog_ctx;
    EXPECT_EQ(0, ictx->operations->snap_create(
                   cls::rbd::UserSnapshotNamespace(), snap_name, 0,
                   prog_ctx));

    if (protect) {
      EXPECT_EQ(0, ictx->operations->snap_protect(
                     cls::rbd::UserSnapshotNamespace(), snap_name));
    }

    EXPECT_EQ(0, ictx->state->close());
  }

  // Creates a protected snapshot "snap1" on the local image and clones it to
  // a new mirror-enabled image registered under GLOBAL_CLONE_IMAGE_ID.
  // Returns the clone's image id.
  std::string create_clone() {
    ImageCtx *ictx = new ImageCtx("", m_local_image_id, "", m_local_io_ctx,
                                  false);
    EXPECT_EQ(0, ictx->state->open(0));
    {
      std::unique_lock image_locker{ictx->image_lock};
      ictx->set_journal_policy(new librbd::journal::DisabledPolicy());
    }

    librbd::NoOpProgressContext prog_ctx;
    EXPECT_EQ(0, ictx->operations->snap_create(
                   cls::rbd::UserSnapshotNamespace(), "snap1", 0, prog_ctx));
    EXPECT_EQ(0, ictx->operations->snap_protect(
                   cls::rbd::UserSnapshotNamespace(), "snap1"));
    EXPECT_EQ(0, librbd::api::Image<>::snap_set(
                   ictx, cls::rbd::UserSnapshotNamespace(), "snap1"));

    std::string clone_id = librbd::util::generate_image_id(m_local_io_ctx);
    librbd::ImageOptions clone_opts;
    clone_opts.set(RBD_IMAGE_OPTION_FEATURES, ictx->features);
    EXPECT_EQ(0, librbd::clone(m_local_io_ctx, m_local_image_id.c_str(),
                               nullptr, "snap1", m_local_io_ctx,
                               clone_id.c_str(), "clone1", clone_opts,
                               GLOBAL_CLONE_IMAGE_ID, m_remote_mirror_uuid));

    cls::rbd::MirrorImage mirror_image(
      MirrorImageMode::MIRROR_IMAGE_MODE_JOURNAL, GLOBAL_CLONE_IMAGE_ID,
      MirrorImageState::MIRROR_IMAGE_STATE_ENABLED);
    EXPECT_EQ(0, cls_client::mirror_image_set(&m_local_io_ctx, clone_id,
                                              mirror_image));
    EXPECT_EQ(0, ictx->state->close());
    return clone_id;
  }

  // Asserts both the image header and its mirror registration are gone.
  void check_image_deleted() {
    ImageCtx *ictx = new ImageCtx("", m_local_image_id, "", m_local_io_ctx,
                                  false);
    EXPECT_EQ(-ENOENT, ictx->state->open(0));

    cls::rbd::MirrorImage mirror_image;
    EXPECT_EQ(-ENOENT, cls_client::mirror_image_get(&m_local_io_ctx,
                                                    m_local_image_id,
                                                    &mirror_image));
  }

  // Schedules the image identified by global_image_id for trash-based
  // deletion and returns the operation's completion code.
  int trash_move(const std::string& global_image_id) {
    C_SaferCond ctx;
    rbd::mirror::ImageDeleter<>::trash_move(m_local_io_ctx, global_image_id,
                                            true, m_threads->work_queue,
                                            &ctx);
    return ctx.wait();
  }

  librbd::RBD rbd;

  std::string m_local_image_id;                    // id of the image created in SetUp
  std::unique_ptr<rbd::mirror::Throttler<>> m_image_deletion_throttler;
  std::unique_ptr<rbd::mirror::ServiceDaemon<>> m_service_daemon;
  rbd::mirror::ImageDeleter<> *m_deleter;          // owned; deleted in TearDown
};

// An image already trash-moved before init() must be picked up when the
// deleter starts.
TEST_F(TestImageDeleter, ExistingTrashMove) {
  ASSERT_EQ(0, trash_move(GLOBAL_IMAGE_ID));

  C_SaferCond ctx;
  m_deleter->wait_for_deletion(m_local_image_id, false, &ctx);
  init_image_deleter();
  ASSERT_EQ(0, ctx.wait());
}

// A trash-move issued while the deleter is already running is processed.
TEST_F(TestImageDeleter, LiveTrashMove) {
  init_image_deleter();

  C_SaferCond ctx;
  m_deleter->wait_for_deletion(m_local_image_id, false, &ctx);

  ASSERT_EQ(0, trash_move(GLOBAL_IMAGE_ID));
  ASSERT_EQ(0, ctx.wait());
}

// Unprotected snapshots must not block deletion.
TEST_F(TestImageDeleter, Delete_Image_With_Snapshots) {
  init_image_deleter();

  create_snapshot("snap1");
  create_snapshot("snap2");

  C_SaferCond ctx;
  m_deleter->wait_for_deletion(m_local_image_id, false, &ctx);
  ASSERT_EQ(0, trash_move(GLOBAL_IMAGE_ID));
  EXPECT_EQ(0, ctx.wait());

  ASSERT_EQ(0u, m_deleter->get_delete_queue_items().size());
  ASSERT_EQ(0u, m_deleter->get_failed_queue_items().size());
}

// Protected (but un-cloned) snapshots must not block deletion either.
TEST_F(TestImageDeleter, Delete_Image_With_ProtectedSnapshots) {
  init_image_deleter();

  create_snapshot("snap1", true);
  create_snapshot("snap2", true);

  C_SaferCond ctx;
  m_deleter->wait_for_deletion(m_local_image_id, false, &ctx);
  ASSERT_EQ(0, trash_move(GLOBAL_IMAGE_ID));
  EXPECT_EQ(0, ctx.wait());

  ASSERT_EQ(0u, m_deleter->get_delete_queue_items().size());
  ASSERT_EQ(0u, m_deleter->get_failed_queue_items().size());
}
// A parent image with a dependent clone cannot be removed until the clone is
// gone: the first deletion attempt must report -EBUSY, and a retry after the
// clone has been deleted must succeed, leaving both deleter queues empty.
TEST_F(TestImageDeleter, Delete_Image_With_Clone) {
  init_image_deleter();

  std::string cloned_image_id = create_clone();

  // Shorten the busy-retry interval so the test doesn't wait long between
  // deletion attempts.
  m_deleter->set_busy_timer_interval(0.1);

  // Attempt to delete the parent while its clone still exists -> -EBUSY.
  C_SaferCond parent_busy_ctx;
  m_deleter->wait_for_deletion(m_local_image_id, false, &parent_busy_ctx);
  ASSERT_EQ(0, trash_move(GLOBAL_IMAGE_ID));
  EXPECT_EQ(-EBUSY, parent_busy_ctx.wait());

  // Delete the clone; this one has no dependents and must succeed.
  C_SaferCond clone_delete_ctx;
  m_deleter->wait_for_deletion(cloned_image_id, false, &clone_delete_ctx);
  ASSERT_EQ(0, trash_move(GLOBAL_CLONE_IMAGE_ID));
  EXPECT_EQ(0, clone_delete_ctx.wait());

  // With the clone gone the parent's scheduled deletion can now complete.
  C_SaferCond parent_retry_ctx;
  m_deleter->wait_for_deletion(m_local_image_id, true, &parent_retry_ctx);
  EXPECT_EQ(0, parent_retry_ctx.wait());

  // Nothing should remain queued or failed.
  ASSERT_EQ(0u, m_deleter->get_delete_queue_items().size());
  ASSERT_EQ(0u, m_deleter->get_failed_queue_items().size());
}
10,210
31.519108
80
cc
null
ceph-main/src/test/rbd_mirror/test_ImageReplayer.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph distributed storage system * * Copyright (C) 2016 Mirantis Inc * * Author: Mykola Golub <mgolub@mirantis.com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * */ #include "include/rados/librados.hpp" #include "include/rbd/librbd.hpp" #include "include/stringify.h" #include "test/librbd/test_support.h" #include "test/rbd_mirror/test_fixture.h" #include "cls/journal/cls_journal_types.h" #include "cls/journal/cls_journal_client.h" #include "cls/rbd/cls_rbd_types.h" #include "cls/rbd/cls_rbd_client.h" #include "journal/Journaler.h" #include "librbd/ExclusiveLock.h" #include "librbd/ImageCtx.h" #include "librbd/ImageState.h" #include "librbd/Journal.h" #include "librbd/Operations.h" #include "librbd/Utils.h" #include "librbd/internal.h" #include "librbd/api/Io.h" #include "librbd/api/Mirror.h" #include "librbd/api/Snapshot.h" #include "librbd/io/AioCompletion.h" #include "librbd/io/ReadResult.h" #include "tools/rbd_mirror/ImageReplayer.h" #include "tools/rbd_mirror/InstanceWatcher.h" #include "tools/rbd_mirror/MirrorStatusUpdater.h" #include "tools/rbd_mirror/PoolMetaCache.h" #include "tools/rbd_mirror/Threads.h" #include "tools/rbd_mirror/Throttler.h" #include "tools/rbd_mirror/Types.h" #include "test/librados/test_cxx.h" #include "gtest/gtest.h" void register_test_rbd_mirror() { } #define TEST_IO_SIZE 512 #define TEST_IO_COUNT 11 namespace rbd { namespace mirror { template <typename T> class TestImageReplayer : public TestFixture { public: static const cls::rbd::MirrorImageMode MIRROR_IMAGE_MODE = T::MIRROR_IMAGE_MODE; static const uint64_t FEATURES = T::FEATURES; struct C_WatchCtx : public librados::WatchCtx2 { TestImageReplayer *test; std::string oid; 
ceph::mutex lock = ceph::make_mutex("C_WatchCtx::lock"); ceph::condition_variable cond; bool notified; C_WatchCtx(TestImageReplayer *test, const std::string &oid) : test(test), oid(oid), notified(false) { } void handle_notify(uint64_t notify_id, uint64_t cookie, uint64_t notifier_id, bufferlist& bl_) override { bufferlist bl; test->m_remote_ioctx.notify_ack(oid, notify_id, cookie, bl); std::lock_guard locker{lock}; notified = true; cond.notify_all(); } void handle_error(uint64_t cookie, int err) override { ASSERT_EQ(0, err); } }; TestImageReplayer() : m_local_cluster(new librados::Rados()), m_watch_handle(0) { EXPECT_EQ("", connect_cluster_pp(*m_local_cluster.get())); EXPECT_EQ(0, m_local_cluster->conf_set("rbd_cache", "false")); EXPECT_EQ(0, m_local_cluster->conf_set("rbd_mirror_journal_poll_age", "1")); EXPECT_EQ(0, m_local_cluster->conf_set("rbd_mirror_journal_commit_age", "0.1")); m_local_pool_name = get_temp_pool_name(); EXPECT_EQ(0, m_local_cluster->pool_create(m_local_pool_name.c_str())); EXPECT_EQ(0, m_local_cluster->ioctx_create(m_local_pool_name.c_str(), m_local_ioctx)); m_local_ioctx.application_enable("rbd", true); EXPECT_EQ("", connect_cluster_pp(m_remote_cluster)); EXPECT_EQ(0, m_remote_cluster.conf_set("rbd_cache", "false")); m_remote_pool_name = get_temp_pool_name(); EXPECT_EQ(0, m_remote_cluster.pool_create(m_remote_pool_name.c_str())); m_remote_pool_id = m_remote_cluster.pool_lookup(m_remote_pool_name.c_str()); EXPECT_GE(m_remote_pool_id, 0); EXPECT_EQ(0, m_remote_cluster.ioctx_create(m_remote_pool_name.c_str(), m_remote_ioctx)); m_remote_ioctx.application_enable("rbd", true); // make snap id debugging easier when local/remote have different mappings uint64_t snap_id; EXPECT_EQ(0, m_remote_ioctx.selfmanaged_snap_create(&snap_id)); uint64_t features = FEATURES; if (MIRROR_IMAGE_MODE == cls::rbd::MIRROR_IMAGE_MODE_JOURNAL) { EXPECT_EQ(0, librbd::api::Mirror<>::mode_set(m_remote_ioctx, RBD_MIRROR_MODE_POOL)); EXPECT_EQ(0, 
librbd::api::Mirror<>::mode_set(m_local_ioctx, RBD_MIRROR_MODE_POOL)); } else { EXPECT_EQ(0, librbd::api::Mirror<>::mode_set(m_remote_ioctx, RBD_MIRROR_MODE_IMAGE)); EXPECT_EQ(0, librbd::api::Mirror<>::mode_set(m_local_ioctx, RBD_MIRROR_MODE_IMAGE)); uuid_d uuid_gen; uuid_gen.generate_random(); std::string remote_peer_uuid = uuid_gen.to_string(); EXPECT_EQ(0, librbd::cls_client::mirror_peer_add( &m_remote_ioctx, {remote_peer_uuid, cls::rbd::MIRROR_PEER_DIRECTION_RX_TX, "siteA", "client", m_local_mirror_uuid})); m_pool_meta_cache.set_remote_pool_meta( m_remote_ioctx.get_id(), {m_remote_mirror_uuid, remote_peer_uuid}); } EXPECT_EQ(0, librbd::api::Mirror<>::uuid_get(m_remote_ioctx, &m_remote_mirror_uuid)); EXPECT_EQ(0, librbd::api::Mirror<>::uuid_get(m_local_ioctx, &m_local_mirror_uuid)); m_image_name = get_temp_image_name(); int order = 0; EXPECT_EQ(0, librbd::create(m_remote_ioctx, m_image_name.c_str(), 1 << 22, false, features, &order, 0, 0)); if (MIRROR_IMAGE_MODE != cls::rbd::MIRROR_IMAGE_MODE_JOURNAL) { librbd::ImageCtx* remote_image_ctx; open_remote_image(&remote_image_ctx); EXPECT_EQ(0, librbd::api::Mirror<>::image_enable( remote_image_ctx, static_cast<rbd_mirror_image_mode_t>(MIRROR_IMAGE_MODE), false)); close_image(remote_image_ctx); } m_remote_image_id = get_image_id(m_remote_ioctx, m_image_name); m_global_image_id = get_global_image_id(m_remote_ioctx, m_remote_image_id); auto cct = reinterpret_cast<CephContext*>(m_local_ioctx.cct()); m_threads.reset(new Threads<>(m_local_cluster)); m_image_sync_throttler.reset(new Throttler<>( cct, "rbd_mirror_concurrent_image_syncs")); m_instance_watcher = InstanceWatcher<>::create( m_local_ioctx, *m_threads->asio_engine, nullptr, m_image_sync_throttler.get()); m_instance_watcher->handle_acquire_leader(); EXPECT_EQ(0, m_local_ioctx.create(RBD_MIRRORING, false)); m_local_status_updater = MirrorStatusUpdater<>::create( m_local_ioctx, m_threads.get(), ""); C_SaferCond status_updater_ctx; 
m_local_status_updater->init(&status_updater_ctx); EXPECT_EQ(0, status_updater_ctx.wait()); } ~TestImageReplayer() override { unwatch(); m_instance_watcher->handle_release_leader(); delete m_replayer; delete m_instance_watcher; C_SaferCond status_updater_ctx; m_local_status_updater->shut_down(&status_updater_ctx); EXPECT_EQ(0, status_updater_ctx.wait()); delete m_local_status_updater; EXPECT_EQ(0, m_remote_cluster.pool_delete(m_remote_pool_name.c_str())); EXPECT_EQ(0, m_local_cluster->pool_delete(m_local_pool_name.c_str())); } void create_replayer() { m_replayer = new ImageReplayer<>(m_local_ioctx, m_local_mirror_uuid, m_global_image_id, m_threads.get(), m_instance_watcher, m_local_status_updater, nullptr, &m_pool_meta_cache); m_replayer->add_peer({"peer uuid", m_remote_ioctx, {m_remote_mirror_uuid, "remote mirror peer uuid"}, nullptr}); } void start() { C_SaferCond cond; m_replayer->start(&cond); ASSERT_EQ(0, cond.wait()); create_watch_ctx(); } void create_watch_ctx() { std::string oid; if (MIRROR_IMAGE_MODE == cls::rbd::MIRROR_IMAGE_MODE_JOURNAL) { oid = ::journal::Journaler::header_oid(m_remote_image_id); } else { oid = librbd::util::header_name(m_remote_image_id); } ASSERT_EQ(0U, m_watch_handle); ASSERT_TRUE(m_watch_ctx == nullptr); m_watch_ctx = new C_WatchCtx(this, oid); ASSERT_EQ(0, m_remote_ioctx.watch2(oid, &m_watch_handle, m_watch_ctx)); } void unwatch() { if (m_watch_handle != 0) { m_remote_ioctx.unwatch2(m_watch_handle); delete m_watch_ctx; m_watch_ctx = nullptr; m_watch_handle = 0; } } void stop() { unwatch(); C_SaferCond cond; m_replayer->stop(&cond); ASSERT_EQ(0, cond.wait()); } void bootstrap() { create_replayer(); start(); wait_for_replay_complete(); stop(); } std::string get_temp_image_name() { return "image" + stringify(++_image_number); } std::string get_image_id(librados::IoCtx &ioctx, const std::string &image_name) { std::string obj = librbd::util::id_obj_name(image_name); std::string id; EXPECT_EQ(0, librbd::cls_client::get_id(&ioctx, obj, 
&id)); return id; } std::string get_global_image_id(librados::IoCtx& io_ctx, const std::string& image_id) { cls::rbd::MirrorImage mirror_image; EXPECT_EQ(0, librbd::cls_client::mirror_image_get(&io_ctx, image_id, &mirror_image)); return mirror_image.global_image_id; } void open_image(librados::IoCtx &ioctx, const std::string &image_name, bool readonly, librbd::ImageCtx **ictxp) { librbd::ImageCtx *ictx = new librbd::ImageCtx(image_name.c_str(), "", "", ioctx, readonly); EXPECT_EQ(0, ictx->state->open(0)); *ictxp = ictx; } void open_local_image(librbd::ImageCtx **ictxp) { open_image(m_local_ioctx, m_image_name, true, ictxp); } void open_remote_image(librbd::ImageCtx **ictxp) { open_image(m_remote_ioctx, m_image_name, false, ictxp); } void close_image(librbd::ImageCtx *ictx) { ictx->state->close(); } void get_commit_positions(cls::journal::ObjectPosition *master_position, cls::journal::ObjectPosition *mirror_position) { std::string master_client_id = ""; std::string mirror_client_id = m_local_mirror_uuid; m_replayer->flush(); C_SaferCond cond; uint64_t minimum_set; uint64_t active_set; std::set<cls::journal::Client> registered_clients; std::string oid = ::journal::Journaler::header_oid(m_remote_image_id); cls::journal::client::get_mutable_metadata(m_remote_ioctx, oid, &minimum_set, &active_set, &registered_clients, &cond); ASSERT_EQ(0, cond.wait()); *master_position = cls::journal::ObjectPosition(); *mirror_position = cls::journal::ObjectPosition(); std::set<cls::journal::Client>::const_iterator c; for (c = registered_clients.begin(); c != registered_clients.end(); ++c) { std::cout << __func__ << ": client: " << *c << std::endl; if (c->state != cls::journal::CLIENT_STATE_CONNECTED) { continue; } cls::journal::ObjectPositions object_positions = c->commit_position.object_positions; cls::journal::ObjectPositions::const_iterator p = object_positions.begin(); if (p != object_positions.end()) { if (c->id == master_client_id) { ASSERT_EQ(cls::journal::ObjectPosition(), 
*master_position); *master_position = *p; } else if (c->id == mirror_client_id) { ASSERT_EQ(cls::journal::ObjectPosition(), *mirror_position); *mirror_position = *p; } } } } bool wait_for_watcher_notify(int seconds) { if (m_watch_handle == 0) { return false; } std::unique_lock locker{m_watch_ctx->lock}; while (!m_watch_ctx->notified) { if (m_watch_ctx->cond.wait_for(locker, std::chrono::seconds(seconds)) == std::cv_status::timeout) { return false; } } m_watch_ctx->notified = false; return true; } int get_last_mirror_snapshot(librados::IoCtx& io_ctx, const std::string& image_id, uint64_t* mirror_snap_id, cls::rbd::MirrorSnapshotNamespace* mirror_ns) { auto header_oid = librbd::util::header_name(image_id); ::SnapContext snapc; int r = librbd::cls_client::get_snapcontext(&io_ctx, header_oid, &snapc); if (r < 0) { return r; } // stored in reverse order for (auto snap_id : snapc.snaps) { cls::rbd::SnapshotInfo snap_info; r = librbd::cls_client::snapshot_get(&io_ctx, header_oid, snap_id, &snap_info); if (r < 0) { return r; } auto ns = std::get_if<cls::rbd::MirrorSnapshotNamespace>( &snap_info.snapshot_namespace); if (ns != nullptr) { *mirror_snap_id = snap_id; *mirror_ns = *ns; return 0; } } return -ENOENT; } void wait_for_journal_synced() { cls::journal::ObjectPosition master_position; cls::journal::ObjectPosition mirror_position; for (int i = 0; i < 100; i++) { get_commit_positions(&master_position, &mirror_position); if (master_position == mirror_position) { break; } wait_for_watcher_notify(1); } ASSERT_EQ(master_position, mirror_position); } void wait_for_snapshot_synced() { uint64_t remote_snap_id = CEPH_NOSNAP; cls::rbd::MirrorSnapshotNamespace remote_mirror_ns; ASSERT_EQ(0, get_last_mirror_snapshot(m_remote_ioctx, m_remote_image_id, &remote_snap_id, &remote_mirror_ns)); std::cout << "remote_snap_id=" << remote_snap_id << std::endl; std::string local_image_id; ASSERT_EQ(0, librbd::cls_client::mirror_image_get_image_id( &m_local_ioctx, m_global_image_id, 
&local_image_id)); uint64_t local_snap_id = CEPH_NOSNAP; cls::rbd::MirrorSnapshotNamespace local_mirror_ns; for (int i = 0; i < 100; i++) { int r = get_last_mirror_snapshot(m_local_ioctx, local_image_id, &local_snap_id, &local_mirror_ns); if (r == 0 && ((remote_mirror_ns.state == cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY && local_mirror_ns.state == cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY) || (remote_mirror_ns.state == cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY_DEMOTED && local_mirror_ns.state == cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY_DEMOTED)) && local_mirror_ns.primary_mirror_uuid == m_remote_mirror_uuid && local_mirror_ns.primary_snap_id == remote_snap_id && local_mirror_ns.complete) { std::cout << "local_snap_id=" << local_snap_id << ", " << "local_snap_ns=" << local_mirror_ns << std::endl; return; } wait_for_watcher_notify(1); } ADD_FAILURE() << "failed to locate matching snapshot: " << "remote_snap_id=" << remote_snap_id << ", " << "remote_snap_ns=" << remote_mirror_ns << ", " << "local_snap_id=" << local_snap_id << ", " << "local_snap_ns=" << local_mirror_ns; } void wait_for_replay_complete() { if (MIRROR_IMAGE_MODE == cls::rbd::MIRROR_IMAGE_MODE_JOURNAL) { wait_for_journal_synced(); } else { wait_for_snapshot_synced(); } } void wait_for_stopped() { for (int i = 0; i < 100; i++) { if (m_replayer->is_stopped()) { break; } wait_for_watcher_notify(1); } ASSERT_TRUE(m_replayer->is_stopped()); } void write_test_data(librbd::ImageCtx *ictx, const char *test_data, off_t off, size_t len) { size_t written; bufferlist bl; bl.append(std::string(test_data, len)); written = librbd::api::Io<>::write(*ictx, off, len, std::move(bl), 0); printf("wrote: %d\n", (int)written); ASSERT_EQ(len, written); } void read_test_data(librbd::ImageCtx *ictx, const char *expected, off_t off, size_t len) { ssize_t read; char *result = (char *)malloc(len + 1); ASSERT_NE(static_cast<char *>(NULL), result); read = librbd::api::Io<>::read( *ictx, off, len, librbd::io::ReadResult{result, len}, 
0); printf("read: %d\n", (int)read); ASSERT_EQ(len, static_cast<size_t>(read)); result[len] = '\0'; if (memcmp(result, expected, len)) { printf("read: %s\nexpected: %s\n", result, expected); ASSERT_EQ(0, memcmp(result, expected, len)); } free(result); } void generate_test_data() { for (int i = 0; i < TEST_IO_SIZE; ++i) { m_test_data[i] = (char) (rand() % (126 - 33) + 33); } m_test_data[TEST_IO_SIZE] = '\0'; } void flush(librbd::ImageCtx *ictx) { C_SaferCond aio_flush_ctx; auto c = librbd::io::AioCompletion::create(&aio_flush_ctx); c->get(); librbd::api::Io<>::aio_flush(*ictx, c, true); ASSERT_EQ(0, c->wait_for_complete()); c->put(); if (MIRROR_IMAGE_MODE == cls::rbd::MIRROR_IMAGE_MODE_JOURNAL) { C_SaferCond journal_flush_ctx; ictx->journal->flush_commit_position(&journal_flush_ctx); ASSERT_EQ(0, journal_flush_ctx.wait()); } else { uint64_t snap_id = CEPH_NOSNAP; ASSERT_EQ(0, librbd::api::Mirror<>::image_snapshot_create( ictx, 0, &snap_id)); } printf("flushed\n"); } static int _image_number; PoolMetaCache m_pool_meta_cache{g_ceph_context}; std::shared_ptr<librados::Rados> m_local_cluster; std::unique_ptr<Threads<>> m_threads; std::unique_ptr<Throttler<>> m_image_sync_throttler; librados::Rados m_remote_cluster; InstanceWatcher<> *m_instance_watcher; MirrorStatusUpdater<> *m_local_status_updater; std::string m_local_mirror_uuid = "local mirror uuid"; std::string m_remote_mirror_uuid = "remote mirror uuid"; std::string m_local_pool_name, m_remote_pool_name; librados::IoCtx m_local_ioctx, m_remote_ioctx; std::string m_image_name; int64_t m_remote_pool_id; std::string m_remote_image_id; std::string m_global_image_id; ImageReplayer<> *m_replayer = nullptr; C_WatchCtx *m_watch_ctx = nullptr; uint64_t m_watch_handle = 0; char m_test_data[TEST_IO_SIZE + 1]; std::string m_journal_commit_age; }; template <typename T> int TestImageReplayer<T>::_image_number; template <cls::rbd::MirrorImageMode _mirror_image_mode, uint64_t _features> class TestImageReplayerParams { public: 
  static const cls::rbd::MirrorImageMode MIRROR_IMAGE_MODE = _mirror_image_mode;
  static const uint64_t FEATURES = _features;
};

// Test matrix: journal-based mirroring with a rich feature mask, plus
// snapshot-based mirroring across several feature-mask combinations.
typedef ::testing::Types<TestImageReplayerParams<
    cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, 125>,
  TestImageReplayerParams<
    cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, 1>,
  TestImageReplayerParams<
    cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, 5>,
  TestImageReplayerParams<
    cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, 61>,
  TestImageReplayerParams<
    cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, 125>> TestImageReplayerTypes;

TYPED_TEST_SUITE(TestImageReplayer, TestImageReplayerTypes);

TYPED_TEST(TestImageReplayer, Bootstrap)
{
  this->bootstrap();
}

// Journal-mode-only fixture alias, used by TEST_F cases that exercise
// journal-specific behavior.
typedef TestImageReplayer<TestImageReplayerParams<
  cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, 125>> TestImageReplayerJournal;

// Bootstrap must fail with -EEXIST when an unrelated local image already
// occupies the target image name.
TYPED_TEST(TestImageReplayer, BootstrapErrorLocalImageExists)
{
  int order = 0;
  EXPECT_EQ(0, librbd::create(this->m_local_ioctx, this->m_image_name.c_str(),
                              1 << 22, false, 0, &order, 0, 0));

  this->create_replayer();
  C_SaferCond cond;
  this->m_replayer->start(&cond);
  ASSERT_EQ(-EEXIST, cond.wait());
}

// Bootstrap must fail with -ENOENT when the remote journal is missing.
TEST_F(TestImageReplayerJournal, BootstrapErrorNoJournal)
{
  ASSERT_EQ(0, librbd::Journal<>::remove(this->m_remote_ioctx,
                                         this->m_remote_image_id));

  this->create_replayer();
  C_SaferCond cond;
  this->m_replayer->start(&cond);
  ASSERT_EQ(-ENOENT, cond.wait());
}

// Bootstrap must fail with -ENOENT when mirroring is disabled on the
// remote image.
TYPED_TEST(TestImageReplayer, BootstrapErrorMirrorDisabled)
{
  // disable remote image mirroring
  ASSERT_EQ(0, librbd::api::Mirror<>::mode_set(this->m_remote_ioctx,
                                               RBD_MIRROR_MODE_IMAGE));
  librbd::ImageCtx *ictx;
  this->open_remote_image(&ictx);
  ASSERT_EQ(0, librbd::api::Mirror<>::image_disable(ictx, true));
  this->close_image(ictx);

  this->create_replayer();
  C_SaferCond cond;
  this->m_replayer->start(&cond);
  ASSERT_EQ(-ENOENT, cond.wait());
}

// Bootstrap must fail (and the replayer stop) when the remote image is in
// the transient DISABLING mirroring state.
TYPED_TEST(TestImageReplayer, BootstrapMirrorDisabling)
{
  // set remote image mirroring state to DISABLING
  if (gtest_TypeParam_::MIRROR_IMAGE_MODE ==
        cls::rbd::MIRROR_IMAGE_MODE_JOURNAL) {
    ASSERT_EQ(0,
              librbd::api::Mirror<>::mode_set(this->m_remote_ioctx,
                                              RBD_MIRROR_MODE_IMAGE));
    librbd::ImageCtx *ictx;
    this->open_remote_image(&ictx);
    ASSERT_EQ(0, librbd::api::Mirror<>::image_enable(
                   ictx, RBD_MIRROR_IMAGE_MODE_JOURNAL, false));
    this->close_image(ictx);
  }
  cls::rbd::MirrorImage mirror_image;
  ASSERT_EQ(0, librbd::cls_client::mirror_image_get(&this->m_remote_ioctx,
                                                    this->m_remote_image_id,
                                                    &mirror_image));
  mirror_image.state = cls::rbd::MirrorImageState::MIRROR_IMAGE_STATE_DISABLING;
  ASSERT_EQ(0, librbd::cls_client::mirror_image_set(&this->m_remote_ioctx,
                                                    this->m_remote_image_id,
                                                    mirror_image));

  this->create_replayer();
  C_SaferCond cond;
  this->m_replayer->start(&cond);
  ASSERT_EQ(-ENOENT, cond.wait());
  ASSERT_TRUE(this->m_replayer->is_stopped());
}

// Bootstrap against a demoted (non-primary) remote image must fail with
// -EREMOTEIO and leave the replayer stopped.
TYPED_TEST(TestImageReplayer, BootstrapDemoted)
{
  // demote remote image
  librbd::ImageCtx *ictx;
  this->open_remote_image(&ictx);
  ASSERT_EQ(0, librbd::api::Mirror<>::image_demote(ictx));
  this->close_image(ictx);

  this->create_replayer();
  C_SaferCond cond;
  this->m_replayer->start(&cond);
  ASSERT_EQ(-EREMOTEIO, cond.wait());
  ASSERT_TRUE(this->m_replayer->is_stopped());
}

// stop() issued immediately after start(): start may complete or be
// canceled depending on timing, but stop must always succeed.
TYPED_TEST(TestImageReplayer, StartInterrupted)
{
  this->create_replayer();
  C_SaferCond start_cond, stop_cond;
  this->m_replayer->start(&start_cond);
  this->m_replayer->stop(&stop_cond);
  int r = start_cond.wait();
  printf("start returned %d\n", r);
  // TODO: improve the test to avoid this race
  ASSERT_TRUE(r == -ECANCELED || r == 0);
  ASSERT_EQ(0, stop_cond.wait());
}

// A reset (recreated) remote journal must still be bootstrappable.
TEST_F(TestImageReplayerJournal, JournalReset)
{
  this->bootstrap();
  delete this->m_replayer;

  ASSERT_EQ(0, librbd::Journal<>::reset(this->m_remote_ioctx,
                                        this->m_remote_image_id));

  // try to recover
  this->bootstrap();
}

// Replay start after journaling was disabled on the remote image.
TEST_F(TestImageReplayerJournal, ErrorNoJournal)
{
  this->bootstrap();

  // disable remote journal journaling
  // (reset before disabling, so it does not fail with EBUSY)
  ASSERT_EQ(0, librbd::Journal<>::reset(this->m_remote_ioctx,
                                        this->m_remote_image_id));
  librbd::ImageCtx *ictx;
  this->open_remote_image(&ictx);
  uint64_t features;
  ASSERT_EQ(0, librbd::get_features(ictx, &features));
  ASSERT_EQ(0, ictx->operations->update_features(RBD_FEATURE_JOURNALING,
                                                 false));
  this->close_image(ictx);

  C_SaferCond cond;
  this->m_replayer->start(&cond);
  ASSERT_EQ(0, cond.wait());
}

// Minimal start/replay/stop round trip.
TYPED_TEST(TestImageReplayer, StartStop)
{
  this->bootstrap();

  this->start();
  this->wait_for_replay_complete();
  this->stop();
}

// Data written before replay starts must appear on the local image.
TYPED_TEST(TestImageReplayer, WriteAndStartReplay)
{
  this->bootstrap();

  // Write to remote image and start replay
  librbd::ImageCtx *ictx;

  this->generate_test_data();
  this->open_remote_image(&ictx);
  for (int i = 0; i < TEST_IO_COUNT; ++i) {
    this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
                          TEST_IO_SIZE);
  }
  this->flush(ictx);
  this->close_image(ictx);

  this->start();
  this->wait_for_replay_complete();
  this->stop();

  this->open_local_image(&ictx);
  for (int i = 0; i < TEST_IO_COUNT; ++i) {
    this->read_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
                         TEST_IO_SIZE);
  }
  this->close_image(ictx);
}

// Data written while replay is running must appear on the local image.
TYPED_TEST(TestImageReplayer, StartReplayAndWrite)
{
  this->bootstrap();

  // Start replay and write to remote image
  librbd::ImageCtx *ictx;

  this->start();

  this->generate_test_data();
  this->open_remote_image(&ictx);
  for (int i = 0; i < TEST_IO_COUNT; ++i) {
    this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
                          TEST_IO_SIZE);
  }
  this->flush(ictx);
  this->wait_for_replay_complete();

  for (int i = TEST_IO_COUNT; i < 2 * TEST_IO_COUNT; ++i) {
    this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
                          TEST_IO_SIZE);
  }
  this->flush(ictx);
  this->close_image(ictx);
  this->wait_for_replay_complete();

  this->open_local_image(&ictx);
  for (int i = 0; i < 2 * TEST_IO_COUNT; ++i) {
    this->read_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
                         TEST_IO_SIZE);
  }
  this->close_image(ictx);

  this->stop();
}

// Reopening the remote image between write batches forces a journal tag
// switch; replay must follow across tags.
TEST_F(TestImageReplayerJournal, NextTag)
{
  this->bootstrap();

  // write, reopen, and write again to test switch to the next tag
  librbd::ImageCtx *ictx;
  this->start();

  this->generate_test_data();

  const int N = 10;

  for (int j = 0; j < N; j++) {
    this->open_remote_image(&ictx);
    for (int i = j * TEST_IO_COUNT; i < (j + 1) * TEST_IO_COUNT; ++i) {
      this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
                            TEST_IO_SIZE);
    }
    this->close_image(ictx);
  }

  this->wait_for_replay_complete();

  this->open_local_image(&ictx);
  for (int i = 0; i < N * TEST_IO_COUNT; ++i) {
    this->read_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
                         TEST_IO_SIZE);
  }
  this->close_image(ictx);

  this->stop();
}

// Requesting an image resync while replaying must stop the replayer; a
// subsequent start resynchronizes and replays all data.
TYPED_TEST(TestImageReplayer, Resync)
{
  this->bootstrap();

  librbd::ImageCtx *ictx;

  this->start();

  this->generate_test_data();
  this->open_remote_image(&ictx);
  for (int i = 0; i < TEST_IO_COUNT; ++i) {
    this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
                          TEST_IO_SIZE);
  }
  this->flush(ictx);
  this->wait_for_replay_complete();

  for (int i = TEST_IO_COUNT; i < 2 * TEST_IO_COUNT; ++i) {
    this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
                          TEST_IO_SIZE);
  }
  this->flush(ictx);
  this->close_image(ictx);

  this->open_local_image(&ictx);
  EXPECT_EQ(0, librbd::api::Mirror<>::image_resync(ictx));
  this->close_image(ictx);

  this->wait_for_stopped();

  C_SaferCond cond;
  this->m_replayer->start(&cond);
  ASSERT_EQ(0, cond.wait());

  ASSERT_TRUE(this->m_replayer->is_replaying());
  this->wait_for_replay_complete();

  this->open_local_image(&ictx);
  for (int i = 0; i < 2 * TEST_IO_COUNT; ++i) {
    this->read_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
                         TEST_IO_SIZE);
  }
  this->close_image(ictx);

  this->stop();
}

// Resync requested while the replayer is stopped: the first start after the
// request performs the resync (and reports stopped); the next start replays.
TYPED_TEST(TestImageReplayer, Resync_While_Stop)
{
  this->bootstrap();

  this->start();

  this->generate_test_data();

  librbd::ImageCtx *ictx;
  this->open_remote_image(&ictx);
  for (int i = 0; i < TEST_IO_COUNT; ++i) {
    this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
                          TEST_IO_SIZE);
  }
  this->flush(ictx);
  this->wait_for_replay_complete();

  for (int i = TEST_IO_COUNT; i < 2 * TEST_IO_COUNT; ++i) {
    this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
                          TEST_IO_SIZE);
  }
  this->flush(ictx);
  this->close_image(ictx);
  this->wait_for_replay_complete();

  C_SaferCond cond;
  this->m_replayer->stop(&cond);
  ASSERT_EQ(0, cond.wait());

  this->open_local_image(&ictx);
  EXPECT_EQ(0, librbd::api::Mirror<>::image_resync(ictx));
  this->close_image(ictx);

  C_SaferCond cond2;
  this->m_replayer->start(&cond2);
  ASSERT_EQ(0, cond2.wait());

  ASSERT_TRUE(this->m_replayer->is_stopped());

  C_SaferCond cond3;
  this->m_replayer->start(&cond3);
  ASSERT_EQ(0, cond3.wait());

  ASSERT_TRUE(this->m_replayer->is_replaying());
  this->wait_for_replay_complete();

  this->open_local_image(&ictx);
  for (int i = 0; i < 2 * TEST_IO_COUNT; ++i) {
    this->read_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
                         TEST_IO_SIZE);
  }
  this->close_image(ictx);

  this->stop();
}

// Resync requested before the replayer ever started: the first start
// performs the resync, the second resumes replication.
TYPED_TEST(TestImageReplayer, Resync_StartInterrupted)
{
  this->bootstrap();

  librbd::ImageCtx *ictx;
  this->open_local_image(&ictx);
  EXPECT_EQ(0, librbd::api::Mirror<>::image_resync(ictx));
  this->close_image(ictx);

  C_SaferCond cond;
  this->m_replayer->start(&cond);
  ASSERT_EQ(0, cond.wait());

  ASSERT_TRUE(this->m_replayer->is_stopped());

  C_SaferCond cond2;
  this->m_replayer->start(&cond2);
  ASSERT_EQ(0, cond2.wait());

  this->create_watch_ctx();

  ASSERT_TRUE(this->m_replayer->is_replaying());

  this->generate_test_data();
  this->open_remote_image(&ictx);
  for (int i = 0; i < TEST_IO_COUNT; ++i) {
    this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
                          TEST_IO_SIZE);
  }
  this->flush(ictx);
  this->wait_for_replay_complete();

  for (int i = TEST_IO_COUNT; i < 2 * TEST_IO_COUNT; ++i) {
    this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
                          TEST_IO_SIZE);
  }
  this->flush(ictx);
  this->close_image(ictx);
  this->wait_for_replay_complete();

  this->open_local_image(&ictx);
  for (int i = 0; i < 2 * TEST_IO_COUNT; ++i) {
    this->read_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
                         TEST_IO_SIZE);
  }
  this->close_image(ictx);

  this->stop();
}

TEST_F(TestImageReplayerJournal, MultipleReplayFailures_SingleEpoch) {
  this->bootstrap();

  // inject a snapshot that cannot be unprotected
  librbd::ImageCtx *ictx;
  this->open_image(this->m_local_ioctx, this->m_image_name, false, &ictx);
  // Locally strip the journaling flag so the ops below are not journaled.
  ictx->features &= ~RBD_FEATURE_JOURNALING;
  librbd::NoOpProgressContext prog_ctx;
  ASSERT_EQ(0, ictx->operations->snap_create(cls::rbd::UserSnapshotNamespace(),
                                             "foo", 0, prog_ctx));
  ASSERT_EQ(0, ictx->operations->snap_protect(cls::rbd::UserSnapshotNamespace(),
                                              "foo"));
  // Registering a fake child makes the snapshot unprotect fail during replay.
  ASSERT_EQ(0, librbd::cls_client::add_child(&ictx->md_ctx, RBD_CHILDREN,
                                             {ictx->md_ctx.get_id(), "",
                                              ictx->id,
                                              ictx->snap_ids[{cls::rbd::UserSnapshotNamespace(),
                                                              "foo"}]},
                                             "dummy child id"));
  this->close_image(ictx);

  // race failed op shut down with new ops
  this->open_remote_image(&ictx);
  for (uint64_t i = 0; i < 10; ++i) {
    std::shared_lock owner_locker{ictx->owner_lock};
    C_SaferCond request_lock;
    ictx->exclusive_lock->acquire_lock(&request_lock);
    ASSERT_EQ(0, request_lock.wait());

    // Append and commit a SnapUnprotect op event which the local replay will
    // fail to apply (the dummy child blocks unprotect).
    C_SaferCond append_ctx;
    ictx->journal->append_op_event(
      i,
      librbd::journal::EventEntry{
        librbd::journal::SnapUnprotectEvent{i,
                                            cls::rbd::UserSnapshotNamespace(),
                                            "foo"}},
      &append_ctx);
    ASSERT_EQ(0, append_ctx.wait());

    C_SaferCond commit_ctx;
    ictx->journal->commit_op_event(i, 0, &commit_ctx);
    ASSERT_EQ(0, commit_ctx.wait());

    C_SaferCond release_ctx;
    ictx->exclusive_lock->release_lock(&release_ctx);
    ASSERT_EQ(0, release_ctx.wait());
  }

  // Each start must fail and stop cleanly, repeatedly.
  for (uint64_t i = 0; i < 5; ++i) {
    this->start();
    this->wait_for_stopped();
    this->unwatch();
  }
  this->close_image(ictx);
}

TEST_F(TestImageReplayerJournal, MultipleReplayFailures_MultiEpoch) {
  this->bootstrap();

  // inject a snapshot that cannot be unprotected
  librbd::ImageCtx *ictx;
  this->open_image(this->m_local_ioctx, this->m_image_name, false, &ictx);
  ictx->features &= ~RBD_FEATURE_JOURNALING;
  librbd::NoOpProgressContext prog_ctx;
  ASSERT_EQ(0, ictx->operations->snap_create(cls::rbd::UserSnapshotNamespace(),
                                             "foo", 0, prog_ctx));
  ASSERT_EQ(0, ictx->operations->snap_protect(cls::rbd::UserSnapshotNamespace(),
                                              "foo"));
  ASSERT_EQ(0,
            librbd::cls_client::add_child(&ictx->md_ctx, RBD_CHILDREN,
                                          {ictx->md_ctx.get_id(), "",
                                           ictx->id,
                                           ictx->snap_ids[{cls::rbd::UserSnapshotNamespace(),
                                                           "foo"}]},
                                          "dummy child id"));
  this->close_image(ictx);

  // race failed op shut down with new tag flush
  this->open_remote_image(&ictx);
  {
    std::shared_lock owner_locker{ictx->owner_lock};
    C_SaferCond request_lock;
    ictx->exclusive_lock->acquire_lock(&request_lock);
    ASSERT_EQ(0, request_lock.wait());

    C_SaferCond append_ctx;
    ictx->journal->append_op_event(
      1U,
      librbd::journal::EventEntry{
        librbd::journal::SnapUnprotectEvent{1U,
                                            cls::rbd::UserSnapshotNamespace(),
                                            "foo"}},
      &append_ctx);
    ASSERT_EQ(0, append_ctx.wait());

    C_SaferCond commit_ctx;
    ictx->journal->commit_op_event(1U, 0, &commit_ctx);
    ASSERT_EQ(0, commit_ctx.wait());

    C_SaferCond release_ctx;
    ictx->exclusive_lock->release_lock(&release_ctx);
    ASSERT_EQ(0, release_ctx.wait());
  }

  // Additional data I/O creates a new epoch after the poisoned op event.
  this->generate_test_data();
  this->write_test_data(ictx, this->m_test_data, 0, TEST_IO_SIZE);

  for (uint64_t i = 0; i < 5; ++i) {
    this->start();
    this->wait_for_stopped();
    this->unwatch();
  }
  this->close_image(ictx);
}

// Behavior around a disconnected journal client: start fails with
// -ENOTCONN until a resync is requested (or automatic resync is enabled).
TEST_F(TestImageReplayerJournal, Disconnect)
{
  this->bootstrap();

  // Make sure rbd_mirroring_resync_after_disconnect is not set
  EXPECT_EQ(0, this->m_local_cluster->conf_set(
                 "rbd_mirroring_resync_after_disconnect", "false"));

  // Test start fails if disconnected

  librbd::ImageCtx *ictx;

  this->generate_test_data();
  this->open_remote_image(&ictx);
  for (int i = 0; i < TEST_IO_COUNT; ++i) {
    this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
                          TEST_IO_SIZE);
  }
  this->flush(ictx);
  this->close_image(ictx);

  std::string oid = ::journal::Journaler::header_oid(this->m_remote_image_id);
  ASSERT_EQ(0,
            cls::journal::client::client_update_state(this->m_remote_ioctx,
              oid, this->m_local_mirror_uuid,
              cls::journal::CLIENT_STATE_DISCONNECTED));

  C_SaferCond cond1;
  this->m_replayer->start(&cond1);
  ASSERT_EQ(-ENOTCONN, cond1.wait());

  // Test start succeeds after resync
  this->open_local_image(&ictx);
  librbd::Journal<>::request_resync(ictx);
  this->close_image(ictx);
  C_SaferCond cond2;
  this->m_replayer->start(&cond2);
  ASSERT_EQ(0, cond2.wait());

  this->start();
  this->wait_for_replay_complete();

  // Test replay stopped after disconnect

  this->open_remote_image(&ictx);
  for (int i = TEST_IO_COUNT; i < 2 * TEST_IO_COUNT; ++i) {
    this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
                          TEST_IO_SIZE);
  }
  this->flush(ictx);
  this->close_image(ictx);

  ASSERT_EQ(0,
            cls::journal::client::client_update_state(this->m_remote_ioctx,
              oid, this->m_local_mirror_uuid,
              cls::journal::CLIENT_STATE_DISCONNECTED));
  // Notify watchers of the journal header so the replayer observes the
  // disconnected client state.
  bufferlist bl;
  ASSERT_EQ(0, this->m_remote_ioctx.notify2(oid, bl, 5000, NULL));

  this->wait_for_stopped();

  // Test start fails after disconnect

  C_SaferCond cond3;
  this->m_replayer->start(&cond3);
  ASSERT_EQ(-ENOTCONN, cond3.wait());
  C_SaferCond cond4;
  this->m_replayer->start(&cond4);
  ASSERT_EQ(-ENOTCONN, cond4.wait());

  // Test automatic resync if rbd_mirroring_resync_after_disconnect is set

  EXPECT_EQ(0, this->m_local_cluster->conf_set(
                 "rbd_mirroring_resync_after_disconnect", "true"));

  // Resync is flagged on first start attempt
  C_SaferCond cond5;
  this->m_replayer->start(&cond5);
  ASSERT_EQ(-ENOTCONN, cond5.wait());

  C_SaferCond cond6;
  this->m_replayer->start(&cond6);
  ASSERT_EQ(0, cond6.wait());

  this->wait_for_replay_complete();

  this->stop();
}

// Feature enable/disable on the remote image must be mirrored; a failed
// update_features must not break subsequent replication.
TEST_F(TestImageReplayerJournal, UpdateFeatures)
{
  // TODO add support to snapshot-based mirroring
  const uint64_t FEATURES_TO_UPDATE =
    RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF;

  uint64_t features;
  librbd::ImageCtx *ictx;

  // Make sure the features we will update are disabled initially
  this->open_remote_image(&ictx);
  ASSERT_EQ(0, librbd::get_features(ictx, &features));
  features &= FEATURES_TO_UPDATE;
  if (features) {
    ASSERT_EQ(0, ictx->operations->update_features(FEATURES_TO_UPDATE,
                                                   false));
  }
  ASSERT_EQ(0, librbd::get_features(ictx, &features));
  ASSERT_EQ(0U, features & FEATURES_TO_UPDATE);
  this->close_image(ictx);

  this->bootstrap();

  this->open_remote_image(&ictx);
  ASSERT_EQ(0, librbd::get_features(ictx, &features));
  ASSERT_EQ(0U, features & FEATURES_TO_UPDATE);
  this->close_image(ictx);

  this->open_local_image(&ictx);
  ASSERT_EQ(0, librbd::get_features(ictx, &features));
  ASSERT_EQ(0U, features & FEATURES_TO_UPDATE);
  this->close_image(ictx);

  // Start replay and update features

  this->start();

  this->open_remote_image(&ictx);
  ASSERT_EQ(0, ictx->operations->update_features(FEATURES_TO_UPDATE,
                                                 true));
  ASSERT_EQ(0, librbd::get_features(ictx, &features));
  ASSERT_EQ(FEATURES_TO_UPDATE, features & FEATURES_TO_UPDATE);
  this->close_image(ictx);

  this->wait_for_replay_complete();

  this->open_local_image(&ictx);
  ASSERT_EQ(0, librbd::get_features(ictx, &features));
  ASSERT_EQ(FEATURES_TO_UPDATE, features & FEATURES_TO_UPDATE);
  this->close_image(ictx);

  this->open_remote_image(&ictx);
  ASSERT_EQ(0, ictx->operations->update_features(FEATURES_TO_UPDATE,
                                                 false));
  ASSERT_EQ(0, librbd::get_features(ictx, &features));
  ASSERT_EQ(0U, features & FEATURES_TO_UPDATE);
  this->close_image(ictx);

  this->wait_for_replay_complete();

  this->open_local_image(&ictx);
  ASSERT_EQ(0, librbd::get_features(ictx, &features));
  ASSERT_EQ(0U, features & FEATURES_TO_UPDATE);
  this->close_image(ictx);

  // Test update_features error does not stop replication

  this->open_remote_image(&ictx);
  ASSERT_EQ(0, librbd::get_features(ictx, &features));
  ASSERT_NE(0U, features & RBD_FEATURE_EXCLUSIVE_LOCK);
  // Exclusive-lock cannot be disabled while journaling depends on it.
  ASSERT_EQ(-EINVAL, ictx->operations->update_features(RBD_FEATURE_EXCLUSIVE_LOCK,
                                                       false));
  this->generate_test_data();
  for (int i = 0; i < TEST_IO_COUNT; ++i) {
    this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
                          TEST_IO_SIZE);
  }
  this->flush(ictx);
  this->close_image(ictx);

  this->wait_for_replay_complete();

  this->open_local_image(&ictx);
  for (int i = 0; i < TEST_IO_COUNT; ++i) {
    this->read_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
                         TEST_IO_SIZE);
  }
  this->close_image(ictx);

  this->stop();
}

TEST_F(TestImageReplayerJournal, MetadataSetRemove) {
  // TODO add support to snapshot-based mirroring
  const std::string KEY = "test_key";
  const std::string VALUE = "test_value";

  librbd::ImageCtx *ictx;
  std::string value;

  this->bootstrap();

  this->start();

  // Test metadata_set replication

  this->open_remote_image(&ictx);
  ASSERT_EQ(0, ictx->operations->metadata_set(KEY, VALUE));
  value.clear();
  ASSERT_EQ(0, librbd::metadata_get(ictx, KEY, &value));
  ASSERT_EQ(VALUE, value);
  this->close_image(ictx);

  this->wait_for_replay_complete();

  this->open_local_image(&ictx);
  value.clear();
  ASSERT_EQ(0, librbd::metadata_get(ictx, KEY, &value));
  ASSERT_EQ(VALUE, value);
  this->close_image(ictx);

  // Test metadata_remove replication

  this->open_remote_image(&ictx);
  ASSERT_EQ(0, ictx->operations->metadata_remove(KEY));
  ASSERT_EQ(-ENOENT, librbd::metadata_get(ictx, KEY, &value));
  this->close_image(ictx);

  this->wait_for_replay_complete();

  this->open_local_image(&ictx);
  ASSERT_EQ(-ENOENT, librbd::metadata_get(ictx, KEY, &value));
  this->close_image(ictx);

  this->stop();
}

// rbd_mirroring_replay_delay must defer local application of remote writes,
// and a stop/start cycle during the delay must not lose the delay semantics.
TEST_F(TestImageReplayerJournal, MirroringDelay)
{
  // TODO add support to snapshot-based mirroring
  const double DELAY = 10; // set less than wait_for_replay_complete timeout

  librbd::ImageCtx *ictx;
  utime_t start_time;
  double delay;

  this->bootstrap();

  ASSERT_EQ(0, this->m_local_cluster->conf_set("rbd_mirroring_replay_delay",
                                               stringify(DELAY).c_str()));
  this->open_local_image(&ictx);
  ASSERT_EQ(DELAY, ictx->mirroring_replay_delay);
  this->close_image(ictx);

  this->start();

  // Test delay

  this->generate_test_data();
  this->open_remote_image(&ictx);
  start_time = ceph_clock_now();
  for (int i = 0; i < TEST_IO_COUNT; ++i) {
    this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
                          TEST_IO_SIZE);
  }
  this->flush(ictx);
  this->close_image(ictx);

  this->wait_for_replay_complete();
  delay = ceph_clock_now() - start_time;
  ASSERT_GE(delay, DELAY);

  // Test stop when delaying replay

  this->open_remote_image(&ictx);
  start_time = ceph_clock_now();
  for (int i = 0; i < TEST_IO_COUNT; ++i) {
    this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
                          TEST_IO_SIZE);
  }
  this->close_image(ictx);

  sleep(DELAY / 2);
  this->stop();
  this->start();

  this->wait_for_replay_complete();
  delay = ceph_clock_now() - start_time;
  ASSERT_GE(delay, DELAY);

  this->stop();
}

// Renaming the remote image must be mirrored to the local image.
TYPED_TEST(TestImageReplayer, ImageRename) {
  this->create_replayer();
  this->start();

  librbd::ImageCtx* remote_image_ctx = nullptr;
  this->open_remote_image(&remote_image_ctx);
  auto image_name = this->get_temp_image_name();
  ASSERT_EQ(0, remote_image_ctx->operations->rename(image_name.c_str()));
  this->flush(remote_image_ctx);

  this->wait_for_replay_complete();

  librbd::ImageCtx* local_image_ctx = nullptr;
  this->open_image(this->m_local_ioctx, image_name, true, &local_image_ctx);
  ASSERT_EQ(image_name, local_image_ctx->name);

  this->close_image(local_image_ctx);
  this->close_image(remote_image_ctx);
  this->stop();
}

// Feature toggles on the remote image must propagate (typed, both modes).
TYPED_TEST(TestImageReplayer, UpdateFeatures) {
  const uint64_t FEATURES_TO_UPDATE =
    RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF | RBD_FEATURE_DEEP_FLATTEN;
  REQUIRE((this->FEATURES & FEATURES_TO_UPDATE) == FEATURES_TO_UPDATE);

  librbd::ImageCtx* remote_image_ctx = nullptr;
  this->open_remote_image(&remote_image_ctx);
  ASSERT_EQ(0, remote_image_ctx->operations->update_features(
    (RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF), false));
  this->flush(remote_image_ctx);

  this->create_replayer();
  this->start();
  this->wait_for_replay_complete();

  librbd::ImageCtx* local_image_ctx = nullptr;
  this->open_local_image(&local_image_ctx);
  ASSERT_EQ(0U, local_image_ctx->features & (
    RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF));

  // enable object-map/fast-diff
  ASSERT_EQ(0, remote_image_ctx->operations->update_features(
    (RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF), true));
  this->flush(remote_image_ctx);
  this->wait_for_replay_complete();

  ASSERT_EQ(0, local_image_ctx->state->refresh());
  ASSERT_EQ(RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF,
            local_image_ctx->features & (
              RBD_FEATURE_OBJECT_MAP |
RBD_FEATURE_FAST_DIFF));

  // disable deep-flatten
  ASSERT_EQ(0, remote_image_ctx->operations->update_features(
    RBD_FEATURE_DEEP_FLATTEN, false));
  this->flush(remote_image_ctx);
  this->wait_for_replay_complete();

  ASSERT_EQ(0, local_image_ctx->state->refresh());
  ASSERT_EQ(0, local_image_ctx->features & RBD_FEATURE_DEEP_FLATTEN);

  this->close_image(local_image_ctx);
  this->close_image(remote_image_ctx);
  this->stop();
}

// Unprotecting a remote snapshot must be mirrored to the local snapshot.
TYPED_TEST(TestImageReplayer, SnapshotUnprotect) {
  librbd::ImageCtx* remote_image_ctx = nullptr;
  this->open_remote_image(&remote_image_ctx);

  // create a protected snapshot
  librbd::NoOpProgressContext prog_ctx;
  ASSERT_EQ(0, remote_image_ctx->operations->snap_create(
    cls::rbd::UserSnapshotNamespace{}, "snap1", 0, prog_ctx));
  ASSERT_EQ(0, remote_image_ctx->operations->snap_protect(
    cls::rbd::UserSnapshotNamespace{}, "snap1"));
  this->flush(remote_image_ctx);

  this->create_replayer();
  this->start();
  this->wait_for_replay_complete();

  librbd::ImageCtx* local_image_ctx = nullptr;
  this->open_local_image(&local_image_ctx);
  auto local_snap_id_it = local_image_ctx->snap_ids.find({
    {cls::rbd::UserSnapshotNamespace{}}, "snap1"});
  ASSERT_NE(local_image_ctx->snap_ids.end(), local_snap_id_it);
  auto local_snap_id = local_snap_id_it->second;
  auto local_snap_info_it = local_image_ctx->snap_info.find(local_snap_id);
  ASSERT_NE(local_image_ctx->snap_info.end(), local_snap_info_it);
  ASSERT_EQ(RBD_PROTECTION_STATUS_PROTECTED,
            local_snap_info_it->second.protection_status);

  // unprotect the snapshot
  ASSERT_EQ(0, remote_image_ctx->operations->snap_unprotect(
    cls::rbd::UserSnapshotNamespace{}, "snap1"));
  this->flush(remote_image_ctx);
  this->wait_for_replay_complete();

  ASSERT_EQ(0, local_image_ctx->state->refresh());
  local_snap_info_it = local_image_ctx->snap_info.find(local_snap_id);
  ASSERT_NE(local_image_ctx->snap_info.end(), local_snap_info_it);
  ASSERT_EQ(RBD_PROTECTION_STATUS_UNPROTECTED,
            local_snap_info_it->second.protection_status);

  this->close_image(local_image_ctx);
  this->close_image(remote_image_ctx);
  this->stop();
}

// Protecting a remote snapshot must be mirrored to the local snapshot.
TYPED_TEST(TestImageReplayer, SnapshotProtect) {
  librbd::ImageCtx* remote_image_ctx = nullptr;
  this->open_remote_image(&remote_image_ctx);

  // create an unprotected snapshot
  librbd::NoOpProgressContext prog_ctx;
  ASSERT_EQ(0, remote_image_ctx->operations->snap_create(
    cls::rbd::UserSnapshotNamespace{}, "snap1", 0, prog_ctx));
  this->flush(remote_image_ctx);

  this->create_replayer();
  this->start();
  this->wait_for_replay_complete();

  librbd::ImageCtx* local_image_ctx = nullptr;
  this->open_local_image(&local_image_ctx);
  auto local_snap_id_it = local_image_ctx->snap_ids.find({
    {cls::rbd::UserSnapshotNamespace{}}, "snap1"});
  ASSERT_NE(local_image_ctx->snap_ids.end(), local_snap_id_it);
  auto local_snap_id = local_snap_id_it->second;
  auto local_snap_info_it = local_image_ctx->snap_info.find(local_snap_id);
  ASSERT_NE(local_image_ctx->snap_info.end(), local_snap_info_it);
  ASSERT_EQ(RBD_PROTECTION_STATUS_UNPROTECTED,
            local_snap_info_it->second.protection_status);

  // protect the snapshot
  ASSERT_EQ(0, remote_image_ctx->operations->snap_protect(
    cls::rbd::UserSnapshotNamespace{}, "snap1"));
  this->flush(remote_image_ctx);
  this->wait_for_replay_complete();

  ASSERT_EQ(0, local_image_ctx->state->refresh());
  local_snap_info_it = local_image_ctx->snap_info.find(local_snap_id);
  ASSERT_NE(local_image_ctx->snap_info.end(), local_snap_info_it);
  ASSERT_EQ(RBD_PROTECTION_STATUS_PROTECTED,
            local_snap_info_it->second.protection_status);

  this->close_image(local_image_ctx);
  this->close_image(remote_image_ctx);
  this->stop();
}

// Removing a remote snapshot must remove the mirrored local snapshot.
TYPED_TEST(TestImageReplayer, SnapshotRemove) {
  librbd::ImageCtx* remote_image_ctx = nullptr;
  this->open_remote_image(&remote_image_ctx);

  // create a user snapshot
  librbd::NoOpProgressContext prog_ctx;
  ASSERT_EQ(0, remote_image_ctx->operations->snap_create(
    cls::rbd::UserSnapshotNamespace{}, "snap1", 0, prog_ctx));
  this->flush(remote_image_ctx);

  this->create_replayer();
  this->start();
  this->wait_for_replay_complete();

  librbd::ImageCtx* local_image_ctx = nullptr;
  this->open_local_image(&local_image_ctx);
  auto local_snap_id_it = local_image_ctx->snap_ids.find({
    {cls::rbd::UserSnapshotNamespace{}}, "snap1"});
  ASSERT_NE(local_image_ctx->snap_ids.end(), local_snap_id_it);

  // remove the snapshot
  ASSERT_EQ(0, remote_image_ctx->operations->snap_remove(
    cls::rbd::UserSnapshotNamespace{}, "snap1"));
  this->flush(remote_image_ctx);
  this->wait_for_replay_complete();

  ASSERT_EQ(0, local_image_ctx->state->refresh());
  local_snap_id_it = local_image_ctx->snap_ids.find({
    {cls::rbd::UserSnapshotNamespace{}}, "snap1"});
  ASSERT_EQ(local_image_ctx->snap_ids.end(), local_snap_id_it);

  this->close_image(local_image_ctx);
  this->close_image(remote_image_ctx);
  this->stop();
}

// Renaming a remote snapshot must rename the mirrored local snapshot.
TYPED_TEST(TestImageReplayer, SnapshotRename) {
  librbd::ImageCtx* remote_image_ctx = nullptr;
  this->open_remote_image(&remote_image_ctx);

  // create a user snapshot
  librbd::NoOpProgressContext prog_ctx;
  ASSERT_EQ(0, remote_image_ctx->operations->snap_create(
    cls::rbd::UserSnapshotNamespace{}, "snap1", 0, prog_ctx));
  this->flush(remote_image_ctx);

  this->create_replayer();
  this->start();
  this->wait_for_replay_complete();

  librbd::ImageCtx* local_image_ctx = nullptr;
  this->open_local_image(&local_image_ctx);
  auto local_snap_id_it = local_image_ctx->snap_ids.find({
    {cls::rbd::UserSnapshotNamespace{}}, "snap1"});
  ASSERT_NE(local_image_ctx->snap_ids.end(), local_snap_id_it);
  auto local_snap_id = local_snap_id_it->second;
  auto local_snap_info_it = local_image_ctx->snap_info.find(local_snap_id);
  ASSERT_NE(local_image_ctx->snap_info.end(), local_snap_info_it);
  ASSERT_EQ(RBD_PROTECTION_STATUS_UNPROTECTED,
            local_snap_info_it->second.protection_status);

  // rename the snapshot
  ASSERT_EQ(0, remote_image_ctx->operations->snap_rename(
    "snap1", "snap1-renamed"));
  this->flush(remote_image_ctx);
  this->wait_for_replay_complete();

  ASSERT_EQ(0, local_image_ctx->state->refresh());
  local_snap_info_it = local_image_ctx->snap_info.find(local_snap_id);
  ASSERT_NE(local_image_ctx->snap_info.end(), local_snap_info_it);
  ASSERT_EQ("snap1-renamed", local_snap_info_it->second.name);

  this->close_image(local_image_ctx);
  this->close_image(remote_image_ctx);
  this->stop();
}

// Changes to the remote snapshot limit must be mirrored.
TYPED_TEST(TestImageReplayer, SnapshotLimit) {
  librbd::ImageCtx* remote_image_ctx = nullptr;
  this->open_remote_image(&remote_image_ctx);

  this->create_replayer();
  this->start();
  this->wait_for_replay_complete();

  // update the snap limit
  ASSERT_EQ(0, librbd::api::Snapshot<>::set_limit(remote_image_ctx, 123U));
  this->flush(remote_image_ctx);
  this->wait_for_replay_complete();

  librbd::ImageCtx* local_image_ctx = nullptr;
  this->open_local_image(&local_image_ctx);
  uint64_t local_snap_limit;
  ASSERT_EQ(0, librbd::api::Snapshot<>::get_limit(local_image_ctx,
                                                  &local_snap_limit));
  ASSERT_EQ(123U, local_snap_limit);

  // update the limit again
  ASSERT_EQ(0, librbd::api::Snapshot<>::set_limit(
    remote_image_ctx, std::numeric_limits<uint64_t>::max()));
  this->flush(remote_image_ctx);
  this->wait_for_replay_complete();

  ASSERT_EQ(0, librbd::api::Snapshot<>::get_limit(local_image_ctx,
                                                  &local_snap_limit));
  ASSERT_EQ(std::numeric_limits<uint64_t>::max(), local_snap_limit);

  this->close_image(local_image_ctx);
  this->close_image(remote_image_ctx);
  this->stop();
}

} // namespace mirror
} // namespace rbd
52,219
30.363363
98
cc
null
ceph-main/src/test/rbd_mirror/test_ImageSync.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/rbd_mirror/test_fixture.h" #include "include/stringify.h" #include "include/rbd/librbd.hpp" #include "common/Cond.h" #include "journal/Journaler.h" #include "journal/Settings.h" #include "librbd/ExclusiveLock.h" #include "librbd/ImageCtx.h" #include "librbd/ImageState.h" #include "librbd/internal.h" #include "librbd/Journal.h" #include "librbd/Operations.h" #include "librbd/api/Io.h" #include "librbd/io/AioCompletion.h" #include "librbd/io/ImageDispatchSpec.h" #include "librbd/io/ReadResult.h" #include "librbd/journal/Types.h" #include "tools/rbd_mirror/ImageSync.h" #include "tools/rbd_mirror/InstanceWatcher.h" #include "tools/rbd_mirror/Threads.h" #include "tools/rbd_mirror/Throttler.h" #include "tools/rbd_mirror/image_replayer/journal/StateBuilder.h" void register_test_image_sync() { } namespace rbd { namespace mirror { namespace { int flush(librbd::ImageCtx *image_ctx) { C_SaferCond ctx; auto aio_comp = librbd::io::AioCompletion::create_and_start( &ctx, image_ctx, librbd::io::AIO_TYPE_FLUSH); auto req = librbd::io::ImageDispatchSpec::create_flush( *image_ctx, librbd::io::IMAGE_DISPATCH_LAYER_INTERNAL_START, aio_comp, librbd::io::FLUSH_SOURCE_INTERNAL, {}); req->send(); return ctx.wait(); } void scribble(librbd::ImageCtx *image_ctx, int num_ops, uint64_t max_size) { max_size = std::min<uint64_t>(image_ctx->size, max_size); for (int i=0; i<num_ops; i++) { uint64_t off = rand() % (image_ctx->size - max_size + 1); uint64_t len = 1 + rand() % max_size; if (rand() % 4 == 0) { ASSERT_EQ((int)len, librbd::api::Io<>::discard( *image_ctx, off, len, image_ctx->discard_granularity_bytes)); } else { bufferlist bl; bl.append(std::string(len, '1')); ASSERT_EQ((int)len, librbd::api::Io<>::write( *image_ctx, off, len, std::move(bl), 0)); } } std::shared_lock owner_locker{image_ctx->owner_lock}; ASSERT_EQ(0, flush(image_ctx)); } } // anonymous namespace class 
TestImageSync : public TestFixture { public: void SetUp() override { TestFixture::SetUp(); create_and_open(m_local_io_ctx, &m_local_image_ctx); create_and_open(m_remote_io_ctx, &m_remote_image_ctx); auto cct = reinterpret_cast<CephContext*>(m_local_io_ctx.cct()); m_image_sync_throttler = rbd::mirror::Throttler<>::create( cct, "rbd_mirror_concurrent_image_syncs"); m_instance_watcher = rbd::mirror::InstanceWatcher<>::create( m_local_io_ctx, *m_threads->asio_engine, nullptr, m_image_sync_throttler); m_instance_watcher->handle_acquire_leader(); ContextWQ* context_wq; librbd::Journal<>::get_work_queue(cct, &context_wq); m_remote_journaler = new ::journal::Journaler( context_wq, m_threads->timer, &m_threads->timer_lock, m_remote_io_ctx, m_remote_image_ctx->id, "mirror-uuid", {}, nullptr); m_client_meta = {"image-id"}; librbd::journal::ClientData client_data(m_client_meta); bufferlist client_data_bl; encode(client_data, client_data_bl); ASSERT_EQ(0, m_remote_journaler->register_client(client_data_bl)); m_state_builder = rbd::mirror::image_replayer::journal::StateBuilder< librbd::ImageCtx>::create("global image id"); m_state_builder->remote_journaler = m_remote_journaler; m_state_builder->remote_client_meta = m_client_meta; m_sync_point_handler = m_state_builder->create_sync_point_handler(); } void TearDown() override { m_instance_watcher->handle_release_leader(); m_state_builder->remote_journaler = nullptr; m_state_builder->destroy_sync_point_handler(); m_state_builder->destroy(); delete m_remote_journaler; delete m_instance_watcher; delete m_image_sync_throttler; TestFixture::TearDown(); } void create_and_open(librados::IoCtx &io_ctx, librbd::ImageCtx **image_ctx) { librbd::RBD rbd; ASSERT_EQ(0, create_image(rbd, io_ctx, m_image_name, m_image_size)); ASSERT_EQ(0, open_image(io_ctx, m_image_name, image_ctx)); C_SaferCond ctx; { std::shared_lock owner_locker{(*image_ctx)->owner_lock}; (*image_ctx)->exclusive_lock->try_acquire_lock(&ctx); } ASSERT_EQ(0, ctx.wait()); 
ASSERT_TRUE((*image_ctx)->exclusive_lock->is_lock_owner()); } ImageSync<> *create_request(Context *ctx) { return new ImageSync<>(m_threads, m_local_image_ctx, m_remote_image_ctx, "mirror-uuid", m_sync_point_handler, m_instance_watcher, nullptr, ctx); } librbd::ImageCtx *m_remote_image_ctx; librbd::ImageCtx *m_local_image_ctx; rbd::mirror::Throttler<> *m_image_sync_throttler; rbd::mirror::InstanceWatcher<> *m_instance_watcher; ::journal::Journaler *m_remote_journaler; librbd::journal::MirrorPeerClientMeta m_client_meta; rbd::mirror::image_replayer::journal::StateBuilder<librbd::ImageCtx>* m_state_builder = nullptr; rbd::mirror::image_sync::SyncPointHandler* m_sync_point_handler = nullptr; }; TEST_F(TestImageSync, Empty) { C_SaferCond ctx; ImageSync<> *request = create_request(&ctx); request->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_EQ(0U, m_client_meta.sync_points.size()); ASSERT_EQ(0, m_remote_image_ctx->state->refresh()); ASSERT_EQ(0U, m_remote_image_ctx->snap_ids.size()); ASSERT_EQ(0, m_local_image_ctx->state->refresh()); ASSERT_EQ(1U, m_local_image_ctx->snap_ids.size()); // deleted on journal replay } TEST_F(TestImageSync, Simple) { scribble(m_remote_image_ctx, 10, 102400); C_SaferCond ctx; ImageSync<> *request = create_request(&ctx); request->send(); ASSERT_EQ(0, ctx.wait()); int64_t object_size = std::min<int64_t>( m_remote_image_ctx->size, 1 << m_remote_image_ctx->order); bufferlist read_remote_bl; read_remote_bl.append(std::string(object_size, '1')); bufferlist read_local_bl; read_local_bl.append(std::string(object_size, '1')); for (uint64_t offset = 0; offset < m_remote_image_ctx->size; offset += object_size) { ASSERT_LE(0, librbd::api::Io<>::read( *m_remote_image_ctx, offset, object_size, librbd::io::ReadResult{&read_remote_bl}, 0)); ASSERT_LE(0, librbd::api::Io<>::read( *m_local_image_ctx, offset, object_size, librbd::io::ReadResult{&read_local_bl}, 0)); ASSERT_TRUE(read_remote_bl.contents_equal(read_local_bl)); } } TEST_F(TestImageSync, Resize) { int64_t 
object_size = std::min<int64_t>( m_remote_image_ctx->size, 1 << m_remote_image_ctx->order); uint64_t off = 0; uint64_t len = object_size / 10; bufferlist bl; bl.append(std::string(len, '1')); ASSERT_EQ((int)len, librbd::api::Io<>::write( *m_remote_image_ctx, off, len, std::move(bl), 0)); { std::shared_lock owner_locker{m_remote_image_ctx->owner_lock}; ASSERT_EQ(0, flush(m_remote_image_ctx)); } ASSERT_EQ(0, create_snap(m_remote_image_ctx, "snap", nullptr)); uint64_t size = object_size - 1; librbd::NoOpProgressContext no_op_progress_ctx; ASSERT_EQ(0, m_remote_image_ctx->operations->resize(size, true, no_op_progress_ctx)); C_SaferCond ctx; ImageSync<> *request = create_request(&ctx); request->send(); ASSERT_EQ(0, ctx.wait()); bufferlist read_remote_bl; read_remote_bl.append(std::string(len, '\0')); bufferlist read_local_bl; read_local_bl.append(std::string(len, '\0')); ASSERT_LE(0, librbd::api::Io<>::read( *m_remote_image_ctx, off, len, librbd::io::ReadResult{&read_remote_bl}, 0)); ASSERT_LE(0, librbd::api::Io<>::read( *m_local_image_ctx, off, len, librbd::io::ReadResult{&read_local_bl}, 0)); ASSERT_TRUE(read_remote_bl.contents_equal(read_local_bl)); } TEST_F(TestImageSync, Discard) { int64_t object_size = std::min<int64_t>( m_remote_image_ctx->size, 1 << m_remote_image_ctx->order); uint64_t off = 0; uint64_t len = object_size / 10; bufferlist bl; bl.append(std::string(len, '1')); ASSERT_EQ((int)len, librbd::api::Io<>::write( *m_remote_image_ctx, off, len, std::move(bl), 0)); { std::shared_lock owner_locker{m_remote_image_ctx->owner_lock}; ASSERT_EQ(0, flush(m_remote_image_ctx)); } ASSERT_EQ(0, create_snap(m_remote_image_ctx, "snap", nullptr)); ASSERT_EQ((int)len - 2, librbd::api::Io<>::discard( *m_remote_image_ctx, off + 1, len - 2, m_remote_image_ctx->discard_granularity_bytes)); { std::shared_lock owner_locker{m_remote_image_ctx->owner_lock}; ASSERT_EQ(0, flush(m_remote_image_ctx)); } C_SaferCond ctx; ImageSync<> *request = create_request(&ctx); request->send(); 
ASSERT_EQ(0, ctx.wait()); bufferlist read_remote_bl; read_remote_bl.append(std::string(object_size, '\0')); bufferlist read_local_bl; read_local_bl.append(std::string(object_size, '\0')); ASSERT_LE(0, librbd::api::Io<>::read( *m_remote_image_ctx, off, len, librbd::io::ReadResult{&read_remote_bl}, 0)); ASSERT_LE(0, librbd::api::Io<>::read( *m_local_image_ctx, off, len, librbd::io::ReadResult{&read_local_bl}, 0)); ASSERT_TRUE(read_remote_bl.contents_equal(read_local_bl)); } TEST_F(TestImageSync, SnapshotStress) { std::list<std::string> snap_names; const int num_snaps = 4; for (int idx = 0; idx <= num_snaps; ++idx) { scribble(m_remote_image_ctx, 10, 102400); librbd::NoOpProgressContext no_op_progress_ctx; uint64_t size = 1 + rand() % m_image_size; ASSERT_EQ(0, m_remote_image_ctx->operations->resize(size, true, no_op_progress_ctx)); ASSERT_EQ(0, m_remote_image_ctx->state->refresh()); if (idx < num_snaps) { snap_names.push_back("snap" + stringify(idx + 1)); ASSERT_EQ(0, create_snap(m_remote_image_ctx, snap_names.back().c_str(), nullptr)); } else { snap_names.push_back(""); } } C_SaferCond ctx; ImageSync<> *request = create_request(&ctx); request->send(); ASSERT_EQ(0, ctx.wait()); int64_t object_size = std::min<int64_t>( m_remote_image_ctx->size, 1 << m_remote_image_ctx->order); bufferlist read_remote_bl; read_remote_bl.append(std::string(object_size, '1')); bufferlist read_local_bl; read_local_bl.append(std::string(object_size, '1')); for (auto &snap_name : snap_names) { uint64_t remote_snap_id; { std::shared_lock remote_image_locker{m_remote_image_ctx->image_lock}; remote_snap_id = m_remote_image_ctx->get_snap_id( cls::rbd::UserSnapshotNamespace{}, snap_name); } uint64_t remote_size; { C_SaferCond ctx; m_remote_image_ctx->state->snap_set(remote_snap_id, &ctx); ASSERT_EQ(0, ctx.wait()); std::shared_lock remote_image_locker{m_remote_image_ctx->image_lock}; remote_size = m_remote_image_ctx->get_image_size( m_remote_image_ctx->snap_id); } uint64_t local_snap_id; { 
std::shared_lock image_locker{m_local_image_ctx->image_lock}; local_snap_id = m_local_image_ctx->get_snap_id( cls::rbd::UserSnapshotNamespace{}, snap_name); } uint64_t local_size; { C_SaferCond ctx; m_local_image_ctx->state->snap_set(local_snap_id, &ctx); ASSERT_EQ(0, ctx.wait()); std::shared_lock image_locker{m_local_image_ctx->image_lock}; local_size = m_local_image_ctx->get_image_size( m_local_image_ctx->snap_id); bool flags_set; ASSERT_EQ(0, m_local_image_ctx->test_flags(m_local_image_ctx->snap_id, RBD_FLAG_OBJECT_MAP_INVALID, m_local_image_ctx->image_lock, &flags_set)); ASSERT_FALSE(flags_set); } ASSERT_EQ(remote_size, local_size); for (uint64_t offset = 0; offset < remote_size; offset += object_size) { ASSERT_LE(0, librbd::api::Io<>::read( *m_remote_image_ctx, offset, object_size, librbd::io::ReadResult{&read_remote_bl}, 0)); ASSERT_LE(0, librbd::api::Io<>::read( *m_local_image_ctx, offset, object_size, librbd::io::ReadResult{&read_local_bl}, 0)); ASSERT_TRUE(read_remote_bl.contents_equal(read_local_bl)); } } } } // namespace mirror } // namespace rbd
12,634
32.693333
98
cc
null
ceph-main/src/test/rbd_mirror/test_InstanceWatcher.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "include/rados/librados.hpp" #include "include/stringify.h" #include "cls/rbd/cls_rbd_types.h" #include "cls/rbd/cls_rbd_client.h" #include "librbd/Utils.h" #include "librbd/internal.h" #include "test/rbd_mirror/test_fixture.h" #include "tools/rbd_mirror/InstanceWatcher.h" #include "tools/rbd_mirror/Threads.h" #include "common/Cond.h" #include "test/librados/test_cxx.h" #include "gtest/gtest.h" using rbd::mirror::InstanceWatcher; void register_test_instance_watcher() { } class TestInstanceWatcher : public ::rbd::mirror::TestFixture { public: std::string m_instance_id; std::string m_oid; void SetUp() override { TestFixture::SetUp(); m_local_io_ctx.remove(RBD_MIRROR_LEADER); EXPECT_EQ(0, m_local_io_ctx.create(RBD_MIRROR_LEADER, true)); m_instance_id = stringify(m_local_io_ctx.get_instance_id()); m_oid = RBD_MIRROR_INSTANCE_PREFIX + m_instance_id; } void get_instances(std::vector<std::string> *instance_ids) { instance_ids->clear(); C_SaferCond on_get; InstanceWatcher<>::get_instances(m_local_io_ctx, instance_ids, &on_get); EXPECT_EQ(0, on_get.wait()); } }; TEST_F(TestInstanceWatcher, InitShutdown) { InstanceWatcher<> instance_watcher(m_local_io_ctx, *m_threads->asio_engine, nullptr, nullptr, m_instance_id); std::vector<std::string> instance_ids; get_instances(&instance_ids); ASSERT_EQ(0U, instance_ids.size()); uint64_t size; ASSERT_EQ(-ENOENT, m_local_io_ctx.stat(m_oid, &size, nullptr)); // Init ASSERT_EQ(0, instance_watcher.init()); get_instances(&instance_ids); ASSERT_EQ(1U, instance_ids.size()); ASSERT_EQ(m_instance_id, instance_ids[0]); ASSERT_EQ(0, m_local_io_ctx.stat(m_oid, &size, nullptr)); std::list<obj_watch_t> watchers; ASSERT_EQ(0, m_local_io_ctx.list_watchers(m_oid, &watchers)); ASSERT_EQ(1U, watchers.size()); ASSERT_EQ(m_instance_id, stringify(watchers.begin()->watcher_id)); get_instances(&instance_ids); ASSERT_EQ(1U, instance_ids.size()); // 
Shutdown instance_watcher.shut_down(); ASSERT_EQ(-ENOENT, m_local_io_ctx.stat(m_oid, &size, nullptr)); get_instances(&instance_ids); ASSERT_EQ(0U, instance_ids.size()); } TEST_F(TestInstanceWatcher, Remove) { std::string instance_id = "instance_id"; std::string oid = RBD_MIRROR_INSTANCE_PREFIX + instance_id; std::vector<std::string> instance_ids; get_instances(&instance_ids); ASSERT_EQ(0U, instance_ids.size()); uint64_t size; ASSERT_EQ(-ENOENT, m_local_io_ctx.stat(oid, &size, nullptr)); librados::Rados cluster; librados::IoCtx io_ctx; ASSERT_EQ("", connect_cluster_pp(cluster)); ASSERT_EQ(0, cluster.ioctx_create(_local_pool_name.c_str(), io_ctx)); InstanceWatcher<> instance_watcher(m_local_io_ctx, *m_threads->asio_engine, nullptr, nullptr, "instance_id"); // Init ASSERT_EQ(0, instance_watcher.init()); get_instances(&instance_ids); ASSERT_EQ(1U, instance_ids.size()); ASSERT_EQ(instance_id, instance_ids[0]); ASSERT_EQ(0, m_local_io_ctx.stat(oid, &size, nullptr)); std::list<obj_watch_t> watchers; ASSERT_EQ(0, m_local_io_ctx.list_watchers(oid, &watchers)); ASSERT_EQ(1U, watchers.size()); // Remove C_SaferCond on_remove; InstanceWatcher<>::remove_instance(m_local_io_ctx, *m_threads->asio_engine, "instance_id", &on_remove); ASSERT_EQ(0, on_remove.wait()); ASSERT_EQ(-ENOENT, m_local_io_ctx.stat(oid, &size, nullptr)); get_instances(&instance_ids); ASSERT_EQ(0U, instance_ids.size()); // Shutdown instance_watcher.shut_down(); ASSERT_EQ(-ENOENT, m_local_io_ctx.stat(m_oid, &size, nullptr)); get_instances(&instance_ids); ASSERT_EQ(0U, instance_ids.size()); // Remove NOENT C_SaferCond on_remove_noent; InstanceWatcher<>::remove_instance(m_local_io_ctx, *m_threads->asio_engine, instance_id, &on_remove_noent); ASSERT_EQ(0, on_remove_noent.wait()); }
4,097
29.81203
77
cc
null
ceph-main/src/test/rbd_mirror/test_Instances.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "include/rados/librados.hpp" #include "cls/rbd/cls_rbd_client.h" #include "test/rbd_mirror/test_fixture.h" #include "tools/rbd_mirror/InstanceWatcher.h" #include "tools/rbd_mirror/Instances.h" #include "tools/rbd_mirror/Threads.h" #include "common/Cond.h" #include "test/librados/test.h" #include "gtest/gtest.h" #include <vector> using rbd::mirror::InstanceWatcher; using rbd::mirror::Instances; void register_test_instances() { } class TestInstances : public ::rbd::mirror::TestFixture { public: struct Listener : public rbd::mirror::instances::Listener { std::mutex lock; struct Instance { uint32_t count = 0; std::set<std::string> ids; C_SaferCond ctx; }; Instance add; Instance remove; void handle(const InstanceIds& instance_ids, Instance* instance) { std::unique_lock<std::mutex> locker(lock); for (auto& instance_id : instance_ids) { ceph_assert(instance->count > 0); --instance->count; instance->ids.insert(instance_id); if (instance->count == 0) { instance->ctx.complete(0); } } } void handle_added(const InstanceIds& instance_ids) override { handle(instance_ids, &add); } void handle_removed(const InstanceIds& instance_ids) override { handle(instance_ids, &remove); } }; virtual void SetUp() { TestFixture::SetUp(); m_local_io_ctx.remove(RBD_MIRROR_LEADER); EXPECT_EQ(0, m_local_io_ctx.create(RBD_MIRROR_LEADER, true)); m_instance_id = stringify(m_local_io_ctx.get_instance_id()); } Listener m_listener; std::string m_instance_id; }; TEST_F(TestInstances, InitShutdown) { m_listener.add.count = 1; Instances<> instances(m_threads, m_local_io_ctx, m_instance_id, m_listener); std::string instance_id = "instance_id"; ASSERT_EQ(0, librbd::cls_client::mirror_instances_add(&m_local_io_ctx, instance_id)); C_SaferCond on_init; instances.init(&on_init); ASSERT_EQ(0, on_init.wait()); ASSERT_LT(0U, m_listener.add.count); instances.unblock_listener(); ASSERT_EQ(0, 
m_listener.add.ctx.wait()); ASSERT_EQ(std::set<std::string>({instance_id}), m_listener.add.ids); C_SaferCond on_shut_down; instances.shut_down(&on_shut_down); ASSERT_EQ(0, on_shut_down.wait()); } TEST_F(TestInstances, InitEnoent) { Instances<> instances(m_threads, m_local_io_ctx, m_instance_id, m_listener); m_local_io_ctx.remove(RBD_MIRROR_LEADER); C_SaferCond on_init; instances.init(&on_init); ASSERT_EQ(0, on_init.wait()); C_SaferCond on_shut_down; instances.shut_down(&on_shut_down); ASSERT_EQ(0, on_shut_down.wait()); } TEST_F(TestInstances, NotifyRemove) { // speed testing up a little EXPECT_EQ(0, _rados->conf_set("rbd_mirror_leader_heartbeat_interval", "1")); EXPECT_EQ(0, _rados->conf_set("rbd_mirror_leader_max_missed_heartbeats", "2")); EXPECT_EQ(0, _rados->conf_set("rbd_mirror_leader_max_acquire_attempts_before_break", "0")); m_listener.add.count = 2; m_listener.remove.count = 1; Instances<> instances(m_threads, m_local_io_ctx, m_instance_id, m_listener); std::string instance_id1 = "instance_id1"; std::string instance_id2 = "instance_id2"; ASSERT_EQ(0, librbd::cls_client::mirror_instances_add(&m_local_io_ctx, instance_id1)); C_SaferCond on_init; instances.init(&on_init); ASSERT_EQ(0, on_init.wait()); instances.acked({instance_id2}); ASSERT_LT(0U, m_listener.add.count); instances.unblock_listener(); ASSERT_EQ(0, m_listener.add.ctx.wait()); ASSERT_EQ(std::set<std::string>({instance_id1, instance_id2}), m_listener.add.ids); std::vector<std::string> instance_ids; for (int i = 0; i < 100; i++) { instances.acked({instance_id1}); if (m_listener.remove.count > 0) { usleep(250000); } } instances.acked({instance_id1}); ASSERT_EQ(0, m_listener.remove.ctx.wait()); ASSERT_EQ(std::set<std::string>({instance_id2}), m_listener.remove.ids); C_SaferCond on_get; instances.acked({instance_id1}); InstanceWatcher<>::get_instances(m_local_io_ctx, &instance_ids, &on_get); EXPECT_EQ(0, on_get.wait()); EXPECT_EQ(1U, instance_ids.size()); ASSERT_EQ(instance_ids[0], instance_id1); 
C_SaferCond on_shut_down; instances.shut_down(&on_shut_down); ASSERT_EQ(0, on_shut_down.wait()); }
4,603
26.90303
86
cc
null
ceph-main/src/test/rbd_mirror/test_LeaderWatcher.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "include/rados/librados.hpp" #include "librbd/internal.h" #include "librbd/Utils.h" #include "librbd/api/Mirror.h" #include "test/librbd/test_support.h" #include "test/rbd_mirror/test_fixture.h" #include "tools/rbd_mirror/LeaderWatcher.h" #include "tools/rbd_mirror/Threads.h" #include "common/Cond.h" #include "test/librados/test_cxx.h" #include "gtest/gtest.h" using librbd::util::unique_lock_name; using rbd::mirror::LeaderWatcher; void register_test_leader_watcher() { } class TestLeaderWatcher : public ::rbd::mirror::TestFixture { public: class Listener : public rbd::mirror::leader_watcher::Listener { public: Listener() : m_test_lock(ceph::make_mutex( unique_lock_name("LeaderWatcher::m_test_lock", this))) { } void on_acquire(int r, Context *ctx) { std::lock_guard locker{m_test_lock}; m_on_acquire_r = r; m_on_acquire = ctx; } void on_release(int r, Context *ctx) { std::lock_guard locker{m_test_lock}; m_on_release_r = r; m_on_release = ctx; } int acquire_count() const { std::lock_guard locker{m_test_lock}; return m_acquire_count; } int release_count() const { std::lock_guard locker{m_test_lock}; return m_release_count; } void post_acquire_handler(Context *on_finish) override { std::lock_guard locker{m_test_lock}; m_acquire_count++; on_finish->complete(m_on_acquire_r); m_on_acquire_r = 0; if (m_on_acquire != nullptr) { m_on_acquire->complete(0); m_on_acquire = nullptr; } } void pre_release_handler(Context *on_finish) override { std::lock_guard locker{m_test_lock}; m_release_count++; on_finish->complete(m_on_release_r); m_on_release_r = 0; if (m_on_release != nullptr) { m_on_release->complete(0); m_on_release = nullptr; } } void update_leader_handler(const std::string &leader_instance_id) override { } void handle_instances_added(const InstanceIds& instance_ids) override { } void handle_instances_removed(const InstanceIds& instance_ids) override { } private: 
mutable ceph::mutex m_test_lock; int m_acquire_count = 0; int m_release_count = 0; int m_on_acquire_r = 0; int m_on_release_r = 0; Context *m_on_acquire = nullptr; Context *m_on_release = nullptr; }; struct Connection { librados::Rados cluster; librados::IoCtx io_ctx; }; std::list<std::unique_ptr<Connection> > m_connections; void SetUp() override { TestFixture::SetUp(); EXPECT_EQ(0, librbd::api::Mirror<>::mode_set(m_local_io_ctx, RBD_MIRROR_MODE_POOL)); if (is_librados_test_stub(*_rados)) { // speed testing up a little EXPECT_EQ(0, _rados->conf_set("rbd_mirror_leader_heartbeat_interval", "1")); } } librados::IoCtx &create_connection(bool no_heartbeats = false) { m_connections.push_back(std::unique_ptr<Connection>(new Connection())); Connection *c = m_connections.back().get(); EXPECT_EQ("", connect_cluster_pp(c->cluster)); if (no_heartbeats) { EXPECT_EQ(0, c->cluster.conf_set("rbd_mirror_leader_heartbeat_interval", "3600")); } else if (is_librados_test_stub(*_rados)) { EXPECT_EQ(0, c->cluster.conf_set("rbd_mirror_leader_heartbeat_interval", "1")); } EXPECT_EQ(0, c->cluster.ioctx_create(_local_pool_name.c_str(), c->io_ctx)); return c->io_ctx; } }; TEST_F(TestLeaderWatcher, InitShutdown) { Listener listener; LeaderWatcher<> leader_watcher(m_threads, m_local_io_ctx, &listener); C_SaferCond on_init_acquire; listener.on_acquire(0, &on_init_acquire); ASSERT_EQ(0, leader_watcher.init()); ASSERT_EQ(0, on_init_acquire.wait()); ASSERT_TRUE(leader_watcher.is_leader()); leader_watcher.shut_down(); ASSERT_EQ(1, listener.acquire_count()); ASSERT_EQ(1, listener.release_count()); ASSERT_FALSE(leader_watcher.is_leader()); } TEST_F(TestLeaderWatcher, Release) { Listener listener; LeaderWatcher<> leader_watcher(m_threads, m_local_io_ctx, &listener); C_SaferCond on_init_acquire; listener.on_acquire(0, &on_init_acquire); ASSERT_EQ(0, leader_watcher.init()); ASSERT_EQ(0, on_init_acquire.wait()); ASSERT_TRUE(leader_watcher.is_leader()); C_SaferCond on_release; C_SaferCond on_acquire; 
listener.on_release(0, &on_release); listener.on_acquire(0, &on_acquire); leader_watcher.release_leader(); ASSERT_EQ(0, on_release.wait()); ASSERT_FALSE(leader_watcher.is_leader()); // wait for lock re-acquired due to no another locker ASSERT_EQ(0, on_acquire.wait()); ASSERT_TRUE(leader_watcher.is_leader()); C_SaferCond on_release2; listener.on_release(0, &on_release2); leader_watcher.release_leader(); ASSERT_EQ(0, on_release2.wait()); leader_watcher.shut_down(); ASSERT_EQ(2, listener.acquire_count()); ASSERT_EQ(2, listener.release_count()); } TEST_F(TestLeaderWatcher, ListenerError) { Listener listener; LeaderWatcher<> leader_watcher(m_threads, m_local_io_ctx, &listener); // make listener return error on acquire C_SaferCond on_init_acquire, on_init_release; listener.on_acquire(-EINVAL, &on_init_acquire); listener.on_release(0, &on_init_release); ASSERT_EQ(0, leader_watcher.init()); ASSERT_EQ(0, on_init_acquire.wait()); ASSERT_EQ(0, on_init_release.wait()); ASSERT_FALSE(leader_watcher.is_leader()); // wait for lock re-acquired due to no another locker C_SaferCond on_acquire; listener.on_acquire(0, &on_acquire); ASSERT_EQ(0, on_acquire.wait()); ASSERT_TRUE(leader_watcher.is_leader()); // make listener return error on release C_SaferCond on_release; listener.on_release(-EINVAL, &on_release); leader_watcher.release_leader(); ASSERT_EQ(0, on_release.wait()); ASSERT_FALSE(leader_watcher.is_leader()); leader_watcher.shut_down(); ASSERT_EQ(2, listener.acquire_count()); ASSERT_EQ(2, listener.release_count()); ASSERT_FALSE(leader_watcher.is_leader()); } TEST_F(TestLeaderWatcher, Two) { Listener listener1; LeaderWatcher<> leader_watcher1(m_threads, create_connection(), &listener1); C_SaferCond on_init_acquire; listener1.on_acquire(0, &on_init_acquire); ASSERT_EQ(0, leader_watcher1.init()); ASSERT_EQ(0, on_init_acquire.wait()); Listener listener2; LeaderWatcher<> leader_watcher2(m_threads, create_connection(), &listener2); ASSERT_EQ(0, leader_watcher2.init()); 
ASSERT_TRUE(leader_watcher1.is_leader()); ASSERT_FALSE(leader_watcher2.is_leader()); C_SaferCond on_release; C_SaferCond on_acquire; listener1.on_release(0, &on_release); listener2.on_acquire(0, &on_acquire); leader_watcher1.release_leader(); ASSERT_EQ(0, on_release.wait()); ASSERT_FALSE(leader_watcher1.is_leader()); // wait for lock acquired by another watcher ASSERT_EQ(0, on_acquire.wait()); ASSERT_TRUE(leader_watcher2.is_leader()); leader_watcher1.shut_down(); leader_watcher2.shut_down(); ASSERT_EQ(1, listener1.acquire_count()); ASSERT_EQ(1, listener1.release_count()); ASSERT_EQ(1, listener2.acquire_count()); ASSERT_EQ(1, listener2.release_count()); } TEST_F(TestLeaderWatcher, Break) { Listener listener1, listener2; LeaderWatcher<> leader_watcher1(m_threads, create_connection(true /* no heartbeats */), &listener1); LeaderWatcher<> leader_watcher2(m_threads, create_connection(), &listener2); C_SaferCond on_init_acquire; listener1.on_acquire(0, &on_init_acquire); ASSERT_EQ(0, leader_watcher1.init()); ASSERT_EQ(0, on_init_acquire.wait()); C_SaferCond on_acquire; listener2.on_acquire(0, &on_acquire); ASSERT_EQ(0, leader_watcher2.init()); ASSERT_FALSE(leader_watcher2.is_leader()); // wait for lock broken due to no heartbeats and re-acquired ASSERT_EQ(0, on_acquire.wait()); ASSERT_TRUE(leader_watcher2.is_leader()); leader_watcher1.shut_down(); leader_watcher2.shut_down(); } TEST_F(TestLeaderWatcher, Stress) { const int WATCHERS_COUNT = 20; std::list<LeaderWatcher<> *> leader_watchers; Listener listener; for (int i = 0; i < WATCHERS_COUNT; i++) { auto leader_watcher = new LeaderWatcher<>(m_threads, create_connection(), &listener); leader_watchers.push_back(leader_watcher); } C_SaferCond on_init_acquire; listener.on_acquire(0, &on_init_acquire); for (auto &leader_watcher : leader_watchers) { ASSERT_EQ(0, leader_watcher->init()); } ASSERT_EQ(0, on_init_acquire.wait()); while (true) { C_SaferCond on_acquire; listener.on_acquire(0, &on_acquire); 
std::unique_ptr<LeaderWatcher<> > leader_watcher; for (auto it = leader_watchers.begin(); it != leader_watchers.end(); ) { if ((*it)->is_leader()) { ASSERT_FALSE(leader_watcher); leader_watcher.reset(*it); it = leader_watchers.erase(it); } else { it++; } } ASSERT_TRUE(leader_watcher); leader_watcher->shut_down(); if (leader_watchers.empty()) { break; } ASSERT_EQ(0, on_acquire.wait()); } }
9,354
28.326019
80
cc
null
ceph-main/src/test/rbd_mirror/test_PoolWatcher.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "include/rados/librados.hpp" #include "include/rbd/librbd.hpp" #include "include/stringify.h" #include "test/rbd_mirror/test_fixture.h" #include "cls/rbd/cls_rbd_types.h" #include "cls/rbd/cls_rbd_client.h" #include "include/rbd_types.h" #include "librbd/internal.h" #include "librbd/ImageCtx.h" #include "librbd/ImageState.h" #include "librbd/Operations.h" #include "librbd/Utils.h" #include "librbd/api/Mirror.h" #include "common/Cond.h" #include "common/errno.h" #include "common/ceph_mutex.h" #include "tools/rbd_mirror/PoolWatcher.h" #include "tools/rbd_mirror/Threads.h" #include "tools/rbd_mirror/Types.h" #include "tools/rbd_mirror/pool_watcher/Types.h" #include "test/librados/test_cxx.h" #include "gtest/gtest.h" #include <boost/scope_exit.hpp> #include <iostream> #include <map> #include <memory> #include <set> #include <vector> using namespace std::chrono_literals; using rbd::mirror::ImageId; using rbd::mirror::ImageIds; using rbd::mirror::PoolWatcher; using rbd::mirror::PeerSpec; using rbd::mirror::RadosRef; using std::map; using std::set; using std::string; void register_test_pool_watcher() { } class TestPoolWatcher : public ::rbd::mirror::TestFixture { public: TestPoolWatcher() : m_pool_watcher_listener(this), m_image_number(0), m_snap_number(0) { m_cluster = std::make_shared<librados::Rados>(); EXPECT_EQ("", connect_cluster_pp(*m_cluster)); } void TearDown() override { if (m_pool_watcher) { C_SaferCond ctx; m_pool_watcher->shut_down(&ctx); EXPECT_EQ(0, ctx.wait()); } m_cluster->wait_for_latest_osdmap(); for (auto& pool : m_pools) { EXPECT_EQ(0, m_cluster->pool_delete(pool.c_str())); } TestFixture::TearDown(); } struct PoolWatcherListener : public rbd::mirror::pool_watcher::Listener { TestPoolWatcher *test; ceph::condition_variable cond; ImageIds image_ids; explicit PoolWatcherListener(TestPoolWatcher *test) : test(test) { } void handle_update(const 
std::string &mirror_uuid, ImageIds &&added_image_ids, ImageIds &&removed_image_ids) override { std::lock_guard locker{test->m_lock}; for (auto &image_id : removed_image_ids) { image_ids.erase(image_id); } image_ids.insert(added_image_ids.begin(), added_image_ids.end()); cond.notify_all(); } }; void create_pool(bool enable_mirroring, const PeerSpec &peer, string *name=nullptr) { string pool_name = get_temp_pool_name("test-rbd-mirror-"); ASSERT_EQ(0, m_cluster->pool_create(pool_name.c_str())); int64_t pool_id = m_cluster->pool_lookup(pool_name.c_str()); ASSERT_GE(pool_id, 0); m_pools.insert(pool_name); librados::IoCtx ioctx; ASSERT_EQ(0, m_cluster->ioctx_create2(pool_id, ioctx)); ioctx.application_enable("rbd", true); m_pool_watcher.reset(new PoolWatcher<>(m_threads, ioctx, "mirror uuid", m_pool_watcher_listener)); if (enable_mirroring) { ASSERT_EQ(0, librbd::api::Mirror<>::mode_set(ioctx, RBD_MIRROR_MODE_POOL)); std::string uuid; ASSERT_EQ(0, librbd::api::Mirror<>::peer_site_add( ioctx, &uuid, RBD_MIRROR_PEER_DIRECTION_RX_TX, peer.cluster_name, peer.client_name)); } if (name != nullptr) { *name = pool_name; } m_pool_watcher->init(); } string get_image_id(librados::IoCtx *ioctx, const string &image_name) { string obj = librbd::util::id_obj_name(image_name); string id; EXPECT_EQ(0, librbd::cls_client::get_id(ioctx, obj, &id)); return id; } void create_image(const string &pool_name, bool mirrored=true, string *image_name=nullptr) { uint64_t features = librbd::util::get_rbd_default_features(g_ceph_context); string name = "image" + stringify(++m_image_number); if (mirrored) { features |= RBD_FEATURE_EXCLUSIVE_LOCK | RBD_FEATURE_JOURNALING; } librados::IoCtx ioctx; ASSERT_EQ(0, m_cluster->ioctx_create(pool_name.c_str(), ioctx)); int order = 0; ASSERT_EQ(0, librbd::create(ioctx, name.c_str(), 1 << 22, false, features, &order, 0, 0)); if (mirrored) { librbd::Image image; librbd::RBD rbd; rbd.open(ioctx, image, name.c_str()); 
image.mirror_image_enable2(RBD_MIRROR_IMAGE_MODE_JOURNAL); librbd::mirror_image_info_t mirror_image_info; ASSERT_EQ(0, image.mirror_image_get_info(&mirror_image_info, sizeof(mirror_image_info))); image.close(); m_mirrored_images.insert(ImageId( mirror_image_info.global_id, get_image_id(&ioctx, name))); } if (image_name != nullptr) *image_name = name; } void clone_image(const string &parent_pool_name, const string &parent_image_name, const string &clone_pool_name, bool mirrored=true, string *image_name=nullptr) { librados::IoCtx pioctx, cioctx; ASSERT_EQ(0, m_cluster->ioctx_create(parent_pool_name.c_str(), pioctx)); ASSERT_EQ(0, m_cluster->ioctx_create(clone_pool_name.c_str(), cioctx)); string snap_name = "snap" + stringify(++m_snap_number); { librbd::ImageCtx *ictx = new librbd::ImageCtx(parent_image_name.c_str(), "", "", pioctx, false); ictx->state->open(0); librbd::NoOpProgressContext prog_ctx; EXPECT_EQ(0, ictx->operations->snap_create(cls::rbd::UserSnapshotNamespace(), snap_name, 0, prog_ctx)); EXPECT_EQ(0, ictx->operations->snap_protect(cls::rbd::UserSnapshotNamespace(), snap_name)); ictx->state->close(); } uint64_t features = librbd::util::get_rbd_default_features(g_ceph_context); string name = "clone" + stringify(++m_image_number); if (mirrored) { features |= RBD_FEATURE_EXCLUSIVE_LOCK | RBD_FEATURE_JOURNALING; } int order = 0; librbd::clone(pioctx, parent_image_name.c_str(), snap_name.c_str(), cioctx, name.c_str(), features, &order, 0, 0); if (mirrored) { librbd::Image image; librbd::RBD rbd; rbd.open(cioctx, image, name.c_str()); image.mirror_image_enable2(RBD_MIRROR_IMAGE_MODE_JOURNAL); librbd::mirror_image_info_t mirror_image_info; ASSERT_EQ(0, image.mirror_image_get_info(&mirror_image_info, sizeof(mirror_image_info))); image.close(); m_mirrored_images.insert(ImageId( mirror_image_info.global_id, get_image_id(&cioctx, name))); } if (image_name != nullptr) *image_name = name; } void check_images() { std::unique_lock l{m_lock}; while (m_mirrored_images != 
m_pool_watcher_listener.image_ids) { if (m_pool_watcher_listener.cond.wait_for(l, 10s) == std::cv_status::timeout) { break; } } ASSERT_EQ(m_mirrored_images, m_pool_watcher_listener.image_ids); } ceph::mutex m_lock = ceph::make_mutex("TestPoolWatcherLock"); RadosRef m_cluster; PoolWatcherListener m_pool_watcher_listener; std::unique_ptr<PoolWatcher<> > m_pool_watcher; set<string> m_pools; ImageIds m_mirrored_images; uint64_t m_image_number; uint64_t m_snap_number; }; TEST_F(TestPoolWatcher, EmptyPool) { string uuid1 = "00000000-0000-0000-0000-000000000001"; PeerSpec site1(uuid1, "site1", "mirror1"); create_pool(true, site1); check_images(); } TEST_F(TestPoolWatcher, ReplicatedPools) { string uuid1 = "00000000-0000-0000-0000-000000000001"; PeerSpec site1(uuid1, "site1", "mirror1"); string first_pool, local_pool, last_pool; create_pool(true, site1, &first_pool); check_images(); create_image(first_pool); check_images(); string parent_image, parent_image2; create_image(first_pool, true, &parent_image); check_images(); clone_image(first_pool, parent_image, first_pool); check_images(); clone_image(first_pool, parent_image, first_pool, true, &parent_image2); check_images(); create_image(first_pool, false); check_images(); }
8,152
30.723735
87
cc
null
ceph-main/src/test/rbd_mirror/test_fixture.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "cls/rbd/cls_rbd_types.h" #include "test/rbd_mirror/test_fixture.h" #include "include/stringify.h" #include "include/rbd/librbd.hpp" #include "librbd/ImageCtx.h" #include "librbd/ImageState.h" #include "librbd/Operations.h" #include "librbd/internal.h" #include "test/librados/test_cxx.h" #include "tools/rbd_mirror/Threads.h" namespace rbd { namespace mirror { std::string TestFixture::_local_pool_name; std::string TestFixture::_remote_pool_name; std::shared_ptr<librados::Rados> TestFixture::_rados; uint64_t TestFixture::_image_number = 0; std::string TestFixture::_data_pool; TestFixture::TestFixture() { } void TestFixture::SetUpTestCase() { _rados = std::shared_ptr<librados::Rados>(new librados::Rados()); ASSERT_EQ("", connect_cluster_pp(*_rados.get())); ASSERT_EQ(0, _rados->conf_set("rbd_cache", "false")); _local_pool_name = get_temp_pool_name("test-rbd-mirror-"); ASSERT_EQ(0, _rados->pool_create(_local_pool_name.c_str())); librados::IoCtx local_ioctx; ASSERT_EQ(0, _rados->ioctx_create(_local_pool_name.c_str(), local_ioctx)); local_ioctx.application_enable("rbd", true); _remote_pool_name = get_temp_pool_name("test-rbd-mirror-"); ASSERT_EQ(0, _rados->pool_create(_remote_pool_name.c_str())); librados::IoCtx remote_ioctx; ASSERT_EQ(0, _rados->ioctx_create(_remote_pool_name.c_str(), remote_ioctx)); remote_ioctx.application_enable("rbd", true); ASSERT_EQ(0, create_image_data_pool(_data_pool)); if (!_data_pool.empty()) { printf("using image data pool: %s\n", _data_pool.c_str()); } } void TestFixture::TearDownTestCase() { if (!_data_pool.empty()) { ASSERT_EQ(0, _rados->pool_delete(_data_pool.c_str())); } ASSERT_EQ(0, _rados->pool_delete(_remote_pool_name.c_str())); ASSERT_EQ(0, _rados->pool_delete(_local_pool_name.c_str())); _rados->shutdown(); } void TestFixture::SetUp() { static bool seeded = false; if (!seeded) { seeded = true; int seed = getpid(); std::cout << 
"seed " << seed << std::endl; srand(seed); } ASSERT_EQ(0, _rados->ioctx_create(_local_pool_name.c_str(), m_local_io_ctx)); ASSERT_EQ(0, _rados->ioctx_create(_remote_pool_name.c_str(), m_remote_io_ctx)); m_image_name = get_temp_image_name(); m_threads = new rbd::mirror::Threads<>(_rados); } void TestFixture::TearDown() { for (auto image_ctx : m_image_ctxs) { image_ctx->state->close(); } m_remote_io_ctx.close(); m_local_io_ctx.close(); delete m_threads; } int TestFixture::create_image(librbd::RBD &rbd, librados::IoCtx &ioctx, const std::string &name, uint64_t size) { int order = 18; return rbd.create2(ioctx, name.c_str(), size, RBD_FEATURES_ALL, &order); } int TestFixture::open_image(librados::IoCtx &io_ctx, const std::string &image_name, librbd::ImageCtx **image_ctx) { *image_ctx = new librbd::ImageCtx(image_name.c_str(), "", nullptr, io_ctx, false); m_image_ctxs.insert(*image_ctx); return (*image_ctx)->state->open(0); } int TestFixture::create_snap(librbd::ImageCtx *image_ctx, const char* snap_name, librados::snap_t *snap_id) { librbd::NoOpProgressContext prog_ctx; int r = image_ctx->operations->snap_create(cls::rbd::UserSnapshotNamespace(), snap_name, 0, prog_ctx); if (r < 0) { return r; } r = image_ctx->state->refresh(); if (r < 0) { return r; } if (image_ctx->snap_ids.count({cls::rbd::UserSnapshotNamespace(), snap_name}) == 0) { return -ENOENT; } if (snap_id != nullptr) { *snap_id = image_ctx->snap_ids[{cls::rbd::UserSnapshotNamespace(), snap_name}]; } return 0; } std::string TestFixture::get_temp_image_name() { ++_image_number; return "image" + stringify(_image_number); } int TestFixture::create_image_data_pool(std::string &data_pool) { std::string pool; int r = _rados->conf_get("rbd_default_data_pool", pool); if (r != 0) { return r; } else if (pool.empty()) { return 0; } r = _rados->pool_create(pool.c_str()); if (r < 0) { return r; } librados::IoCtx data_ioctx; r = _rados->ioctx_create(pool.c_str(), data_ioctx); if (r < 0) { return r; } 
data_ioctx.application_enable("rbd", true); data_pool = pool; return 0; } } // namespace mirror } // namespace rbd
4,486
26.697531
81
cc
null
ceph-main/src/test/rbd_mirror/test_fixture.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_TEST_RBD_MIRROR_TEST_FIXTURE_H #define CEPH_TEST_RBD_MIRROR_TEST_FIXTURE_H #include "include/int_types.h" #include "include/rados/librados.hpp" #include <gtest/gtest.h> #include <memory> #include <set> namespace librbd { class ImageCtx; class RBD; } namespace rbd { namespace mirror { template <typename> class Threads; class TestFixture : public ::testing::Test { public: TestFixture(); static void SetUpTestCase(); static void TearDownTestCase(); void SetUp() override; void TearDown() override; librados::IoCtx m_local_io_ctx; librados::IoCtx m_remote_io_ctx; std::string m_image_name; uint64_t m_image_size = 1 << 24; std::set<librbd::ImageCtx *> m_image_ctxs; Threads<librbd::ImageCtx> *m_threads = nullptr; int create_image(librbd::RBD &rbd, librados::IoCtx &ioctx, const std::string &name, uint64_t size); int open_image(librados::IoCtx &io_ctx, const std::string &image_name, librbd::ImageCtx **image_ctx); int create_snap(librbd::ImageCtx *image_ctx, const char* snap_name, librados::snap_t *snap_id = nullptr); static std::string get_temp_image_name(); static int create_image_data_pool(std::string &data_pool); static std::string _local_pool_name; static std::string _remote_pool_name; static std::shared_ptr<librados::Rados> _rados; static uint64_t _image_number; static std::string _data_pool; }; } // namespace mirror } // namespace rbd #endif // CEPH_TEST_RBD_MIRROR_TEST_FIXTURE_H
1,623
23.606061
72
h
null
ceph-main/src/test/rbd_mirror/test_main.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "common/perf_counters.h" #include "include/rados/librados.hpp" #include "global/global_context.h" #include "test/librados/test_cxx.h" #include "gtest/gtest.h" #include <iostream> #include <string> PerfCounters *g_journal_perf_counters = nullptr; PerfCounters *g_snapshot_perf_counters = nullptr; extern void register_test_cluster_watcher(); extern void register_test_image_policy(); extern void register_test_image_sync(); extern void register_test_instance_watcher(); extern void register_test_instances(); extern void register_test_leader_watcher(); extern void register_test_pool_watcher(); extern void register_test_rbd_mirror(); extern void register_test_rbd_mirror_image_deleter(); int main(int argc, char **argv) { register_test_cluster_watcher(); register_test_image_policy(); register_test_image_sync(); register_test_instance_watcher(); register_test_instances(); register_test_leader_watcher(); register_test_pool_watcher(); register_test_rbd_mirror(); register_test_rbd_mirror_image_deleter(); ::testing::InitGoogleTest(&argc, argv); librados::Rados rados; std::string result = connect_cluster_pp(rados); if (result != "" ) { std::cerr << result << std::endl; return 1; } g_ceph_context = reinterpret_cast<CephContext*>(rados.cct()); int r = rados.conf_set("lockdep", "true"); if (r < 0) { std::cerr << "warning: failed to enable lockdep" << std::endl; } return RUN_ALL_TESTS(); }
1,558
27.87037
70
cc
null
ceph-main/src/test/rbd_mirror/test_mock_ImageMap.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/rbd_mirror/test_mock_fixture.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "test/librados_test_stub/MockTestMemRadosClient.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/rbd_mirror/mock/MockContextWQ.h" #include "test/rbd_mirror/mock/MockSafeTimer.h" #include "librbd/MirroringWatcher.h" #include "tools/rbd_mirror/Threads.h" #include "tools/rbd_mirror/ImageMap.h" #include "tools/rbd_mirror/image_map/LoadRequest.h" #include "tools/rbd_mirror/image_map/UpdateRequest.h" #include "tools/rbd_mirror/image_map/Types.h" #include "include/stringify.h" namespace librbd { namespace { struct MockTestImageCtx : public librbd::MockImageCtx { MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace } // namespace librbd namespace rbd { namespace mirror { template <> struct Threads<librbd::MockTestImageCtx> { MockSafeTimer *timer; ceph::mutex &timer_lock; MockContextWQ *work_queue; Threads(Threads<librbd::ImageCtx> *threads) : timer(new MockSafeTimer()), timer_lock(threads->timer_lock), work_queue(new MockContextWQ()) { } ~Threads() { delete timer; delete work_queue; } }; namespace image_map { template <> struct LoadRequest<librbd::MockTestImageCtx> { std::map<std::string, cls::rbd::MirrorImageMap> *image_map; Context *on_finish = nullptr; static LoadRequest *s_instance; static LoadRequest *create(librados::IoCtx &ioctx, std::map<std::string, cls::rbd::MirrorImageMap> *image_map, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->image_map = image_map; s_instance->on_finish = on_finish; return s_instance; } MOCK_METHOD0(send, void()); LoadRequest() { s_instance = this; } }; template <> struct UpdateRequest<librbd::MockTestImageCtx> { Context *on_finish = nullptr; static UpdateRequest *s_instance; static UpdateRequest *create(librados::IoCtx &ioctx, 
std::map<std::string, cls::rbd::MirrorImageMap> &&update_mapping, std::set<std::string> &&global_image_ids, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->on_finish = on_finish; return s_instance; } MOCK_METHOD0(send, void()); UpdateRequest() { s_instance = this; } }; LoadRequest<librbd::MockTestImageCtx> * LoadRequest<librbd::MockTestImageCtx>::s_instance = nullptr; UpdateRequest<librbd::MockTestImageCtx> * UpdateRequest<librbd::MockTestImageCtx>::s_instance = nullptr; } // namespace image_map } // namespace mirror } // namespace rbd // template definitions #include "tools/rbd_mirror/ImageMap.cc" namespace rbd { namespace mirror { using ::testing::_; using ::testing::DoAll; using ::testing::WithArg; using ::testing::AtLeast; using ::testing::InSequence; using ::testing::Invoke; using ::testing::ReturnArg; using ::testing::StrEq; using image_map::Listener; using image_map::LoadRequest; using image_map::UpdateRequest; using ::rbd::mirror::Threads; class TestMockImageMap : public TestMockFixture { public: typedef Threads<librbd::MockTestImageCtx> MockThreads; typedef ImageMap<librbd::MockTestImageCtx> MockImageMap; typedef LoadRequest<librbd::MockTestImageCtx> MockLoadRequest; typedef UpdateRequest<librbd::MockTestImageCtx> MockUpdateRequest; struct MockListener : Listener { TestMockImageMap *test_mock_image_map; MockListener(TestMockImageMap *test_mock_image_map) : test_mock_image_map(test_mock_image_map) { } MOCK_METHOD2(mock_acquire_image, void(const std::string &, Context*)); MOCK_METHOD2(mock_release_image, void(const std::string &, Context*)); MOCK_METHOD3(mock_remove_image, void(const std::string &, const std::string &, Context*)); void acquire_image(const std::string &global_image_id, const std::string &instance_id, Context* on_finish) { mock_acquire_image(global_image_id, on_finish); } void release_image(const std::string &global_image_id, const std::string &instance_id, Context* on_finish) { mock_release_image(global_image_id, 
on_finish); } void remove_image(const std::string &mirror_uuid, const std::string &global_image_id, const std::string &instance_id, Context* on_finish) { mock_remove_image(mirror_uuid, global_image_id, on_finish); } }; TestMockImageMap() = default; void SetUp() override { TestFixture::SetUp(); m_local_instance_id = stringify(m_local_io_ctx.get_instance_id()); EXPECT_EQ(0, _rados->conf_set("rbd_mirror_image_policy_migration_throttle", "0")); EXPECT_EQ(0, _rados->conf_set("rbd_mirror_image_policy_type", "simple")); } void TearDown() override { EXPECT_EQ(0, _rados->conf_set("rbd_mirror_image_policy_type", "none")); TestFixture::TearDown(); } void expect_work_queue(MockThreads &mock_threads) { EXPECT_CALL(*mock_threads.work_queue, queue(_, _)) .WillRepeatedly(Invoke([this](Context *ctx, int r) { m_threads->work_queue->queue(ctx, r); })); } void expect_add_event(MockThreads &mock_threads) { EXPECT_CALL(*mock_threads.timer, add_event_after(_,_)) .WillOnce(DoAll(WithArg<1>(Invoke([this](Context *ctx) { auto wrapped_ctx = new LambdaContext([this, ctx](int r) { std::lock_guard timer_locker{m_threads->timer_lock}; ctx->complete(r); }); m_threads->work_queue->queue(wrapped_ctx, 0); })), ReturnArg<1>())); } void expect_rebalance_event(MockThreads &mock_threads) { EXPECT_CALL(*mock_threads.timer, add_event_after(_,_)) .WillOnce(DoAll(WithArg<1>(Invoke([this](Context *ctx) { // disable rebalance so as to not reschedule it again CephContext *cct = reinterpret_cast<CephContext *>(m_local_io_ctx.cct()); cct->_conf.set_val("rbd_mirror_image_policy_rebalance_timeout", "0"); auto wrapped_ctx = new LambdaContext([this, ctx](int r) { std::lock_guard timer_locker{m_threads->timer_lock}; ctx->complete(r); }); m_threads->work_queue->queue(wrapped_ctx, 0); })), ReturnArg<1>())); } void expect_load_request(MockLoadRequest &request, int r) { EXPECT_CALL(request, send()) .WillOnce(Invoke([&request, r]() { request.on_finish->complete(r); })); } void expect_update_request(MockUpdateRequest 
&request, int r) { EXPECT_CALL(request, send()) .WillOnce(Invoke([this, &request, r]() { request.on_finish->complete(r); if (r == 0) { std::lock_guard locker{m_lock}; ++m_map_update_count; m_cond.notify_all(); } })); } void expect_listener_acquire_image(MockListener &mock_listener, const std::string &global_image_id, std::map<std::string, Context*> *peer_ack_ctxs) { EXPECT_CALL(mock_listener, mock_acquire_image(global_image_id, _)) .WillOnce(WithArg<1>(Invoke([this, global_image_id, peer_ack_ctxs](Context* ctx) { std::lock_guard locker{m_lock}; peer_ack_ctxs->insert({global_image_id, ctx}); ++m_notify_update_count; m_cond.notify_all(); }))); } void expect_listener_release_image(MockListener &mock_listener, const std::string &global_image_id, std::map<std::string, Context*> *peer_ack_ctxs) { EXPECT_CALL(mock_listener, mock_release_image(global_image_id, _)) .WillOnce(WithArg<1>(Invoke([this, global_image_id, peer_ack_ctxs](Context* ctx) { std::lock_guard locker{m_lock}; peer_ack_ctxs->insert({global_image_id, ctx}); ++m_notify_update_count; m_cond.notify_all(); }))); } void expect_listener_remove_image(MockListener &mock_listener, const std::string &mirror_uuid, const std::string &global_image_id, std::map<std::string, Context*> *peer_ack_ctxs) { EXPECT_CALL(mock_listener, mock_remove_image(mirror_uuid, global_image_id, _)) .WillOnce(WithArg<2>(Invoke([this, global_image_id, peer_ack_ctxs](Context* ctx) { std::lock_guard locker{m_lock}; peer_ack_ctxs->insert({global_image_id, ctx}); ++m_notify_update_count; m_cond.notify_all(); }))); } void expect_listener_images_unmapped(MockListener &mock_listener, size_t count, std::set<std::string> *global_image_ids, std::map<std::string, Context*> *peer_ack_ctxs) { EXPECT_CALL(mock_listener, mock_release_image(_, _)) .Times(count) .WillRepeatedly(Invoke([this, global_image_ids, peer_ack_ctxs](std::string global_image_id, Context* ctx) { std::lock_guard locker{m_lock}; global_image_ids->emplace(global_image_id); 
peer_ack_ctxs->insert({global_image_id, ctx}); ++m_notify_update_count; m_cond.notify_all(); })); } void remote_peer_ack_nowait(MockImageMap *image_map, const std::set<std::string> &global_image_ids, int ret, std::map<std::string, Context*> *peer_ack_ctxs) { for (auto& global_image_id : global_image_ids) { auto it = peer_ack_ctxs->find(global_image_id); ASSERT_TRUE(it != peer_ack_ctxs->end()); auto ack_ctx = it->second; peer_ack_ctxs->erase(it); ack_ctx->complete(ret); wait_for_scheduled_task(); } } void remote_peer_ack_wait(MockImageMap *image_map, const std::set<std::string> &global_image_ids, int ret, std::map<std::string, Context*> *peer_ack_ctxs) { for (auto& global_image_id : global_image_ids) { auto it = peer_ack_ctxs->find(global_image_id); ASSERT_TRUE(it != peer_ack_ctxs->end()); auto ack_ctx = it->second; peer_ack_ctxs->erase(it); ack_ctx->complete(ret); wait_for_scheduled_task(); ASSERT_TRUE(wait_for_map_update(1)); } } void remote_peer_ack_listener_wait(MockImageMap *image_map, const std::set<std::string> &global_image_ids, int ret, std::map<std::string, Context*> *peer_ack_ctxs) { for (auto& global_image_id : global_image_ids) { auto it = peer_ack_ctxs->find(global_image_id); ASSERT_TRUE(it != peer_ack_ctxs->end()); auto ack_ctx = it->second; peer_ack_ctxs->erase(it); ack_ctx->complete(ret); ASSERT_TRUE(wait_for_map_update(1)); ASSERT_TRUE(wait_for_listener_notify(1)); } } void update_map_and_acquire(MockThreads &mock_threads, MockUpdateRequest &mock_update_request, MockListener &mock_listener, const std::set<std::string> &global_image_ids, int ret, std::map<std::string, Context*> *peer_ack_ctxs) { for (auto const &global_image_id : global_image_ids) { expect_add_event(mock_threads); expect_update_request(mock_update_request, ret); expect_add_event(mock_threads); expect_listener_acquire_image(mock_listener, global_image_id, peer_ack_ctxs); } } void update_map_request(MockThreads &mock_threads, MockUpdateRequest &mock_update_request, const 
std::set<std::string> &global_image_ids, int ret) { for (uint32_t i = 0; i < global_image_ids.size(); ++i) { expect_add_event(mock_threads); expect_update_request(mock_update_request, ret); } } void wait_for_scheduled_task() { m_threads->work_queue->drain(); } bool wait_for_listener_notify(uint32_t count) { std::unique_lock locker{m_lock}; while (m_notify_update_count < count) { if (m_cond.wait_for(locker, 10s) == std::cv_status::timeout) { break; } } if (m_notify_update_count < count) { return false; } m_notify_update_count -= count; return true; } bool wait_for_map_update(uint32_t count) { std::unique_lock locker{m_lock}; while (m_map_update_count < count) { if (m_cond.wait_for(locker, 10s) == std::cv_status::timeout) { break; } } if (m_map_update_count < count) { return false; } m_map_update_count -= count; return true; } int when_shut_down(MockImageMap *image_map) { C_SaferCond ctx; image_map->shut_down(&ctx); return ctx.wait(); } void listener_acquire_images(MockListener &mock_listener, const std::set<std::string> &global_image_ids, std::map<std::string, Context*> *peer_ack_ctxs) { for (auto const &global_image_id : global_image_ids) { expect_listener_acquire_image(mock_listener, global_image_id, peer_ack_ctxs); } } void listener_release_images(MockListener &mock_listener, const std::set<std::string> &global_image_ids, std::map<std::string, Context*> *peer_ack_ctxs) { for (auto const &global_image_id : global_image_ids) { expect_listener_release_image(mock_listener, global_image_id, peer_ack_ctxs); } } void listener_remove_images(MockListener &mock_listener, const std::string &mirror_uuid, std::set<std::string> &global_image_ids, std::map<std::string, Context*> *peer_ack_ctxs) { for (auto const &global_image_id : global_image_ids) { expect_listener_remove_image(mock_listener, mirror_uuid, global_image_id, peer_ack_ctxs); } } ceph::mutex m_lock = ceph::make_mutex("TestMockImageMap::m_lock"); ceph::condition_variable m_cond; uint32_t m_notify_update_count = 0; 
uint32_t m_map_update_count = 0; std::string m_local_instance_id; }; TEST_F(TestMockImageMap, SetLocalImages) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; MockLoadRequest mock_load_request; expect_load_request(mock_load_request, 0); MockListener mock_listener(this); std::unique_ptr<MockImageMap> mock_image_map{ MockImageMap::create(m_local_io_ctx, &mock_threads, m_local_instance_id, mock_listener)}; C_SaferCond cond; mock_image_map->init(&cond); ASSERT_EQ(0, cond.wait()); std::set<std::string> global_image_ids{ "global id 1", "global id 2" }; std::set<std::string> global_image_ids_ack(global_image_ids); // UPDATE_MAPPING+ACQUIRE expect_add_event(mock_threads); MockUpdateRequest mock_update_request; expect_update_request(mock_update_request, 0); expect_add_event(mock_threads); std::map<std::string, Context*> peer_ack_ctxs; listener_acquire_images(mock_listener, global_image_ids, &peer_ack_ctxs); // initial image list mock_image_map->update_images("", std::move(global_image_ids), {}); ASSERT_TRUE(wait_for_map_update(1)); ASSERT_TRUE(wait_for_listener_notify(global_image_ids_ack.size())); // remote peer ACKs image acquire request remote_peer_ack_nowait(mock_image_map.get(), global_image_ids_ack, 0, &peer_ack_ctxs); wait_for_scheduled_task(); ASSERT_EQ(0, when_shut_down(mock_image_map.get())); } TEST_F(TestMockImageMap, AddRemoveLocalImage) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; MockLoadRequest mock_load_request; expect_load_request(mock_load_request, 0); MockListener mock_listener(this); std::unique_ptr<MockImageMap> mock_image_map{ MockImageMap::create(m_local_io_ctx, &mock_threads, m_local_instance_id, mock_listener)}; C_SaferCond cond; mock_image_map->init(&cond); ASSERT_EQ(0, cond.wait()); std::set<std::string> initial_global_image_ids{ "global id 1", "global id 2" }; std::set<std::string> initial_global_image_ids_ack(initial_global_image_ids); std::set<std::string> 
remove_global_image_ids{ "global id 1", "global id 2" }; std::set<std::string> remove_global_image_ids_ack(remove_global_image_ids); // UPDATE_MAPPING+ACQUIRE expect_add_event(mock_threads); MockUpdateRequest mock_update_request; expect_update_request(mock_update_request, 0); expect_add_event(mock_threads); std::map<std::string, Context*> peer_ack_ctxs; listener_acquire_images(mock_listener, initial_global_image_ids, &peer_ack_ctxs); // initial image list mock_image_map->update_images("", std::move(initial_global_image_ids), {}); ASSERT_TRUE(wait_for_map_update(1)); ASSERT_TRUE(wait_for_listener_notify(initial_global_image_ids_ack.size())); // remote peer ACKs image acquire request remote_peer_ack_nowait(mock_image_map.get(), initial_global_image_ids_ack, 0, &peer_ack_ctxs); // RELEASE+REMOVE_MAPPING expect_add_event(mock_threads); listener_release_images(mock_listener, remove_global_image_ids, &peer_ack_ctxs); update_map_request(mock_threads, mock_update_request, remove_global_image_ids, 0); // remove images mock_image_map->update_images("", {}, std::move(remove_global_image_ids)); ASSERT_TRUE(wait_for_listener_notify(remove_global_image_ids_ack.size())); remote_peer_ack_wait(mock_image_map.get(), remove_global_image_ids_ack, 0, &peer_ack_ctxs); wait_for_scheduled_task(); ASSERT_EQ(0, when_shut_down(mock_image_map.get())); } TEST_F(TestMockImageMap, AddRemoveRemoteImage) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; MockLoadRequest mock_load_request; expect_load_request(mock_load_request, 0); MockListener mock_listener(this); std::unique_ptr<MockImageMap> mock_image_map{ MockImageMap::create(m_local_io_ctx, &mock_threads, m_local_instance_id, mock_listener)}; C_SaferCond cond; mock_image_map->init(&cond); ASSERT_EQ(0, cond.wait()); std::set<std::string> initial_global_image_ids{ "global id 1", "global id 2" }; std::set<std::string> initial_global_image_ids_ack(initial_global_image_ids); std::set<std::string> 
remove_global_image_ids{ "global id 1", "global id 2" }; std::set<std::string> remove_global_image_ids_ack(remove_global_image_ids); // UPDATE_MAPPING+ACQUIRE expect_add_event(mock_threads); MockUpdateRequest mock_update_request; expect_update_request(mock_update_request, 0); expect_add_event(mock_threads); std::map<std::string, Context*> peer_ack_ctxs; listener_acquire_images(mock_listener, initial_global_image_ids, &peer_ack_ctxs); // initial image list mock_image_map->update_images("uuid1", std::move(initial_global_image_ids), {}); ASSERT_TRUE(wait_for_map_update(1)); ASSERT_TRUE(wait_for_listener_notify(initial_global_image_ids_ack.size())); // remote peer ACKs image acquire request remote_peer_ack_nowait(mock_image_map.get(), initial_global_image_ids_ack, 0, &peer_ack_ctxs); // RELEASE+REMOVE_MAPPING std::map<std::string, Context*> peer_remove_ack_ctxs; listener_remove_images(mock_listener, "uuid1", remove_global_image_ids, &peer_remove_ack_ctxs); expect_add_event(mock_threads); listener_release_images(mock_listener, remove_global_image_ids, &peer_ack_ctxs); update_map_request(mock_threads, mock_update_request, remove_global_image_ids, 0); // remove images mock_image_map->update_images("uuid1", {}, std::move(remove_global_image_ids)); ASSERT_TRUE(wait_for_listener_notify(remove_global_image_ids_ack.size() * 2)); remote_peer_ack_nowait(mock_image_map.get(), remove_global_image_ids_ack, 0, &peer_remove_ack_ctxs); remote_peer_ack_wait(mock_image_map.get(), remove_global_image_ids_ack, 0, &peer_ack_ctxs); wait_for_scheduled_task(); ASSERT_EQ(0, when_shut_down(mock_image_map.get())); } TEST_F(TestMockImageMap, AddRemoveRemoteImageDuplicateNotification) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; MockLoadRequest mock_load_request; expect_load_request(mock_load_request, 0); MockListener mock_listener(this); std::unique_ptr<MockImageMap> mock_image_map{ MockImageMap::create(m_local_io_ctx, &mock_threads, m_local_instance_id, 
mock_listener)}; C_SaferCond cond; mock_image_map->init(&cond); ASSERT_EQ(0, cond.wait()); std::set<std::string> initial_global_image_ids{ "global id 1", "global id 2" }; std::set<std::string> initial_global_image_ids_dup(initial_global_image_ids); std::set<std::string> initial_global_image_ids_ack(initial_global_image_ids); std::set<std::string> remove_global_image_ids{ "global id 1", "global id 2" }; std::set<std::string> remove_global_image_ids_dup(remove_global_image_ids); std::set<std::string> remove_global_image_ids_ack(remove_global_image_ids); // UPDATE_MAPPING+ACQUIRE expect_add_event(mock_threads); MockUpdateRequest mock_update_request; expect_update_request(mock_update_request, 0); expect_add_event(mock_threads); std::map<std::string, Context*> peer_ack_ctxs; listener_acquire_images(mock_listener, initial_global_image_ids, &peer_ack_ctxs); // initial image list mock_image_map->update_images("uuid1", std::move(initial_global_image_ids), {}); ASSERT_TRUE(wait_for_map_update(1)); ASSERT_TRUE(wait_for_listener_notify(initial_global_image_ids_ack.size())); // trigger duplicate "add" event wait_for_scheduled_task(); mock_image_map->update_images("uuid1", std::move(initial_global_image_ids_dup), {}); // remote peer ACKs image acquire request remote_peer_ack_nowait(mock_image_map.get(), initial_global_image_ids_ack, 0, &peer_ack_ctxs); // RELEASE+REMOVE_MAPPING std::map<std::string, Context*> peer_remove_ack_ctxs; listener_remove_images(mock_listener, "uuid1", remove_global_image_ids, &peer_remove_ack_ctxs); expect_add_event(mock_threads); listener_release_images(mock_listener, remove_global_image_ids, &peer_ack_ctxs); update_map_request(mock_threads, mock_update_request, remove_global_image_ids, 0); // remove images mock_image_map->update_images("uuid1", {}, std::move(remove_global_image_ids)); ASSERT_TRUE(wait_for_listener_notify(remove_global_image_ids_ack.size() * 2)); remote_peer_ack_nowait(mock_image_map.get(), remove_global_image_ids_ack, 0, 
&peer_remove_ack_ctxs); remote_peer_ack_wait(mock_image_map.get(), remove_global_image_ids_ack, 0, &peer_ack_ctxs); // trigger duplicate "remove" notification mock_image_map->update_images("uuid1", {}, std::move(remove_global_image_ids_dup)); wait_for_scheduled_task(); ASSERT_EQ(0, when_shut_down(mock_image_map.get())); } TEST_F(TestMockImageMap, AcquireImageErrorRetry) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; MockLoadRequest mock_load_request; expect_load_request(mock_load_request, 0); MockListener mock_listener(this); std::unique_ptr<MockImageMap> mock_image_map{ MockImageMap::create(m_local_io_ctx, &mock_threads, m_local_instance_id, mock_listener)}; C_SaferCond cond; mock_image_map->init(&cond); ASSERT_EQ(0, cond.wait()); std::set<std::string> initial_global_image_ids{ "global id 1", "global id 2" }; std::set<std::string> initial_global_image_ids_ack(initial_global_image_ids); // UPDATE_MAPPING failure expect_add_event(mock_threads); MockUpdateRequest mock_update_request; expect_update_request(mock_update_request, -EIO); // UPDATE_MAPPING+ACQUIRE expect_add_event(mock_threads); expect_update_request(mock_update_request, 0); expect_add_event(mock_threads); std::map<std::string, Context*> peer_ack_ctxs; listener_acquire_images(mock_listener, initial_global_image_ids, &peer_ack_ctxs); // initial image list mock_image_map->update_images("uuid1", std::move(initial_global_image_ids), {}); ASSERT_TRUE(wait_for_map_update(1)); ASSERT_TRUE(wait_for_listener_notify(initial_global_image_ids_ack.size())); // remote peer ACKs image acquire request remote_peer_ack_nowait(mock_image_map.get(), initial_global_image_ids_ack, 0, &peer_ack_ctxs); wait_for_scheduled_task(); ASSERT_EQ(0, when_shut_down(mock_image_map.get())); } TEST_F(TestMockImageMap, RemoveRemoteAndLocalImage) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; MockLoadRequest mock_load_request; 
expect_load_request(mock_load_request, 0); MockListener mock_listener(this); std::unique_ptr<MockImageMap> mock_image_map{ MockImageMap::create(m_local_io_ctx, &mock_threads, m_local_instance_id, mock_listener)}; C_SaferCond cond; mock_image_map->init(&cond); ASSERT_EQ(0, cond.wait()); // remote image set std::set<std::string> initial_remote_global_image_ids{ "global id 1" }; std::set<std::string> initial_remote_global_image_ids_ack(initial_remote_global_image_ids); // local image set std::set<std::string> initial_local_global_image_ids{ "global id 1" }; // remote/local images to remove std::set<std::string> remote_remove_global_image_ids{ "global id 1" }; std::set<std::string> remote_remove_global_image_ids_ack(remote_remove_global_image_ids); std::set<std::string> local_remove_global_image_ids{ "global id 1" }; std::set<std::string> local_remove_global_image_ids_ack(local_remove_global_image_ids); // UPDATE_MAPPING+ACQUIRE expect_add_event(mock_threads); MockUpdateRequest mock_update_request; expect_update_request(mock_update_request, 0); expect_add_event(mock_threads); std::map<std::string, Context*> peer_ack_ctxs; listener_acquire_images(mock_listener, initial_remote_global_image_ids, &peer_ack_ctxs); // initial remote image list mock_image_map->update_images("uuid1", std::move(initial_remote_global_image_ids), {}); ASSERT_TRUE(wait_for_map_update(1)); ASSERT_TRUE(wait_for_listener_notify(initial_remote_global_image_ids_ack.size())); // remote peer ACKs image acquire request remote_peer_ack_nowait(mock_image_map.get(), initial_remote_global_image_ids_ack, 0, &peer_ack_ctxs); // set initial local image list -- this is a no-op from policy pov mock_image_map->update_images("", std::move(initial_local_global_image_ids), {}); // remove remote images -- this should be a no-op from policy pov // except the listener notification std::map<std::string, Context*> peer_ack_remove_ctxs; listener_remove_images(mock_listener, "uuid1", remote_remove_global_image_ids, 
&peer_ack_remove_ctxs); mock_image_map->update_images("uuid1", {}, std::move(remote_remove_global_image_ids)); ASSERT_TRUE(wait_for_listener_notify(remote_remove_global_image_ids_ack.size())); // RELEASE+REMOVE_MAPPING expect_add_event(mock_threads); listener_release_images(mock_listener, local_remove_global_image_ids, &peer_ack_ctxs); update_map_request(mock_threads, mock_update_request, local_remove_global_image_ids, 0); // remove local images mock_image_map->update_images("", {}, std::move(local_remove_global_image_ids)); ASSERT_TRUE(wait_for_listener_notify(local_remove_global_image_ids_ack.size())); remote_peer_ack_nowait(mock_image_map.get(), local_remove_global_image_ids_ack, 0, &peer_ack_remove_ctxs); remote_peer_ack_wait(mock_image_map.get(), local_remove_global_image_ids_ack, 0, &peer_ack_ctxs); wait_for_scheduled_task(); ASSERT_EQ(0, when_shut_down(mock_image_map.get())); } TEST_F(TestMockImageMap, AddInstance) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; MockLoadRequest mock_load_request; expect_load_request(mock_load_request, 0); MockListener mock_listener(this); std::unique_ptr<MockImageMap> mock_image_map{ MockImageMap::create(m_local_io_ctx, &mock_threads, m_local_instance_id, mock_listener)}; C_SaferCond cond; mock_image_map->init(&cond); ASSERT_EQ(0, cond.wait()); std::set<std::string> global_image_ids{ "global id 1", "global id 2", "global id 3", "global id 4", "global id 5" }; std::set<std::string> global_image_ids_ack(global_image_ids); // UPDATE_MAPPING+ACQUIRE expect_add_event(mock_threads); MockUpdateRequest mock_update_request; expect_update_request(mock_update_request, 0); expect_add_event(mock_threads); std::map<std::string, Context*> peer_ack_ctxs; listener_acquire_images(mock_listener, global_image_ids, &peer_ack_ctxs); // initial image list mock_image_map->update_images("uuid1", std::move(global_image_ids), {}); ASSERT_TRUE(wait_for_map_update(1)); 
ASSERT_TRUE(wait_for_listener_notify(global_image_ids_ack.size())); // remote peer ACKs image acquire request remote_peer_ack_nowait(mock_image_map.get(), global_image_ids_ack, 0, &peer_ack_ctxs); wait_for_scheduled_task(); mock_image_map->update_instances_added({m_local_instance_id}); std::set<std::string> shuffled_global_image_ids; // RELEASE+UPDATE_MAPPING+ACQUIRE expect_add_event(mock_threads); expect_listener_images_unmapped(mock_listener, 3, &shuffled_global_image_ids, &peer_ack_ctxs); mock_image_map->update_instances_added({"9876"}); wait_for_scheduled_task(); ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids.size())); update_map_and_acquire(mock_threads, mock_update_request, mock_listener, shuffled_global_image_ids, 0, &peer_ack_ctxs); remote_peer_ack_listener_wait(mock_image_map.get(), shuffled_global_image_ids, 0, &peer_ack_ctxs); // completion shuffle action for now (re)mapped images remote_peer_ack_nowait(mock_image_map.get(), shuffled_global_image_ids, 0, &peer_ack_ctxs); wait_for_scheduled_task(); ASSERT_EQ(0, when_shut_down(mock_image_map.get())); } TEST_F(TestMockImageMap, RemoveInstance) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; MockLoadRequest mock_load_request; expect_load_request(mock_load_request, 0); MockListener mock_listener(this); std::unique_ptr<MockImageMap> mock_image_map{ MockImageMap::create(m_local_io_ctx, &mock_threads, m_local_instance_id, mock_listener)}; C_SaferCond cond; mock_image_map->init(&cond); ASSERT_EQ(0, cond.wait()); std::set<std::string> global_image_ids{ "global id 1", "global id 2", "global id 3", "global id 4", "global id 5" }; std::set<std::string> global_image_ids_ack(global_image_ids); expect_add_event(mock_threads); // UPDATE_MAPPING+ACQUIRE MockUpdateRequest mock_update_request; expect_update_request(mock_update_request, 0); expect_add_event(mock_threads); std::map<std::string, Context*> peer_ack_ctxs; listener_acquire_images(mock_listener, 
global_image_ids, &peer_ack_ctxs); // set initial image list mock_image_map->update_images("uuid1", std::move(global_image_ids), {}); ASSERT_TRUE(wait_for_map_update(1)); ASSERT_TRUE(wait_for_listener_notify(global_image_ids_ack.size())); // remote peer ACKs image acquire request -- completing action remote_peer_ack_nowait(mock_image_map.get(), global_image_ids_ack, 0, &peer_ack_ctxs); wait_for_scheduled_task(); mock_image_map->update_instances_added({m_local_instance_id}); std::set<std::string> shuffled_global_image_ids; // RELEASE+UPDATE_MAPPING+ACQUIRE expect_add_event(mock_threads); expect_listener_images_unmapped(mock_listener, 3, &shuffled_global_image_ids, &peer_ack_ctxs); mock_image_map->update_instances_added({"9876"}); wait_for_scheduled_task(); ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids.size())); update_map_and_acquire(mock_threads, mock_update_request, mock_listener, shuffled_global_image_ids, 0, &peer_ack_ctxs); remote_peer_ack_listener_wait(mock_image_map.get(), shuffled_global_image_ids, 0, &peer_ack_ctxs); // completion shuffle action for now (re)mapped images remote_peer_ack_nowait(mock_image_map.get(), shuffled_global_image_ids, 0, &peer_ack_ctxs); wait_for_scheduled_task(); shuffled_global_image_ids.clear(); // remove added instance expect_add_event(mock_threads); expect_listener_images_unmapped(mock_listener, 2, &shuffled_global_image_ids, &peer_ack_ctxs); mock_image_map->update_instances_removed({"9876"}); wait_for_scheduled_task(); ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids.size())); update_map_and_acquire(mock_threads, mock_update_request, mock_listener, shuffled_global_image_ids, 0, &peer_ack_ctxs); remote_peer_ack_listener_wait(mock_image_map.get(), shuffled_global_image_ids, 0, &peer_ack_ctxs); // completion shuffle action for now (re)mapped images remote_peer_ack_nowait(mock_image_map.get(), shuffled_global_image_ids, 0, &peer_ack_ctxs); wait_for_scheduled_task(); ASSERT_EQ(0, 
when_shut_down(mock_image_map.get())); } TEST_F(TestMockImageMap, AddInstancePingPongImageTest) { EXPECT_EQ(0, _rados->conf_set("rbd_mirror_image_policy_migration_throttle", "600")); MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; std::set<std::string> global_image_ids{ "global id 1", "global id 2", "global id 3", "global id 4", "global id 5", "global id 6", "global id 7", "global id 8", "global id 9", "global id 10", "global id 11", "global id 12", "global id 13", "global id 14" }; std::map<std::string, cls::rbd::MirrorImageMap> image_mapping; for (auto& global_image_id : global_image_ids) { image_mapping[global_image_id] = {m_local_instance_id, {}, {}}; } // ACQUIRE MockLoadRequest mock_load_request; EXPECT_CALL(mock_load_request, send()).WillOnce( Invoke([&mock_load_request, &image_mapping]() { *mock_load_request.image_map = image_mapping; mock_load_request.on_finish->complete(0); })); expect_add_event(mock_threads); MockListener mock_listener(this); std::map<std::string, Context*> peer_ack_ctxs; listener_acquire_images(mock_listener, global_image_ids, &peer_ack_ctxs); std::unique_ptr<MockImageMap> mock_image_map{ MockImageMap::create(m_local_io_ctx, &mock_threads, m_local_instance_id, mock_listener)}; C_SaferCond cond; mock_image_map->init(&cond); ASSERT_EQ(0, cond.wait()); mock_image_map->update_instances_added({m_local_instance_id}); std::set<std::string> global_image_ids_ack(global_image_ids); // remote peer ACKs image acquire request -- completing action ASSERT_TRUE(wait_for_listener_notify(global_image_ids_ack.size())); remote_peer_ack_nowait(mock_image_map.get(), global_image_ids_ack, 0, &peer_ack_ctxs); wait_for_scheduled_task(); // RELEASE+UPDATE_MAPPING+ACQUIRE expect_add_event(mock_threads); MockUpdateRequest mock_update_request; expect_update_request(mock_update_request, 0); expect_add_event(mock_threads); listener_acquire_images(mock_listener, global_image_ids, &peer_ack_ctxs); // set initial image list 
mock_image_map->update_images("uuid1", std::move(global_image_ids), {}); ASSERT_TRUE(wait_for_map_update(1)); ASSERT_TRUE(wait_for_listener_notify(global_image_ids_ack.size())); // remote peer ACKs image acquire request -- completing action remote_peer_ack_nowait(mock_image_map.get(), global_image_ids_ack, 0, &peer_ack_ctxs); wait_for_scheduled_task(); std::set<std::string> shuffled_global_image_ids; // RELEASE+UPDATE_MAPPING+ACQUIRE expect_add_event(mock_threads); expect_listener_images_unmapped(mock_listener, 7, &shuffled_global_image_ids, &peer_ack_ctxs); mock_image_map->update_instances_added({"9876"}); wait_for_scheduled_task(); ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids.size())); update_map_and_acquire(mock_threads, mock_update_request, mock_listener, shuffled_global_image_ids, 0, &peer_ack_ctxs); remote_peer_ack_listener_wait(mock_image_map.get(), shuffled_global_image_ids, 0, &peer_ack_ctxs); // completion shuffle action for now (re)mapped images remote_peer_ack_nowait(mock_image_map.get(), shuffled_global_image_ids, 0, &peer_ack_ctxs); wait_for_scheduled_task(); std::set<std::string> migrated_global_image_ids(shuffled_global_image_ids); shuffled_global_image_ids.clear(); // RELEASE+UPDATE_MAPPING+ACQUIRE expect_add_event(mock_threads); expect_listener_images_unmapped(mock_listener, 3, &shuffled_global_image_ids, &peer_ack_ctxs); // add another instance mock_image_map->update_instances_added({"5432"}); wait_for_scheduled_task(); ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids.size())); update_map_and_acquire(mock_threads, mock_update_request, mock_listener, shuffled_global_image_ids, 0, &peer_ack_ctxs); remote_peer_ack_listener_wait(mock_image_map.get(), shuffled_global_image_ids, 0, &peer_ack_ctxs); // completion shuffle action for now (re)mapped images remote_peer_ack_nowait(mock_image_map.get(), shuffled_global_image_ids, 0, &peer_ack_ctxs); // shuffle set should be distinct std::set<std::string> reshuffled; 
std::set_intersection(migrated_global_image_ids.begin(), migrated_global_image_ids.end(), shuffled_global_image_ids.begin(), shuffled_global_image_ids.end(), std::inserter(reshuffled, reshuffled.begin())); ASSERT_TRUE(reshuffled.empty()); wait_for_scheduled_task(); ASSERT_EQ(0, when_shut_down(mock_image_map.get())); } TEST_F(TestMockImageMap, RemoveInstanceWithRemoveImage) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; MockLoadRequest mock_load_request; expect_load_request(mock_load_request, 0); MockListener mock_listener(this); std::unique_ptr<MockImageMap> mock_image_map{ MockImageMap::create(m_local_io_ctx, &mock_threads, m_local_instance_id, mock_listener)}; C_SaferCond cond; mock_image_map->init(&cond); ASSERT_EQ(0, cond.wait()); std::set<std::string> global_image_ids{ "global id 1", "global id 2", "global id 3", "remote id 4", }; std::set<std::string> global_image_ids_ack(global_image_ids); std::set<std::string> remove_global_image_ids{ "global id 1" }; std::set<std::string> remove_global_image_ids_ack(remove_global_image_ids); expect_add_event(mock_threads); // UPDATE_MAPPING+ACQUIRE MockUpdateRequest mock_update_request; expect_update_request(mock_update_request, 0); expect_add_event(mock_threads); std::map<std::string, Context*> peer_ack_ctxs; listener_acquire_images(mock_listener, global_image_ids, &peer_ack_ctxs); // initial image list mock_image_map->update_images("uuid1", std::move(global_image_ids), {}); ASSERT_TRUE(wait_for_map_update(1)); ASSERT_TRUE(wait_for_listener_notify(global_image_ids_ack.size())); remote_peer_ack_nowait(mock_image_map.get(), global_image_ids_ack, 0, &peer_ack_ctxs); wait_for_scheduled_task(); mock_image_map->update_instances_added({m_local_instance_id}); std::set<std::string> shuffled_global_image_ids; // RELEASE+UPDATE_MAPPING+ACQUIRE expect_add_event(mock_threads); expect_listener_images_unmapped(mock_listener, 2, &shuffled_global_image_ids, &peer_ack_ctxs); 
mock_image_map->update_instances_added({"9876"}); wait_for_scheduled_task(); ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids.size())); update_map_and_acquire(mock_threads, mock_update_request, mock_listener, shuffled_global_image_ids, 0, &peer_ack_ctxs); remote_peer_ack_listener_wait(mock_image_map.get(), shuffled_global_image_ids, 0, &peer_ack_ctxs); // completion shuffle action for now (re)mapped images remote_peer_ack_nowait(mock_image_map.get(), shuffled_global_image_ids, 0, &peer_ack_ctxs); wait_for_scheduled_task(); std::set<std::string> shuffled_global_image_ids_ack(shuffled_global_image_ids); // RELEASE std::map<std::string, Context*> peer_ack_remove_ctxs; listener_remove_images(mock_listener, "uuid1", shuffled_global_image_ids, &peer_ack_remove_ctxs); expect_add_event(mock_threads); listener_release_images(mock_listener, shuffled_global_image_ids, &peer_ack_ctxs); expect_add_event(mock_threads); expect_update_request(mock_update_request, 0); expect_add_event(mock_threads); expect_update_request(mock_update_request, 0); mock_image_map->update_images("uuid1", {}, std::move(shuffled_global_image_ids)); ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids_ack.size() * 2)); // instance failed -- update policy for instance removal mock_image_map->update_instances_removed({"9876"}); remote_peer_ack_nowait(mock_image_map.get(), shuffled_global_image_ids, -ENOENT, &peer_ack_remove_ctxs); remote_peer_ack_wait(mock_image_map.get(), shuffled_global_image_ids, -EBLOCKLISTED, &peer_ack_ctxs); wait_for_scheduled_task(); ASSERT_EQ(0, when_shut_down(mock_image_map.get())); } TEST_F(TestMockImageMap, AddErrorAndRemoveImage) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; MockLoadRequest mock_load_request; expect_load_request(mock_load_request, 0); MockListener mock_listener(this); std::unique_ptr<MockImageMap> mock_image_map{ MockImageMap::create(m_local_io_ctx, &mock_threads, m_local_instance_id, 
mock_listener)}; C_SaferCond cond; mock_image_map->init(&cond); ASSERT_EQ(0, cond.wait()); mock_image_map->update_instances_added({m_local_instance_id}); std::set<std::string> global_image_ids{ "global id 1", "global id 2", "global id 3", "remote id 4", }; std::set<std::string> global_image_ids_ack(global_image_ids); // UPDATE_MAPPING+ACQUIRE expect_add_event(mock_threads); MockUpdateRequest mock_update_request; expect_update_request(mock_update_request, 0); expect_add_event(mock_threads); std::map<std::string, Context*> peer_ack_ctxs; listener_acquire_images(mock_listener, global_image_ids, &peer_ack_ctxs); // initial image list mock_image_map->update_images("uuid1", std::move(global_image_ids), {}); ASSERT_TRUE(wait_for_map_update(1)); ASSERT_TRUE(wait_for_listener_notify(global_image_ids_ack.size())); // remote peer ACKs image acquire request remote_peer_ack_nowait(mock_image_map.get(), global_image_ids_ack, 0, &peer_ack_ctxs); wait_for_scheduled_task(); std::set<std::string> shuffled_global_image_ids; // RELEASE+UPDATE_MAPPING+ACQUIRE expect_add_event(mock_threads); expect_listener_images_unmapped(mock_listener, 2, &shuffled_global_image_ids, &peer_ack_ctxs); mock_image_map->update_instances_added({"9876"}); wait_for_scheduled_task(); ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids.size())); update_map_and_acquire(mock_threads, mock_update_request, mock_listener, shuffled_global_image_ids, 0, &peer_ack_ctxs); remote_peer_ack_listener_wait(mock_image_map.get(), shuffled_global_image_ids, 0, &peer_ack_ctxs); wait_for_scheduled_task(); mock_image_map->update_instances_removed({"9876"}); std::set<std::string> released_global_image_ids; std::map<std::string, Context*> release_peer_ack_ctxs; expect_add_event(mock_threads); expect_listener_images_unmapped(mock_listener, 1, &released_global_image_ids, &release_peer_ack_ctxs); expect_add_event(mock_threads); expect_listener_images_unmapped(mock_listener, 1, &released_global_image_ids, 
&release_peer_ack_ctxs); // instance blocklisted -- ACQUIRE request fails remote_peer_ack_nowait(mock_image_map.get(), shuffled_global_image_ids, -EBLOCKLISTED, &peer_ack_ctxs); ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids.size())); std::map<std::string, Context*> remap_peer_ack_ctxs; update_map_and_acquire(mock_threads, mock_update_request, mock_listener, shuffled_global_image_ids, 0, &remap_peer_ack_ctxs); // instance blocklisted -- RELEASE request fails remote_peer_ack_listener_wait(mock_image_map.get(), shuffled_global_image_ids, -ENOENT, &release_peer_ack_ctxs); wait_for_scheduled_task(); // new peer acks acquire request remote_peer_ack_nowait(mock_image_map.get(), shuffled_global_image_ids, 0, &remap_peer_ack_ctxs); wait_for_scheduled_task(); std::set<std::string> shuffled_global_image_ids_ack(shuffled_global_image_ids); // remove image std::map<std::string, Context*> peer_ack_remove_ctxs; listener_remove_images(mock_listener, "uuid1", shuffled_global_image_ids, &peer_ack_remove_ctxs); expect_add_event(mock_threads); listener_release_images(mock_listener, shuffled_global_image_ids, &peer_ack_ctxs); update_map_request(mock_threads, mock_update_request, shuffled_global_image_ids, 0); mock_image_map->update_images("uuid1", {}, std::move(shuffled_global_image_ids)); ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids_ack.size() * 2)); remote_peer_ack_nowait(mock_image_map.get(), shuffled_global_image_ids_ack, 0, &peer_ack_remove_ctxs); remote_peer_ack_wait(mock_image_map.get(), shuffled_global_image_ids_ack, 0, &peer_ack_ctxs); wait_for_scheduled_task(); ASSERT_EQ(0, when_shut_down(mock_image_map.get())); } TEST_F(TestMockImageMap, MirrorUUIDUpdated) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; MockLoadRequest mock_load_request; expect_load_request(mock_load_request, 0); MockListener mock_listener(this); std::unique_ptr<MockImageMap> mock_image_map{ MockImageMap::create(m_local_io_ctx, 
&mock_threads, m_local_instance_id, mock_listener)}; C_SaferCond cond; mock_image_map->init(&cond); ASSERT_EQ(0, cond.wait()); // remote image set std::set<std::string> initial_remote_global_image_ids{ "global id 1", "global id 2", "global id 3" }; std::set<std::string> initial_remote_global_image_ids_ack(initial_remote_global_image_ids); // remote/local images to remove std::set<std::string> remote_removed_global_image_ids{ "global id 1", "global id 2", "global id 3" }; std::set<std::string> remote_removed_global_image_ids_ack(remote_removed_global_image_ids); std::set<std::string> remote_added_global_image_ids{ "global id 1", "global id 2", "global id 3" }; std::set<std::string> remote_added_global_image_ids_ack(remote_added_global_image_ids); // UPDATE_MAPPING+ACQUIRE expect_add_event(mock_threads); MockUpdateRequest mock_update_request; expect_update_request(mock_update_request, 0); expect_add_event(mock_threads); std::map<std::string, Context*> peer_ack_ctxs; listener_acquire_images(mock_listener, initial_remote_global_image_ids, &peer_ack_ctxs); // initial remote image list mock_image_map->update_images("uuid1", std::move(initial_remote_global_image_ids), {}); ASSERT_TRUE(wait_for_map_update(1)); ASSERT_TRUE(wait_for_listener_notify(initial_remote_global_image_ids_ack.size())); // remote peer ACKs image acquire request remote_peer_ack_nowait(mock_image_map.get(), initial_remote_global_image_ids_ack, 0, &peer_ack_ctxs); wait_for_scheduled_task(); // RELEASE+REMOVE_MAPPING std::map<std::string, Context*> peer_remove_ack_ctxs; listener_remove_images(mock_listener, "uuid1", remote_removed_global_image_ids, &peer_remove_ack_ctxs); expect_add_event(mock_threads); listener_release_images(mock_listener, remote_removed_global_image_ids, &peer_ack_ctxs); update_map_request(mock_threads, mock_update_request, remote_removed_global_image_ids, 0); mock_image_map->update_images("uuid1", {}, std::move(remote_removed_global_image_ids)); 
ASSERT_TRUE(wait_for_listener_notify(remote_removed_global_image_ids_ack.size() * 2)); remote_peer_ack_nowait(mock_image_map.get(), remote_removed_global_image_ids_ack, 0, &peer_remove_ack_ctxs); remote_peer_ack_wait(mock_image_map.get(), remote_removed_global_image_ids_ack, 0, &peer_ack_ctxs); // UPDATE_MAPPING+ACQUIRE expect_add_event(mock_threads); expect_update_request(mock_update_request, 0); expect_add_event(mock_threads); listener_acquire_images(mock_listener, remote_added_global_image_ids, &peer_ack_ctxs); mock_image_map->update_images("uuid2", std::move(remote_added_global_image_ids), {}); ASSERT_TRUE(wait_for_map_update(1)); ASSERT_TRUE(wait_for_listener_notify(remote_added_global_image_ids_ack.size())); // remote peer ACKs image acquire request remote_peer_ack_nowait(mock_image_map.get(), remote_added_global_image_ids_ack, 0, &peer_ack_ctxs); wait_for_scheduled_task(); ASSERT_EQ(0, when_shut_down(mock_image_map.get())); } TEST_F(TestMockImageMap, RebalanceImageMap) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; MockLoadRequest mock_load_request; expect_load_request(mock_load_request, 0); MockListener mock_listener(this); std::unique_ptr<MockImageMap> mock_image_map{ MockImageMap::create(m_local_io_ctx, &mock_threads, m_local_instance_id, mock_listener)}; C_SaferCond cond; mock_image_map->init(&cond); ASSERT_EQ(0, cond.wait()); std::set<std::string> global_image_ids{ "global id 1", "global id 2", "global id 3", "global id 4", "global id 5", "global id 6", "global id 7", "global id 8", "global id 9", "global id 10", }; std::set<std::string> global_image_ids_ack(global_image_ids); // UPDATE_MAPPING+ACQUIRE expect_add_event(mock_threads); MockUpdateRequest mock_update_request; expect_update_request(mock_update_request, 0); expect_add_event(mock_threads); std::map<std::string, Context*> peer_ack_ctxs; listener_acquire_images(mock_listener, global_image_ids, &peer_ack_ctxs); // initial image list 
mock_image_map->update_images("", std::move(global_image_ids), {}); ASSERT_TRUE(wait_for_map_update(1)); ASSERT_TRUE(wait_for_listener_notify(global_image_ids_ack.size())); // remote peer ACKs image acquire request remote_peer_ack_nowait(mock_image_map.get(), global_image_ids_ack, 0, &peer_ack_ctxs); wait_for_scheduled_task(); mock_image_map->update_instances_added({m_local_instance_id}); std::set<std::string> shuffled_global_image_ids; // RELEASE+UPDATE_MAPPING+ACQUIRE expect_add_event(mock_threads); expect_listener_images_unmapped(mock_listener, 5, &shuffled_global_image_ids, &peer_ack_ctxs); mock_image_map->update_instances_added({"9876"}); wait_for_scheduled_task(); ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids.size())); update_map_and_acquire(mock_threads, mock_update_request, mock_listener, shuffled_global_image_ids, 0, &peer_ack_ctxs); remote_peer_ack_listener_wait(mock_image_map.get(), shuffled_global_image_ids, 0, &peer_ack_ctxs); // completion shuffle action for now (re)mapped images remote_peer_ack_nowait(mock_image_map.get(), shuffled_global_image_ids, 0, &peer_ack_ctxs); wait_for_scheduled_task(); // remove all shuffled images -- make way for rebalance std::set<std::string> shuffled_global_image_ids_ack(shuffled_global_image_ids); // RELEASE+REMOVE_MAPPING expect_add_event(mock_threads); listener_release_images(mock_listener, shuffled_global_image_ids, &peer_ack_ctxs); update_map_request(mock_threads, mock_update_request, shuffled_global_image_ids, 0); mock_image_map->update_images("", {}, std::move(shuffled_global_image_ids)); ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids_ack.size())); remote_peer_ack_wait(mock_image_map.get(), shuffled_global_image_ids_ack, 0, &peer_ack_ctxs); wait_for_scheduled_task(); shuffled_global_image_ids.clear(); shuffled_global_image_ids_ack.clear(); std::set<std::string> new_global_image_ids = { "global id 11" }; std::set<std::string> new_global_image_ids_ack(new_global_image_ids); 
expect_add_event(mock_threads); expect_update_request(mock_update_request, 0); expect_add_event(mock_threads); listener_acquire_images(mock_listener, new_global_image_ids, &peer_ack_ctxs); expect_rebalance_event(mock_threads); // rebalance task expect_add_event(mock_threads); // update task scheduled by // rebalance task expect_listener_images_unmapped(mock_listener, 2, &shuffled_global_image_ids, &peer_ack_ctxs); mock_image_map->update_images("", std::move(new_global_image_ids), {}); ASSERT_TRUE(wait_for_map_update(1)); ASSERT_TRUE(wait_for_listener_notify(new_global_image_ids_ack.size())); // set rebalance interval CephContext *cct = reinterpret_cast<CephContext *>(m_local_io_ctx.cct()); cct->_conf.set_val("rbd_mirror_image_policy_rebalance_timeout", "5"); remote_peer_ack_nowait(mock_image_map.get(), new_global_image_ids_ack, 0, &peer_ack_ctxs); wait_for_scheduled_task(); ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids.size())); update_map_and_acquire(mock_threads, mock_update_request, mock_listener, shuffled_global_image_ids, 0, &peer_ack_ctxs); remote_peer_ack_listener_wait(mock_image_map.get(), shuffled_global_image_ids, 0, &peer_ack_ctxs); // completion shuffle action for now (re)mapped images remote_peer_ack_nowait(mock_image_map.get(), shuffled_global_image_ids, 0, &peer_ack_ctxs); wait_for_scheduled_task(); ASSERT_EQ(0, when_shut_down(mock_image_map.get())); } } // namespace mirror } // namespace rbd
57,432
35.166877
113
cc
null
ceph-main/src/test/rbd_mirror/test_mock_ImageReplayer.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "cls/journal/cls_journal_types.h" #include "librbd/journal/Types.h" #include "librbd/journal/TypeTraits.h" #include "tools/rbd_mirror/ImageDeleter.h" #include "tools/rbd_mirror/ImageReplayer.h" #include "tools/rbd_mirror/InstanceWatcher.h" #include "tools/rbd_mirror/MirrorStatusUpdater.h" #include "tools/rbd_mirror/Threads.h" #include "tools/rbd_mirror/image_replayer/BootstrapRequest.h" #include "tools/rbd_mirror/image_replayer/Replayer.h" #include "tools/rbd_mirror/image_replayer/ReplayerListener.h" #include "tools/rbd_mirror/image_replayer/StateBuilder.h" #include "tools/rbd_mirror/image_replayer/Utils.h" #include "test/rbd_mirror/test_mock_fixture.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/rbd_mirror/mock/MockContextWQ.h" #include "test/rbd_mirror/mock/MockSafeTimer.h" namespace librbd { namespace { struct MockTestImageCtx : public MockImageCtx { MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace } // namespace librbd namespace rbd { namespace mirror { template <> struct ImageDeleter<librbd::MockTestImageCtx> { static ImageDeleter* s_instance; static void trash_move(librados::IoCtx& local_io_ctx, const std::string& global_image_id, bool resync, MockContextWQ* work_queue, Context* on_finish) { ceph_assert(s_instance != nullptr); s_instance->trash_move(global_image_id, resync, on_finish); } MOCK_METHOD3(trash_move, void(const std::string&, bool, Context*)); ImageDeleter() { s_instance = this; } }; ImageDeleter<librbd::MockTestImageCtx>* ImageDeleter<librbd::MockTestImageCtx>::s_instance = nullptr; template <> struct MirrorStatusUpdater<librbd::MockTestImageCtx> { MOCK_METHOD1(exists, bool(const std::string&)); MOCK_METHOD3(set_mirror_image_status, void(const std::string&, const cls::rbd::MirrorImageSiteStatus&, bool)); MOCK_METHOD2(remove_refresh_mirror_image_status, void(const 
std::string&, Context*)); MOCK_METHOD3(remove_mirror_image_status, void(const std::string&, bool, Context*)); }; template <> struct Threads<librbd::MockTestImageCtx> { MockSafeTimer *timer; ceph::mutex &timer_lock; MockContextWQ *work_queue; Threads(Threads<librbd::ImageCtx> *threads) : timer(new MockSafeTimer()), timer_lock(threads->timer_lock), work_queue(new MockContextWQ()) { } ~Threads() { delete timer; delete work_queue; } }; template<> class InstanceWatcher<librbd::MockTestImageCtx> { }; namespace image_replayer { template<> struct BootstrapRequest<librbd::MockTestImageCtx> { static BootstrapRequest* s_instance; StateBuilder<librbd::MockTestImageCtx>** state_builder = nullptr; bool *do_resync = nullptr; Context *on_finish = nullptr; static BootstrapRequest* create( Threads<librbd::MockTestImageCtx>* threads, librados::IoCtx &local_io_ctx, librados::IoCtx& remote_io_ctx, rbd::mirror::InstanceWatcher<librbd::MockTestImageCtx> *instance_watcher, const std::string &global_image_id, const std::string &local_mirror_uuid, const RemotePoolMeta& remote_pool_meta, ::journal::CacheManagerHandler *cache_manager_handler, PoolMetaCache* pool_meta_cache, rbd::mirror::ProgressContext *progress_ctx, StateBuilder<librbd::MockTestImageCtx>** state_builder, bool *do_resync, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->state_builder = state_builder; s_instance->do_resync = do_resync; s_instance->on_finish = on_finish; return s_instance; } BootstrapRequest() { ceph_assert(s_instance == nullptr); s_instance = this; } ~BootstrapRequest() { ceph_assert(s_instance == this); s_instance = nullptr; } void put() { } void get() { } std::string get_local_image_name() const { return "local image name"; } inline bool is_syncing() const { return false; } MOCK_METHOD0(send, void()); MOCK_METHOD0(cancel, void()); }; struct MockReplayer : public Replayer { image_replayer::ReplayerListener* replayer_listener; MOCK_METHOD0(destroy, void()); MOCK_METHOD1(init, 
void(Context*)); MOCK_METHOD1(shut_down, void(Context*)); MOCK_METHOD1(flush, void(Context*)); MOCK_METHOD2(get_replay_status, bool(std::string*, Context*)); MOCK_CONST_METHOD0(is_replaying, bool()); MOCK_CONST_METHOD0(is_resync_requested, bool()); MOCK_CONST_METHOD0(get_error_code, int()); MOCK_CONST_METHOD0(get_error_description, std::string()); }; template <> struct StateBuilder<librbd::MockTestImageCtx> { static StateBuilder* s_instance; librbd::MockTestImageCtx* local_image_ctx = nullptr; std::string local_image_id; std::string remote_image_id; void destroy() { } MOCK_METHOD1(close, void(Context*)); MOCK_METHOD5(create_replayer, Replayer*(Threads<librbd::MockTestImageCtx>*, InstanceWatcher<librbd::MockTestImageCtx>*, const std::string&, PoolMetaCache*, ReplayerListener*)); StateBuilder() { s_instance = this; } }; BootstrapRequest<librbd::MockTestImageCtx>* BootstrapRequest<librbd::MockTestImageCtx>::s_instance = nullptr; StateBuilder<librbd::MockTestImageCtx>* StateBuilder<librbd::MockTestImageCtx>::s_instance = nullptr; } // namespace image_replayer } // namespace mirror } // namespace rbd // template definitions #include "tools/rbd_mirror/ImageReplayer.cc" namespace rbd { namespace mirror { using ::testing::_; using ::testing::AtLeast; using ::testing::DoAll; using ::testing::InSequence; using ::testing::Invoke; using ::testing::MatcherCast; using ::testing::Return; using ::testing::ReturnArg; using ::testing::SetArgPointee; using ::testing::WithArg; class TestMockImageReplayer : public TestMockFixture { public: typedef Threads<librbd::MockTestImageCtx> MockThreads; typedef ImageDeleter<librbd::MockTestImageCtx> MockImageDeleter; typedef MirrorStatusUpdater<librbd::MockTestImageCtx> MockMirrorStatusUpdater; typedef image_replayer::BootstrapRequest<librbd::MockTestImageCtx> MockBootstrapRequest; typedef image_replayer::StateBuilder<librbd::MockTestImageCtx> MockStateBuilder; typedef image_replayer::MockReplayer MockReplayer; typedef 
ImageReplayer<librbd::MockTestImageCtx> MockImageReplayer; typedef InstanceWatcher<librbd::MockTestImageCtx> MockInstanceWatcher; void SetUp() override { TestMockFixture::SetUp(); librbd::RBD rbd; ASSERT_EQ(0, create_image(rbd, m_remote_io_ctx, m_image_name, m_image_size)); ASSERT_EQ(0, open_image(m_remote_io_ctx, m_image_name, &m_remote_image_ctx)); } void TearDown() override { delete m_image_replayer; TestMockFixture::TearDown(); } void create_local_image() { librbd::RBD rbd; ASSERT_EQ(0, create_image(rbd, m_local_io_ctx, m_image_name, m_image_size)); ASSERT_EQ(0, open_image(m_local_io_ctx, m_image_name, &m_local_image_ctx)); } void expect_work_queue_repeatedly(MockThreads &mock_threads) { EXPECT_CALL(*mock_threads.work_queue, queue(_, _)) .WillRepeatedly(Invoke([this](Context *ctx, int r) { m_threads->work_queue->queue(ctx, r); })); } void expect_add_event_after_repeatedly(MockThreads &mock_threads) { EXPECT_CALL(*mock_threads.timer, add_event_after(_, _)) .WillRepeatedly( DoAll(Invoke([this](double seconds, Context *ctx) { m_threads->timer->add_event_after(seconds, ctx); }), ReturnArg<1>())); EXPECT_CALL(*mock_threads.timer, cancel_event(_)) .WillRepeatedly( Invoke([this](Context *ctx) { return m_threads->timer->cancel_event(ctx); })); } void expect_trash_move(MockImageDeleter& mock_image_deleter, const std::string& global_image_id, bool ignore_orphan, int r) { EXPECT_CALL(mock_image_deleter, trash_move(global_image_id, ignore_orphan, _)) .WillOnce(WithArg<2>(Invoke([this, r](Context* ctx) { m_threads->work_queue->queue(ctx, r); }))); } bufferlist encode_tag_data(const librbd::journal::TagData &tag_data) { bufferlist bl; encode(tag_data, bl); return bl; } void expect_send(MockBootstrapRequest& mock_bootstrap_request, MockStateBuilder& mock_state_builder, librbd::MockTestImageCtx& mock_local_image_ctx, bool do_resync, bool set_local_image, int r) { EXPECT_CALL(mock_bootstrap_request, send()) .WillOnce(Invoke([this, &mock_bootstrap_request, &mock_state_builder, 
&mock_local_image_ctx, set_local_image, do_resync, r]() { if (r == 0 || r == -ENOLINK) { mock_state_builder.local_image_id = mock_local_image_ctx.id; mock_state_builder.remote_image_id = m_remote_image_ctx->id; *mock_bootstrap_request.state_builder = &mock_state_builder; } if (r == 0) { mock_state_builder.local_image_ctx = &mock_local_image_ctx; *mock_bootstrap_request.do_resync = do_resync; } if (r < 0 && r != -ENOENT) { mock_state_builder.remote_image_id = ""; } if (r == -ENOENT) { *mock_bootstrap_request.state_builder = &mock_state_builder; } if (set_local_image) { mock_state_builder.local_image_id = mock_local_image_ctx.id; } mock_bootstrap_request.on_finish->complete(r); })); } void expect_create_replayer(MockStateBuilder& mock_state_builder, MockReplayer& mock_replayer) { EXPECT_CALL(mock_state_builder, create_replayer(_, _, _, _, _)) .WillOnce(WithArg<4>( Invoke([&mock_replayer] (image_replayer::ReplayerListener* replayer_listener) { mock_replayer.replayer_listener = replayer_listener; return &mock_replayer; }))); } void expect_close(MockStateBuilder& mock_state_builder, int r) { EXPECT_CALL(mock_state_builder, close(_)) .WillOnce(Invoke([this, r](Context* ctx) { m_threads->work_queue->queue(ctx, r); })); } void expect_init(MockReplayer& mock_replayer, int r) { EXPECT_CALL(mock_replayer, init(_)) .WillOnce(Invoke([this, r](Context* ctx) { m_threads->work_queue->queue(ctx, r); })); } void expect_shut_down(MockReplayer& mock_replayer, int r) { EXPECT_CALL(mock_replayer, shut_down(_)) .WillOnce(Invoke([this, r](Context* ctx) { m_threads->work_queue->queue(ctx, r); })); EXPECT_CALL(mock_replayer, destroy()); } void expect_get_replay_status(MockReplayer& mock_replayer) { EXPECT_CALL(mock_replayer, get_replay_status(_, _)) .WillRepeatedly(DoAll(WithArg<1>(CompleteContext(-EEXIST)), Return(true))); } void expect_set_mirror_image_status_repeatedly() { EXPECT_CALL(m_local_status_updater, set_mirror_image_status(_, _, _)) .WillRepeatedly(Invoke([](auto, auto, 
auto){})); EXPECT_CALL(m_remote_status_updater, set_mirror_image_status(_, _, _)) .WillRepeatedly(Invoke([](auto, auto, auto){})); } void expect_mirror_image_status_exists(bool exists) { EXPECT_CALL(m_local_status_updater, exists(_)) .WillOnce(Return(exists)); EXPECT_CALL(m_remote_status_updater, exists(_)) .WillOnce(Return(exists)); } void create_image_replayer(MockThreads &mock_threads) { m_image_replayer = new MockImageReplayer( m_local_io_ctx, "local_mirror_uuid", "global image id", &mock_threads, &m_instance_watcher, &m_local_status_updater, nullptr, nullptr); m_image_replayer->add_peer({"peer_uuid", m_remote_io_ctx, {"remote mirror uuid", "remote mirror peer uuid"}, &m_remote_status_updater}); } void wait_for_stopped() { for (int i = 0; i < 10000; i++) { if (m_image_replayer->is_stopped()) { break; } usleep(1000); } ASSERT_TRUE(m_image_replayer->is_stopped()); } librbd::ImageCtx *m_remote_image_ctx; librbd::ImageCtx *m_local_image_ctx = nullptr; MockInstanceWatcher m_instance_watcher; MockMirrorStatusUpdater m_local_status_updater; MockMirrorStatusUpdater m_remote_status_updater; MockImageReplayer *m_image_replayer = nullptr; }; TEST_F(TestMockImageReplayer, StartStop) { // START create_local_image(); librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockThreads mock_threads(m_threads); expect_work_queue_repeatedly(mock_threads); expect_add_event_after_repeatedly(mock_threads); MockImageDeleter mock_image_deleter; MockReplayer mock_replayer; expect_get_replay_status(mock_replayer); expect_set_mirror_image_status_repeatedly(); InSequence seq; MockBootstrapRequest mock_bootstrap_request; MockStateBuilder mock_state_builder; expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx, false, false, 0); expect_create_replayer(mock_state_builder, mock_replayer); expect_init(mock_replayer, 0); create_image_replayer(mock_threads); C_SaferCond start_ctx; m_image_replayer->start(&start_ctx); ASSERT_EQ(0, start_ctx.wait()); 
ASSERT_EQ(image_replayer::HEALTH_STATE_OK, m_image_replayer->get_health_state()); // STOP expect_shut_down(mock_replayer, 0); expect_close(mock_state_builder, 0); expect_mirror_image_status_exists(false); C_SaferCond stop_ctx; m_image_replayer->stop(&stop_ctx); ASSERT_EQ(0, stop_ctx.wait()); ASSERT_EQ(image_replayer::HEALTH_STATE_OK, m_image_replayer->get_health_state()); } TEST_F(TestMockImageReplayer, LocalImagePrimary) { create_local_image(); librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockThreads mock_threads(m_threads); expect_work_queue_repeatedly(mock_threads); expect_add_event_after_repeatedly(mock_threads); MockImageDeleter mock_image_deleter; MockBootstrapRequest mock_bootstrap_request; expect_set_mirror_image_status_repeatedly(); InSequence seq; MockStateBuilder mock_state_builder; expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx, false, false, -ENOMSG); expect_mirror_image_status_exists(false); create_image_replayer(mock_threads); C_SaferCond start_ctx; m_image_replayer->start(&start_ctx); ASSERT_EQ(0, start_ctx.wait()); } TEST_F(TestMockImageReplayer, MetadataCleanup) { // START create_local_image(); librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockThreads mock_threads(m_threads); expect_work_queue_repeatedly(mock_threads); expect_add_event_after_repeatedly(mock_threads); MockImageDeleter mock_image_deleter; MockBootstrapRequest mock_bootstrap_request; MockReplayer mock_replayer; expect_get_replay_status(mock_replayer); expect_set_mirror_image_status_repeatedly(); InSequence seq; MockStateBuilder mock_state_builder; expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx, false, true, -ENOLINK); expect_close(mock_state_builder, 0); expect_trash_move(mock_image_deleter, "global image id", false, 0); expect_mirror_image_status_exists(false); create_image_replayer(mock_threads); C_SaferCond start_ctx; m_image_replayer->start(&start_ctx); ASSERT_EQ(0, 
start_ctx.wait()); } TEST_F(TestMockImageReplayer, BootstrapRemoteDeleted) { create_local_image(); librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockThreads mock_threads(m_threads); expect_work_queue_repeatedly(mock_threads); expect_add_event_after_repeatedly(mock_threads); MockImageDeleter mock_image_deleter; expect_set_mirror_image_status_repeatedly(); InSequence seq; MockBootstrapRequest mock_bootstrap_request; MockStateBuilder mock_state_builder; expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx, false, false, -ENOLINK); expect_close(mock_state_builder, 0); expect_trash_move(mock_image_deleter, "global image id", false, 0); expect_mirror_image_status_exists(false); create_image_replayer(mock_threads); C_SaferCond start_ctx; m_image_replayer->start(&start_ctx); ASSERT_EQ(0, start_ctx.wait()); } TEST_F(TestMockImageReplayer, BootstrapResyncRequested) { create_local_image(); librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockThreads mock_threads(m_threads); expect_work_queue_repeatedly(mock_threads); expect_add_event_after_repeatedly(mock_threads); MockImageDeleter mock_image_deleter; expect_set_mirror_image_status_repeatedly(); InSequence seq; MockBootstrapRequest mock_bootstrap_request; MockStateBuilder mock_state_builder; expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx, true, false, 0); expect_close(mock_state_builder, 0); expect_trash_move(mock_image_deleter, "global image id", true, 0); expect_mirror_image_status_exists(false); create_image_replayer(mock_threads); C_SaferCond start_ctx; m_image_replayer->start(&start_ctx); ASSERT_EQ(0, start_ctx.wait()); } TEST_F(TestMockImageReplayer, BootstrapError) { create_local_image(); librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockThreads mock_threads(m_threads); expect_work_queue_repeatedly(mock_threads); expect_add_event_after_repeatedly(mock_threads); MockImageDeleter mock_image_deleter; 
MockBootstrapRequest mock_bootstrap_request; expect_set_mirror_image_status_repeatedly(); InSequence seq; MockStateBuilder mock_state_builder; expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx, false, false, -EINVAL); expect_mirror_image_status_exists(false); create_image_replayer(mock_threads); C_SaferCond start_ctx; m_image_replayer->start(&start_ctx); ASSERT_EQ(-EINVAL, start_ctx.wait()); } TEST_F(TestMockImageReplayer, BootstrapCancel) { create_local_image(); librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockThreads mock_threads(m_threads); expect_work_queue_repeatedly(mock_threads); expect_add_event_after_repeatedly(mock_threads); MockImageDeleter mock_image_deleter; expect_set_mirror_image_status_repeatedly(); InSequence seq; create_image_replayer(mock_threads); MockBootstrapRequest mock_bootstrap_request; MockStateBuilder mock_state_builder; EXPECT_CALL(mock_bootstrap_request, send()) .WillOnce(Invoke([this, &mock_bootstrap_request]() { m_image_replayer->stop(nullptr); mock_bootstrap_request.on_finish->complete(-ECANCELED); })); EXPECT_CALL(mock_bootstrap_request, cancel()); expect_mirror_image_status_exists(false); C_SaferCond start_ctx; m_image_replayer->start(&start_ctx); ASSERT_EQ(-ECANCELED, start_ctx.wait()); } TEST_F(TestMockImageReplayer, StopError) { // START create_local_image(); librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockThreads mock_threads(m_threads); expect_work_queue_repeatedly(mock_threads); expect_add_event_after_repeatedly(mock_threads); MockImageDeleter mock_image_deleter; MockBootstrapRequest mock_bootstrap_request; MockReplayer mock_replayer; expect_get_replay_status(mock_replayer); expect_set_mirror_image_status_repeatedly(); InSequence seq; MockStateBuilder mock_state_builder; expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx, false, false, 0); expect_create_replayer(mock_state_builder, mock_replayer); expect_init(mock_replayer, 0); 
create_image_replayer(mock_threads); C_SaferCond start_ctx; m_image_replayer->start(&start_ctx); ASSERT_EQ(0, start_ctx.wait()); // STOP (errors are ignored) expect_shut_down(mock_replayer, -EINVAL); expect_close(mock_state_builder, -EINVAL); expect_mirror_image_status_exists(false); C_SaferCond stop_ctx; m_image_replayer->stop(&stop_ctx); ASSERT_EQ(0, stop_ctx.wait()); } TEST_F(TestMockImageReplayer, ReplayerError) { create_local_image(); librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockThreads mock_threads(m_threads); expect_work_queue_repeatedly(mock_threads); expect_add_event_after_repeatedly(mock_threads); MockImageDeleter mock_image_deleter; MockBootstrapRequest mock_bootstrap_request; MockReplayer mock_replayer; expect_set_mirror_image_status_repeatedly(); InSequence seq; MockStateBuilder mock_state_builder; expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx, false, false, 0); expect_create_replayer(mock_state_builder, mock_replayer); expect_init(mock_replayer, -EINVAL); EXPECT_CALL(mock_replayer, get_error_description()) .WillOnce(Return("FAIL")); EXPECT_CALL(mock_replayer, destroy()); expect_close(mock_state_builder, -EINVAL); expect_mirror_image_status_exists(false); create_image_replayer(mock_threads); C_SaferCond start_ctx; m_image_replayer->start(&start_ctx); ASSERT_EQ(-EINVAL, start_ctx.wait()); } TEST_F(TestMockImageReplayer, ReplayerResync) { // START create_local_image(); librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockThreads mock_threads(m_threads); expect_work_queue_repeatedly(mock_threads); expect_add_event_after_repeatedly(mock_threads); MockImageDeleter mock_image_deleter; MockBootstrapRequest mock_bootstrap_request; MockReplayer mock_replayer; expect_get_replay_status(mock_replayer); expect_set_mirror_image_status_repeatedly(); InSequence seq; MockStateBuilder mock_state_builder; expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx, false, false, 
0); expect_create_replayer(mock_state_builder, mock_replayer); expect_init(mock_replayer, 0); create_image_replayer(mock_threads); C_SaferCond start_ctx; m_image_replayer->start(&start_ctx); ASSERT_EQ(0, start_ctx.wait()); // NOTIFY EXPECT_CALL(mock_replayer, is_resync_requested()) .WillOnce(Return(true)); expect_shut_down(mock_replayer, 0); expect_close(mock_state_builder, 0); expect_trash_move(mock_image_deleter, "global image id", true, 0); expect_mirror_image_status_exists(false); mock_replayer.replayer_listener->handle_notification(); ASSERT_FALSE(m_image_replayer->is_running()); wait_for_stopped(); } TEST_F(TestMockImageReplayer, ReplayerInterrupted) { // START create_local_image(); librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockThreads mock_threads(m_threads); expect_work_queue_repeatedly(mock_threads); expect_add_event_after_repeatedly(mock_threads); MockImageDeleter mock_image_deleter; MockBootstrapRequest mock_bootstrap_request; MockReplayer mock_replayer; expect_get_replay_status(mock_replayer); expect_set_mirror_image_status_repeatedly(); InSequence seq; MockStateBuilder mock_state_builder; expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx, false, false, 0); expect_create_replayer(mock_state_builder, mock_replayer); expect_init(mock_replayer, 0); create_image_replayer(mock_threads); C_SaferCond start_ctx; m_image_replayer->start(&start_ctx); ASSERT_EQ(0, start_ctx.wait()); // NOTIFY EXPECT_CALL(mock_replayer, is_resync_requested()) .WillOnce(Return(false)); EXPECT_CALL(mock_replayer, is_replaying()) .WillOnce(Return(false)); EXPECT_CALL(mock_replayer, get_error_code()) .WillOnce(Return(-EINVAL)); EXPECT_CALL(mock_replayer, get_error_description()) .WillOnce(Return("INVALID")); expect_shut_down(mock_replayer, 0); expect_close(mock_state_builder, 0); expect_mirror_image_status_exists(false); mock_replayer.replayer_listener->handle_notification(); ASSERT_FALSE(m_image_replayer->is_running()); 
wait_for_stopped(); } TEST_F(TestMockImageReplayer, ReplayerRenamed) { // START create_local_image(); librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockThreads mock_threads(m_threads); expect_work_queue_repeatedly(mock_threads); expect_add_event_after_repeatedly(mock_threads); MockImageDeleter mock_image_deleter; MockBootstrapRequest mock_bootstrap_request; MockReplayer mock_replayer; expect_get_replay_status(mock_replayer); expect_set_mirror_image_status_repeatedly(); InSequence seq; MockStateBuilder mock_state_builder; expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx, false, false, 0); expect_create_replayer(mock_state_builder, mock_replayer); expect_init(mock_replayer, 0); create_image_replayer(mock_threads); C_SaferCond start_ctx; m_image_replayer->start(&start_ctx); ASSERT_EQ(0, start_ctx.wait()); // NOTIFY EXPECT_CALL(mock_replayer, is_resync_requested()) .WillOnce(Return(false)); EXPECT_CALL(mock_replayer, is_replaying()) .WillOnce(Return(true)); mock_local_image_ctx.name = "NEW NAME"; mock_replayer.replayer_listener->handle_notification(); // STOP expect_shut_down(mock_replayer, 0); expect_close(mock_state_builder, 0); expect_mirror_image_status_exists(false); C_SaferCond stop_ctx; m_image_replayer->stop(&stop_ctx); ASSERT_EQ(0, stop_ctx.wait()); auto image_spec = image_replayer::util::compute_image_spec( m_local_io_ctx, "NEW NAME"); ASSERT_EQ(image_spec, m_image_replayer->get_name()); } TEST_F(TestMockImageReplayer, StopJoinInterruptedReplayer) { // START create_local_image(); librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockThreads mock_threads(m_threads); expect_work_queue_repeatedly(mock_threads); expect_add_event_after_repeatedly(mock_threads); MockReplayer mock_replayer; expect_get_replay_status(mock_replayer); expect_set_mirror_image_status_repeatedly(); InSequence seq; MockBootstrapRequest mock_bootstrap_request; MockStateBuilder mock_state_builder; 
expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx, false, false, 0); expect_create_replayer(mock_state_builder, mock_replayer); expect_init(mock_replayer, 0); create_image_replayer(mock_threads); C_SaferCond start_ctx; m_image_replayer->start(&start_ctx); ASSERT_EQ(0, start_ctx.wait()); // NOTIFY EXPECT_CALL(mock_replayer, is_resync_requested()) .WillOnce(Return(false)); EXPECT_CALL(mock_replayer, is_replaying()) .WillOnce(Return(false)); EXPECT_CALL(mock_replayer, get_error_code()) .WillOnce(Return(-EINVAL)); EXPECT_CALL(mock_replayer, get_error_description()) .WillOnce(Return("INVALID")); const double DELAY = 10; EXPECT_CALL(mock_replayer, shut_down(_)) .WillOnce(Invoke([this, DELAY](Context* ctx) { std::lock_guard l(m_threads->timer_lock); m_threads->timer->add_event_after(DELAY, ctx); })); EXPECT_CALL(mock_replayer, destroy()); expect_close(mock_state_builder, 0); expect_mirror_image_status_exists(false); mock_replayer.replayer_listener->handle_notification(); ASSERT_FALSE(m_image_replayer->is_running()); C_SaferCond stop_ctx; m_image_replayer->stop(&stop_ctx); ASSERT_EQ(ETIMEDOUT, stop_ctx.wait_for(DELAY * 3 / 4)); ASSERT_EQ(0, stop_ctx.wait_for(DELAY)); } TEST_F(TestMockImageReplayer, StopJoinRequestedStop) { // START create_local_image(); librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockThreads mock_threads(m_threads); expect_work_queue_repeatedly(mock_threads); expect_add_event_after_repeatedly(mock_threads); MockReplayer mock_replayer; expect_get_replay_status(mock_replayer); expect_set_mirror_image_status_repeatedly(); InSequence seq; MockBootstrapRequest mock_bootstrap_request; MockStateBuilder mock_state_builder; expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx, false, false, 0); expect_create_replayer(mock_state_builder, mock_replayer); expect_init(mock_replayer, 0); create_image_replayer(mock_threads); C_SaferCond start_ctx; m_image_replayer->start(&start_ctx); ASSERT_EQ(0, 
start_ctx.wait()); // STOP const double DELAY = 10; EXPECT_CALL(mock_replayer, shut_down(_)) .WillOnce(Invoke([this, DELAY](Context* ctx) { std::lock_guard l(m_threads->timer_lock); m_threads->timer->add_event_after(DELAY, ctx); })); EXPECT_CALL(mock_replayer, destroy()); expect_close(mock_state_builder, 0); expect_mirror_image_status_exists(false); C_SaferCond stop_ctx1; m_image_replayer->stop(&stop_ctx1); C_SaferCond stop_ctx2; m_image_replayer->stop(&stop_ctx2); ASSERT_EQ(ETIMEDOUT, stop_ctx2.wait_for(DELAY * 3 / 4)); ASSERT_EQ(0, stop_ctx2.wait_for(DELAY)); ASSERT_EQ(0, stop_ctx1.wait_for(0)); } } // namespace mirror } // namespace rbd
29,542
30.065195
109
cc
null
ceph-main/src/test/rbd_mirror/test_mock_ImageSync.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/rbd_mirror/test_mock_fixture.h" #include "include/rbd/librbd.hpp" #include "librbd/DeepCopyRequest.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/rbd_mirror/mock/image_sync/MockSyncPointHandler.h" #include "tools/rbd_mirror/ImageSync.h" #include "tools/rbd_mirror/Threads.h" #include "tools/rbd_mirror/image_sync/SyncPointCreateRequest.h" #include "tools/rbd_mirror/image_sync/SyncPointPruneRequest.h" namespace librbd { namespace { struct MockTestImageCtx : public librbd::MockImageCtx { explicit MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace template <> class DeepCopyRequest<librbd::MockTestImageCtx> { public: static DeepCopyRequest* s_instance; Context *on_finish; static DeepCopyRequest* create( librbd::MockTestImageCtx *src_image_ctx, librbd::MockTestImageCtx *dst_image_ctx, librados::snap_t src_snap_id_start, librados::snap_t src_snap_id_end, librados::snap_t dst_snap_id_start, bool flatten, const librbd::deep_copy::ObjectNumber &object_number, librbd::asio::ContextWQ *work_queue, SnapSeqs *snap_seqs, deep_copy::Handler *handler, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->on_finish = on_finish; return s_instance; } DeepCopyRequest() { s_instance = this; } void put() { } void get() { } MOCK_METHOD0(cancel, void()); MOCK_METHOD0(send, void()); }; DeepCopyRequest<librbd::MockTestImageCtx>* DeepCopyRequest<librbd::MockTestImageCtx>::s_instance = nullptr; } // namespace librbd // template definitions #include "tools/rbd_mirror/ImageSync.cc" namespace rbd { namespace mirror { template <> struct Threads<librbd::MockTestImageCtx> { ceph::mutex &timer_lock; SafeTimer *timer; librbd::asio::ContextWQ *work_queue; Threads(Threads<librbd::ImageCtx> *threads) : timer_lock(threads->timer_lock), 
timer(threads->timer), work_queue(threads->work_queue) { } }; template<> struct InstanceWatcher<librbd::MockTestImageCtx> { MOCK_METHOD2(notify_sync_request, void(const std::string, Context *)); MOCK_METHOD1(cancel_sync_request, bool(const std::string &)); MOCK_METHOD1(notify_sync_complete, void(const std::string &)); }; namespace image_sync { template <> class SyncPointCreateRequest<librbd::MockTestImageCtx> { public: static SyncPointCreateRequest *s_instance; Context *on_finish; static SyncPointCreateRequest* create(librbd::MockTestImageCtx *remote_image_ctx, const std::string &mirror_uuid, image_sync::SyncPointHandler* sync_point_handler, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->on_finish = on_finish; return s_instance; } SyncPointCreateRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; template <> class SyncPointPruneRequest<librbd::MockTestImageCtx> { public: static SyncPointPruneRequest *s_instance; Context *on_finish; bool sync_complete; static SyncPointPruneRequest* create(librbd::MockTestImageCtx *remote_image_ctx, bool sync_complete, image_sync::SyncPointHandler* sync_point_handler, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->on_finish = on_finish; s_instance->sync_complete = sync_complete; return s_instance; } SyncPointPruneRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; SyncPointCreateRequest<librbd::MockTestImageCtx>* SyncPointCreateRequest<librbd::MockTestImageCtx>::s_instance = nullptr; SyncPointPruneRequest<librbd::MockTestImageCtx>* SyncPointPruneRequest<librbd::MockTestImageCtx>::s_instance = nullptr; } // namespace image_sync using ::testing::_; using ::testing::DoAll; using ::testing::InSequence; using ::testing::Invoke; using ::testing::Return; using ::testing::StrEq; using ::testing::WithArg; using ::testing::InvokeWithoutArgs; class TestMockImageSync : public TestMockFixture { public: typedef Threads<librbd::MockTestImageCtx> MockThreads; typedef 
ImageSync<librbd::MockTestImageCtx> MockImageSync; typedef InstanceWatcher<librbd::MockTestImageCtx> MockInstanceWatcher; typedef image_sync::SyncPointCreateRequest<librbd::MockTestImageCtx> MockSyncPointCreateRequest; typedef image_sync::SyncPointPruneRequest<librbd::MockTestImageCtx> MockSyncPointPruneRequest; typedef image_sync::MockSyncPointHandler MockSyncPointHandler; typedef librbd::DeepCopyRequest<librbd::MockTestImageCtx> MockImageCopyRequest; void SetUp() override { TestMockFixture::SetUp(); librbd::RBD rbd; ASSERT_EQ(0, create_image(rbd, m_remote_io_ctx, m_image_name, m_image_size)); ASSERT_EQ(0, open_image(m_remote_io_ctx, m_image_name, &m_remote_image_ctx)); ASSERT_EQ(0, create_image(rbd, m_local_io_ctx, m_image_name, m_image_size)); ASSERT_EQ(0, open_image(m_local_io_ctx, m_image_name, &m_local_image_ctx)); } void expect_get_snap_id(librbd::MockTestImageCtx &mock_image_ctx) { EXPECT_CALL(mock_image_ctx, get_snap_id(_, _)) .WillOnce(Return(123)); } void expect_notify_sync_request(MockInstanceWatcher &mock_instance_watcher, const std::string &sync_id, int r) { EXPECT_CALL(mock_instance_watcher, notify_sync_request(sync_id, _)) .WillOnce(Invoke([this, r](const std::string &, Context *on_sync_start) { m_threads->work_queue->queue(on_sync_start, r); })); } void expect_cancel_sync_request(MockInstanceWatcher &mock_instance_watcher, const std::string &sync_id, bool canceled) { EXPECT_CALL(mock_instance_watcher, cancel_sync_request(sync_id)) .WillOnce(Return(canceled)); } void expect_notify_sync_complete(MockInstanceWatcher &mock_instance_watcher, const std::string &sync_id) { EXPECT_CALL(mock_instance_watcher, notify_sync_complete(sync_id)); } void expect_create_sync_point(librbd::MockTestImageCtx &mock_local_image_ctx, MockSyncPointCreateRequest &mock_sync_point_create_request, int r) { EXPECT_CALL(mock_sync_point_create_request, send()) .WillOnce(Invoke([this, &mock_local_image_ctx, &mock_sync_point_create_request, r]() { if (r == 0) { 
mock_local_image_ctx.snap_ids[{cls::rbd::UserSnapshotNamespace(), "snap1"}] = 123; m_sync_points.emplace_back(cls::rbd::UserSnapshotNamespace(), "snap1", "", boost::none); } m_threads->work_queue->queue(mock_sync_point_create_request.on_finish, r); })); } void expect_copy_image(MockImageCopyRequest &mock_image_copy_request, int r) { EXPECT_CALL(mock_image_copy_request, send()) .WillOnce(Invoke([this, &mock_image_copy_request, r]() { m_threads->work_queue->queue(mock_image_copy_request.on_finish, r); })); } void expect_flush_sync_point(MockSyncPointHandler& mock_sync_point_handler, int r) { EXPECT_CALL(mock_sync_point_handler, update_sync_points(_, _, false, _)) .WillOnce(WithArg<3>(CompleteContext(r))); } void expect_prune_sync_point(MockSyncPointPruneRequest &mock_sync_point_prune_request, bool sync_complete, int r) { EXPECT_CALL(mock_sync_point_prune_request, send()) .WillOnce(Invoke([this, &mock_sync_point_prune_request, sync_complete, r]() { ASSERT_EQ(sync_complete, mock_sync_point_prune_request.sync_complete); if (r == 0 && !m_sync_points.empty()) { if (sync_complete) { m_sync_points.pop_front(); } else { while (m_sync_points.size() > 1) { m_sync_points.pop_back(); } } } m_threads->work_queue->queue(mock_sync_point_prune_request.on_finish, r); })); } void expect_get_snap_seqs(MockSyncPointHandler& mock_sync_point_handler) { EXPECT_CALL(mock_sync_point_handler, get_snap_seqs()) .WillRepeatedly(Return(librbd::SnapSeqs{})); } void expect_get_sync_points(MockSyncPointHandler& mock_sync_point_handler) { EXPECT_CALL(mock_sync_point_handler, get_sync_points()) .WillRepeatedly(Invoke([this]() { return m_sync_points; })); } MockImageSync *create_request(MockThreads& mock_threads, librbd::MockTestImageCtx &mock_remote_image_ctx, librbd::MockTestImageCtx &mock_local_image_ctx, MockSyncPointHandler& mock_sync_point_handler, MockInstanceWatcher &mock_instance_watcher, Context *ctx) { return new MockImageSync(&mock_threads, &mock_local_image_ctx, &mock_remote_image_ctx, 
"mirror-uuid", &mock_sync_point_handler, &mock_instance_watcher, nullptr, ctx); } librbd::ImageCtx *m_remote_image_ctx; librbd::ImageCtx *m_local_image_ctx; image_sync::SyncPoints m_sync_points; }; TEST_F(TestMockImageSync, SimpleSync) { MockThreads mock_threads(m_threads); librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockSyncPointHandler mock_sync_point_handler; MockInstanceWatcher mock_instance_watcher; MockImageCopyRequest mock_image_copy_request; MockSyncPointCreateRequest mock_sync_point_create_request; MockSyncPointPruneRequest mock_sync_point_prune_request; expect_get_snap_seqs(mock_sync_point_handler); expect_get_sync_points(mock_sync_point_handler); InSequence seq; expect_notify_sync_request(mock_instance_watcher, mock_local_image_ctx.id, 0); expect_create_sync_point(mock_local_image_ctx, mock_sync_point_create_request, 0); expect_get_snap_id(mock_remote_image_ctx); expect_copy_image(mock_image_copy_request, 0); expect_flush_sync_point(mock_sync_point_handler, 0); expect_prune_sync_point(mock_sync_point_prune_request, true, 0); expect_notify_sync_complete(mock_instance_watcher, mock_local_image_ctx.id); C_SaferCond ctx; MockImageSync *request = create_request(mock_threads, mock_remote_image_ctx, mock_local_image_ctx, mock_sync_point_handler, mock_instance_watcher, &ctx); request->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageSync, RestartSync) { MockThreads mock_threads(m_threads); librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockSyncPointHandler mock_sync_point_handler; MockInstanceWatcher mock_instance_watcher; MockImageCopyRequest mock_image_copy_request; MockSyncPointCreateRequest mock_sync_point_create_request; MockSyncPointPruneRequest mock_sync_point_prune_request; m_sync_points = {{cls::rbd::UserSnapshotNamespace(), "snap1", "", boost::none}, 
{cls::rbd::UserSnapshotNamespace(), "snap2", "snap1", boost::none}}; mock_local_image_ctx.snap_ids[{cls::rbd::UserSnapshotNamespace(), "snap1"}] = 123; mock_local_image_ctx.snap_ids[{cls::rbd::UserSnapshotNamespace(), "snap2"}] = 234; expect_test_features(mock_local_image_ctx); expect_get_snap_seqs(mock_sync_point_handler); expect_get_sync_points(mock_sync_point_handler); InSequence seq; expect_notify_sync_request(mock_instance_watcher, mock_local_image_ctx.id, 0); expect_prune_sync_point(mock_sync_point_prune_request, false, 0); expect_get_snap_id(mock_remote_image_ctx); expect_copy_image(mock_image_copy_request, 0); expect_flush_sync_point(mock_sync_point_handler, 0); expect_prune_sync_point(mock_sync_point_prune_request, true, 0); expect_notify_sync_complete(mock_instance_watcher, mock_local_image_ctx.id); C_SaferCond ctx; MockImageSync *request = create_request(mock_threads, mock_remote_image_ctx, mock_local_image_ctx, mock_sync_point_handler, mock_instance_watcher, &ctx); request->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageSync, CancelNotifySyncRequest) { MockThreads mock_threads(m_threads); librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockSyncPointHandler mock_sync_point_handler; MockInstanceWatcher mock_instance_watcher; expect_get_snap_seqs(mock_sync_point_handler); expect_get_sync_points(mock_sync_point_handler); InSequence seq; Context *on_sync_start = nullptr; C_SaferCond notify_sync_ctx; EXPECT_CALL(mock_instance_watcher, notify_sync_request(mock_local_image_ctx.id, _)) .WillOnce(Invoke([&on_sync_start, &notify_sync_ctx]( const std::string &, Context *ctx) { on_sync_start = ctx; notify_sync_ctx.complete(0); })); EXPECT_CALL(mock_instance_watcher, cancel_sync_request(mock_local_image_ctx.id)) .WillOnce(Invoke([&on_sync_start](const std::string &) { EXPECT_NE(nullptr, on_sync_start); on_sync_start->complete(-ECANCELED); return true; })); C_SaferCond 
ctx; MockImageSync *request = create_request(mock_threads, mock_remote_image_ctx, mock_local_image_ctx, mock_sync_point_handler, mock_instance_watcher, &ctx); request->get(); request->send(); // cancel the notify sync request once it starts ASSERT_EQ(0, notify_sync_ctx.wait()); request->cancel(); request->put(); ASSERT_EQ(-ECANCELED, ctx.wait()); } TEST_F(TestMockImageSync, CancelImageCopy) { MockThreads mock_threads(m_threads); librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockSyncPointHandler mock_sync_point_handler; MockInstanceWatcher mock_instance_watcher; MockImageCopyRequest mock_image_copy_request; MockSyncPointCreateRequest mock_sync_point_create_request; MockSyncPointPruneRequest mock_sync_point_prune_request; m_sync_points = {{cls::rbd::UserSnapshotNamespace(), "snap1", "", boost::none}}; expect_get_snap_seqs(mock_sync_point_handler); expect_get_sync_points(mock_sync_point_handler); InSequence seq; expect_notify_sync_request(mock_instance_watcher, mock_local_image_ctx.id, 0); expect_prune_sync_point(mock_sync_point_prune_request, false, 0); expect_get_snap_id(mock_remote_image_ctx); C_SaferCond image_copy_ctx; EXPECT_CALL(mock_image_copy_request, send()) .WillOnce(Invoke([&image_copy_ctx]() { image_copy_ctx.complete(0); })); expect_cancel_sync_request(mock_instance_watcher, mock_local_image_ctx.id, false); EXPECT_CALL(mock_image_copy_request, cancel()); expect_notify_sync_complete(mock_instance_watcher, mock_local_image_ctx.id); C_SaferCond ctx; MockImageSync *request = create_request(mock_threads, mock_remote_image_ctx, mock_local_image_ctx, mock_sync_point_handler, mock_instance_watcher, &ctx); request->get(); request->send(); // cancel the image copy once it starts ASSERT_EQ(0, image_copy_ctx.wait()); request->cancel(); request->put(); m_threads->work_queue->queue(mock_image_copy_request.on_finish, 0); ASSERT_EQ(-ECANCELED, ctx.wait()); } 
TEST_F(TestMockImageSync, CancelAfterCopyImage) { MockThreads mock_threads(m_threads); librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockSyncPointHandler mock_sync_point_handler; MockInstanceWatcher mock_instance_watcher; MockImageCopyRequest mock_image_copy_request; MockSyncPointCreateRequest mock_sync_point_create_request; MockSyncPointPruneRequest mock_sync_point_prune_request; C_SaferCond ctx; MockImageSync *request = create_request(mock_threads, mock_remote_image_ctx, mock_local_image_ctx, mock_sync_point_handler, mock_instance_watcher, &ctx); expect_get_snap_seqs(mock_sync_point_handler); expect_get_sync_points(mock_sync_point_handler); InSequence seq; expect_notify_sync_request(mock_instance_watcher, mock_local_image_ctx.id, 0); expect_create_sync_point(mock_local_image_ctx, mock_sync_point_create_request, 0); expect_get_snap_id(mock_remote_image_ctx); EXPECT_CALL(mock_image_copy_request, send()) .WillOnce((DoAll(InvokeWithoutArgs([request]() { request->cancel(); }), Invoke([this, &mock_image_copy_request]() { m_threads->work_queue->queue(mock_image_copy_request.on_finish, 0); })))); expect_cancel_sync_request(mock_instance_watcher, mock_local_image_ctx.id, false); EXPECT_CALL(mock_image_copy_request, cancel()); expect_notify_sync_complete(mock_instance_watcher, mock_local_image_ctx.id); request->send(); ASSERT_EQ(-ECANCELED, ctx.wait()); } } // namespace mirror } // namespace rbd
18,060
37.509595
121
cc
null
ceph-main/src/test/rbd_mirror/test_mock_InstanceReplayer.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/librbd/mock/MockImageCtx.h" #include "test/rbd_mirror/test_mock_fixture.h" #include "test/rbd_mirror/mock/MockContextWQ.h" #include "test/rbd_mirror/mock/MockSafeTimer.h" #include "tools/rbd_mirror/ImageReplayer.h" #include "tools/rbd_mirror/InstanceWatcher.h" #include "tools/rbd_mirror/InstanceReplayer.h" #include "tools/rbd_mirror/ServiceDaemon.h" #include "tools/rbd_mirror/Threads.h" #include "tools/rbd_mirror/image_replayer/Types.h" namespace librbd { namespace { struct MockTestImageCtx : public MockImageCtx { MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace } // namespace librbd namespace rbd { namespace mirror { template <> struct Threads<librbd::MockTestImageCtx> { MockSafeTimer *timer; ceph::mutex &timer_lock; ceph::condition_variable timer_cond; MockContextWQ *work_queue; Threads(Threads<librbd::ImageCtx> *threads) : timer(new MockSafeTimer()), timer_lock(threads->timer_lock), work_queue(new MockContextWQ()) { } ~Threads() { delete timer; delete work_queue; } }; template<> struct ServiceDaemon<librbd::MockTestImageCtx> { MOCK_METHOD4(add_or_update_namespace_attribute, void(int64_t, const std::string&, const std::string&, const service_daemon::AttributeValue&)); }; template<> struct InstanceWatcher<librbd::MockTestImageCtx> { }; template<> struct ImageReplayer<librbd::MockTestImageCtx> { static ImageReplayer* s_instance; std::string global_image_id; static ImageReplayer *create( librados::IoCtx &local_io_ctx, const std::string &local_mirror_uuid, const std::string &global_image_id, Threads<librbd::MockTestImageCtx> *threads, InstanceWatcher<librbd::MockTestImageCtx> *instance_watcher, MirrorStatusUpdater<librbd::MockTestImageCtx>* local_status_updater, journal::CacheManagerHandler *cache_manager_handler, PoolMetaCache* pool_meta_cache) { ceph_assert(s_instance != nullptr); 
s_instance->global_image_id = global_image_id; return s_instance; } ImageReplayer() { ceph_assert(s_instance == nullptr); s_instance = this; } virtual ~ImageReplayer() { ceph_assert(s_instance == this); s_instance = nullptr; } MOCK_METHOD0(destroy, void()); MOCK_METHOD2(start, void(Context *, bool)); MOCK_METHOD2(stop, void(Context *, bool)); MOCK_METHOD1(restart, void(Context*)); MOCK_METHOD0(flush, void()); MOCK_METHOD1(print_status, void(Formatter *)); MOCK_METHOD1(add_peer, void(const Peer<librbd::MockTestImageCtx>& peer)); MOCK_METHOD0(get_global_image_id, const std::string &()); MOCK_METHOD0(get_local_image_id, const std::string &()); MOCK_METHOD0(is_running, bool()); MOCK_METHOD0(is_stopped, bool()); MOCK_METHOD0(is_blocklisted, bool()); MOCK_CONST_METHOD0(is_finished, bool()); MOCK_METHOD1(set_finished, void(bool)); MOCK_CONST_METHOD0(get_health_state, image_replayer::HealthState()); }; ImageReplayer<librbd::MockTestImageCtx>* ImageReplayer<librbd::MockTestImageCtx>::s_instance = nullptr; template<> struct MirrorStatusUpdater<librbd::MockTestImageCtx> { }; } // namespace mirror } // namespace rbd // template definitions #include "tools/rbd_mirror/InstanceReplayer.cc" namespace rbd { namespace mirror { using ::testing::_; using ::testing::DoAll; using ::testing::InSequence; using ::testing::Invoke; using ::testing::Return; using ::testing::ReturnArg; using ::testing::ReturnRef; using ::testing::WithArg; class TestMockInstanceReplayer : public TestMockFixture { public: typedef Threads<librbd::MockTestImageCtx> MockThreads; typedef ImageReplayer<librbd::MockTestImageCtx> MockImageReplayer; typedef InstanceReplayer<librbd::MockTestImageCtx> MockInstanceReplayer; typedef InstanceWatcher<librbd::MockTestImageCtx> MockInstanceWatcher; typedef MirrorStatusUpdater<librbd::MockTestImageCtx> MockMirrorStatusUpdater; typedef ServiceDaemon<librbd::MockTestImageCtx> MockServiceDaemon; void expect_work_queue(MockThreads &mock_threads) { 
EXPECT_CALL(*mock_threads.work_queue, queue(_, _)) .WillOnce(Invoke([this](Context *ctx, int r) { m_threads->work_queue->queue(ctx, r); })); } void expect_add_event_after(MockThreads &mock_threads, Context** timer_ctx = nullptr) { EXPECT_CALL(*mock_threads.timer, add_event_after(_, _)) .WillOnce(DoAll( WithArg<1>(Invoke([this, &mock_threads, timer_ctx](Context *ctx) { ceph_assert(ceph_mutex_is_locked(mock_threads.timer_lock)); if (timer_ctx != nullptr) { *timer_ctx = ctx; mock_threads.timer_cond.notify_one(); } else { m_threads->work_queue->queue( new LambdaContext([&mock_threads, ctx](int) { std::lock_guard timer_lock{mock_threads.timer_lock}; ctx->complete(0); }), 0); } })), ReturnArg<1>())); } void expect_cancel_event(MockThreads &mock_threads, bool canceled) { EXPECT_CALL(*mock_threads.timer, cancel_event(_)) .WillOnce(Return(canceled)); } }; TEST_F(TestMockInstanceReplayer, AcquireReleaseImage) { MockThreads mock_threads(m_threads); MockServiceDaemon mock_service_daemon; MockMirrorStatusUpdater mock_status_updater; MockInstanceWatcher mock_instance_watcher; MockImageReplayer mock_image_replayer; MockInstanceReplayer instance_replayer( m_local_io_ctx, "local_mirror_uuid", &mock_threads, &mock_service_daemon, &mock_status_updater, nullptr, nullptr); std::string global_image_id("global_image_id"); EXPECT_CALL(mock_image_replayer, get_global_image_id()) .WillRepeatedly(ReturnRef(global_image_id)); InSequence seq; expect_work_queue(mock_threads); Context *timer_ctx = nullptr; expect_add_event_after(mock_threads, &timer_ctx); instance_replayer.init(); instance_replayer.add_peer({"peer_uuid", m_remote_io_ctx, {}, nullptr}); // Acquire C_SaferCond on_acquire; EXPECT_CALL(mock_image_replayer, add_peer(_)); EXPECT_CALL(mock_image_replayer, is_stopped()).WillOnce(Return(true)); EXPECT_CALL(mock_image_replayer, is_blocklisted()).WillOnce(Return(false)); EXPECT_CALL(mock_image_replayer, is_finished()).WillOnce(Return(false)); EXPECT_CALL(mock_image_replayer, start(_, 
false)) .WillOnce(CompleteContext(0)); expect_work_queue(mock_threads); instance_replayer.acquire_image(&mock_instance_watcher, global_image_id, &on_acquire); ASSERT_EQ(0, on_acquire.wait()); // Release C_SaferCond on_release; EXPECT_CALL(mock_image_replayer, is_stopped()) .WillOnce(Return(false)); EXPECT_CALL(mock_image_replayer, is_running()) .WillOnce(Return(false)); expect_work_queue(mock_threads); expect_add_event_after(mock_threads); expect_work_queue(mock_threads); EXPECT_CALL(mock_image_replayer, is_stopped()) .WillOnce(Return(false)); EXPECT_CALL(mock_image_replayer, is_running()) .WillOnce(Return(true)); EXPECT_CALL(mock_image_replayer, stop(_, false)) .WillOnce(CompleteContext(0)); expect_work_queue(mock_threads); EXPECT_CALL(mock_image_replayer, is_stopped()) .WillOnce(Return(true)); expect_work_queue(mock_threads); EXPECT_CALL(mock_image_replayer, destroy()); instance_replayer.release_image("global_image_id", &on_release); ASSERT_EQ(0, on_release.wait()); expect_work_queue(mock_threads); expect_cancel_event(mock_threads, true); expect_work_queue(mock_threads); instance_replayer.shut_down(); ASSERT_TRUE(timer_ctx != nullptr); delete timer_ctx; } TEST_F(TestMockInstanceReplayer, RemoveFinishedImage) { MockThreads mock_threads(m_threads); MockServiceDaemon mock_service_daemon; MockMirrorStatusUpdater mock_status_updater; MockInstanceWatcher mock_instance_watcher; MockImageReplayer mock_image_replayer; MockInstanceReplayer instance_replayer( m_local_io_ctx, "local_mirror_uuid", &mock_threads, &mock_service_daemon, &mock_status_updater, nullptr, nullptr); std::string global_image_id("global_image_id"); EXPECT_CALL(mock_image_replayer, get_global_image_id()) .WillRepeatedly(ReturnRef(global_image_id)); InSequence seq; expect_work_queue(mock_threads); Context *timer_ctx1 = nullptr; expect_add_event_after(mock_threads, &timer_ctx1); instance_replayer.init(); instance_replayer.add_peer({"peer_uuid", m_remote_io_ctx, {}, nullptr}); // Acquire C_SaferCond 
on_acquire; EXPECT_CALL(mock_image_replayer, add_peer(_)); EXPECT_CALL(mock_image_replayer, is_stopped()).WillOnce(Return(true)); EXPECT_CALL(mock_image_replayer, is_blocklisted()).WillOnce(Return(false)); EXPECT_CALL(mock_image_replayer, is_finished()).WillOnce(Return(false)); EXPECT_CALL(mock_image_replayer, start(_, false)) .WillOnce(CompleteContext(0)); expect_work_queue(mock_threads); instance_replayer.acquire_image(&mock_instance_watcher, global_image_id, &on_acquire); ASSERT_EQ(0, on_acquire.wait()); // periodic start timer Context *timer_ctx2 = nullptr; expect_add_event_after(mock_threads, &timer_ctx2); Context *start_image_replayers_ctx = nullptr; EXPECT_CALL(*mock_threads.work_queue, queue(_, 0)) .WillOnce(Invoke([&start_image_replayers_ctx](Context *ctx, int r) { start_image_replayers_ctx = ctx; })); ASSERT_TRUE(timer_ctx1 != nullptr); { std::lock_guard timer_locker{mock_threads.timer_lock}; timer_ctx1->complete(0); } // remove finished image replayer EXPECT_CALL(mock_image_replayer, get_health_state()).WillOnce( Return(image_replayer::HEALTH_STATE_OK)); EXPECT_CALL(mock_image_replayer, is_stopped()).WillOnce(Return(true)); EXPECT_CALL(mock_image_replayer, is_blocklisted()).WillOnce(Return(false)); EXPECT_CALL(mock_image_replayer, is_finished()).WillOnce(Return(true)); EXPECT_CALL(mock_image_replayer, destroy()); EXPECT_CALL(mock_service_daemon, add_or_update_namespace_attribute(_, _, _, _)).Times(3); ASSERT_TRUE(start_image_replayers_ctx != nullptr); start_image_replayers_ctx->complete(0); // shut down expect_work_queue(mock_threads); expect_cancel_event(mock_threads, true); expect_work_queue(mock_threads); instance_replayer.shut_down(); ASSERT_TRUE(timer_ctx2 != nullptr); delete timer_ctx2; } TEST_F(TestMockInstanceReplayer, Reacquire) { MockThreads mock_threads(m_threads); MockServiceDaemon mock_service_daemon; MockMirrorStatusUpdater mock_status_updater; MockInstanceWatcher mock_instance_watcher; MockImageReplayer mock_image_replayer; 
MockInstanceReplayer instance_replayer( m_local_io_ctx, "local_mirror_uuid", &mock_threads, &mock_service_daemon, &mock_status_updater, nullptr, nullptr); std::string global_image_id("global_image_id"); EXPECT_CALL(mock_image_replayer, get_global_image_id()) .WillRepeatedly(ReturnRef(global_image_id)); InSequence seq; expect_work_queue(mock_threads); Context *timer_ctx = nullptr; expect_add_event_after(mock_threads, &timer_ctx); instance_replayer.init(); instance_replayer.add_peer({"peer_uuid", m_remote_io_ctx, {}, nullptr}); // Acquire EXPECT_CALL(mock_image_replayer, add_peer(_)); EXPECT_CALL(mock_image_replayer, is_stopped()).WillOnce(Return(true)); EXPECT_CALL(mock_image_replayer, is_blocklisted()).WillOnce(Return(false)); EXPECT_CALL(mock_image_replayer, is_finished()).WillOnce(Return(false)); EXPECT_CALL(mock_image_replayer, start(_, false)) .WillOnce(CompleteContext(0)); expect_work_queue(mock_threads); C_SaferCond on_acquire1; instance_replayer.acquire_image(&mock_instance_watcher, global_image_id, &on_acquire1); ASSERT_EQ(0, on_acquire1.wait()); // Re-acquire EXPECT_CALL(mock_image_replayer, set_finished(false)); EXPECT_CALL(mock_image_replayer, restart(_)) .WillOnce(CompleteContext(0)); expect_work_queue(mock_threads); C_SaferCond on_acquire2; instance_replayer.acquire_image(&mock_instance_watcher, global_image_id, &on_acquire2); ASSERT_EQ(0, on_acquire2.wait()); expect_work_queue(mock_threads); expect_cancel_event(mock_threads, true); EXPECT_CALL(mock_image_replayer, is_stopped()).WillOnce(Return(true)); expect_work_queue(mock_threads); expect_work_queue(mock_threads); EXPECT_CALL(mock_image_replayer, is_stopped()).WillOnce(Return(true)); EXPECT_CALL(mock_image_replayer, destroy()); instance_replayer.shut_down(); ASSERT_TRUE(timer_ctx != nullptr); delete timer_ctx; } } // namespace mirror } // namespace rbd
12,869
32.603133
103
cc
null
ceph-main/src/test/rbd_mirror/test_mock_InstanceWatcher.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librados/AioCompletionImpl.h" #include "librbd/ManagedLock.h" #include "test/librados/test_cxx.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "test/librados_test_stub/MockTestMemRadosClient.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/rbd_mirror/test_mock_fixture.h" #include "tools/rbd_mirror/InstanceReplayer.h" #include "tools/rbd_mirror/InstanceWatcher.h" #include "tools/rbd_mirror/Threads.h" namespace librbd { namespace { struct MockTestImageCtx : public MockImageCtx { MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace template <> struct ManagedLock<MockTestImageCtx> { static ManagedLock* s_instance; static ManagedLock *create(librados::IoCtx& ioctx, librbd::AsioEngine& asio_engine, const std::string& oid, librbd::Watcher *watcher, managed_lock::Mode mode, bool blocklist_on_break_lock, uint32_t blocklist_expire_seconds) { ceph_assert(s_instance != nullptr); return s_instance; } ManagedLock() { ceph_assert(s_instance == nullptr); s_instance = this; } ~ManagedLock() { ceph_assert(s_instance == this); s_instance = nullptr; } MOCK_METHOD0(destroy, void()); MOCK_METHOD1(shut_down, void(Context *)); MOCK_METHOD1(acquire_lock, void(Context *)); MOCK_METHOD2(get_locker, void(managed_lock::Locker *, Context *)); MOCK_METHOD3(break_lock, void(const managed_lock::Locker &, bool, Context *)); }; ManagedLock<MockTestImageCtx> *ManagedLock<MockTestImageCtx>::s_instance = nullptr; } // namespace librbd namespace rbd { namespace mirror { template <> struct Threads<librbd::MockTestImageCtx> { ceph::mutex &timer_lock; SafeTimer *timer; librbd::asio::ContextWQ *work_queue; librbd::AsioEngine* asio_engine; Threads(Threads<librbd::ImageCtx> *threads) : timer_lock(threads->timer_lock), timer(threads->timer), work_queue(threads->work_queue), asio_engine(threads->asio_engine) { } 
}; template <> struct InstanceReplayer<librbd::MockTestImageCtx> { MOCK_METHOD3(acquire_image, void(InstanceWatcher<librbd::MockTestImageCtx> *, const std::string &, Context *)); MOCK_METHOD2(release_image, void(const std::string &, Context *)); MOCK_METHOD3(remove_peer_image, void(const std::string&, const std::string&, Context *)); }; template <> struct Throttler<librbd::MockTestImageCtx> { static Throttler* s_instance; Throttler() { ceph_assert(s_instance == nullptr); s_instance = this; } virtual ~Throttler() { ceph_assert(s_instance == this); s_instance = nullptr; } MOCK_METHOD3(start_op, void(const std::string &, const std::string &, Context *)); MOCK_METHOD2(finish_op, void(const std::string &, const std::string &)); MOCK_METHOD2(drain, void(const std::string &, int)); }; Throttler<librbd::MockTestImageCtx>* Throttler<librbd::MockTestImageCtx>::s_instance = nullptr; } // namespace mirror } // namespace rbd // template definitions #include "tools/rbd_mirror/InstanceWatcher.cc" namespace rbd { namespace mirror { using ::testing::_; using ::testing::InSequence; using ::testing::Invoke; using ::testing::Return; using ::testing::StrEq; using ::testing::WithArg; class TestMockInstanceWatcher : public TestMockFixture { public: typedef librbd::ManagedLock<librbd::MockTestImageCtx> MockManagedLock; typedef InstanceReplayer<librbd::MockTestImageCtx> MockInstanceReplayer; typedef InstanceWatcher<librbd::MockTestImageCtx> MockInstanceWatcher; typedef Threads<librbd::MockTestImageCtx> MockThreads; std::string m_instance_id; std::string m_oid; MockThreads *m_mock_threads; void SetUp() override { TestFixture::SetUp(); m_local_io_ctx.remove(RBD_MIRROR_LEADER); EXPECT_EQ(0, m_local_io_ctx.create(RBD_MIRROR_LEADER, true)); m_instance_id = stringify(m_local_io_ctx.get_instance_id()); m_oid = RBD_MIRROR_INSTANCE_PREFIX + m_instance_id; m_mock_threads = new MockThreads(m_threads); } void TearDown() override { delete m_mock_threads; TestMockFixture::TearDown(); } void 
expect_register_watch(librados::MockTestMemIoCtxImpl &mock_io_ctx) { EXPECT_CALL(mock_io_ctx, aio_watch(m_oid, _, _, _)); } void expect_register_watch(librados::MockTestMemIoCtxImpl &mock_io_ctx, const std::string &instance_id) { std::string oid = RBD_MIRROR_INSTANCE_PREFIX + instance_id; EXPECT_CALL(mock_io_ctx, aio_watch(oid, _, _, _)); } void expect_unregister_watch(librados::MockTestMemIoCtxImpl &mock_io_ctx) { EXPECT_CALL(mock_io_ctx, aio_unwatch(_, _)); } void expect_register_instance(librados::MockTestMemIoCtxImpl &mock_io_ctx, int r) { EXPECT_CALL(mock_io_ctx, exec(RBD_MIRROR_LEADER, _, StrEq("rbd"), StrEq("mirror_instances_add"), _, _, _, _)) .WillOnce(Return(r)); } void expect_unregister_instance(librados::MockTestMemIoCtxImpl &mock_io_ctx, int r) { EXPECT_CALL(mock_io_ctx, exec(RBD_MIRROR_LEADER, _, StrEq("rbd"), StrEq("mirror_instances_remove"), _, _, _, _)) .WillOnce(Return(r)); } void expect_acquire_lock(MockManagedLock &mock_managed_lock, int r) { EXPECT_CALL(mock_managed_lock, acquire_lock(_)) .WillOnce(CompleteContext(r)); } void expect_release_lock(MockManagedLock &mock_managed_lock, int r) { EXPECT_CALL(mock_managed_lock, shut_down(_)).WillOnce(CompleteContext(r)); } void expect_destroy_lock(MockManagedLock &mock_managed_lock, Context *ctx = nullptr) { EXPECT_CALL(mock_managed_lock, destroy()) .WillOnce(Invoke([ctx]() { if (ctx != nullptr) { ctx->complete(0); } })); } void expect_get_locker(MockManagedLock &mock_managed_lock, const librbd::managed_lock::Locker &locker, int r) { EXPECT_CALL(mock_managed_lock, get_locker(_, _)) .WillOnce(Invoke([r, locker](librbd::managed_lock::Locker *out, Context *ctx) { if (r == 0) { *out = locker; } ctx->complete(r); })); } void expect_break_lock(MockManagedLock &mock_managed_lock, const librbd::managed_lock::Locker &locker, int r) { EXPECT_CALL(mock_managed_lock, break_lock(locker, true, _)) .WillOnce(WithArg<2>(CompleteContext(r))); } }; TEST_F(TestMockInstanceWatcher, InitShutdown) { MockManagedLock 
mock_managed_lock; librados::MockTestMemIoCtxImpl &mock_io_ctx(get_mock_io_ctx(m_local_io_ctx)); auto instance_watcher = new MockInstanceWatcher( m_local_io_ctx, *m_mock_threads->asio_engine, nullptr, nullptr, m_instance_id); InSequence seq; // Init expect_register_instance(mock_io_ctx, 0); expect_register_watch(mock_io_ctx); expect_acquire_lock(mock_managed_lock, 0); ASSERT_EQ(0, instance_watcher->init()); // Shutdown expect_release_lock(mock_managed_lock, 0); expect_unregister_watch(mock_io_ctx); expect_unregister_instance(mock_io_ctx, 0); instance_watcher->shut_down(); expect_destroy_lock(mock_managed_lock); delete instance_watcher; } TEST_F(TestMockInstanceWatcher, InitError) { MockManagedLock mock_managed_lock; librados::MockTestMemIoCtxImpl &mock_io_ctx(get_mock_io_ctx(m_local_io_ctx)); auto instance_watcher = new MockInstanceWatcher( m_local_io_ctx, *m_mock_threads->asio_engine, nullptr, nullptr, m_instance_id); InSequence seq; expect_register_instance(mock_io_ctx, 0); expect_register_watch(mock_io_ctx); expect_acquire_lock(mock_managed_lock, -EINVAL); expect_unregister_watch(mock_io_ctx); expect_unregister_instance(mock_io_ctx, 0); ASSERT_EQ(-EINVAL, instance_watcher->init()); expect_destroy_lock(mock_managed_lock); delete instance_watcher; } TEST_F(TestMockInstanceWatcher, ShutdownError) { MockManagedLock mock_managed_lock; librados::MockTestMemIoCtxImpl &mock_io_ctx(get_mock_io_ctx(m_local_io_ctx)); auto instance_watcher = new MockInstanceWatcher( m_local_io_ctx, *m_mock_threads->asio_engine, nullptr, nullptr, m_instance_id); InSequence seq; // Init expect_register_instance(mock_io_ctx, 0); expect_register_watch(mock_io_ctx); expect_acquire_lock(mock_managed_lock, 0); ASSERT_EQ(0, instance_watcher->init()); // Shutdown expect_release_lock(mock_managed_lock, -EINVAL); expect_unregister_watch(mock_io_ctx); expect_unregister_instance(mock_io_ctx, 0); instance_watcher->shut_down(); expect_destroy_lock(mock_managed_lock); delete instance_watcher; } 
TEST_F(TestMockInstanceWatcher, Remove) { MockManagedLock mock_managed_lock; librados::MockTestMemIoCtxImpl &mock_io_ctx(get_mock_io_ctx(m_local_io_ctx)); librbd::managed_lock::Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123}; InSequence seq; expect_get_locker(mock_managed_lock, locker, 0); expect_break_lock(mock_managed_lock, locker, 0); expect_unregister_instance(mock_io_ctx, 0); C_SaferCond on_destroy; expect_destroy_lock(mock_managed_lock, &on_destroy); C_SaferCond on_remove; MockInstanceWatcher::remove_instance(m_local_io_ctx, *m_mock_threads->asio_engine, "instance_id", &on_remove); ASSERT_EQ(0, on_remove.wait()); ASSERT_EQ(0, on_destroy.wait()); } TEST_F(TestMockInstanceWatcher, RemoveNoent) { MockManagedLock mock_managed_lock; librados::MockTestMemIoCtxImpl &mock_io_ctx(get_mock_io_ctx(m_local_io_ctx)); InSequence seq; expect_get_locker(mock_managed_lock, librbd::managed_lock::Locker(), -ENOENT); expect_unregister_instance(mock_io_ctx, 0); C_SaferCond on_destroy; expect_destroy_lock(mock_managed_lock, &on_destroy); C_SaferCond on_remove; MockInstanceWatcher::remove_instance(m_local_io_ctx, *m_mock_threads->asio_engine, "instance_id", &on_remove); ASSERT_EQ(0, on_remove.wait()); ASSERT_EQ(0, on_destroy.wait()); } TEST_F(TestMockInstanceWatcher, ImageAcquireRelease) { MockManagedLock mock_managed_lock; librados::IoCtx& io_ctx1 = m_local_io_ctx; std::string instance_id1 = m_instance_id; librados::MockTestMemIoCtxImpl &mock_io_ctx1(get_mock_io_ctx(io_ctx1)); MockInstanceReplayer mock_instance_replayer1; auto instance_watcher1 = MockInstanceWatcher::create( io_ctx1, *m_mock_threads->asio_engine, &mock_instance_replayer1, nullptr); librados::Rados cluster; librados::IoCtx io_ctx2; EXPECT_EQ("", connect_cluster_pp(cluster)); EXPECT_EQ(0, cluster.ioctx_create(_local_pool_name.c_str(), io_ctx2)); std::string instance_id2 = stringify(io_ctx2.get_instance_id()); librados::MockTestMemIoCtxImpl &mock_io_ctx2(get_mock_io_ctx(io_ctx2)); 
MockInstanceReplayer mock_instance_replayer2; auto instance_watcher2 = MockInstanceWatcher::create( io_ctx2, *m_mock_threads->asio_engine, &mock_instance_replayer2, nullptr); InSequence seq; // Init instance watcher 1 expect_register_instance(mock_io_ctx1, 0); expect_register_watch(mock_io_ctx1, instance_id1); expect_acquire_lock(mock_managed_lock, 0); ASSERT_EQ(0, instance_watcher1->init()); // Init instance watcher 2 expect_register_instance(mock_io_ctx2, 0); expect_register_watch(mock_io_ctx2, instance_id2); expect_acquire_lock(mock_managed_lock, 0); ASSERT_EQ(0, instance_watcher2->init()); // Acquire Image on the same instance EXPECT_CALL(mock_instance_replayer1, acquire_image(instance_watcher1, "gid", _)) .WillOnce(WithArg<2>(CompleteContext(0))); C_SaferCond on_acquire1; instance_watcher1->notify_image_acquire(instance_id1, "gid", &on_acquire1); ASSERT_EQ(0, on_acquire1.wait()); // Acquire Image on the other instance EXPECT_CALL(mock_instance_replayer2, acquire_image(instance_watcher2, "gid", _)) .WillOnce(WithArg<2>(CompleteContext(0))); C_SaferCond on_acquire2; instance_watcher1->notify_image_acquire(instance_id2, "gid", &on_acquire2); ASSERT_EQ(0, on_acquire2.wait()); // Release Image on the same instance EXPECT_CALL(mock_instance_replayer1, release_image("gid", _)) .WillOnce(WithArg<1>(CompleteContext(0))); C_SaferCond on_release1; instance_watcher1->notify_image_release(instance_id1, "gid", &on_release1); ASSERT_EQ(0, on_release1.wait()); // Release Image on the other instance EXPECT_CALL(mock_instance_replayer2, release_image("gid", _)) .WillOnce(WithArg<1>(CompleteContext(0))); C_SaferCond on_release2; instance_watcher1->notify_image_release(instance_id2, "gid", &on_release2); ASSERT_EQ(0, on_release2.wait()); // Shutdown instance watcher 1 expect_release_lock(mock_managed_lock, 0); expect_unregister_watch(mock_io_ctx1); expect_unregister_instance(mock_io_ctx1, 0); instance_watcher1->shut_down(); expect_destroy_lock(mock_managed_lock); delete 
instance_watcher1; // Shutdown instance watcher 2 expect_release_lock(mock_managed_lock, 0); expect_unregister_watch(mock_io_ctx2); expect_unregister_instance(mock_io_ctx2, 0); instance_watcher2->shut_down(); expect_destroy_lock(mock_managed_lock); delete instance_watcher2; } TEST_F(TestMockInstanceWatcher, PeerImageRemoved) { MockManagedLock mock_managed_lock; librados::IoCtx& io_ctx1 = m_local_io_ctx; std::string instance_id1 = m_instance_id; librados::MockTestMemIoCtxImpl &mock_io_ctx1(get_mock_io_ctx(io_ctx1)); MockInstanceReplayer mock_instance_replayer1; auto instance_watcher1 = MockInstanceWatcher::create( io_ctx1, *m_mock_threads->asio_engine, &mock_instance_replayer1, nullptr); librados::Rados cluster; librados::IoCtx io_ctx2; EXPECT_EQ("", connect_cluster_pp(cluster)); EXPECT_EQ(0, cluster.ioctx_create(_local_pool_name.c_str(), io_ctx2)); std::string instance_id2 = stringify(io_ctx2.get_instance_id()); librados::MockTestMemIoCtxImpl &mock_io_ctx2(get_mock_io_ctx(io_ctx2)); MockInstanceReplayer mock_instance_replayer2; auto instance_watcher2 = MockInstanceWatcher::create( io_ctx2, *m_mock_threads->asio_engine, &mock_instance_replayer2, nullptr); InSequence seq; // Init instance watcher 1 expect_register_instance(mock_io_ctx1, 0); expect_register_watch(mock_io_ctx1, instance_id1); expect_acquire_lock(mock_managed_lock, 0); ASSERT_EQ(0, instance_watcher1->init()); // Init instance watcher 2 expect_register_instance(mock_io_ctx2, 0); expect_register_watch(mock_io_ctx2, instance_id2); expect_acquire_lock(mock_managed_lock, 0); ASSERT_EQ(0, instance_watcher2->init()); // Peer Image Removed on the same instance EXPECT_CALL(mock_instance_replayer1, remove_peer_image("gid", "uuid", _)) .WillOnce(WithArg<2>(CompleteContext(0))); C_SaferCond on_removed1; instance_watcher1->notify_peer_image_removed(instance_id1, "gid", "uuid", &on_removed1); ASSERT_EQ(0, on_removed1.wait()); // Peer Image Removed on the other instance EXPECT_CALL(mock_instance_replayer2, 
remove_peer_image("gid", "uuid", _)) .WillOnce(WithArg<2>(CompleteContext(0))); C_SaferCond on_removed2; instance_watcher1->notify_peer_image_removed(instance_id2, "gid", "uuid", &on_removed2); ASSERT_EQ(0, on_removed2.wait()); // Shutdown instance watcher 1 expect_release_lock(mock_managed_lock, 0); expect_unregister_watch(mock_io_ctx1); expect_unregister_instance(mock_io_ctx1, 0); instance_watcher1->shut_down(); expect_destroy_lock(mock_managed_lock); delete instance_watcher1; // Shutdown instance watcher 2 expect_release_lock(mock_managed_lock, 0); expect_unregister_watch(mock_io_ctx2); expect_unregister_instance(mock_io_ctx2, 0); instance_watcher2->shut_down(); expect_destroy_lock(mock_managed_lock); delete instance_watcher2; } TEST_F(TestMockInstanceWatcher, ImageAcquireReleaseCancel) { MockManagedLock mock_managed_lock; librados::MockTestMemIoCtxImpl &mock_io_ctx(get_mock_io_ctx(m_local_io_ctx)); auto instance_watcher = new MockInstanceWatcher( m_local_io_ctx, *m_mock_threads->asio_engine, nullptr, nullptr, m_instance_id); InSequence seq; // Init expect_register_instance(mock_io_ctx, 0); expect_register_watch(mock_io_ctx); expect_acquire_lock(mock_managed_lock, 0); ASSERT_EQ(0, instance_watcher->init()); // Send Acquire Image and cancel EXPECT_CALL(mock_io_ctx, aio_notify(_, _, _, _, _)) .WillOnce(Invoke( [this, instance_watcher, &mock_io_ctx]( const std::string& o, librados::AioCompletionImpl *c, bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl) { c->get(); auto ctx = new LambdaContext( [instance_watcher, &mock_io_ctx, c, pbl](int r) { instance_watcher->cancel_notify_requests("other"); encode(librbd::watcher::NotifyResponse(), *pbl); mock_io_ctx.get_mock_rados_client()-> finish_aio_completion(c, -ETIMEDOUT); }); m_threads->work_queue->queue(ctx, 0); })); C_SaferCond on_acquire; instance_watcher->notify_image_acquire("other", "gid", &on_acquire); ASSERT_EQ(-ECANCELED, on_acquire.wait()); // Send Release Image and cancel EXPECT_CALL(mock_io_ctx, 
aio_notify(_, _, _, _, _)) .WillOnce(Invoke( [this, instance_watcher, &mock_io_ctx]( const std::string& o, librados::AioCompletionImpl *c, bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl) { c->get(); auto ctx = new LambdaContext( [instance_watcher, &mock_io_ctx, c, pbl](int r) { instance_watcher->cancel_notify_requests("other"); encode(librbd::watcher::NotifyResponse(), *pbl); mock_io_ctx.get_mock_rados_client()-> finish_aio_completion(c, -ETIMEDOUT); }); m_threads->work_queue->queue(ctx, 0); })); C_SaferCond on_release; instance_watcher->notify_image_release("other", "gid", &on_release); ASSERT_EQ(-ECANCELED, on_release.wait()); // Shutdown expect_release_lock(mock_managed_lock, 0); expect_unregister_watch(mock_io_ctx); expect_unregister_instance(mock_io_ctx, 0); instance_watcher->shut_down(); expect_destroy_lock(mock_managed_lock); delete instance_watcher; } TEST_F(TestMockInstanceWatcher, PeerImageAcquireWatchDNE) { MockManagedLock mock_managed_lock; librados::MockTestMemIoCtxImpl &mock_io_ctx(get_mock_io_ctx(m_local_io_ctx)); MockInstanceReplayer mock_instance_replayer; auto instance_watcher = new MockInstanceWatcher( m_local_io_ctx, *m_mock_threads->asio_engine, &mock_instance_replayer, nullptr, m_instance_id); InSequence seq; // Init expect_register_instance(mock_io_ctx, 0); expect_register_watch(mock_io_ctx); expect_acquire_lock(mock_managed_lock, 0); ASSERT_EQ(0, instance_watcher->init()); // Acquire image on dead (blocklisted) instance C_SaferCond on_acquire; instance_watcher->notify_image_acquire("dead instance", "global image id", &on_acquire); ASSERT_EQ(-ENOENT, on_acquire.wait()); // Shutdown expect_release_lock(mock_managed_lock, 0); expect_unregister_watch(mock_io_ctx); expect_unregister_instance(mock_io_ctx, 0); instance_watcher->shut_down(); expect_destroy_lock(mock_managed_lock); delete instance_watcher; } TEST_F(TestMockInstanceWatcher, PeerImageReleaseWatchDNE) { MockManagedLock mock_managed_lock; librados::MockTestMemIoCtxImpl 
&mock_io_ctx(get_mock_io_ctx(m_local_io_ctx)); MockInstanceReplayer mock_instance_replayer; auto instance_watcher = new MockInstanceWatcher( m_local_io_ctx, *m_mock_threads->asio_engine, &mock_instance_replayer, nullptr, m_instance_id); InSequence seq; // Init expect_register_instance(mock_io_ctx, 0); expect_register_watch(mock_io_ctx); expect_acquire_lock(mock_managed_lock, 0); ASSERT_EQ(0, instance_watcher->init()); // Release image on dead (blocklisted) instance C_SaferCond on_acquire; instance_watcher->notify_image_release("dead instance", "global image id", &on_acquire); ASSERT_EQ(-ENOENT, on_acquire.wait()); // Shutdown expect_release_lock(mock_managed_lock, 0); expect_unregister_watch(mock_io_ctx); expect_unregister_instance(mock_io_ctx, 0); instance_watcher->shut_down(); expect_destroy_lock(mock_managed_lock); delete instance_watcher; } TEST_F(TestMockInstanceWatcher, PeerImageRemovedCancel) { MockManagedLock mock_managed_lock; librados::MockTestMemIoCtxImpl &mock_io_ctx(get_mock_io_ctx(m_local_io_ctx)); auto instance_watcher = new MockInstanceWatcher( m_local_io_ctx, *m_mock_threads->asio_engine, nullptr, nullptr, m_instance_id); InSequence seq; // Init expect_register_instance(mock_io_ctx, 0); expect_register_watch(mock_io_ctx); expect_acquire_lock(mock_managed_lock, 0); ASSERT_EQ(0, instance_watcher->init()); // Send Acquire Image and cancel EXPECT_CALL(mock_io_ctx, aio_notify(_, _, _, _, _)) .WillOnce(Invoke( [this, instance_watcher, &mock_io_ctx]( const std::string& o, librados::AioCompletionImpl *c, bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl) { c->get(); auto ctx = new LambdaContext( [instance_watcher, &mock_io_ctx, c, pbl](int r) { instance_watcher->cancel_notify_requests("other"); encode(librbd::watcher::NotifyResponse(), *pbl); mock_io_ctx.get_mock_rados_client()-> finish_aio_completion(c, -ETIMEDOUT); }); m_threads->work_queue->queue(ctx, 0); })); C_SaferCond on_acquire; instance_watcher->notify_peer_image_removed("other", "gid", "uuid", 
&on_acquire); ASSERT_EQ(-ECANCELED, on_acquire.wait()); // Shutdown expect_release_lock(mock_managed_lock, 0); expect_unregister_watch(mock_io_ctx); expect_unregister_instance(mock_io_ctx, 0); instance_watcher->shut_down(); expect_destroy_lock(mock_managed_lock); delete instance_watcher; } class TestMockInstanceWatcher_NotifySync : public TestMockInstanceWatcher { public: typedef Throttler<librbd::MockTestImageCtx> MockThrottler; MockManagedLock mock_managed_lock; MockThrottler mock_image_sync_throttler; std::string instance_id1; std::string instance_id2; librados::Rados cluster; librados::IoCtx io_ctx2; MockInstanceWatcher *instance_watcher1; MockInstanceWatcher *instance_watcher2; void SetUp() override { TestMockInstanceWatcher::SetUp(); instance_id1 = m_instance_id; librados::IoCtx& io_ctx1 = m_local_io_ctx; librados::MockTestMemIoCtxImpl &mock_io_ctx1(get_mock_io_ctx(io_ctx1)); instance_watcher1 = MockInstanceWatcher::create(io_ctx1, *m_mock_threads->asio_engine, nullptr, &mock_image_sync_throttler); EXPECT_EQ("", connect_cluster_pp(cluster)); EXPECT_EQ(0, cluster.ioctx_create(_local_pool_name.c_str(), io_ctx2)); instance_id2 = stringify(io_ctx2.get_instance_id()); librados::MockTestMemIoCtxImpl &mock_io_ctx2(get_mock_io_ctx(io_ctx2)); instance_watcher2 = MockInstanceWatcher::create(io_ctx2, *m_mock_threads->asio_engine, nullptr, &mock_image_sync_throttler); InSequence seq; // Init instance watcher 1 (leader) expect_register_instance(mock_io_ctx1, 0); expect_register_watch(mock_io_ctx1, instance_id1); expect_acquire_lock(mock_managed_lock, 0); EXPECT_EQ(0, instance_watcher1->init()); instance_watcher1->handle_acquire_leader(); // Init instance watcher 2 expect_register_instance(mock_io_ctx2, 0); expect_register_watch(mock_io_ctx2, instance_id2); expect_acquire_lock(mock_managed_lock, 0); EXPECT_EQ(0, instance_watcher2->init()); instance_watcher2->handle_update_leader(instance_id1); } void TearDown() override { librados::IoCtx& io_ctx1 = m_local_io_ctx; 
librados::MockTestMemIoCtxImpl &mock_io_ctx1(get_mock_io_ctx(io_ctx1)); librados::MockTestMemIoCtxImpl &mock_io_ctx2(get_mock_io_ctx(io_ctx2)); InSequence seq; expect_throttler_drain(); instance_watcher1->handle_release_leader(); // Shutdown instance watcher 1 expect_release_lock(mock_managed_lock, 0); expect_unregister_watch(mock_io_ctx1); expect_unregister_instance(mock_io_ctx1, 0); instance_watcher1->shut_down(); expect_destroy_lock(mock_managed_lock); delete instance_watcher1; // Shutdown instance watcher 2 expect_release_lock(mock_managed_lock, 0); expect_unregister_watch(mock_io_ctx2); expect_unregister_instance(mock_io_ctx2, 0); instance_watcher2->shut_down(); expect_destroy_lock(mock_managed_lock); delete instance_watcher2; TestMockInstanceWatcher::TearDown(); } void expect_throttler_start_op(const std::string &sync_id, Context *on_call = nullptr, Context **on_start_ctx = nullptr) { EXPECT_CALL(mock_image_sync_throttler, start_op("", sync_id, _)) .WillOnce(Invoke([on_call, on_start_ctx] (const std::string &, const std::string &, Context *ctx) { if (on_start_ctx != nullptr) { *on_start_ctx = ctx; } else { ctx->complete(0); } if (on_call != nullptr) { on_call->complete(0); } })); } void expect_throttler_finish_op(const std::string &sync_id, Context *on_finish) { EXPECT_CALL(mock_image_sync_throttler, finish_op("", "sync_id")) .WillOnce(Invoke([on_finish](const std::string &, const std::string &) { on_finish->complete(0); })); } void expect_throttler_drain() { EXPECT_CALL(mock_image_sync_throttler, drain("", -ESTALE)); } }; TEST_F(TestMockInstanceWatcher_NotifySync, StartStopOnLeader) { InSequence seq; expect_throttler_start_op("sync_id"); C_SaferCond on_start; instance_watcher1->notify_sync_request("sync_id", &on_start); ASSERT_EQ(0, on_start.wait()); C_SaferCond on_finish; expect_throttler_finish_op("sync_id", &on_finish); instance_watcher1->notify_sync_complete("sync_id"); ASSERT_EQ(0, on_finish.wait()); } TEST_F(TestMockInstanceWatcher_NotifySync, 
CancelStartedOnLeader) { InSequence seq; expect_throttler_start_op("sync_id"); C_SaferCond on_start; instance_watcher1->notify_sync_request("sync_id", &on_start); ASSERT_EQ(0, on_start.wait()); ASSERT_FALSE(instance_watcher1->cancel_sync_request("sync_id")); C_SaferCond on_finish; expect_throttler_finish_op("sync_id", &on_finish); instance_watcher1->notify_sync_complete("sync_id"); ASSERT_EQ(0, on_finish.wait()); } TEST_F(TestMockInstanceWatcher_NotifySync, StartStopOnNonLeader) { InSequence seq; expect_throttler_start_op("sync_id"); C_SaferCond on_start; instance_watcher2->notify_sync_request("sync_id", &on_start); ASSERT_EQ(0, on_start.wait()); C_SaferCond on_finish; expect_throttler_finish_op("sync_id", &on_finish); instance_watcher2->notify_sync_complete("sync_id"); ASSERT_EQ(0, on_finish.wait()); } TEST_F(TestMockInstanceWatcher_NotifySync, CancelStartedOnNonLeader) { InSequence seq; expect_throttler_start_op("sync_id"); C_SaferCond on_start; instance_watcher2->notify_sync_request("sync_id", &on_start); ASSERT_EQ(0, on_start.wait()); ASSERT_FALSE(instance_watcher2->cancel_sync_request("sync_id")); C_SaferCond on_finish; expect_throttler_finish_op("sync_id", &on_finish); instance_watcher2->notify_sync_complete("sync_id"); ASSERT_EQ(0, on_finish.wait()); } TEST_F(TestMockInstanceWatcher_NotifySync, CancelWaitingOnNonLeader) { InSequence seq; C_SaferCond on_start_op_called; Context *on_start_ctx; expect_throttler_start_op("sync_id", &on_start_op_called, &on_start_ctx); C_SaferCond on_start; instance_watcher2->notify_sync_request("sync_id", &on_start); ASSERT_EQ(0, on_start_op_called.wait()); ASSERT_TRUE(instance_watcher2->cancel_sync_request("sync_id")); // emulate watcher timeout on_start_ctx->complete(-ETIMEDOUT); ASSERT_EQ(-ECANCELED, on_start.wait()); } TEST_F(TestMockInstanceWatcher_NotifySync, InFlightPrevNotification) { // start sync when previous notification is still in flight InSequence seq; expect_throttler_start_op("sync_id"); C_SaferCond on_start1; 
instance_watcher2->notify_sync_request("sync_id", &on_start1); ASSERT_EQ(0, on_start1.wait()); C_SaferCond on_start2; EXPECT_CALL(mock_image_sync_throttler, finish_op("", "sync_id")) .WillOnce(Invoke([this, &on_start2](const std::string &, const std::string &) { instance_watcher2->notify_sync_request("sync_id", &on_start2); })); expect_throttler_start_op("sync_id"); instance_watcher2->notify_sync_complete("sync_id"); ASSERT_EQ(0, on_start2.wait()); C_SaferCond on_finish; expect_throttler_finish_op("sync_id", &on_finish); instance_watcher2->notify_sync_complete("sync_id"); ASSERT_EQ(0, on_finish.wait()); } TEST_F(TestMockInstanceWatcher_NotifySync, NoInFlightReleaseAcquireLeader) { InSequence seq; expect_throttler_drain(); instance_watcher1->handle_release_leader(); instance_watcher1->handle_acquire_leader(); } TEST_F(TestMockInstanceWatcher_NotifySync, StartedOnLeaderReleaseLeader) { InSequence seq; expect_throttler_drain(); instance_watcher1->handle_release_leader(); instance_watcher2->handle_acquire_leader(); expect_throttler_start_op("sync_id"); C_SaferCond on_start; instance_watcher2->notify_sync_request("sync_id", &on_start); ASSERT_EQ(0, on_start.wait()); expect_throttler_drain(); instance_watcher2->handle_release_leader(); instance_watcher2->notify_sync_complete("sync_id"); instance_watcher1->handle_acquire_leader(); } TEST_F(TestMockInstanceWatcher_NotifySync, WaitingOnLeaderReleaseLeader) { InSequence seq; C_SaferCond on_start_op_called; Context *on_start_ctx; expect_throttler_start_op("sync_id", &on_start_op_called, &on_start_ctx); C_SaferCond on_start; instance_watcher1->notify_sync_request("sync_id", &on_start); ASSERT_EQ(0, on_start_op_called.wait()); expect_throttler_drain(); instance_watcher1->handle_release_leader(); // emulate throttler queue drain on leader release on_start_ctx->complete(-ESTALE); expect_throttler_start_op("sync_id"); instance_watcher2->handle_acquire_leader(); instance_watcher1->handle_update_leader(instance_id2); ASSERT_EQ(0, 
on_start.wait()); C_SaferCond on_finish; expect_throttler_finish_op("sync_id", &on_finish); instance_watcher1->notify_sync_complete("sync_id"); ASSERT_EQ(0, on_finish.wait()); expect_throttler_drain(); instance_watcher2->handle_release_leader(); instance_watcher1->handle_acquire_leader(); } TEST_F(TestMockInstanceWatcher_NotifySync, StartedOnNonLeaderAcquireLeader) { InSequence seq; expect_throttler_drain(); instance_watcher1->handle_release_leader(); instance_watcher2->handle_acquire_leader(); instance_watcher1->handle_update_leader(instance_id2); expect_throttler_start_op("sync_id"); C_SaferCond on_start; instance_watcher1->notify_sync_request("sync_id", &on_start); ASSERT_EQ(0, on_start.wait()); expect_throttler_drain(); instance_watcher2->handle_release_leader(); instance_watcher1->handle_acquire_leader(); instance_watcher2->handle_update_leader(instance_id1); instance_watcher1->notify_sync_complete("sync_id"); } TEST_F(TestMockInstanceWatcher_NotifySync, WaitingOnNonLeaderAcquireLeader) { InSequence seq; C_SaferCond on_start_op_called; Context *on_start_ctx; expect_throttler_start_op("sync_id", &on_start_op_called, &on_start_ctx); C_SaferCond on_start; instance_watcher2->notify_sync_request("sync_id", &on_start); ASSERT_EQ(0, on_start_op_called.wait()); expect_throttler_drain(); instance_watcher1->handle_release_leader(); // emulate throttler queue drain on leader release on_start_ctx->complete(-ESTALE); EXPECT_CALL(mock_image_sync_throttler, start_op("", "sync_id", _)) .WillOnce(WithArg<2>(CompleteContext(0))); instance_watcher2->handle_acquire_leader(); instance_watcher1->handle_update_leader(instance_id2); ASSERT_EQ(0, on_start.wait()); C_SaferCond on_finish; expect_throttler_finish_op("sync_id", &on_finish); instance_watcher2->notify_sync_complete("sync_id"); ASSERT_EQ(0, on_finish.wait()); expect_throttler_drain(); instance_watcher2->handle_release_leader(); instance_watcher1->handle_acquire_leader(); } } // namespace mirror } // namespace rbd
34,516
33.936235
95
cc
null
ceph-main/src/test/rbd_mirror/test_mock_LeaderWatcher.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/AsioEngine.h" #include "librbd/Utils.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/rbd_mirror/test_mock_fixture.h" #include "tools/rbd_mirror/LeaderWatcher.h" #include "tools/rbd_mirror/Threads.h" using librbd::util::create_async_context_callback; namespace librbd { namespace { struct MockTestImageCtx : public MockImageCtx { MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace struct MockManagedLock { static MockManagedLock *s_instance; static MockManagedLock &get_instance() { ceph_assert(s_instance != nullptr); return *s_instance; } MockManagedLock() { s_instance = this; } bool m_release_lock_on_shutdown = false; Context *m_on_released = nullptr; MOCK_METHOD0(construct, void()); MOCK_METHOD0(destroy, void()); MOCK_CONST_METHOD0(is_lock_owner, bool()); MOCK_METHOD1(shut_down, void(Context *)); MOCK_METHOD1(try_acquire_lock, void(Context *)); MOCK_METHOD1(release_lock, void(Context *)); MOCK_METHOD0(reacquire_lock, void()); MOCK_METHOD3(break_lock, void(const managed_lock::Locker &, bool, Context *)); MOCK_METHOD2(get_locker, void(managed_lock::Locker *, Context *)); MOCK_METHOD0(set_state_post_acquiring, void()); MOCK_CONST_METHOD0(is_shutdown, bool()); MOCK_CONST_METHOD0(is_state_post_acquiring, bool()); MOCK_CONST_METHOD0(is_state_pre_releasing, bool()); MOCK_CONST_METHOD0(is_state_locked, bool()); }; MockManagedLock *MockManagedLock::s_instance = nullptr; template <> struct ManagedLock<MockTestImageCtx> { ManagedLock(librados::IoCtx& ioctx, librbd::AsioEngine& asio_engine, const std::string& oid, librbd::Watcher *watcher, managed_lock::Mode mode, bool blocklist_on_break_lock, uint32_t blocklist_expire_seconds) : m_work_queue(asio_engine.get_work_queue()) { MockManagedLock::get_instance().construct(); } virtual ~ManagedLock() { MockManagedLock::get_instance().destroy(); } 
librbd::asio::ContextWQ *m_work_queue; mutable ceph::mutex m_lock = ceph::make_mutex("ManagedLock::m_lock"); bool is_lock_owner() const { return MockManagedLock::get_instance().is_lock_owner(); } void shut_down(Context *on_shutdown) { if (MockManagedLock::get_instance().m_release_lock_on_shutdown) { on_shutdown = new LambdaContext( [this, on_shutdown](int r) { MockManagedLock::get_instance().m_release_lock_on_shutdown = false; shut_down(on_shutdown); }); release_lock(on_shutdown); return; } MockManagedLock::get_instance().shut_down(on_shutdown); } void try_acquire_lock(Context *on_acquired) { Context *post_acquire_ctx = create_async_context_callback( m_work_queue, new LambdaContext( [this, on_acquired](int r) { post_acquire_lock_handler(r, on_acquired); })); MockManagedLock::get_instance().try_acquire_lock(post_acquire_ctx); } void release_lock(Context *on_released) { ceph_assert(MockManagedLock::get_instance().m_on_released == nullptr); MockManagedLock::get_instance().m_on_released = on_released; Context *post_release_ctx = new LambdaContext( [this](int r) { ceph_assert(MockManagedLock::get_instance().m_on_released != nullptr); post_release_lock_handler(false, r, MockManagedLock::get_instance().m_on_released); MockManagedLock::get_instance().m_on_released = nullptr; }); Context *release_ctx = new LambdaContext( [post_release_ctx](int r) { if (r < 0) { MockManagedLock::get_instance().m_on_released->complete(r); } else { MockManagedLock::get_instance().release_lock(post_release_ctx); } }); Context *pre_release_ctx = new LambdaContext( [this, release_ctx](int r) { bool shutting_down = MockManagedLock::get_instance().m_release_lock_on_shutdown; pre_release_lock_handler(shutting_down, release_ctx); }); m_work_queue->queue(pre_release_ctx, 0); } void reacquire_lock(Context* on_finish) { MockManagedLock::get_instance().reacquire_lock(); } void get_locker(managed_lock::Locker *locker, Context *on_finish) { MockManagedLock::get_instance().get_locker(locker, on_finish); } 
void break_lock(const managed_lock::Locker &locker, bool force_break_lock, Context *on_finish) { MockManagedLock::get_instance().break_lock(locker, force_break_lock, on_finish); } void set_state_post_acquiring() { MockManagedLock::get_instance().set_state_post_acquiring(); } bool is_shutdown() const { return MockManagedLock::get_instance().is_shutdown(); } bool is_state_post_acquiring() const { return MockManagedLock::get_instance().is_state_post_acquiring(); } bool is_state_pre_releasing() const { return MockManagedLock::get_instance().is_state_pre_releasing(); } bool is_state_locked() const { return MockManagedLock::get_instance().is_state_locked(); } virtual void post_acquire_lock_handler(int r, Context *on_finish) = 0; virtual void pre_release_lock_handler(bool shutting_down, Context *on_finish) = 0; virtual void post_release_lock_handler(bool shutting_down, int r, Context *on_finish) = 0; }; } // namespace librbd namespace rbd { namespace mirror { template <> struct Threads<librbd::MockTestImageCtx> { ceph::mutex &timer_lock; SafeTimer *timer; librbd::asio::ContextWQ *work_queue; librbd::AsioEngine* asio_engine; Threads(Threads<librbd::ImageCtx> *threads) : timer_lock(threads->timer_lock), timer(threads->timer), work_queue(threads->work_queue), asio_engine(threads->asio_engine) { } }; template <> struct Instances<librbd::MockTestImageCtx> { static Instances* s_instance; static Instances *create(Threads<librbd::MockTestImageCtx> *threads, librados::IoCtx &ioctx, const std::string& instance_id, instances::Listener&) { ceph_assert(s_instance != nullptr); return s_instance; } Instances() { ceph_assert(s_instance == nullptr); s_instance = this; } ~Instances() { ceph_assert(s_instance == this); s_instance = nullptr; } MOCK_METHOD0(destroy, void()); MOCK_METHOD1(init, void(Context *)); MOCK_METHOD1(shut_down, void(Context *)); MOCK_METHOD1(acked, void(const std::vector<std::string> &)); MOCK_METHOD0(unblock_listener, void()); }; Instances<librbd::MockTestImageCtx> 
*Instances<librbd::MockTestImageCtx>::s_instance = nullptr; } // namespace mirror } // namespace rbd // template definitions #include "tools/rbd_mirror/LeaderWatcher.cc" namespace rbd { namespace mirror { using ::testing::_; using ::testing::AtLeast; using ::testing::DoAll; using ::testing::InSequence; using ::testing::Invoke; using ::testing::Return; using librbd::MockManagedLock; struct MockListener : public leader_watcher::Listener { static MockListener* s_instance; MockListener() { ceph_assert(s_instance == nullptr); s_instance = this; } ~MockListener() override { ceph_assert(s_instance == this); s_instance = nullptr; } MOCK_METHOD1(post_acquire_handler, void(Context *)); MOCK_METHOD1(pre_release_handler, void(Context *)); MOCK_METHOD1(update_leader_handler, void(const std::string &)); MOCK_METHOD1(handle_instances_added, void(const InstanceIds&)); MOCK_METHOD1(handle_instances_removed, void(const InstanceIds&)); }; MockListener *MockListener::s_instance = nullptr; class TestMockLeaderWatcher : public TestMockFixture { public: typedef Instances<librbd::MockTestImageCtx> MockInstances; typedef LeaderWatcher<librbd::MockTestImageCtx> MockLeaderWatcher; typedef Threads<librbd::MockTestImageCtx> MockThreads; void SetUp() override { TestMockFixture::SetUp(); m_mock_threads = new MockThreads(m_threads); } void TearDown() override { delete m_mock_threads; TestMockFixture::TearDown(); } void expect_construct(MockManagedLock &mock_managed_lock) { EXPECT_CALL(mock_managed_lock, construct()); } void expect_destroy(MockManagedLock &mock_managed_lock) { EXPECT_CALL(mock_managed_lock, destroy()); } void expect_is_lock_owner(MockManagedLock &mock_managed_lock, bool owner) { EXPECT_CALL(mock_managed_lock, is_lock_owner()) .WillOnce(Return(owner)); } void expect_shut_down(MockManagedLock &mock_managed_lock, bool release_lock_on_shutdown, int r) { mock_managed_lock.m_release_lock_on_shutdown = release_lock_on_shutdown; EXPECT_CALL(mock_managed_lock, shut_down(_)) 
.WillOnce(CompleteContext(r)); } void expect_try_acquire_lock(MockManagedLock &mock_managed_lock, int r) { EXPECT_CALL(mock_managed_lock, try_acquire_lock(_)) .WillOnce(CompleteContext(r)); if (r == 0) { expect_set_state_post_acquiring(mock_managed_lock); } } void expect_release_lock(MockManagedLock &mock_managed_lock, int r, Context *on_finish = nullptr) { EXPECT_CALL(mock_managed_lock, release_lock(_)) .WillOnce(Invoke([on_finish, &mock_managed_lock, r](Context *ctx) { if (on_finish != nullptr) { auto on_released = mock_managed_lock.m_on_released; ceph_assert(on_released != nullptr); mock_managed_lock.m_on_released = new LambdaContext( [on_released, on_finish](int r) { on_released->complete(r); on_finish->complete(r); }); } ctx->complete(r); })); } void expect_get_locker(MockManagedLock &mock_managed_lock, const librbd::managed_lock::Locker &locker, int r) { EXPECT_CALL(mock_managed_lock, get_locker(_, _)) .WillOnce(Invoke([r, locker](librbd::managed_lock::Locker *out, Context *ctx) { if (r == 0) { *out = locker; } ctx->complete(r); })); } void expect_break_lock(MockManagedLock &mock_managed_lock, const librbd::managed_lock::Locker &locker, int r, Context *on_finish) { EXPECT_CALL(mock_managed_lock, break_lock(locker, true, _)) .WillOnce(Invoke([on_finish, r](const librbd::managed_lock::Locker &, bool, Context *ctx) { ctx->complete(r); on_finish->complete(0); })); } void expect_set_state_post_acquiring(MockManagedLock &mock_managed_lock) { EXPECT_CALL(mock_managed_lock, set_state_post_acquiring()); } void expect_is_shutdown(MockManagedLock &mock_managed_lock) { EXPECT_CALL(mock_managed_lock, is_shutdown()) .Times(AtLeast(0)).WillRepeatedly(Return(false)); } void expect_is_leader(MockManagedLock &mock_managed_lock, bool post_acquiring, bool locked) { EXPECT_CALL(mock_managed_lock, is_state_post_acquiring()) .WillOnce(Return(post_acquiring)); if (!post_acquiring) { EXPECT_CALL(mock_managed_lock, is_state_locked()) .WillOnce(Return(locked)); } } void 
expect_is_leader(MockManagedLock &mock_managed_lock) { EXPECT_CALL(mock_managed_lock, is_state_post_acquiring()) .Times(AtLeast(0)).WillRepeatedly(Return(false)); EXPECT_CALL(mock_managed_lock, is_state_locked()) .Times(AtLeast(0)).WillRepeatedly(Return(false)); EXPECT_CALL(mock_managed_lock, is_state_pre_releasing()) .Times(AtLeast(0)).WillRepeatedly(Return(false)); } void expect_notify_heartbeat(MockManagedLock &mock_managed_lock, Context *on_finish) { // is_leader in notify_heartbeat EXPECT_CALL(mock_managed_lock, is_state_post_acquiring()) .WillOnce(Return(false)); EXPECT_CALL(mock_managed_lock, is_state_locked()) .WillOnce(Return(true)); // is_leader in handle_notify_heartbeat EXPECT_CALL(mock_managed_lock, is_state_post_acquiring()) .WillOnce(Return(false)); EXPECT_CALL(mock_managed_lock, is_state_locked()) .WillOnce(DoAll(Invoke([on_finish]() { on_finish->complete(0); }), Return(true))); } void expect_destroy(MockInstances &mock_instances) { EXPECT_CALL(mock_instances, destroy()); } void expect_init(MockInstances &mock_instances, int r) { EXPECT_CALL(mock_instances, init(_)) .WillOnce(CompleteContext(m_mock_threads->work_queue, r)); } void expect_shut_down(MockInstances &mock_instances, int r) { EXPECT_CALL(mock_instances, shut_down(_)) .WillOnce(CompleteContext(m_mock_threads->work_queue, r)); expect_destroy(mock_instances); } void expect_acquire_notify(MockManagedLock &mock_managed_lock, MockListener &mock_listener, int r) { expect_is_leader(mock_managed_lock, true, false); EXPECT_CALL(mock_listener, post_acquire_handler(_)) .WillOnce(CompleteContext(r)); expect_is_leader(mock_managed_lock, true, false); } void expect_release_notify(MockManagedLock &mock_managed_lock, MockListener &mock_listener, int r) { expect_is_leader(mock_managed_lock, false, false); EXPECT_CALL(mock_listener, pre_release_handler(_)) .WillOnce(CompleteContext(r)); expect_is_leader(mock_managed_lock, false, false); } void expect_unblock_listener(MockInstances& mock_instances) { 
EXPECT_CALL(mock_instances, unblock_listener()); } void expect_instances_acked(MockInstances& mock_instances) { EXPECT_CALL(mock_instances, acked(_)); } MockThreads *m_mock_threads; }; TEST_F(TestMockLeaderWatcher, InitShutdown) { MockManagedLock mock_managed_lock; MockInstances mock_instances; MockListener listener; expect_is_shutdown(mock_managed_lock); expect_destroy(mock_managed_lock); InSequence seq; expect_construct(mock_managed_lock); MockLeaderWatcher leader_watcher(m_mock_threads, m_local_io_ctx, &listener); // Init C_SaferCond on_heartbeat_finish; expect_is_leader(mock_managed_lock, false, false); expect_try_acquire_lock(mock_managed_lock, 0); expect_init(mock_instances, 0); expect_acquire_notify(mock_managed_lock, listener, 0); expect_unblock_listener(mock_instances); expect_notify_heartbeat(mock_managed_lock, &on_heartbeat_finish); expect_instances_acked(mock_instances); ASSERT_EQ(0, leader_watcher.init()); ASSERT_EQ(0, on_heartbeat_finish.wait()); // Shutdown expect_release_notify(mock_managed_lock, listener, 0); expect_shut_down(mock_instances, 0); expect_release_lock(mock_managed_lock, 0); expect_shut_down(mock_managed_lock, true, 0); expect_is_leader(mock_managed_lock, false, false); leader_watcher.shut_down(); } TEST_F(TestMockLeaderWatcher, InitReleaseShutdown) { MockManagedLock mock_managed_lock; MockInstances mock_instances; MockListener listener; expect_is_shutdown(mock_managed_lock); expect_destroy(mock_managed_lock); InSequence seq; expect_construct(mock_managed_lock); MockLeaderWatcher leader_watcher(m_mock_threads, m_local_io_ctx, &listener); // Init C_SaferCond on_heartbeat_finish; expect_is_leader(mock_managed_lock, false, false); expect_try_acquire_lock(mock_managed_lock, 0); expect_init(mock_instances, 0); expect_acquire_notify(mock_managed_lock, listener, 0); expect_unblock_listener(mock_instances); expect_notify_heartbeat(mock_managed_lock, &on_heartbeat_finish); expect_instances_acked(mock_instances); ASSERT_EQ(0, 
leader_watcher.init()); ASSERT_EQ(0, on_heartbeat_finish.wait()); // Release expect_is_leader(mock_managed_lock, false, true); expect_release_notify(mock_managed_lock, listener, 0); expect_shut_down(mock_instances, 0); C_SaferCond on_release; expect_release_lock(mock_managed_lock, 0, &on_release); leader_watcher.release_leader(); ASSERT_EQ(0, on_release.wait()); // Shutdown expect_shut_down(mock_managed_lock, false, 0); expect_is_leader(mock_managed_lock, false, false); leader_watcher.shut_down(); } TEST_F(TestMockLeaderWatcher, AcquireError) { MockManagedLock mock_managed_lock; MockInstances mock_instances; MockListener listener; expect_is_shutdown(mock_managed_lock); expect_is_leader(mock_managed_lock); expect_destroy(mock_managed_lock); InSequence seq; expect_construct(mock_managed_lock); MockLeaderWatcher leader_watcher(m_mock_threads, m_local_io_ctx, &listener); // Init C_SaferCond on_heartbeat_finish; expect_is_leader(mock_managed_lock, false, false); expect_try_acquire_lock(mock_managed_lock, -EAGAIN); expect_get_locker(mock_managed_lock, librbd::managed_lock::Locker(), -ENOENT); expect_try_acquire_lock(mock_managed_lock, 0); expect_init(mock_instances, 0); expect_acquire_notify(mock_managed_lock, listener, 0); expect_unblock_listener(mock_instances); expect_notify_heartbeat(mock_managed_lock, &on_heartbeat_finish); expect_instances_acked(mock_instances); ASSERT_EQ(0, leader_watcher.init()); ASSERT_EQ(0, on_heartbeat_finish.wait()); // Shutdown expect_release_notify(mock_managed_lock, listener, 0); expect_shut_down(mock_instances, 0); expect_release_lock(mock_managed_lock, 0); expect_shut_down(mock_managed_lock, true, 0); expect_is_leader(mock_managed_lock, false, false); leader_watcher.shut_down(); } TEST_F(TestMockLeaderWatcher, Break) { EXPECT_EQ(0, _rados->conf_set("rbd_mirror_leader_heartbeat_interval", "1")); EXPECT_EQ(0, _rados->conf_set("rbd_mirror_leader_max_missed_heartbeats", "1")); CephContext *cct = reinterpret_cast<CephContext 
*>(m_local_io_ctx.cct()); int max_acquire_attempts = cct->_conf.get_val<uint64_t>( "rbd_mirror_leader_max_acquire_attempts_before_break"); MockManagedLock mock_managed_lock; MockInstances mock_instances; MockListener listener; librbd::managed_lock::Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123}; expect_is_shutdown(mock_managed_lock); expect_is_leader(mock_managed_lock); expect_destroy(mock_managed_lock); EXPECT_CALL(listener, update_leader_handler(_)); InSequence seq; expect_construct(mock_managed_lock); MockLeaderWatcher leader_watcher(m_mock_threads, m_local_io_ctx, &listener); // Init expect_is_leader(mock_managed_lock, false, false); for (int i = 0; i < max_acquire_attempts; i++) { expect_try_acquire_lock(mock_managed_lock, -EAGAIN); expect_get_locker(mock_managed_lock, locker, 0); } C_SaferCond on_break; expect_break_lock(mock_managed_lock, locker, 0, &on_break); C_SaferCond on_heartbeat_finish; expect_try_acquire_lock(mock_managed_lock, 0); expect_init(mock_instances, 0); expect_acquire_notify(mock_managed_lock, listener, 0); expect_unblock_listener(mock_instances); expect_notify_heartbeat(mock_managed_lock, &on_heartbeat_finish); expect_instances_acked(mock_instances); ASSERT_EQ(0, leader_watcher.init()); ASSERT_EQ(0, on_heartbeat_finish.wait()); // Shutdown expect_release_notify(mock_managed_lock, listener, 0); expect_shut_down(mock_instances, 0); expect_release_lock(mock_managed_lock, 0); expect_shut_down(mock_managed_lock, true, 0); expect_is_leader(mock_managed_lock, false, false); leader_watcher.shut_down(); } } // namespace mirror } // namespace rbd
20,057
31.614634
95
cc
null
ceph-main/src/test/rbd_mirror/test_mock_MirrorStatusUpdater.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/rbd_mirror/test_mock_fixture.h" #include "include/stringify.h" #include "tools/rbd_mirror/MirrorStatusUpdater.h" #include "tools/rbd_mirror/MirrorStatusWatcher.h" #include "tools/rbd_mirror/Threads.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/rbd_mirror/mock/MockContextWQ.h" #include "test/rbd_mirror/mock/MockSafeTimer.h" #include <map> #include <string> #include <utility> namespace librbd { namespace { struct MockTestImageCtx : public MockImageCtx { MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace } // namespace librbd namespace rbd { namespace mirror { template <> struct MirrorStatusWatcher<librbd::MockTestImageCtx> { static MirrorStatusWatcher* s_instance; static MirrorStatusWatcher* create(librados::IoCtx& io_ctx, MockContextWQ* mock_context_wq) { ceph_assert(s_instance != nullptr); return s_instance; } MOCK_METHOD1(init, void(Context*)); MOCK_METHOD1(shut_down, void(Context*)); MirrorStatusWatcher() { s_instance = this; } }; MirrorStatusWatcher<librbd::MockTestImageCtx>* MirrorStatusWatcher<librbd::MockTestImageCtx>::s_instance = nullptr; template <> struct Threads<librbd::MockTestImageCtx> { MockSafeTimer *timer; ceph::mutex &timer_lock; MockContextWQ *work_queue; Threads(Threads<librbd::ImageCtx> *threads) : timer(new MockSafeTimer()), timer_lock(threads->timer_lock), work_queue(new MockContextWQ()) { } ~Threads() { delete timer; delete work_queue; } }; } // namespace mirror } // namespace rbd #include "tools/rbd_mirror/MirrorStatusUpdater.cc" namespace rbd { namespace mirror { using ::testing::_; using ::testing::DoDefault; using ::testing::InSequence; using ::testing::Invoke; using ::testing::StrEq; using ::testing::Return; using ::testing::WithArg; class TestMockMirrorStatusUpdater : public TestMockFixture { 
public: typedef MirrorStatusUpdater<librbd::MockTestImageCtx> MockMirrorStatusUpdater; typedef MirrorStatusWatcher<librbd::MockTestImageCtx> MockMirrorStatusWatcher; typedef Threads<librbd::MockTestImageCtx> MockThreads; typedef std::map<std::string, cls::rbd::MirrorImageSiteStatus> MirrorImageSiteStatuses; void SetUp() override { TestMockFixture::SetUp(); m_mock_local_io_ctx = &get_mock_io_ctx(m_local_io_ctx); m_mock_threads = new MockThreads(m_threads); } void TearDown() override { delete m_mock_threads; TestMockFixture::TearDown(); } void expect_timer_add_event(Context** timer_event) { EXPECT_CALL(*m_mock_threads->timer, add_event_after(_, _)) .WillOnce(WithArg<1>(Invoke([timer_event](Context *ctx) { *timer_event = ctx; return ctx; }))); } void expect_timer_cancel_event() { EXPECT_CALL(*m_mock_threads->timer, cancel_event(_)) .WillOnce(Invoke([](Context* ctx) { delete ctx; return false; })); } void expect_work_queue(bool async) { EXPECT_CALL(*m_mock_threads->work_queue, queue(_, _)) .WillOnce(Invoke([this, async](Context *ctx, int r) { if (async) { m_threads->work_queue->queue(ctx, r); } else { ctx->complete(r); } })); } void expect_mirror_status_watcher_init( MockMirrorStatusWatcher& mock_mirror_status_watcher, int r) { EXPECT_CALL(*mock_mirror_status_watcher.s_instance, init(_)) .WillOnce(Invoke([this, r](Context* ctx) { m_threads->work_queue->queue(ctx, r); })); } void expect_mirror_status_watcher_shut_down( MockMirrorStatusWatcher& mock_mirror_status_watcher, int r) { EXPECT_CALL(*mock_mirror_status_watcher.s_instance, shut_down(_)) .WillOnce(Invoke([this, r](Context* ctx) { m_threads->work_queue->queue(ctx, r); })); } void expect_mirror_status_update( const std::string& global_image_id, const cls::rbd::MirrorImageSiteStatus& mirror_image_status, int r) { EXPECT_CALL(*m_mock_local_io_ctx, exec(RBD_MIRRORING, _, StrEq("rbd"), StrEq("mirror_image_status_set"), _, _, _, _)) .WillOnce(WithArg<4>(Invoke( [r, global_image_id, mirror_image_status](bufferlist& 
in_bl) { auto bl_it = in_bl.cbegin(); std::string decode_global_image_id; decode(decode_global_image_id, bl_it); EXPECT_EQ(global_image_id, decode_global_image_id); cls::rbd::MirrorImageSiteStatus decode_mirror_image_status; decode(decode_mirror_image_status, bl_it); EXPECT_EQ(mirror_image_status, decode_mirror_image_status); return r; }))); } void expect_mirror_status_update( const MirrorImageSiteStatuses& mirror_image_site_statuses, const std::string& mirror_uuid, int r) { EXPECT_CALL(*m_mock_local_io_ctx, aio_operate(_, _, _, _, _, _)) .WillOnce(Invoke([this](auto&&... args) { int r = m_mock_local_io_ctx->do_aio_operate(decltype(args)(args)...); m_mock_local_io_ctx->aio_flush(); return r; })); for (auto [global_image_id, mirror_image_status] : mirror_image_site_statuses) { mirror_image_status.mirror_uuid = mirror_uuid; expect_mirror_status_update(global_image_id, mirror_image_status, r); if (r < 0) { break; } } } void expect_mirror_status_remove(const std::string& global_image_id, int r) { EXPECT_CALL(*m_mock_local_io_ctx, exec(RBD_MIRRORING, _, StrEq("rbd"), StrEq("mirror_image_status_remove"), _, _, _, _)) .WillOnce(WithArg<4>(Invoke( [r, global_image_id](bufferlist& in_bl) { auto bl_it = in_bl.cbegin(); std::string decode_global_image_id; decode(decode_global_image_id, bl_it); EXPECT_EQ(global_image_id, decode_global_image_id); return r; }))); } void expect_mirror_status_removes(const std::set<std::string>& mirror_images, int r) { EXPECT_CALL(*m_mock_local_io_ctx, aio_operate(_, _, _, _, _, _)) .WillOnce(Invoke([this](auto&&... 
args) { int r = m_mock_local_io_ctx->do_aio_operate(decltype(args)(args)...); m_mock_local_io_ctx->aio_flush(); return r; })); for (auto global_image_id : mirror_images) { expect_mirror_status_remove(global_image_id, r); if (r < 0) { break; } } } void fire_timer_event(Context** timer_event, Context** update_task) { expect_timer_add_event(timer_event); // timer queues the update task EXPECT_CALL(*m_mock_threads->work_queue, queue(_, _)) .WillOnce(WithArg<0>(Invoke([update_task](Context* ctx) mutable { *update_task = ctx; }))); // fire the timer task { std::lock_guard timer_locker{m_mock_threads->timer_lock}; ceph_assert(*timer_event != nullptr); (*timer_event)->complete(0); } } void init_mirror_status_updater( MockMirrorStatusUpdater& mock_mirror_status_updater, MockMirrorStatusWatcher& mock_mirror_status_watcher, Context** timer_event) { expect_timer_add_event(timer_event); expect_mirror_status_watcher_init(mock_mirror_status_watcher, 0); expect_work_queue(true); C_SaferCond ctx; mock_mirror_status_updater.init(&ctx); ASSERT_EQ(0, ctx.wait()); } void shut_down_mirror_status_updater( MockMirrorStatusUpdater& mock_mirror_status_updater, MockMirrorStatusWatcher& mock_mirror_status_watcher) { expect_timer_cancel_event(); expect_mirror_status_watcher_shut_down(mock_mirror_status_watcher, 0); expect_work_queue(true); C_SaferCond ctx; mock_mirror_status_updater.shut_down(&ctx); ASSERT_EQ(0, ctx.wait()); } librados::MockTestMemIoCtxImpl* m_mock_local_io_ctx = nullptr; MockThreads* m_mock_threads = nullptr; }; TEST_F(TestMockMirrorStatusUpdater, InitShutDown) { MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx, m_mock_threads, ""); MockMirrorStatusWatcher* mock_mirror_status_watcher = new MockMirrorStatusWatcher(); Context* timer_event = nullptr; init_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher, &timer_event); shut_down_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher); } 
TEST_F(TestMockMirrorStatusUpdater, InitStatusWatcherError) { MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx, m_mock_threads, ""); MockMirrorStatusWatcher* mock_mirror_status_watcher = new MockMirrorStatusWatcher(); Context* timer_event = nullptr; expect_timer_add_event(&timer_event); expect_mirror_status_watcher_init(*mock_mirror_status_watcher, -EINVAL); expect_timer_cancel_event(); expect_work_queue(true); C_SaferCond ctx; mock_mirror_status_updater.init(&ctx); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockMirrorStatusUpdater, ShutDownStatusWatcherError) { MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx, m_mock_threads, ""); MockMirrorStatusWatcher* mock_mirror_status_watcher = new MockMirrorStatusWatcher(); Context* timer_event = nullptr; init_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher, &timer_event); C_SaferCond on_shutdown; expect_timer_cancel_event(); expect_mirror_status_watcher_shut_down(*mock_mirror_status_watcher, -EINVAL); expect_work_queue(true); mock_mirror_status_updater.shut_down(&on_shutdown); ASSERT_EQ(-EINVAL, on_shutdown.wait()); } TEST_F(TestMockMirrorStatusUpdater, SmallBatch) { MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx, m_mock_threads, ""); MockMirrorStatusWatcher* mock_mirror_status_watcher = new MockMirrorStatusWatcher(); InSequence seq; Context* timer_event = nullptr; init_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher, &timer_event); MirrorImageSiteStatuses mirror_image_site_statuses; for (auto i = 0; i < 100; ++i) { auto pair = mirror_image_site_statuses.emplace( stringify(i), cls::rbd::MirrorImageSiteStatus{}); mock_mirror_status_updater.set_mirror_image_status(pair.first->first, pair.first->second, false); } Context* update_task = nullptr; fire_timer_event(&timer_event, &update_task); expect_mirror_status_update(mirror_image_site_statuses, "", 0); update_task->complete(0); 
shut_down_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher); } TEST_F(TestMockMirrorStatusUpdater, LargeBatch) { MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx, m_mock_threads, ""); MockMirrorStatusWatcher* mock_mirror_status_watcher = new MockMirrorStatusWatcher(); InSequence seq; Context* timer_event = nullptr; init_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher, &timer_event); MirrorImageSiteStatuses mirror_image_site_statuses; for (auto i = 0; i < 200; ++i) { auto pair = mirror_image_site_statuses.emplace( stringify(i), cls::rbd::MirrorImageSiteStatus{}); mock_mirror_status_updater.set_mirror_image_status(pair.first->first, pair.first->second, false); } auto it_1 = mirror_image_site_statuses.begin(); auto it_2 = mirror_image_site_statuses.begin(); std::advance(it_2, 100); MirrorImageSiteStatuses mirror_image_site_statuses_1{it_1, it_2}; it_1 = it_2; std::advance(it_2, 100); MirrorImageSiteStatuses mirror_image_site_statuses_2{it_1, it_2}; Context* update_task = nullptr; fire_timer_event(&timer_event, &update_task); expect_mirror_status_update(mirror_image_site_statuses_1, "", 0); expect_mirror_status_update(mirror_image_site_statuses_2, "", 0); update_task->complete(0); shut_down_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher); } TEST_F(TestMockMirrorStatusUpdater, OverwriteStatus) { MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx, m_mock_threads, ""); MockMirrorStatusWatcher* mock_mirror_status_watcher = new MockMirrorStatusWatcher(); InSequence seq; Context* timer_event = nullptr; init_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher, &timer_event); mock_mirror_status_updater.set_mirror_image_status("1", {}, false); mock_mirror_status_updater.set_mirror_image_status( "1", {"", cls::rbd::MIRROR_IMAGE_STATUS_STATE_REPLAYING, "description"}, false); Context* update_task = nullptr; 
fire_timer_event(&timer_event, &update_task); expect_mirror_status_update( {{"1", cls::rbd::MirrorImageSiteStatus{ "", cls::rbd::MIRROR_IMAGE_STATUS_STATE_REPLAYING, "description"}}}, "", 0); update_task->complete(0); shut_down_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher); } TEST_F(TestMockMirrorStatusUpdater, RemoveStatus) { MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx, m_mock_threads, ""); MockMirrorStatusWatcher* mock_mirror_status_watcher = new MockMirrorStatusWatcher(); InSequence seq; Context* timer_event = nullptr; init_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher, &timer_event); C_SaferCond ctx; mock_mirror_status_updater.set_mirror_image_status("1", {}, false); expect_work_queue(false); mock_mirror_status_updater.remove_mirror_image_status("1", false, &ctx); ASSERT_EQ(0, ctx.wait()); Context* update_task = nullptr; fire_timer_event(&timer_event, &update_task); C_SaferCond remove_flush_ctx; EXPECT_CALL(*m_mock_local_io_ctx, aio_operate(_, _, _, _, _, _)) .WillOnce(Invoke([this, &remove_flush_ctx](auto&&... 
args) { int r = m_mock_local_io_ctx->do_aio_operate(decltype(args)(args)...); m_mock_local_io_ctx->aio_flush(); remove_flush_ctx.complete(r); return r; })); expect_mirror_status_remove("1", 0); update_task->complete(0); ASSERT_EQ(0, remove_flush_ctx.wait()); shut_down_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher); } TEST_F(TestMockMirrorStatusUpdater, OverwriteRemoveStatus) { MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx, m_mock_threads, ""); MockMirrorStatusWatcher* mock_mirror_status_watcher = new MockMirrorStatusWatcher(); InSequence seq; Context* timer_event = nullptr; init_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher, &timer_event); C_SaferCond ctx; mock_mirror_status_updater.set_mirror_image_status("1", {}, false); expect_work_queue(false); mock_mirror_status_updater.remove_mirror_image_status("1", false, &ctx); ASSERT_EQ(0, ctx.wait()); mock_mirror_status_updater.set_mirror_image_status( "1", {"", cls::rbd::MIRROR_IMAGE_STATUS_STATE_REPLAYING, "description"}, false); Context* update_task = nullptr; fire_timer_event(&timer_event, &update_task); expect_mirror_status_update( {{"1", cls::rbd::MirrorImageSiteStatus{ "", cls::rbd::MIRROR_IMAGE_STATUS_STATE_REPLAYING, "description"}}}, "", 0); update_task->complete(0); shut_down_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher); } TEST_F(TestMockMirrorStatusUpdater, OverwriteStatusInFlight) { MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx, m_mock_threads, ""); MockMirrorStatusWatcher* mock_mirror_status_watcher = new MockMirrorStatusWatcher(); InSequence seq; Context* timer_event = nullptr; init_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher, &timer_event); mock_mirror_status_updater.set_mirror_image_status("1", {}, false); Context* update_task = nullptr; fire_timer_event(&timer_event, &update_task); EXPECT_CALL(*m_mock_local_io_ctx, aio_operate(_, _, 
_, _, _, _)) .WillOnce(Invoke([this, &mock_mirror_status_updater](auto&&... args) { mock_mirror_status_updater.set_mirror_image_status( "1", {"", cls::rbd::MIRROR_IMAGE_STATUS_STATE_REPLAYING, "description"}, true); int r = m_mock_local_io_ctx->do_aio_operate(decltype(args)(args)...); m_mock_local_io_ctx->aio_flush(); return r; })); expect_mirror_status_update("1", cls::rbd::MirrorImageSiteStatus{}, 0); expect_work_queue(false); expect_mirror_status_update( {{"1", cls::rbd::MirrorImageSiteStatus{ "", cls::rbd::MIRROR_IMAGE_STATUS_STATE_REPLAYING, "description"}}}, "", 0); update_task->complete(0); shut_down_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher); } TEST_F(TestMockMirrorStatusUpdater, ImmediateUpdate) { MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx, m_mock_threads, ""); MockMirrorStatusWatcher* mock_mirror_status_watcher = new MockMirrorStatusWatcher(); InSequence seq; Context* timer_event = nullptr; init_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher, &timer_event); expect_work_queue(false); expect_mirror_status_update({{"1", cls::rbd::MirrorImageSiteStatus{}}}, "", 0); mock_mirror_status_updater.set_mirror_image_status("1", {}, true); shut_down_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher); } TEST_F(TestMockMirrorStatusUpdater, RemoveImmediateUpdate) { MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx, m_mock_threads, ""); MockMirrorStatusWatcher* mock_mirror_status_watcher = new MockMirrorStatusWatcher(); InSequence seq; Context* timer_event = nullptr; init_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher, &timer_event); mock_mirror_status_updater.set_mirror_image_status("1", {}, false); C_SaferCond ctx; expect_work_queue(false); expect_mirror_status_removes({"1"}, 0); expect_work_queue(false); mock_mirror_status_updater.remove_mirror_image_status("1", true, &ctx); ASSERT_EQ(0, ctx.wait()); 
shut_down_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher); } TEST_F(TestMockMirrorStatusUpdater, RemoveRefreshIdleStatus) { MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx, m_mock_threads, ""); MockMirrorStatusWatcher* mock_mirror_status_watcher = new MockMirrorStatusWatcher(); InSequence seq; Context* timer_event = nullptr; init_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher, &timer_event); mock_mirror_status_updater.set_mirror_image_status("1", {}, false); C_SaferCond ctx; expect_work_queue(true); mock_mirror_status_updater.remove_refresh_mirror_image_status("1", &ctx); ASSERT_EQ(0, ctx.wait()); shut_down_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher); } TEST_F(TestMockMirrorStatusUpdater, RemoveRefreshInFlightStatus) { MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx, m_mock_threads, ""); MockMirrorStatusWatcher* mock_mirror_status_watcher = new MockMirrorStatusWatcher(); InSequence seq; Context* timer_event = nullptr; init_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher, &timer_event); mock_mirror_status_updater.set_mirror_image_status("1", {}, false); Context* update_task = nullptr; fire_timer_event(&timer_event, &update_task); C_SaferCond on_removed; EXPECT_CALL(*m_mock_local_io_ctx, aio_operate(_, _, _, _, _, _)) .WillOnce(Invoke( [this, &mock_mirror_status_updater, &on_removed](auto&&... 
args) { mock_mirror_status_updater.remove_refresh_mirror_image_status( "1", &on_removed); int r = m_mock_local_io_ctx->do_aio_operate(decltype(args)(args)...); m_mock_local_io_ctx->aio_flush(); return r; })); update_task->complete(0); ASSERT_EQ(0, on_removed.wait()); shut_down_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher); } TEST_F(TestMockMirrorStatusUpdater, ShutDownWhileUpdating) { MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx, m_mock_threads, ""); MockMirrorStatusWatcher* mock_mirror_status_watcher = new MockMirrorStatusWatcher(); InSequence seq; Context* timer_event = nullptr; init_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher, &timer_event); mock_mirror_status_updater.set_mirror_image_status("1", {}, false); Context* update_task = nullptr; fire_timer_event(&timer_event, &update_task); C_SaferCond on_shutdown; EXPECT_CALL(*m_mock_local_io_ctx, aio_operate(_, _, _, _, _, _)) .WillOnce(Invoke( [this, &mock_mirror_status_updater, &on_shutdown](auto&&... 
args) { mock_mirror_status_updater.shut_down(&on_shutdown); m_threads->work_queue->drain(); int r = m_mock_local_io_ctx->do_aio_operate(decltype(args)(args)...); m_mock_local_io_ctx->aio_flush(); return r; })); expect_timer_cancel_event(); expect_mirror_status_watcher_shut_down(*mock_mirror_status_watcher, 0); update_task->complete(0); ASSERT_EQ(0, on_shutdown.wait()); } TEST_F(TestMockMirrorStatusUpdater, MirrorPeerSitePing) { MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx, m_mock_threads, "mirror uuid"); MockMirrorStatusWatcher* mock_mirror_status_watcher = new MockMirrorStatusWatcher(); InSequence seq; Context* timer_event = nullptr; init_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher, &timer_event); MirrorImageSiteStatuses mirror_image_site_statuses; for (auto i = 0; i < 100; ++i) { auto pair = mirror_image_site_statuses.emplace( stringify(i), cls::rbd::MirrorImageSiteStatus{}); mock_mirror_status_updater.set_mirror_image_status(pair.first->first, pair.first->second, false); } Context* update_task = nullptr; fire_timer_event(&timer_event, &update_task); expect_mirror_status_update(mirror_image_site_statuses, "mirror uuid", 0); update_task->complete(0); shut_down_mirror_status_updater(mock_mirror_status_updater, *mock_mirror_status_watcher); } } // namespace mirror } // namespace rbd
24,494
33.646393
115
cc
null
ceph-main/src/test/rbd_mirror/test_mock_NamespaceReplayer.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/api/Config.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/rbd_mirror/test_mock_fixture.h" #include "test/rbd_mirror/mock/MockContextWQ.h" #include "test/rbd_mirror/mock/MockSafeTimer.h" #include "tools/rbd_mirror/NamespaceReplayer.h" #include "tools/rbd_mirror/ImageDeleter.h" #include "tools/rbd_mirror/ImageMap.h" #include "tools/rbd_mirror/InstanceWatcher.h" #include "tools/rbd_mirror/InstanceReplayer.h" #include "tools/rbd_mirror/MirrorStatusUpdater.h" #include "tools/rbd_mirror/PoolWatcher.h" #include "tools/rbd_mirror/ServiceDaemon.h" #include "tools/rbd_mirror/Threads.h" namespace librbd { namespace { struct MockTestImageCtx : public MockImageCtx { MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace } // namespace librbd namespace rbd { namespace mirror { template <> struct ImageDeleter<librbd::MockTestImageCtx> { static ImageDeleter* s_instance; static ImageDeleter* create( librados::IoCtx &ioctx, Threads<librbd::MockTestImageCtx> *threads, Throttler<librbd::MockTestImageCtx> *image_deletion_throttler, ServiceDaemon<librbd::MockTestImageCtx> *service_daemon) { ceph_assert(s_instance != nullptr); return s_instance; } MOCK_METHOD1(init, void(Context*)); MOCK_METHOD1(shut_down, void(Context*)); MOCK_METHOD2(print_status, void(Formatter*, std::stringstream*)); ImageDeleter() { s_instance = this; } }; ImageDeleter<librbd::MockTestImageCtx>* ImageDeleter<librbd::MockTestImageCtx>::s_instance = nullptr; template<> struct ImageMap<librbd::MockTestImageCtx> { static ImageMap* s_instance; static ImageMap *create(librados::IoCtx &ioctx, Threads<librbd::MockTestImageCtx> *threads, const std::string& instance_id, image_map::Listener &listener) { ceph_assert(s_instance != nullptr); return s_instance; } MOCK_METHOD1(init, void(Context*)); MOCK_METHOD1(shut_down, void(Context*)); 
MOCK_METHOD1(update_instances_added, void(const std::vector<std::string>&)); MOCK_METHOD1(update_instances_removed, void(const std::vector<std::string>&)); MOCK_METHOD3(update_images_mock, void(const std::string&, const std::set<std::string>&, const std::set<std::string>&)); void update_images(const std::string& mirror_uuid, std::set<std::string>&& added, std::set<std::string>&& removed) { update_images_mock(mirror_uuid, added, removed); } ImageMap() { s_instance = this; } }; ImageMap<librbd::MockTestImageCtx>* ImageMap<librbd::MockTestImageCtx>::s_instance = nullptr; template<> struct InstanceReplayer<librbd::MockTestImageCtx> { static InstanceReplayer* s_instance; static InstanceReplayer* create( librados::IoCtx &local_io_ctx, const std::string &local_mirror_uuid, Threads<librbd::MockTestImageCtx> *threads, ServiceDaemon<librbd::MockTestImageCtx> *service_daemon, MirrorStatusUpdater<librbd::MockTestImageCtx>* local_status_updater, journal::CacheManagerHandler *cache_manager_handler, PoolMetaCache* pool_meta_cache) { ceph_assert(s_instance != nullptr); return s_instance; } MOCK_METHOD0(start, void()); MOCK_METHOD0(stop, void()); MOCK_METHOD0(restart, void()); MOCK_METHOD0(flush, void()); MOCK_METHOD1(stop, void(Context *)); MOCK_METHOD2(print_status, void(Formatter*, std::stringstream*)); MOCK_METHOD1(add_peer, void(const Peer<librbd::MockTestImageCtx>&)); MOCK_METHOD1(init, void(Context*)); MOCK_METHOD1(shut_down, void(Context*)); MOCK_METHOD1(release_all, void(Context*)); InstanceReplayer() { s_instance = this; } }; InstanceReplayer<librbd::MockTestImageCtx>* InstanceReplayer<librbd::MockTestImageCtx>::s_instance = nullptr; template<> struct InstanceWatcher<librbd::MockTestImageCtx> { static InstanceWatcher* s_instance; static InstanceWatcher* create( librados::IoCtx &ioctx, librbd::AsioEngine& asio_engine, InstanceReplayer<librbd::MockTestImageCtx>* instance_replayer, Throttler<librbd::MockTestImageCtx> *image_sync_throttler) { ceph_assert(s_instance != 
nullptr); return s_instance; } MOCK_METHOD0(handle_acquire_leader, void()); MOCK_METHOD0(handle_release_leader, void()); MOCK_METHOD0(get_instance_id, std::string()); MOCK_METHOD2(print_sync_status, void(Formatter*, std::stringstream*)); MOCK_METHOD1(init, void(Context *)); MOCK_METHOD1(shut_down, void(Context *)); MOCK_METHOD3(notify_image_acquire, void(const std::string&, const std::string&, Context*)); MOCK_METHOD3(notify_image_release, void(const std::string&, const std::string&, Context*)); MOCK_METHOD4(notify_peer_image_removed, void(const std::string&, const std::string&, const std::string&, Context*)); MOCK_METHOD1(handle_update_leader, void(const std::string&)); InstanceWatcher() { s_instance = this; } }; InstanceWatcher<librbd::MockTestImageCtx>* InstanceWatcher<librbd::MockTestImageCtx>::s_instance = nullptr; template <> struct MirrorStatusUpdater<librbd::MockTestImageCtx> { std::string local_mirror_uuid; static std::map<std::string, MirrorStatusUpdater*> s_instance; static MirrorStatusUpdater *create(librados::IoCtx &io_ctx, Threads<librbd::MockTestImageCtx> *threads, const std::string& local_mirror_uuid) { ceph_assert(s_instance[local_mirror_uuid] != nullptr); return s_instance[local_mirror_uuid]; } MirrorStatusUpdater(const std::string_view& local_mirror_uuid) : local_mirror_uuid(local_mirror_uuid) { s_instance[std::string{local_mirror_uuid}] = this; } ~MirrorStatusUpdater() { s_instance.erase(local_mirror_uuid); } MOCK_METHOD1(init, void(Context *)); MOCK_METHOD1(shut_down, void(Context *)); }; std::map<std::string, MirrorStatusUpdater<librbd::MockTestImageCtx> *> MirrorStatusUpdater<librbd::MockTestImageCtx>::s_instance; template<> struct PoolWatcher<librbd::MockTestImageCtx> { int64_t pool_id = -1; static std::map<int64_t, PoolWatcher *> s_instances; static PoolWatcher *create(Threads<librbd::MockTestImageCtx> *threads, librados::IoCtx &ioctx, const std::string& mirror_uuid, pool_watcher::Listener& listener) { auto pool_id = ioctx.get_id(); 
ceph_assert(s_instances.count(pool_id)); return s_instances[pool_id]; } MOCK_METHOD0(is_blocklisted, bool()); MOCK_METHOD0(get_image_count, uint64_t()); MOCK_METHOD1(init, void(Context*)); MOCK_METHOD1(shut_down, void(Context*)); PoolWatcher(int64_t pool_id) : pool_id(pool_id) { ceph_assert(!s_instances.count(pool_id)); s_instances[pool_id] = this; } ~PoolWatcher() { s_instances.erase(pool_id); } }; std::map<int64_t, PoolWatcher<librbd::MockTestImageCtx> *> PoolWatcher<librbd::MockTestImageCtx>::s_instances; template<> struct ServiceDaemon<librbd::MockTestImageCtx> { MOCK_METHOD4(add_or_update_namespace_attribute, void(int64_t, const std::string&, const std::string&, const service_daemon::AttributeValue&)); MOCK_METHOD2(remove_attribute, void(int64_t, const std::string&)); MOCK_METHOD4(add_or_update_callout, uint64_t(int64_t, uint64_t, service_daemon::CalloutLevel, const std::string&)); MOCK_METHOD2(remove_callout, void(int64_t, uint64_t)); }; template <> struct Threads<librbd::MockTestImageCtx> { ceph::mutex &timer_lock; SafeTimer *timer; librbd::asio::ContextWQ *work_queue; librbd::AsioEngine* asio_engine; Threads(Threads<librbd::ImageCtx> *threads) : timer_lock(threads->timer_lock), timer(threads->timer), work_queue(threads->work_queue), asio_engine(threads->asio_engine) { } }; } // namespace mirror } // namespace rbd // template definitions #include "tools/rbd_mirror/NamespaceReplayer.cc" namespace rbd { namespace mirror { using ::testing::_; using ::testing::DoAll; using ::testing::InSequence; using ::testing::Invoke; using ::testing::Return; using ::testing::StrEq; using ::testing::WithArg; class TestMockNamespaceReplayer : public TestMockFixture { public: typedef NamespaceReplayer<librbd::MockTestImageCtx> MockNamespaceReplayer; typedef ImageDeleter<librbd::MockTestImageCtx> MockImageDeleter; typedef ImageMap<librbd::MockTestImageCtx> MockImageMap; typedef InstanceReplayer<librbd::MockTestImageCtx> MockInstanceReplayer; typedef 
InstanceWatcher<librbd::MockTestImageCtx> MockInstanceWatcher; typedef MirrorStatusUpdater<librbd::MockTestImageCtx> MockMirrorStatusUpdater; typedef PoolWatcher<librbd::MockTestImageCtx> MockPoolWatcher; typedef ServiceDaemon<librbd::MockTestImageCtx> MockServiceDaemon; typedef Threads<librbd::MockTestImageCtx> MockThreads; void SetUp() override { TestMockFixture::SetUp(); m_mock_threads = new MockThreads(m_threads); } void TearDown() override { delete m_mock_threads; TestMockFixture::TearDown(); } void expect_mirror_status_updater_init( MockMirrorStatusUpdater &mock_mirror_status_updater, int r) { EXPECT_CALL(mock_mirror_status_updater, init(_)) .WillOnce(CompleteContext(m_mock_threads->work_queue, r)); } void expect_mirror_status_updater_shut_down( MockMirrorStatusUpdater &mock_mirror_status_updater) { EXPECT_CALL(mock_mirror_status_updater, shut_down(_)) .WillOnce(CompleteContext(m_mock_threads->work_queue, 0)); } void expect_instance_replayer_init( MockInstanceReplayer& mock_instance_replayer, int r) { EXPECT_CALL(mock_instance_replayer, init(_)) .WillOnce(CompleteContext(m_mock_threads->work_queue, r)); } void expect_instance_replayer_shut_down( MockInstanceReplayer& mock_instance_replayer) { EXPECT_CALL(mock_instance_replayer, shut_down(_)) .WillOnce(CompleteContext(m_mock_threads->work_queue, 0)); } void expect_instance_replayer_stop( MockInstanceReplayer& mock_instance_replayer) { EXPECT_CALL(mock_instance_replayer, stop(_)) .WillOnce(CompleteContext(m_mock_threads->work_queue, 0)); } void expect_instance_replayer_add_peer( MockInstanceReplayer& mock_instance_replayer) { EXPECT_CALL(mock_instance_replayer, add_peer(_)); } void expect_instance_replayer_release_all( MockInstanceReplayer& mock_instance_replayer) { EXPECT_CALL(mock_instance_replayer, release_all(_)) .WillOnce(CompleteContext(m_mock_threads->work_queue, 0)); } void expect_instance_watcher_get_instance_id( MockInstanceWatcher& mock_instance_watcher, const std::string &instance_id) { 
EXPECT_CALL(mock_instance_watcher, get_instance_id()) .WillOnce(Return(instance_id)); } void expect_instance_watcher_init( MockInstanceWatcher& mock_instance_watcher, int r) { EXPECT_CALL(mock_instance_watcher, init(_)) .WillOnce(CompleteContext(m_mock_threads->work_queue, r)); } void expect_instance_watcher_shut_down( MockInstanceWatcher& mock_instance_watcher) { EXPECT_CALL(mock_instance_watcher, shut_down(_)) .WillOnce(CompleteContext(m_mock_threads->work_queue, 0)); } void expect_instance_watcher_handle_acquire_leader( MockInstanceWatcher& mock_instance_watcher) { EXPECT_CALL(mock_instance_watcher, handle_acquire_leader()); } void expect_instance_watcher_handle_release_leader( MockInstanceWatcher& mock_instance_watcher) { EXPECT_CALL(mock_instance_watcher, handle_release_leader()); } void expect_image_map_init(MockInstanceWatcher &mock_instance_watcher, MockImageMap& mock_image_map, int r) { expect_instance_watcher_get_instance_id(mock_instance_watcher, "1234"); EXPECT_CALL(mock_image_map, init(_)) .WillOnce(CompleteContext(m_mock_threads->work_queue, r)); } void expect_image_map_shut_down(MockImageMap& mock_image_map) { EXPECT_CALL(mock_image_map, shut_down(_)) .WillOnce(CompleteContext(m_mock_threads->work_queue, 0)); } void expect_pool_watcher_init(MockPoolWatcher& mock_pool_watcher, int r) { EXPECT_CALL(mock_pool_watcher, init(_)) .WillOnce(CompleteContext(m_mock_threads->work_queue, r)); } void expect_pool_watcher_shut_down(MockPoolWatcher& mock_pool_watcher) { EXPECT_CALL(mock_pool_watcher, shut_down(_)) .WillOnce(CompleteContext(m_mock_threads->work_queue, 0)); } void expect_image_deleter_init(MockImageDeleter& mock_image_deleter, int r) { EXPECT_CALL(mock_image_deleter, init(_)) .WillOnce(CompleteContext(m_mock_threads->work_queue, r)); } void expect_image_deleter_shut_down(MockImageDeleter& mock_image_deleter) { EXPECT_CALL(mock_image_deleter, shut_down(_)) .WillOnce(CompleteContext(m_mock_threads->work_queue, 0)); } MockThreads *m_mock_threads; }; 
TEST_F(TestMockNamespaceReplayer, Init_LocalMirrorStatusUpdaterError) { InSequence seq; auto mock_local_mirror_status_updater = new MockMirrorStatusUpdater{""}; expect_mirror_status_updater_init(*mock_local_mirror_status_updater, -EINVAL); MockNamespaceReplayer namespace_replayer( {}, m_local_io_ctx, m_remote_io_ctx, "local mirror uuid", "local peer uuid", {"remote mirror uuid", ""}, m_mock_threads, nullptr, nullptr, nullptr, nullptr, nullptr); C_SaferCond on_init; namespace_replayer.init(&on_init); ASSERT_EQ(-EINVAL, on_init.wait()); } TEST_F(TestMockNamespaceReplayer, Init_RemoteMirrorStatusUpdaterError) { InSequence seq; auto mock_local_mirror_status_updater = new MockMirrorStatusUpdater{""}; expect_mirror_status_updater_init(*mock_local_mirror_status_updater, 0); auto mock_remote_mirror_status_updater = new MockMirrorStatusUpdater{ "local mirror uuid"}; expect_mirror_status_updater_init(*mock_remote_mirror_status_updater, -EINVAL); expect_mirror_status_updater_shut_down(*mock_local_mirror_status_updater); MockNamespaceReplayer namespace_replayer( {}, m_local_io_ctx, m_remote_io_ctx, "local mirror uuid", "local peer uuid", {"remote mirror uuid", ""}, m_mock_threads, nullptr, nullptr, nullptr, nullptr, nullptr); C_SaferCond on_init; namespace_replayer.init(&on_init); ASSERT_EQ(-EINVAL, on_init.wait()); } TEST_F(TestMockNamespaceReplayer, Init_InstanceReplayerError) { InSequence seq; auto mock_local_mirror_status_updater = new MockMirrorStatusUpdater{""}; expect_mirror_status_updater_init(*mock_local_mirror_status_updater, 0); auto mock_remote_mirror_status_updater = new MockMirrorStatusUpdater{ "local mirror uuid"}; expect_mirror_status_updater_init(*mock_remote_mirror_status_updater, 0); auto mock_instance_replayer = new MockInstanceReplayer(); expect_instance_replayer_init(*mock_instance_replayer, -EINVAL); expect_mirror_status_updater_shut_down(*mock_remote_mirror_status_updater); expect_mirror_status_updater_shut_down(*mock_local_mirror_status_updater); 
MockNamespaceReplayer namespace_replayer( {}, m_local_io_ctx, m_remote_io_ctx, "local mirror uuid", "local peer uuid", {"remote mirror uuid", ""}, m_mock_threads, nullptr, nullptr, nullptr, nullptr, nullptr); C_SaferCond on_init; namespace_replayer.init(&on_init); ASSERT_EQ(-EINVAL, on_init.wait()); } TEST_F(TestMockNamespaceReplayer, Init_InstanceWatcherError) { InSequence seq; auto mock_local_mirror_status_updater = new MockMirrorStatusUpdater{""}; expect_mirror_status_updater_init(*mock_local_mirror_status_updater, 0); auto mock_remote_mirror_status_updater = new MockMirrorStatusUpdater{ "local mirror uuid"}; expect_mirror_status_updater_init(*mock_remote_mirror_status_updater, 0); auto mock_instance_replayer = new MockInstanceReplayer(); expect_instance_replayer_init(*mock_instance_replayer, 0); expect_instance_replayer_add_peer(*mock_instance_replayer); auto mock_instance_watcher = new MockInstanceWatcher(); expect_instance_watcher_init(*mock_instance_watcher, -EINVAL); expect_instance_replayer_shut_down(*mock_instance_replayer); expect_mirror_status_updater_shut_down(*mock_remote_mirror_status_updater); expect_mirror_status_updater_shut_down(*mock_local_mirror_status_updater); MockNamespaceReplayer namespace_replayer( {}, m_local_io_ctx, m_remote_io_ctx, "local mirror uuid", "local peer uuid", {"remote mirror uuid", ""}, m_mock_threads, nullptr, nullptr, nullptr, nullptr, nullptr); C_SaferCond on_init; namespace_replayer.init(&on_init); ASSERT_EQ(-EINVAL, on_init.wait()); } TEST_F(TestMockNamespaceReplayer, Init) { InSequence seq; auto mock_local_mirror_status_updater = new MockMirrorStatusUpdater{""}; expect_mirror_status_updater_init(*mock_local_mirror_status_updater, 0); auto mock_remote_mirror_status_updater = new MockMirrorStatusUpdater{ "local mirror uuid"}; expect_mirror_status_updater_init(*mock_remote_mirror_status_updater, 0); auto mock_instance_replayer = new MockInstanceReplayer(); expect_instance_replayer_init(*mock_instance_replayer, 0); 
expect_instance_replayer_add_peer(*mock_instance_replayer); auto mock_instance_watcher = new MockInstanceWatcher(); expect_instance_watcher_init(*mock_instance_watcher, 0); MockServiceDaemon mock_service_daemon; MockNamespaceReplayer namespace_replayer( {}, m_local_io_ctx, m_remote_io_ctx, "local mirror uuid", "local peer uuid", {"remote mirror uuid", ""}, m_mock_threads, nullptr, nullptr, &mock_service_daemon, nullptr, nullptr); C_SaferCond on_init; namespace_replayer.init(&on_init); ASSERT_EQ(0, on_init.wait()); expect_instance_replayer_stop(*mock_instance_replayer); expect_instance_watcher_shut_down(*mock_instance_watcher); expect_instance_replayer_shut_down(*mock_instance_replayer); expect_mirror_status_updater_shut_down(*mock_remote_mirror_status_updater); expect_mirror_status_updater_shut_down(*mock_local_mirror_status_updater); C_SaferCond on_shut_down; namespace_replayer.shut_down(&on_shut_down); ASSERT_EQ(0, on_shut_down.wait()); } TEST_F(TestMockNamespaceReplayer, AcquireLeader) { InSequence seq; // init auto mock_local_mirror_status_updater = new MockMirrorStatusUpdater{""}; expect_mirror_status_updater_init(*mock_local_mirror_status_updater, 0); auto mock_remote_mirror_status_updater = new MockMirrorStatusUpdater{ "local mirror uuid"}; expect_mirror_status_updater_init(*mock_remote_mirror_status_updater, 0); auto mock_instance_replayer = new MockInstanceReplayer(); expect_instance_replayer_init(*mock_instance_replayer, 0); expect_instance_replayer_add_peer(*mock_instance_replayer); auto mock_instance_watcher = new MockInstanceWatcher(); expect_instance_watcher_init(*mock_instance_watcher, 0); MockServiceDaemon mock_service_daemon; MockNamespaceReplayer namespace_replayer( {}, m_local_io_ctx, m_remote_io_ctx, "local mirror uuid", "local peer uuid", {"remote mirror uuid", ""}, m_mock_threads, nullptr, nullptr, &mock_service_daemon, nullptr, nullptr); C_SaferCond on_init; namespace_replayer.init(&on_init); ASSERT_EQ(0, on_init.wait()); // acquire leader 
expect_instance_watcher_handle_acquire_leader(*mock_instance_watcher); auto mock_image_map = new MockImageMap(); expect_image_map_init(*mock_instance_watcher, *mock_image_map, 0); auto mock_local_pool_watcher = new MockPoolWatcher(m_local_io_ctx.get_id()); expect_pool_watcher_init(*mock_local_pool_watcher, 0); auto mock_remote_pool_watcher = new MockPoolWatcher(m_remote_io_ctx.get_id()); expect_pool_watcher_init(*mock_remote_pool_watcher, 0); auto mock_image_deleter = new MockImageDeleter(); expect_image_deleter_init(*mock_image_deleter, 0); C_SaferCond on_acquire; namespace_replayer.handle_acquire_leader(&on_acquire); ASSERT_EQ(0, on_acquire.wait()); // release leader expect_instance_watcher_handle_release_leader(*mock_instance_watcher); expect_image_deleter_shut_down(*mock_image_deleter); expect_pool_watcher_shut_down(*mock_local_pool_watcher); expect_pool_watcher_shut_down(*mock_remote_pool_watcher); expect_image_map_shut_down(*mock_image_map); expect_instance_replayer_release_all(*mock_instance_replayer); // shut down expect_instance_replayer_stop(*mock_instance_replayer); expect_instance_watcher_shut_down(*mock_instance_watcher); expect_instance_replayer_shut_down(*mock_instance_replayer); expect_mirror_status_updater_shut_down(*mock_remote_mirror_status_updater); expect_mirror_status_updater_shut_down(*mock_local_mirror_status_updater); C_SaferCond on_shut_down; namespace_replayer.shut_down(&on_shut_down); ASSERT_EQ(0, on_shut_down.wait()); } } // namespace mirror } // namespace rbd
21,510
34.148693
110
cc
null
ceph-main/src/test/rbd_mirror/test_mock_PoolReplayer.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/api/Config.h" #include "librbd/api/Namespace.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/librados_test_stub/MockTestMemCluster.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "test/librados_test_stub/MockTestMemRadosClient.h" #include "test/rbd_mirror/test_mock_fixture.h" #include "test/rbd_mirror/mock/MockContextWQ.h" #include "test/rbd_mirror/mock/MockSafeTimer.h" #include "tools/rbd_mirror/Throttler.h" #include "tools/rbd_mirror/LeaderWatcher.h" #include "tools/rbd_mirror/NamespaceReplayer.h" #include "tools/rbd_mirror/PoolMetaCache.h" #include "tools/rbd_mirror/PoolReplayer.h" #include "tools/rbd_mirror/RemotePoolPoller.h" #include "tools/rbd_mirror/ServiceDaemon.h" #include "tools/rbd_mirror/Threads.h" #include "common/Formatter.h" namespace librbd { namespace { struct MockTestImageCtx : public MockImageCtx { MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace namespace api { template <> class Config<MockTestImageCtx> { public: static void apply_pool_overrides(librados::IoCtx& io_ctx, ConfigProxy* config_proxy) { } }; template <> class Namespace<MockTestImageCtx> { public: static Namespace* s_instance; static int list(librados::IoCtx& io_ctx, std::vector<std::string> *names) { if (s_instance) { return s_instance->list(names); } return 0; } Namespace() { s_instance = this; } void add(const std::string &name) { std::lock_guard locker{m_lock}; m_names.insert(name); } void remove(const std::string &name) { std::lock_guard locker{m_lock}; m_names.erase(name); } void clear() { std::lock_guard locker{m_lock}; m_names.clear(); } private: ceph::mutex m_lock = ceph::make_mutex("Namespace"); std::set<std::string> m_names; int list(std::vector<std::string> *names) { std::lock_guard locker{m_lock}; names->clear(); names->insert(names->begin(), m_names.begin(), 
m_names.end()); return 0; } }; Namespace<librbd::MockTestImageCtx>* Namespace<librbd::MockTestImageCtx>::s_instance = nullptr; } // namespace api } // namespace librbd namespace rbd { namespace mirror { template <> struct Throttler<librbd::MockTestImageCtx> { static Throttler* s_instance; static Throttler *create( CephContext *cct, const std::string &max_concurrent_ops_config_param_name) { return s_instance; } Throttler() { ceph_assert(s_instance == nullptr); s_instance = this; } virtual ~Throttler() { ceph_assert(s_instance == this); s_instance = nullptr; } MOCK_METHOD1(print_status, void(Formatter*)); }; Throttler<librbd::MockTestImageCtx>* Throttler<librbd::MockTestImageCtx>::s_instance = nullptr; template <> struct NamespaceReplayer<librbd::MockTestImageCtx> { static std::map<std::string, NamespaceReplayer *> s_instances; static NamespaceReplayer *create( const std::string &name, librados::IoCtx &local_ioctx, librados::IoCtx &remote_ioctx, const std::string &local_mirror_uuid, const std::string& local_mirror_peer_uuid, const RemotePoolMeta& remote_pool_meta, Threads<librbd::MockTestImageCtx> *threads, Throttler<librbd::MockTestImageCtx> *image_sync_throttler, Throttler<librbd::MockTestImageCtx> *image_deletion_throttler, ServiceDaemon<librbd::MockTestImageCtx> *service_daemon, journal::CacheManagerHandler *cache_manager_handler, PoolMetaCache* pool_meta_cache) { ceph_assert(s_instances.count(name)); auto namespace_replayer = s_instances[name]; s_instances.erase(name); return namespace_replayer; } MOCK_METHOD0(is_blocklisted, bool()); MOCK_METHOD0(get_instance_id, std::string()); MOCK_METHOD1(init, void(Context*)); MOCK_METHOD1(shut_down, void(Context*)); MOCK_METHOD1(handle_acquire_leader, void(Context *)); MOCK_METHOD1(handle_release_leader, void(Context *)); MOCK_METHOD1(handle_update_leader, void(const std::string &)); MOCK_METHOD1(handle_instances_added, void(const std::vector<std::string> &)); MOCK_METHOD1(handle_instances_removed, void(const 
std::vector<std::string> &)); MOCK_METHOD1(print_status, void(Formatter*)); MOCK_METHOD0(start, void()); MOCK_METHOD0(stop, void()); MOCK_METHOD0(restart, void()); MOCK_METHOD0(flush, void()); NamespaceReplayer(const std::string &name = "") { ceph_assert(!s_instances.count(name)); s_instances[name] = this; } }; std::map<std::string, NamespaceReplayer<librbd::MockTestImageCtx> *> NamespaceReplayer<librbd::MockTestImageCtx>::s_instances; template<> struct LeaderWatcher<librbd::MockTestImageCtx> { static LeaderWatcher* s_instance; leader_watcher::Listener* listener = nullptr; static LeaderWatcher *create(Threads<librbd::MockTestImageCtx> *threads, librados::IoCtx &ioctx, leader_watcher::Listener* listener) { ceph_assert(s_instance != nullptr); s_instance->listener = listener; return s_instance; } MOCK_METHOD0(is_blocklisted, bool()); MOCK_METHOD0(is_leader, bool()); MOCK_METHOD0(release_leader, void()); MOCK_METHOD1(get_leader_instance_id, bool(std::string*)); MOCK_METHOD1(list_instances, void(std::vector<std::string>*)); MOCK_METHOD0(init, int()); MOCK_METHOD0(shut_down, int()); LeaderWatcher() { s_instance = this; } }; LeaderWatcher<librbd::MockTestImageCtx>* LeaderWatcher<librbd::MockTestImageCtx>::s_instance = nullptr; template<> struct RemotePoolPoller<librbd::MockTestImageCtx> { static RemotePoolPoller* s_instance; remote_pool_poller::Listener* listener = nullptr; static RemotePoolPoller* create( Threads<librbd::MockTestImageCtx>* threads, librados::IoCtx& remote_io_ctx, const std::string& local_site_name, const std::string& local_mirror_uuid, remote_pool_poller::Listener& listener) { ceph_assert(s_instance != nullptr); s_instance->listener = &listener; return s_instance; } MOCK_METHOD1(init, void(Context*)); MOCK_METHOD1(shut_down, void(Context*)); RemotePoolPoller() { s_instance = this; } }; RemotePoolPoller<librbd::MockTestImageCtx>* RemotePoolPoller<librbd::MockTestImageCtx>::s_instance = nullptr; template<> struct ServiceDaemon<librbd::MockTestImageCtx> { 
MOCK_METHOD2(add_namespace, void(int64_t, const std::string &)); MOCK_METHOD2(remove_namespace, void(int64_t, const std::string &)); MOCK_METHOD3(add_or_update_attribute, void(int64_t, const std::string&, const service_daemon::AttributeValue&)); MOCK_METHOD2(remove_attribute, void(int64_t, const std::string&)); MOCK_METHOD4(add_or_update_callout, uint64_t(int64_t, uint64_t, service_daemon::CalloutLevel, const std::string&)); MOCK_METHOD2(remove_callout, void(int64_t, uint64_t)); }; template <> struct Threads<librbd::MockTestImageCtx> { MockSafeTimer *timer; ceph::mutex &timer_lock; ceph::condition_variable timer_cond; MockContextWQ *work_queue; Threads(Threads<librbd::ImageCtx> *threads) : timer(new MockSafeTimer()), timer_lock(threads->timer_lock), work_queue(new MockContextWQ()) { } ~Threads() { delete timer; delete work_queue; } }; } // namespace mirror } // namespace rbd // template definitions #include "tools/rbd_mirror/PoolReplayer.cc" namespace rbd { namespace mirror { using ::testing::_; using ::testing::AtLeast; using ::testing::DoAll; using ::testing::InSequence; using ::testing::Invoke; using ::testing::Return; using ::testing::StrEq; using ::testing::WithArg; class TestMockPoolReplayer : public TestMockFixture { public: typedef librbd::api::Namespace<librbd::MockTestImageCtx> MockNamespace; typedef PoolReplayer<librbd::MockTestImageCtx> MockPoolReplayer; typedef Throttler<librbd::MockTestImageCtx> MockThrottler; typedef NamespaceReplayer<librbd::MockTestImageCtx> MockNamespaceReplayer; typedef RemotePoolPoller<librbd::MockTestImageCtx> MockRemotePoolPoller; typedef LeaderWatcher<librbd::MockTestImageCtx> MockLeaderWatcher; typedef ServiceDaemon<librbd::MockTestImageCtx> MockServiceDaemon; typedef Threads<librbd::MockTestImageCtx> MockThreads; void expect_work_queue(MockThreads &mock_threads) { EXPECT_CALL(*mock_threads.work_queue, queue(_, _)) .WillRepeatedly(Invoke([this](Context *ctx, int r) { m_threads->work_queue->queue(ctx, r); })); } void 
expect_connect(librados::MockTestMemCluster& mock_cluster, librados::MockTestMemRadosClient* mock_rados_client, const std::string& cluster_name, CephContext** cct_ref) { EXPECT_CALL(mock_cluster, create_rados_client(_)) .WillOnce(Invoke([cluster_name, mock_rados_client, cct_ref](CephContext* cct) { EXPECT_EQ(cluster_name, cct->_conf->cluster); if (cct_ref != nullptr) { cct->get(); *cct_ref = cct; } return mock_rados_client; })); } void expect_create_ioctx(librados::MockTestMemRadosClient* mock_rados_client, librados::MockTestMemIoCtxImpl* mock_io_ctx_impl) { EXPECT_CALL(*mock_rados_client, create_ioctx(_, _)) .WillOnce(Invoke([mock_io_ctx_impl](int64_t id, const std::string& name) { return mock_io_ctx_impl; })); } void expect_mirror_uuid_get(librados::MockTestMemIoCtxImpl *io_ctx_impl, const std::string &uuid, int r) { bufferlist out_bl; encode(uuid, out_bl); EXPECT_CALL(*io_ctx_impl, exec(RBD_MIRRORING, _, StrEq("rbd"), StrEq("mirror_uuid_get"), _, _, _, _)) .WillOnce(DoAll(WithArg<5>(Invoke([out_bl](bufferlist *bl) { *bl = out_bl; })), Return(r))); } void expect_mirror_mode_get(librados::MockTestMemIoCtxImpl *io_ctx_impl, cls::rbd::MirrorMode mirror_mode, int r) { bufferlist out_bl; encode(mirror_mode, out_bl); EXPECT_CALL(*io_ctx_impl, exec(RBD_MIRRORING, _, StrEq("rbd"), StrEq("mirror_mode_get"), _, _, _, _)) .WillOnce(DoAll(WithArg<5>(Invoke([out_bl](bufferlist *bl) { *bl = out_bl; })), Return(r))); } void expect_mirror_mode_get(librados::MockTestMemIoCtxImpl *io_ctx_impl) { EXPECT_CALL(*io_ctx_impl, exec(RBD_MIRRORING, _, StrEq("rbd"), StrEq("mirror_mode_get"), _, _, _, _)) .WillRepeatedly(DoAll(WithArg<5>(Invoke([](bufferlist *bl) { encode(cls::rbd::MIRROR_MODE_POOL, *bl); })), Return(0))); } void expect_leader_watcher_init(MockLeaderWatcher& mock_leader_watcher, int r) { EXPECT_CALL(mock_leader_watcher, init()) .WillOnce(Return(r)); } void expect_leader_watcher_shut_down(MockLeaderWatcher& mock_leader_watcher) { EXPECT_CALL(mock_leader_watcher, 
shut_down()); } void expect_leader_watcher_get_leader_instance_id( MockLeaderWatcher& mock_leader_watcher) { EXPECT_CALL(mock_leader_watcher, get_leader_instance_id(_)) .WillRepeatedly(Return(true)); } void expect_leader_watcher_list_instances( MockLeaderWatcher& mock_leader_watcher) { EXPECT_CALL(mock_leader_watcher, list_instances(_)) .Times(AtLeast(0)); } void expect_remote_pool_poller_init( MockRemotePoolPoller& mock_remote_pool_poller, const RemotePoolMeta& remote_pool_meta, int r) { EXPECT_CALL(mock_remote_pool_poller, init(_)) .WillOnce(Invoke( [this, &mock_remote_pool_poller, remote_pool_meta, r] (Context* ctx) { if (r >= 0) { mock_remote_pool_poller.listener->handle_updated( remote_pool_meta); } m_threads->work_queue->queue(ctx, r); })); } void expect_remote_pool_poller_shut_down( MockRemotePoolPoller& mock_remote_pool_poller, int r) { EXPECT_CALL(mock_remote_pool_poller, shut_down(_)) .WillOnce(Invoke( [this, r](Context* ctx) { m_threads->work_queue->queue(ctx, r); })); } void expect_leader_watcher_is_blocklisted( MockLeaderWatcher &mock_leader_watcher, bool blocklisted) { EXPECT_CALL(mock_leader_watcher, is_blocklisted()) .WillRepeatedly(Return(blocklisted)); } void expect_namespace_replayer_is_blocklisted( MockNamespaceReplayer &mock_namespace_replayer, bool blocklisted) { EXPECT_CALL(mock_namespace_replayer, is_blocklisted()) .WillRepeatedly(Return(blocklisted)); } void expect_namespace_replayer_get_instance_id( MockNamespaceReplayer &mock_namespace_replayer, const std::string &instance_id) { EXPECT_CALL(mock_namespace_replayer, get_instance_id()) .WillOnce(Return(instance_id)); } void expect_namespace_replayer_init( MockNamespaceReplayer &mock_namespace_replayer, int r, Context *on_init = nullptr) { EXPECT_CALL(mock_namespace_replayer, init(_)) .WillOnce(Invoke([this, r, on_init](Context* ctx) { m_threads->work_queue->queue(ctx, r); if (on_init != nullptr) { m_threads->work_queue->queue(on_init, r); } })); } void expect_namespace_replayer_shut_down( 
MockNamespaceReplayer &mock_namespace_replayer, Context *on_shut_down = nullptr) { EXPECT_CALL(mock_namespace_replayer, shut_down(_)) .WillOnce(Invoke([this, on_shut_down](Context* ctx) { m_threads->work_queue->queue(ctx); if (on_shut_down != nullptr) { m_threads->work_queue->queue(on_shut_down); } })); } void expect_namespace_replayer_handle_acquire_leader( MockNamespaceReplayer &mock_namespace_replayer, int r, Context *on_acquire = nullptr) { EXPECT_CALL(mock_namespace_replayer, handle_acquire_leader(_)) .WillOnce(Invoke([this, r, on_acquire](Context* ctx) { m_threads->work_queue->queue(ctx, r); if (on_acquire != nullptr) { m_threads->work_queue->queue(on_acquire, r); } })); } void expect_namespace_replayer_handle_release_leader( MockNamespaceReplayer &mock_namespace_replayer, int r, Context *on_release = nullptr) { EXPECT_CALL(mock_namespace_replayer, handle_release_leader(_)) .WillOnce(Invoke([this, r, on_release](Context* ctx) { m_threads->work_queue->queue(ctx, r); if (on_release != nullptr) { m_threads->work_queue->queue(on_release, r); } })); } void expect_namespace_replayer_handle_update_leader( MockNamespaceReplayer &mock_namespace_replayer, const std::string &leader_instance_id, Context *on_update = nullptr) { EXPECT_CALL(mock_namespace_replayer, handle_update_leader(leader_instance_id)) .WillOnce(Invoke([on_update](const std::string &) { if (on_update != nullptr) { on_update->complete(0); } })); } void expect_namespace_replayer_handle_instances_added( MockNamespaceReplayer &mock_namespace_replayer) { EXPECT_CALL(mock_namespace_replayer, handle_instances_added(_)); } void expect_namespace_replayer_handle_instances_removed( MockNamespaceReplayer &mock_namespace_replayer) { EXPECT_CALL(mock_namespace_replayer, handle_instances_removed(_)); } void expect_service_daemon_add_namespace( MockServiceDaemon &mock_service_daemon, const std::string& namespace_name) { EXPECT_CALL(mock_service_daemon, add_namespace(m_local_io_ctx.get_id(), namespace_name)); } void 
expect_service_daemon_remove_namespace( MockServiceDaemon &mock_service_daemon, const std::string& namespace_name) { EXPECT_CALL(mock_service_daemon, remove_namespace(m_local_io_ctx.get_id(), namespace_name)); } void expect_service_daemon_add_or_update_attribute( MockServiceDaemon &mock_service_daemon, const std::string& key, const service_daemon::AttributeValue& value) { EXPECT_CALL(mock_service_daemon, add_or_update_attribute(_, key, value)); } void expect_service_daemon_remove_attribute( MockServiceDaemon &mock_service_daemon, const std::string& key) { EXPECT_CALL(mock_service_daemon, remove_attribute(_, key)); } void expect_service_daemon_add_or_update_instance_id_attribute( MockServiceDaemon &mock_service_daemon, const std::string &instance_id) { expect_service_daemon_add_or_update_attribute( mock_service_daemon, "instance_id", {instance_id}); } PoolMetaCache m_pool_meta_cache{g_ceph_context}; }; TEST_F(TestMockPoolReplayer, ConfigKeyOverride) { PeerSpec peer_spec{"uuid", "cluster name", "client.name"}; peer_spec.mon_host = "123"; peer_spec.key = "234"; auto mock_default_namespace_replayer = new MockNamespaceReplayer(); expect_namespace_replayer_is_blocklisted(*mock_default_namespace_replayer, false); MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); auto mock_leader_watcher = new MockLeaderWatcher(); expect_leader_watcher_get_leader_instance_id(*mock_leader_watcher); expect_leader_watcher_is_blocklisted(*mock_leader_watcher, false); InSequence seq; auto& mock_cluster = get_mock_cluster(); auto mock_local_rados_client = mock_cluster.do_create_rados_client( g_ceph_context); expect_connect(mock_cluster, mock_local_rados_client, "ceph", nullptr); auto mock_remote_rados_client = mock_cluster.do_create_rados_client( g_ceph_context); CephContext* remote_cct = nullptr; expect_connect(mock_cluster, mock_remote_rados_client, "cluster name", &remote_cct); auto mock_local_io_ctx = mock_local_rados_client->do_create_ioctx( m_local_io_ctx.get_id(), 
m_local_io_ctx.get_pool_name()); expect_create_ioctx(mock_local_rados_client, mock_local_io_ctx); expect_mirror_uuid_get(mock_local_io_ctx, "uuid", 0); auto mock_remote_pool_poller = new MockRemotePoolPoller(); expect_remote_pool_poller_init(*mock_remote_pool_poller, {"remote mirror uuid", ""}, 0); expect_namespace_replayer_init(*mock_default_namespace_replayer, 0); expect_leader_watcher_init(*mock_leader_watcher, 0); MockServiceDaemon mock_service_daemon; std::string instance_id = stringify(mock_local_io_ctx->get_instance_id()); expect_service_daemon_add_or_update_instance_id_attribute( mock_service_daemon, instance_id); MockPoolReplayer pool_replayer(&mock_threads, &mock_service_daemon, nullptr, &m_pool_meta_cache, m_local_io_ctx.get_id(), peer_spec, {}); pool_replayer.init("siteA"); ASSERT_TRUE(remote_cct != nullptr); ASSERT_EQ("123", remote_cct->_conf.get_val<std::string>("mon_host")); ASSERT_EQ("234", remote_cct->_conf.get_val<std::string>("key")); remote_cct->put(); expect_leader_watcher_shut_down(*mock_leader_watcher); expect_namespace_replayer_shut_down(*mock_default_namespace_replayer); expect_remote_pool_poller_shut_down(*mock_remote_pool_poller, 0); pool_replayer.shut_down(); } TEST_F(TestMockPoolReplayer, AcquireReleaseLeader) { PeerSpec peer_spec{"uuid", "cluster name", "client.name"}; peer_spec.mon_host = "123"; peer_spec.key = "234"; auto mock_default_namespace_replayer = new MockNamespaceReplayer(); expect_namespace_replayer_is_blocklisted(*mock_default_namespace_replayer, false); MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); auto mock_leader_watcher = new MockLeaderWatcher(); expect_leader_watcher_get_leader_instance_id(*mock_leader_watcher); expect_leader_watcher_list_instances(*mock_leader_watcher); expect_leader_watcher_is_blocklisted(*mock_leader_watcher, false); InSequence seq; auto& mock_cluster = get_mock_cluster(); auto mock_local_rados_client = mock_cluster.do_create_rados_client( g_ceph_context); 
expect_connect(mock_cluster, mock_local_rados_client, "ceph", nullptr); auto mock_remote_rados_client = mock_cluster.do_create_rados_client( g_ceph_context); expect_connect(mock_cluster, mock_remote_rados_client, "cluster name", nullptr); auto mock_local_io_ctx = mock_local_rados_client->do_create_ioctx( m_local_io_ctx.get_id(), m_local_io_ctx.get_pool_name()); expect_create_ioctx(mock_local_rados_client, mock_local_io_ctx); expect_mirror_uuid_get(mock_local_io_ctx, "uuid", 0); auto mock_remote_pool_poller = new MockRemotePoolPoller(); expect_remote_pool_poller_init(*mock_remote_pool_poller, {"remote mirror uuid", ""}, 0); expect_namespace_replayer_init(*mock_default_namespace_replayer, 0); expect_leader_watcher_init(*mock_leader_watcher, 0); MockServiceDaemon mock_service_daemon; std::string instance_id = stringify(mock_local_io_ctx->get_instance_id()); expect_service_daemon_add_or_update_instance_id_attribute( mock_service_daemon, instance_id); MockPoolReplayer pool_replayer(&mock_threads, &mock_service_daemon, nullptr, &m_pool_meta_cache, m_local_io_ctx.get_id(), peer_spec, {}); pool_replayer.init("siteA"); expect_service_daemon_add_or_update_attribute( mock_service_daemon, SERVICE_DAEMON_LEADER_KEY, true); expect_namespace_replayer_handle_acquire_leader( *mock_default_namespace_replayer, 0); C_SaferCond on_acquire; mock_leader_watcher->listener->post_acquire_handler(&on_acquire); ASSERT_EQ(0, on_acquire.wait()); expect_service_daemon_remove_attribute(mock_service_daemon, SERVICE_DAEMON_LEADER_KEY); expect_namespace_replayer_handle_release_leader( *mock_default_namespace_replayer, 0); C_SaferCond on_release; mock_leader_watcher->listener->pre_release_handler(&on_release); ASSERT_EQ(0, on_release.wait()); expect_leader_watcher_shut_down(*mock_leader_watcher); expect_namespace_replayer_shut_down(*mock_default_namespace_replayer); expect_remote_pool_poller_shut_down(*mock_remote_pool_poller, 0); pool_replayer.shut_down(); } TEST_F(TestMockPoolReplayer, Namespaces) 
{ PeerSpec peer_spec{"uuid", "cluster name", "client.name"}; peer_spec.mon_host = "123"; peer_spec.key = "234"; g_ceph_context->_conf.set_val( "rbd_mirror_pool_replayers_refresh_interval", "1"); MockNamespace mock_namespace; auto mock_default_namespace_replayer = new MockNamespaceReplayer(); expect_namespace_replayer_is_blocklisted(*mock_default_namespace_replayer, false); auto mock_ns1_namespace_replayer = new MockNamespaceReplayer("ns1"); expect_namespace_replayer_is_blocklisted(*mock_ns1_namespace_replayer, false); auto mock_ns2_namespace_replayer = new MockNamespaceReplayer("ns2"); expect_namespace_replayer_is_blocklisted(*mock_ns2_namespace_replayer, false); MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); auto mock_leader_watcher = new MockLeaderWatcher(); expect_leader_watcher_get_leader_instance_id(*mock_leader_watcher); expect_leader_watcher_list_instances(*mock_leader_watcher); expect_leader_watcher_is_blocklisted(*mock_leader_watcher, false); auto& mock_cluster = get_mock_cluster(); auto mock_local_rados_client = mock_cluster.do_create_rados_client( g_ceph_context); auto mock_local_io_ctx = mock_local_rados_client->do_create_ioctx( m_local_io_ctx.get_id(), m_local_io_ctx.get_pool_name()); auto mock_remote_rados_client = mock_cluster.do_create_rados_client( g_ceph_context); expect_mirror_mode_get(mock_local_io_ctx); InSequence seq; expect_connect(mock_cluster, mock_local_rados_client, "ceph", nullptr); expect_connect(mock_cluster, mock_remote_rados_client, "cluster name", nullptr); expect_create_ioctx(mock_local_rados_client, mock_local_io_ctx); expect_mirror_uuid_get(mock_local_io_ctx, "uuid", 0); auto mock_remote_pool_poller = new MockRemotePoolPoller(); expect_remote_pool_poller_init(*mock_remote_pool_poller, {"remote mirror uuid", ""}, 0); expect_namespace_replayer_init(*mock_default_namespace_replayer, 0); expect_leader_watcher_init(*mock_leader_watcher, 0); MockServiceDaemon mock_service_daemon; std::string instance_id = 
stringify(mock_local_io_ctx->get_instance_id()); expect_service_daemon_add_or_update_instance_id_attribute( mock_service_daemon, instance_id); MockPoolReplayer pool_replayer(&mock_threads, &mock_service_daemon, nullptr, &m_pool_meta_cache, m_local_io_ctx.get_id(), peer_spec, {}); pool_replayer.init("siteA"); C_SaferCond on_ns1_init; expect_namespace_replayer_init(*mock_ns1_namespace_replayer, 0); expect_service_daemon_add_namespace(mock_service_daemon, "ns1"); expect_namespace_replayer_handle_update_leader(*mock_ns1_namespace_replayer, "", &on_ns1_init); mock_namespace.add("ns1"); ASSERT_EQ(0, on_ns1_init.wait()); expect_service_daemon_add_or_update_attribute( mock_service_daemon, SERVICE_DAEMON_LEADER_KEY, true); expect_namespace_replayer_handle_acquire_leader( *mock_default_namespace_replayer, 0); expect_namespace_replayer_handle_acquire_leader( *mock_ns1_namespace_replayer, 0); C_SaferCond on_acquire; mock_leader_watcher->listener->post_acquire_handler(&on_acquire); ASSERT_EQ(0, on_acquire.wait()); expect_namespace_replayer_init(*mock_ns2_namespace_replayer, 0); expect_service_daemon_add_namespace(mock_service_daemon, "ns2"); C_SaferCond on_ns2_acquire; expect_namespace_replayer_handle_acquire_leader( *mock_ns2_namespace_replayer, 0, &on_ns2_acquire); expect_namespace_replayer_handle_instances_added( *mock_ns2_namespace_replayer); mock_namespace.add("ns2"); ASSERT_EQ(0, on_ns2_acquire.wait()); C_SaferCond on_ns2_shut_down; expect_service_daemon_remove_namespace(mock_service_daemon, "ns2"); expect_namespace_replayer_shut_down(*mock_ns2_namespace_replayer, &on_ns2_shut_down); mock_namespace.remove("ns2"); ASSERT_EQ(0, on_ns2_shut_down.wait()); expect_service_daemon_remove_attribute(mock_service_daemon, SERVICE_DAEMON_LEADER_KEY); expect_namespace_replayer_handle_release_leader( *mock_default_namespace_replayer, 0); expect_namespace_replayer_handle_release_leader( *mock_ns1_namespace_replayer, 0); C_SaferCond on_release; 
mock_leader_watcher->listener->pre_release_handler(&on_release); ASSERT_EQ(0, on_release.wait()); expect_service_daemon_remove_namespace(mock_service_daemon, "ns1"); expect_namespace_replayer_shut_down(*mock_ns1_namespace_replayer); expect_leader_watcher_shut_down(*mock_leader_watcher); expect_namespace_replayer_shut_down(*mock_default_namespace_replayer); expect_remote_pool_poller_shut_down(*mock_remote_pool_poller, 0); pool_replayer.shut_down(); } TEST_F(TestMockPoolReplayer, NamespacesError) { PeerSpec peer_spec{"uuid", "cluster name", "client.name"}; peer_spec.mon_host = "123"; peer_spec.key = "234"; g_ceph_context->_conf.set_val( "rbd_mirror_pool_replayers_refresh_interval", "1"); MockNamespace mock_namespace; auto mock_default_namespace_replayer = new MockNamespaceReplayer(); expect_namespace_replayer_is_blocklisted(*mock_default_namespace_replayer, false); auto mock_ns1_namespace_replayer = new MockNamespaceReplayer("ns1"); auto mock_ns2_namespace_replayer = new MockNamespaceReplayer("ns2"); expect_namespace_replayer_is_blocklisted(*mock_ns2_namespace_replayer, false); auto mock_ns3_namespace_replayer = new MockNamespaceReplayer("ns3"); MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); auto mock_leader_watcher = new MockLeaderWatcher(); expect_leader_watcher_get_leader_instance_id(*mock_leader_watcher); expect_leader_watcher_list_instances(*mock_leader_watcher); expect_leader_watcher_is_blocklisted(*mock_leader_watcher, false); auto& mock_cluster = get_mock_cluster(); auto mock_local_rados_client = mock_cluster.do_create_rados_client( g_ceph_context); auto mock_local_io_ctx = mock_local_rados_client->do_create_ioctx( m_local_io_ctx.get_id(), m_local_io_ctx.get_pool_name()); auto mock_remote_rados_client = mock_cluster.do_create_rados_client( g_ceph_context); expect_mirror_mode_get(mock_local_io_ctx); InSequence seq; expect_connect(mock_cluster, mock_local_rados_client, "ceph", nullptr); expect_connect(mock_cluster, 
mock_remote_rados_client, "cluster name", nullptr); expect_create_ioctx(mock_local_rados_client, mock_local_io_ctx); expect_mirror_uuid_get(mock_local_io_ctx, "uuid", 0); auto mock_remote_pool_poller = new MockRemotePoolPoller(); expect_remote_pool_poller_init(*mock_remote_pool_poller, {"remote mirror uuid", ""}, 0); expect_namespace_replayer_init(*mock_default_namespace_replayer, 0); expect_leader_watcher_init(*mock_leader_watcher, 0); MockServiceDaemon mock_service_daemon; std::string instance_id = stringify(mock_local_io_ctx->get_instance_id()); expect_service_daemon_add_or_update_instance_id_attribute( mock_service_daemon, instance_id); MockPoolReplayer pool_replayer(&mock_threads, &mock_service_daemon, nullptr, &m_pool_meta_cache, m_local_io_ctx.get_id(), peer_spec, {}); pool_replayer.init("siteA"); // test namespace replayer init fails for non leader C_SaferCond on_ns1_init; Context* ctx = new LambdaContext( [&mock_namespace, &on_ns1_init](int r) { mock_namespace.remove("ns1"); on_ns1_init.complete(r); }); expect_namespace_replayer_init(*mock_ns1_namespace_replayer, -EINVAL, ctx); mock_namespace.add("ns1"); ASSERT_EQ(-EINVAL, on_ns1_init.wait()); // test acquire leader fails when default namespace replayer fails expect_service_daemon_add_or_update_attribute( mock_service_daemon, SERVICE_DAEMON_LEADER_KEY, true); expect_namespace_replayer_handle_acquire_leader( *mock_default_namespace_replayer, -EINVAL); C_SaferCond on_acquire1; mock_leader_watcher->listener->post_acquire_handler(&on_acquire1); ASSERT_EQ(-EINVAL, on_acquire1.wait()); // test acquire leader succeeds when non-default namespace replayer fails C_SaferCond on_ns2_init; expect_namespace_replayer_init(*mock_ns2_namespace_replayer, 0); expect_service_daemon_add_namespace(mock_service_daemon, "ns2"); expect_namespace_replayer_handle_update_leader(*mock_ns2_namespace_replayer, "", &on_ns2_init); mock_namespace.add("ns2"); ASSERT_EQ(0, on_ns2_init.wait()); expect_service_daemon_add_or_update_attribute( 
mock_service_daemon, SERVICE_DAEMON_LEADER_KEY, true); expect_namespace_replayer_handle_acquire_leader( *mock_default_namespace_replayer, 0); expect_namespace_replayer_handle_acquire_leader(*mock_ns2_namespace_replayer, -EINVAL); ctx = new LambdaContext( [&mock_namespace](int) { mock_namespace.remove("ns2"); }); expect_service_daemon_remove_namespace(mock_service_daemon, "ns2"); expect_namespace_replayer_shut_down(*mock_ns2_namespace_replayer, ctx); mock_namespace.add("ns2"); C_SaferCond on_acquire2; mock_leader_watcher->listener->post_acquire_handler(&on_acquire2); ASSERT_EQ(0, on_acquire2.wait()); // test namespace replayer init fails on acquire leader C_SaferCond on_ns3_shut_down; ctx = new LambdaContext( [&mock_namespace, &on_ns3_shut_down](int) { mock_namespace.remove("ns3"); on_ns3_shut_down.complete(0); }); expect_namespace_replayer_init(*mock_ns3_namespace_replayer, 0); expect_service_daemon_add_namespace(mock_service_daemon, "ns3"); expect_namespace_replayer_handle_acquire_leader(*mock_ns3_namespace_replayer, -EINVAL); expect_service_daemon_remove_namespace(mock_service_daemon, "ns3"); expect_namespace_replayer_shut_down(*mock_ns3_namespace_replayer, ctx); mock_namespace.add("ns3"); ASSERT_EQ(0, on_ns3_shut_down.wait()); expect_leader_watcher_shut_down(*mock_leader_watcher); expect_namespace_replayer_shut_down(*mock_default_namespace_replayer); expect_remote_pool_poller_shut_down(*mock_remote_pool_poller, 0); pool_replayer.shut_down(); } } // namespace mirror } // namespace rbd
34,094
35.465241
126
cc
null
ceph-main/src/test/rbd_mirror/test_mock_PoolWatcher.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/rbd_mirror/test_mock_fixture.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "test/librados_test_stub/MockTestMemRadosClient.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/rbd_mirror/mock/MockContextWQ.h" #include "test/rbd_mirror/mock/MockSafeTimer.h" #include "librbd/MirroringWatcher.h" #include "tools/rbd_mirror/Threads.h" #include "tools/rbd_mirror/PoolWatcher.h" #include "tools/rbd_mirror/pool_watcher/RefreshImagesRequest.h" #include "include/stringify.h" using namespace std::chrono_literals; namespace librbd { namespace { struct MockTestImageCtx : public librbd::MockImageCtx { MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace struct MockMirroringWatcher { static MockMirroringWatcher *s_instance; static MockMirroringWatcher &get_instance() { ceph_assert(s_instance != nullptr); return *s_instance; } MockMirroringWatcher() { s_instance = this; } MOCK_CONST_METHOD0(is_unregistered, bool()); MOCK_METHOD1(register_watch, void(Context*)); MOCK_METHOD1(unregister_watch, void(Context*)); MOCK_CONST_METHOD0(get_oid, std::string()); }; template <> struct MirroringWatcher<MockTestImageCtx> { static MirroringWatcher *s_instance; MirroringWatcher(librados::IoCtx &io_ctx, ::MockContextWQ *work_queue) { s_instance = this; } virtual ~MirroringWatcher() { } static MirroringWatcher<MockTestImageCtx> &get_instance() { ceph_assert(s_instance != nullptr); return *s_instance; } virtual void handle_rewatch_complete(int r) = 0; virtual void handle_mode_updated(cls::rbd::MirrorMode mirror_mode) = 0; virtual void handle_image_updated(cls::rbd::MirrorImageState state, const std::string &remote_image_id, const std::string &global_image_id) = 0; bool is_unregistered() const { return MockMirroringWatcher::get_instance().is_unregistered(); } void register_watch(Context *ctx) { 
MockMirroringWatcher::get_instance().register_watch(ctx); } void unregister_watch(Context *ctx) { MockMirroringWatcher::get_instance().unregister_watch(ctx); } std::string get_oid() const { return MockMirroringWatcher::get_instance().get_oid(); } }; MockMirroringWatcher *MockMirroringWatcher::s_instance = nullptr; MirroringWatcher<MockTestImageCtx> *MirroringWatcher<MockTestImageCtx>::s_instance = nullptr; } // namespace librbd namespace rbd { namespace mirror { template <> struct Threads<librbd::MockTestImageCtx> { MockSafeTimer *timer; ceph::mutex &timer_lock; MockContextWQ *work_queue; Threads(Threads<librbd::ImageCtx> *threads) : timer(new MockSafeTimer()), timer_lock(threads->timer_lock), work_queue(new MockContextWQ()) { } ~Threads() { delete timer; delete work_queue; } }; namespace pool_watcher { template <> struct RefreshImagesRequest<librbd::MockTestImageCtx> { ImageIds *image_ids = nullptr; Context *on_finish = nullptr; static RefreshImagesRequest *s_instance; static RefreshImagesRequest *create(librados::IoCtx &io_ctx, ImageIds *image_ids, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->image_ids = image_ids; s_instance->on_finish = on_finish; return s_instance; } MOCK_METHOD0(send, void()); RefreshImagesRequest() { s_instance = this; } }; RefreshImagesRequest<librbd::MockTestImageCtx> *RefreshImagesRequest<librbd::MockTestImageCtx>::s_instance = nullptr; } // namespace pool_watcher } // namespace mirror } // namespace rbd // template definitions #include "tools/rbd_mirror/PoolWatcher.cc" namespace rbd { namespace mirror { using ::testing::_; using ::testing::DoAll; using ::testing::InSequence; using ::testing::Invoke; using ::testing::Return; using ::testing::ReturnArg; using ::testing::StrEq; using ::testing::WithArg; using ::testing::WithoutArgs; class TestMockPoolWatcher : public TestMockFixture { public: typedef PoolWatcher<librbd::MockTestImageCtx> MockPoolWatcher; typedef Threads<librbd::MockTestImageCtx> MockThreads; typedef 
pool_watcher::RefreshImagesRequest<librbd::MockTestImageCtx> MockRefreshImagesRequest; typedef librbd::MockMirroringWatcher MockMirroringWatcher; typedef librbd::MirroringWatcher<librbd::MockTestImageCtx> MirroringWatcher; struct MockListener : pool_watcher::Listener { TestMockPoolWatcher *test; MockListener(TestMockPoolWatcher *test) : test(test) { } MOCK_METHOD3(mock_handle_update, void(const std::string &, const ImageIds &, const ImageIds &)); void handle_update(const std::string &mirror_uuid, ImageIds &&added_image_ids, ImageIds &&removed_image_ids) override { mock_handle_update(mirror_uuid, added_image_ids, removed_image_ids); } }; TestMockPoolWatcher() = default; void expect_work_queue(MockThreads &mock_threads) { EXPECT_CALL(*mock_threads.work_queue, queue(_, _)) .WillRepeatedly(Invoke([this](Context *ctx, int r) { m_threads->work_queue->queue(ctx, r); })); } void expect_mirroring_watcher_is_unregistered(MockMirroringWatcher &mock_mirroring_watcher, bool unregistered) { EXPECT_CALL(mock_mirroring_watcher, is_unregistered()) .WillOnce(Return(unregistered)); } void expect_mirroring_watcher_register(MockMirroringWatcher &mock_mirroring_watcher, int r) { EXPECT_CALL(mock_mirroring_watcher, register_watch(_)) .WillOnce(CompleteContext(r)); } void expect_mirroring_watcher_unregister(MockMirroringWatcher &mock_mirroring_watcher, int r) { EXPECT_CALL(mock_mirroring_watcher, unregister_watch(_)) .WillOnce(CompleteContext(r)); } void expect_refresh_images(MockRefreshImagesRequest &request, const ImageIds &image_ids, int r) { EXPECT_CALL(request, send()) .WillOnce(Invoke([&request, image_ids, r]() { *request.image_ids = image_ids; request.on_finish->complete(r); })); } void expect_listener_handle_update(MockListener &mock_listener, const std::string &mirror_uuid, const ImageIds &added_image_ids, const ImageIds &removed_image_ids) { EXPECT_CALL(mock_listener, mock_handle_update(mirror_uuid, added_image_ids, removed_image_ids)) .WillOnce(WithoutArgs(Invoke([this]() { 
std::lock_guard locker{m_lock}; ++m_update_count; m_cond.notify_all(); }))); } void expect_timer_add_event(MockThreads &mock_threads) { EXPECT_CALL(*mock_threads.timer, add_event_after(_, _)) .WillOnce(DoAll(WithArg<1>(Invoke([this](Context *ctx) { auto wrapped_ctx = new LambdaContext([this, ctx](int r) { std::lock_guard timer_locker{m_threads->timer_lock}; ctx->complete(r); }); m_threads->work_queue->queue(wrapped_ctx, 0); })), ReturnArg<1>())); } int when_shut_down(MockPoolWatcher &mock_pool_watcher) { C_SaferCond ctx; mock_pool_watcher.shut_down(&ctx); return ctx.wait(); } bool wait_for_update(uint32_t count) { std::unique_lock locker{m_lock}; if (m_cond.wait_for(locker, 10s, [count, this] { return m_update_count >= count; })) { m_update_count -= count; return true; } else { return false; } } ceph::mutex m_lock = ceph::make_mutex("TestMockPoolWatcher::m_lock"); ceph::condition_variable m_cond; uint32_t m_update_count = 0; }; TEST_F(TestMockPoolWatcher, EmptyPool) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; MockMirroringWatcher mock_mirroring_watcher; expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true); expect_mirroring_watcher_register(mock_mirroring_watcher, 0); MockRefreshImagesRequest mock_refresh_images_request; expect_refresh_images(mock_refresh_images_request, {}, 0); MockListener mock_listener(this); expect_listener_handle_update(mock_listener, "remote uuid", {}, {}); MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx, "remote uuid", mock_listener); C_SaferCond ctx; mock_pool_watcher.init(&ctx); ASSERT_EQ(0, ctx.wait()); ASSERT_TRUE(wait_for_update(1)); expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0); ASSERT_EQ(0, when_shut_down(mock_pool_watcher)); } TEST_F(TestMockPoolWatcher, NonEmptyPool) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; MockMirroringWatcher mock_mirroring_watcher; 
expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true); expect_mirroring_watcher_register(mock_mirroring_watcher, 0); ImageIds image_ids{ {"global id 1", "remote id 1"}, {"global id 2", "remote id 2"}}; MockRefreshImagesRequest mock_refresh_images_request; expect_refresh_images(mock_refresh_images_request, image_ids, 0); MockListener mock_listener(this); expect_listener_handle_update(mock_listener, "remote uuid", image_ids, {}); MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx, "remote uuid", mock_listener); C_SaferCond ctx; mock_pool_watcher.init(&ctx); ASSERT_EQ(0, ctx.wait()); ASSERT_TRUE(wait_for_update(1)); expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0); ASSERT_EQ(0, when_shut_down(mock_pool_watcher)); } TEST_F(TestMockPoolWatcher, NotifyDuringRefresh) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; MockMirroringWatcher mock_mirroring_watcher; expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true); expect_mirroring_watcher_register(mock_mirroring_watcher, 0); ImageIds image_ids{ {"global id 1", "remote id 1"}, {"global id 2", "remote id 2"}}; MockRefreshImagesRequest mock_refresh_images_request; bool refresh_sent = false; EXPECT_CALL(mock_refresh_images_request, send()) .WillOnce(Invoke([this, &mock_refresh_images_request, &image_ids, &refresh_sent]() { *mock_refresh_images_request.image_ids = image_ids; std::lock_guard locker{m_lock}; refresh_sent = true; m_cond.notify_all(); })); MockListener mock_listener(this); image_ids = { {"global id 1", "remote id 1a"}, {"global id 3", "remote id 3"}}; expect_listener_handle_update(mock_listener, "remote uuid", image_ids, {}); MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx, "remote uuid", mock_listener); mock_pool_watcher.init(nullptr); { std::unique_lock locker{m_lock}; m_cond.wait(locker, [&] { return refresh_sent; }); } MirroringWatcher::get_instance().handle_image_updated( 
cls::rbd::MIRROR_IMAGE_STATE_DISABLING, "remote id 2", "global id 2"); MirroringWatcher::get_instance().handle_image_updated( cls::rbd::MIRROR_IMAGE_STATE_ENABLED, "remote id 1a", "global id 1"); MirroringWatcher::get_instance().handle_image_updated( cls::rbd::MIRROR_IMAGE_STATE_ENABLED, "remote id 3", "global id 3"); mock_refresh_images_request.on_finish->complete(0); ASSERT_TRUE(wait_for_update(1)); expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0); ASSERT_EQ(0, when_shut_down(mock_pool_watcher)); } TEST_F(TestMockPoolWatcher, Notify) { MockThreads mock_threads(m_threads); InSequence seq; MockMirroringWatcher mock_mirroring_watcher; expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true); expect_mirroring_watcher_register(mock_mirroring_watcher, 0); ImageIds image_ids{ {"global id 1", "remote id 1"}, {"global id 2", "remote id 2"}}; MockRefreshImagesRequest mock_refresh_images_request; expect_refresh_images(mock_refresh_images_request, image_ids, 0); EXPECT_CALL(*mock_threads.work_queue, queue(_, _)) .WillOnce(Invoke([this](Context *ctx, int r) { m_threads->work_queue->queue(ctx, r); })); MockListener mock_listener(this); expect_listener_handle_update(mock_listener, "remote uuid", image_ids, {}); Context *notify_ctx = nullptr; EXPECT_CALL(*mock_threads.work_queue, queue(_, _)) .WillOnce(Invoke([this, &notify_ctx](Context *ctx, int r) { std::lock_guard locker{m_lock}; ASSERT_EQ(nullptr, notify_ctx); notify_ctx = ctx; m_cond.notify_all(); })); expect_listener_handle_update( mock_listener, "remote uuid", {{"global id 1", "remote id 1a"}, {"global id 3", "remote id 3"}}, {{"global id 1", "remote id 1"}, {"global id 2", "remote id 2"}}); MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx, "remote uuid", mock_listener); C_SaferCond ctx; mock_pool_watcher.init(&ctx); ASSERT_EQ(0, ctx.wait()); ASSERT_TRUE(wait_for_update(1)); C_SaferCond flush_ctx; m_threads->work_queue->queue(&flush_ctx, 0); ASSERT_EQ(0, flush_ctx.wait()); 
MirroringWatcher::get_instance().handle_image_updated( cls::rbd::MIRROR_IMAGE_STATE_DISABLING, "remote id 2", "global id 2"); MirroringWatcher::get_instance().handle_image_updated( cls::rbd::MIRROR_IMAGE_STATE_DISABLED, "remote id 2", "global id 2"); MirroringWatcher::get_instance().handle_image_updated( cls::rbd::MIRROR_IMAGE_STATE_ENABLED, "remote id 1a", "global id 1"); MirroringWatcher::get_instance().handle_image_updated( cls::rbd::MIRROR_IMAGE_STATE_ENABLED, "remote id 3", "global id 3"); notify_ctx->complete(0); ASSERT_TRUE(wait_for_update(1)); expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0); ASSERT_EQ(0, when_shut_down(mock_pool_watcher)); } TEST_F(TestMockPoolWatcher, RegisterWatcherBlocklist) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; MockMirroringWatcher mock_mirroring_watcher; expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true); expect_mirroring_watcher_register(mock_mirroring_watcher, -EBLOCKLISTED); MockListener mock_listener(this); MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx, "remote uuid", mock_listener); C_SaferCond ctx; mock_pool_watcher.init(&ctx); ASSERT_EQ(-EBLOCKLISTED, ctx.wait()); ASSERT_TRUE(mock_pool_watcher.is_blocklisted()); expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0); ASSERT_EQ(0, when_shut_down(mock_pool_watcher)); } TEST_F(TestMockPoolWatcher, RegisterWatcherMissing) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; MockMirroringWatcher mock_mirroring_watcher; expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true); expect_mirroring_watcher_register(mock_mirroring_watcher, -ENOENT); expect_timer_add_event(mock_threads); expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true); expect_mirroring_watcher_register(mock_mirroring_watcher, 0); MockRefreshImagesRequest mock_refresh_images_request; expect_refresh_images(mock_refresh_images_request, {}, 0); 
MockListener mock_listener(this); expect_listener_handle_update(mock_listener, "remote uuid", {}, {}); MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx, "remote uuid", mock_listener); C_SaferCond ctx; mock_pool_watcher.init(&ctx); ASSERT_EQ(-ENOENT, ctx.wait()); ASSERT_TRUE(wait_for_update(1)); expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0); ASSERT_EQ(0, when_shut_down(mock_pool_watcher)); } TEST_F(TestMockPoolWatcher, RegisterWatcherError) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; MockMirroringWatcher mock_mirroring_watcher; expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true); expect_mirroring_watcher_register(mock_mirroring_watcher, -EINVAL); expect_timer_add_event(mock_threads); expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true); expect_mirroring_watcher_register(mock_mirroring_watcher, 0); MockRefreshImagesRequest mock_refresh_images_request; expect_refresh_images(mock_refresh_images_request, {}, 0); MockListener mock_listener(this); expect_listener_handle_update(mock_listener, "remote uuid", {}, {}); MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx, "remote uuid", mock_listener); C_SaferCond ctx; mock_pool_watcher.init(&ctx); ASSERT_EQ(0, ctx.wait()); ASSERT_TRUE(wait_for_update(1)); expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0); ASSERT_EQ(0, when_shut_down(mock_pool_watcher)); } TEST_F(TestMockPoolWatcher, RefreshBlocklist) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; MockMirroringWatcher mock_mirroring_watcher; expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true); expect_mirroring_watcher_register(mock_mirroring_watcher, 0); MockRefreshImagesRequest mock_refresh_images_request; expect_refresh_images(mock_refresh_images_request, {}, -EBLOCKLISTED); MockListener mock_listener(this); MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx, 
"remote uuid", mock_listener); C_SaferCond ctx; mock_pool_watcher.init(&ctx); ASSERT_EQ(-EBLOCKLISTED, ctx.wait()); ASSERT_TRUE(mock_pool_watcher.is_blocklisted()); expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0); ASSERT_EQ(0, when_shut_down(mock_pool_watcher)); } TEST_F(TestMockPoolWatcher, RefreshMissing) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; MockMirroringWatcher mock_mirroring_watcher; expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true); expect_mirroring_watcher_register(mock_mirroring_watcher, 0); MockRefreshImagesRequest mock_refresh_images_request; expect_refresh_images(mock_refresh_images_request, {}, -ENOENT); MockListener mock_listener(this); expect_listener_handle_update(mock_listener, "remote uuid", {}, {}); MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx, "remote uuid", mock_listener); C_SaferCond ctx; mock_pool_watcher.init(&ctx); ASSERT_EQ(0, ctx.wait()); ASSERT_TRUE(wait_for_update(1)); expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0); ASSERT_EQ(0, when_shut_down(mock_pool_watcher)); } TEST_F(TestMockPoolWatcher, RefreshError) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; MockMirroringWatcher mock_mirroring_watcher; expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true); expect_mirroring_watcher_register(mock_mirroring_watcher, 0); MockRefreshImagesRequest mock_refresh_images_request; expect_refresh_images(mock_refresh_images_request, {}, -EINVAL); expect_timer_add_event(mock_threads); expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, false); expect_refresh_images(mock_refresh_images_request, {}, 0); MockListener mock_listener(this); expect_listener_handle_update(mock_listener, "remote uuid", {}, {}); MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx, "remote uuid", mock_listener); C_SaferCond ctx; mock_pool_watcher.init(&ctx); ASSERT_EQ(0, 
ctx.wait()); ASSERT_TRUE(wait_for_update(1)); expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0); ASSERT_EQ(0, when_shut_down(mock_pool_watcher)); } TEST_F(TestMockPoolWatcher, Rewatch) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; MockMirroringWatcher mock_mirroring_watcher; expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true); expect_mirroring_watcher_register(mock_mirroring_watcher, 0); MockRefreshImagesRequest mock_refresh_images_request; expect_refresh_images(mock_refresh_images_request, {}, 0); MockListener mock_listener(this); expect_listener_handle_update(mock_listener, "remote uuid", {}, {}); expect_timer_add_event(mock_threads); expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, false); expect_refresh_images(mock_refresh_images_request, {{"global id", "image id"}}, 0); expect_listener_handle_update(mock_listener, "remote uuid", {{"global id", "image id"}}, {}); MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx, "remote uuid", mock_listener); C_SaferCond ctx; mock_pool_watcher.init(&ctx); ASSERT_EQ(0, ctx.wait()); ASSERT_TRUE(wait_for_update(1)); MirroringWatcher::get_instance().handle_rewatch_complete(0); ASSERT_TRUE(wait_for_update(1)); expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0); ASSERT_EQ(0, when_shut_down(mock_pool_watcher)); } TEST_F(TestMockPoolWatcher, RewatchBlocklist) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; MockMirroringWatcher mock_mirroring_watcher; expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true); expect_mirroring_watcher_register(mock_mirroring_watcher, 0); MockRefreshImagesRequest mock_refresh_images_request; expect_refresh_images(mock_refresh_images_request, {}, 0); MockListener mock_listener(this); expect_listener_handle_update(mock_listener, "remote uuid", {}, {}); MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx, "remote uuid", 
mock_listener); C_SaferCond ctx; mock_pool_watcher.init(&ctx); ASSERT_EQ(0, ctx.wait()); ASSERT_TRUE(wait_for_update(1)); MirroringWatcher::get_instance().handle_rewatch_complete(-EBLOCKLISTED); ASSERT_TRUE(mock_pool_watcher.is_blocklisted()); expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0); ASSERT_EQ(0, when_shut_down(mock_pool_watcher)); } TEST_F(TestMockPoolWatcher, RewatchError) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; MockMirroringWatcher mock_mirroring_watcher; expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true); expect_mirroring_watcher_register(mock_mirroring_watcher, 0); MockRefreshImagesRequest mock_refresh_images_request; expect_refresh_images(mock_refresh_images_request, {}, 0); MockListener mock_listener(this); expect_listener_handle_update(mock_listener, "remote uuid", {}, {}); expect_timer_add_event(mock_threads); expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, false); expect_refresh_images(mock_refresh_images_request, {{"global id", "image id"}}, 0); expect_listener_handle_update(mock_listener, "remote uuid", {{"global id", "image id"}}, {}); MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx, "remote uuid", mock_listener); C_SaferCond ctx; mock_pool_watcher.init(&ctx); ASSERT_EQ(0, ctx.wait()); ASSERT_TRUE(wait_for_update(1)); MirroringWatcher::get_instance().handle_rewatch_complete(-EINVAL); ASSERT_TRUE(wait_for_update(1)); expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0); ASSERT_EQ(0, when_shut_down(mock_pool_watcher)); } TEST_F(TestMockPoolWatcher, DeferredRefresh) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; MockMirroringWatcher mock_mirroring_watcher; expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true); expect_mirroring_watcher_register(mock_mirroring_watcher, 0); MockRefreshImagesRequest mock_refresh_images_request; EXPECT_CALL(mock_refresh_images_request, 
send()) .WillOnce(Invoke([&mock_refresh_images_request]() { *mock_refresh_images_request.image_ids = {}; MirroringWatcher::get_instance().handle_rewatch_complete(0); mock_refresh_images_request.on_finish->complete(0); })); expect_timer_add_event(mock_threads); expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, false); expect_refresh_images(mock_refresh_images_request, {}, 0); MockListener mock_listener(this); expect_listener_handle_update(mock_listener, "remote uuid", {}, {}); MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx, "remote uuid", mock_listener); C_SaferCond ctx; mock_pool_watcher.init(&ctx); ASSERT_EQ(0, ctx.wait()); ASSERT_TRUE(wait_for_update(1)); expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0); ASSERT_EQ(0, when_shut_down(mock_pool_watcher)); } } // namespace mirror } // namespace rbd
25,382
33.723666
117
cc
null
ceph-main/src/test/rbd_mirror/test_mock_Throttler.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2016 SUSE LINUX GmbH * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include "test/rbd_mirror/test_mock_fixture.h" #include "test/librbd/mock/MockImageCtx.h" namespace librbd { namespace { struct MockTestImageCtx : public librbd::MockImageCtx { MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace } // namespace librbd // template definitions #include "tools/rbd_mirror/Throttler.cc" namespace rbd { namespace mirror { class TestMockThrottler : public TestMockFixture { public: typedef Throttler<librbd::MockTestImageCtx> MockThrottler; }; TEST_F(TestMockThrottler, Single_Sync) { MockThrottler throttler(g_ceph_context, "rbd_mirror_concurrent_image_syncs"); C_SaferCond on_start; throttler.start_op("ns", "id", &on_start); ASSERT_EQ(0, on_start.wait()); throttler.finish_op("ns", "id"); } TEST_F(TestMockThrottler, Multiple_Syncs) { MockThrottler throttler(g_ceph_context, "rbd_mirror_concurrent_image_syncs"); throttler.set_max_concurrent_ops(2); C_SaferCond on_start1; throttler.start_op("ns", "id1", &on_start1); C_SaferCond on_start2; throttler.start_op("ns", "id2", &on_start2); C_SaferCond on_start3; throttler.start_op("ns", "id3", &on_start3); C_SaferCond on_start4; throttler.start_op("ns", "id4", &on_start4); ASSERT_EQ(0, on_start2.wait()); throttler.finish_op("ns", "id2"); ASSERT_EQ(0, on_start3.wait()); throttler.finish_op("ns", "id3"); ASSERT_EQ(0, on_start1.wait()); throttler.finish_op("ns", "id1"); ASSERT_EQ(0, on_start4.wait()); throttler.finish_op("ns", "id4"); } TEST_F(TestMockThrottler, Cancel_Running_Sync) { MockThrottler throttler(g_ceph_context, 
"rbd_mirror_concurrent_image_syncs"); C_SaferCond on_start; throttler.start_op("ns", "id", &on_start); ASSERT_EQ(0, on_start.wait()); ASSERT_FALSE(throttler.cancel_op("ns", "id")); throttler.finish_op("ns", "id"); } TEST_F(TestMockThrottler, Cancel_Waiting_Sync) { MockThrottler throttler(g_ceph_context, "rbd_mirror_concurrent_image_syncs"); throttler.set_max_concurrent_ops(1); C_SaferCond on_start1; throttler.start_op("ns", "id1", &on_start1); C_SaferCond on_start2; throttler.start_op("ns", "id2", &on_start2); ASSERT_EQ(0, on_start1.wait()); ASSERT_TRUE(throttler.cancel_op("ns", "id2")); ASSERT_EQ(-ECANCELED, on_start2.wait()); throttler.finish_op("ns", "id1"); } TEST_F(TestMockThrottler, Cancel_Running_Sync_Start_Waiting) { MockThrottler throttler(g_ceph_context, "rbd_mirror_concurrent_image_syncs"); throttler.set_max_concurrent_ops(1); C_SaferCond on_start1; throttler.start_op("ns", "id1", &on_start1); C_SaferCond on_start2; throttler.start_op("ns", "id2", &on_start2); ASSERT_EQ(0, on_start1.wait()); ASSERT_FALSE(throttler.cancel_op("ns", "id1")); throttler.finish_op("ns", "id1"); ASSERT_EQ(0, on_start2.wait()); throttler.finish_op("ns", "id2"); } TEST_F(TestMockThrottler, Duplicate) { MockThrottler throttler(g_ceph_context, "rbd_mirror_concurrent_image_syncs"); throttler.set_max_concurrent_ops(1); C_SaferCond on_start1; throttler.start_op("ns", "id1", &on_start1); ASSERT_EQ(0, on_start1.wait()); C_SaferCond on_start2; throttler.start_op("ns", "id1", &on_start2); ASSERT_EQ(0, on_start2.wait()); C_SaferCond on_start3; throttler.start_op("ns", "id2", &on_start3); C_SaferCond on_start4; throttler.start_op("ns", "id2", &on_start4); ASSERT_EQ(-ENOENT, on_start3.wait()); throttler.finish_op("ns", "id1"); ASSERT_EQ(0, on_start4.wait()); throttler.finish_op("ns", "id2"); } TEST_F(TestMockThrottler, Duplicate2) { MockThrottler throttler(g_ceph_context, "rbd_mirror_concurrent_image_syncs"); throttler.set_max_concurrent_ops(2); C_SaferCond on_start1; 
throttler.start_op("ns", "id1", &on_start1); ASSERT_EQ(0, on_start1.wait()); C_SaferCond on_start2; throttler.start_op("ns", "id2", &on_start2); ASSERT_EQ(0, on_start2.wait()); C_SaferCond on_start3; throttler.start_op("ns", "id3", &on_start3); C_SaferCond on_start4; throttler.start_op("ns", "id3", &on_start4); // dup ASSERT_EQ(-ENOENT, on_start3.wait()); C_SaferCond on_start5; throttler.start_op("ns", "id4", &on_start5); throttler.finish_op("ns", "id1"); ASSERT_EQ(0, on_start4.wait()); throttler.finish_op("ns", "id2"); ASSERT_EQ(0, on_start5.wait()); C_SaferCond on_start6; throttler.start_op("ns", "id5", &on_start6); throttler.finish_op("ns", "id3"); ASSERT_EQ(0, on_start6.wait()); throttler.finish_op("ns", "id4"); throttler.finish_op("ns", "id5"); } TEST_F(TestMockThrottler, Increase_Max_Concurrent_Syncs) { MockThrottler throttler(g_ceph_context, "rbd_mirror_concurrent_image_syncs"); throttler.set_max_concurrent_ops(2); C_SaferCond on_start1; throttler.start_op("ns", "id1", &on_start1); C_SaferCond on_start2; throttler.start_op("ns", "id2", &on_start2); C_SaferCond on_start3; throttler.start_op("ns", "id3", &on_start3); C_SaferCond on_start4; throttler.start_op("ns", "id4", &on_start4); C_SaferCond on_start5; throttler.start_op("ns", "id5", &on_start5); ASSERT_EQ(0, on_start1.wait()); ASSERT_EQ(0, on_start2.wait()); throttler.set_max_concurrent_ops(4); ASSERT_EQ(0, on_start3.wait()); ASSERT_EQ(0, on_start4.wait()); throttler.finish_op("ns", "id4"); ASSERT_EQ(0, on_start5.wait()); throttler.finish_op("ns", "id1"); throttler.finish_op("ns", "id2"); throttler.finish_op("ns", "id3"); throttler.finish_op("ns", "id5"); } TEST_F(TestMockThrottler, Decrease_Max_Concurrent_Syncs) { MockThrottler throttler(g_ceph_context, "rbd_mirror_concurrent_image_syncs"); throttler.set_max_concurrent_ops(4); C_SaferCond on_start1; throttler.start_op("ns", "id1", &on_start1); C_SaferCond on_start2; throttler.start_op("ns", "id2", &on_start2); C_SaferCond on_start3; 
throttler.start_op("ns", "id3", &on_start3); C_SaferCond on_start4; throttler.start_op("ns", "id4", &on_start4); C_SaferCond on_start5; throttler.start_op("ns", "id5", &on_start5); ASSERT_EQ(0, on_start1.wait()); ASSERT_EQ(0, on_start2.wait()); ASSERT_EQ(0, on_start3.wait()); ASSERT_EQ(0, on_start4.wait()); throttler.set_max_concurrent_ops(2); throttler.finish_op("ns", "id1"); throttler.finish_op("ns", "id2"); throttler.finish_op("ns", "id3"); ASSERT_EQ(0, on_start5.wait()); throttler.finish_op("ns", "id4"); throttler.finish_op("ns", "id5"); } TEST_F(TestMockThrottler, Drain) { MockThrottler throttler(g_ceph_context, "rbd_mirror_concurrent_image_syncs"); throttler.set_max_concurrent_ops(1); C_SaferCond on_start1; throttler.start_op("ns", "id1", &on_start1); C_SaferCond on_start2; throttler.start_op("ns", "id2", &on_start2); ASSERT_EQ(0, on_start1.wait()); throttler.drain("ns", -ESTALE); ASSERT_EQ(-ESTALE, on_start2.wait()); } } // namespace mirror } // namespace rbd
7,264
27.602362
79
cc
null
ceph-main/src/test/rbd_mirror/test_mock_fixture.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/rbd_mirror/test_mock_fixture.h" #include "include/rbd/librbd.hpp" #include "test/librados_test_stub/LibradosTestStub.h" #include "test/librados_test_stub/MockTestMemCluster.h" #include "test/librados_test_stub/MockTestMemRadosClient.h" #include "test/librbd/mock/MockImageCtx.h" namespace rbd { namespace mirror { using ::testing::_; using ::testing::Invoke; using ::testing::WithArg; TestMockFixture::TestClusterRef TestMockFixture::s_test_cluster; void TestMockFixture::SetUpTestCase() { s_test_cluster = librados_test_stub::get_cluster(); // use a mock version of the in-memory rados client librados_test_stub::set_cluster(boost::shared_ptr<librados::TestCluster>( new ::testing::NiceMock<librados::MockTestMemCluster>())); TestFixture::SetUpTestCase(); } void TestMockFixture::TearDownTestCase() { TestFixture::TearDownTestCase(); librados_test_stub::set_cluster(s_test_cluster); } void TestMockFixture::TearDown() { // Mock rados client lives across tests -- reset it to initial state librados::MockTestMemRadosClient *mock_rados_client = get_mock_io_ctx(m_local_io_ctx).get_mock_rados_client(); ASSERT_TRUE(mock_rados_client != nullptr); ::testing::Mock::VerifyAndClear(mock_rados_client); mock_rados_client->default_to_dispatch(); dynamic_cast<librados::MockTestMemCluster*>( librados_test_stub::get_cluster().get())->default_to_dispatch(); TestFixture::TearDown(); } void TestMockFixture::expect_test_features(librbd::MockImageCtx &mock_image_ctx) { EXPECT_CALL(mock_image_ctx, test_features(_, _)) .WillRepeatedly(WithArg<0>(Invoke([&mock_image_ctx](uint64_t features) { return (mock_image_ctx.features & features) != 0; }))); } librados::MockTestMemCluster& TestMockFixture::get_mock_cluster() { librados::MockTestMemCluster* mock_cluster = dynamic_cast< librados::MockTestMemCluster*>(librados_test_stub::get_cluster().get()); ceph_assert(mock_cluster != nullptr); return 
*mock_cluster; } } // namespace mirror } // namespace rbd
2,136
31.876923
82
cc
null
ceph-main/src/test/rbd_mirror/test_mock_fixture.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_TEST_RBD_MIRROR_TEST_MOCK_FIXTURE_H #define CEPH_TEST_RBD_MIRROR_TEST_MOCK_FIXTURE_H #include "test/rbd_mirror/test_fixture.h" #include "test/librados_test_stub/LibradosTestStub.h" #include "common/WorkQueue.h" #include "librbd/asio/ContextWQ.h" #include <boost/shared_ptr.hpp> #include <gmock/gmock.h> #include "include/ceph_assert.h" namespace librados { class TestRadosClient; class MockTestMemCluster; class MockTestMemIoCtxImpl; class MockTestMemRadosClient; } namespace librbd { class MockImageCtx; } ACTION_P(CopyInBufferlist, str) { arg0->append(str); } ACTION_P(CompleteContext, r) { arg0->complete(r); } ACTION_P2(CompleteContext, wq, r) { auto context_wq = reinterpret_cast<librbd::asio::ContextWQ *>(wq); context_wq->queue(arg0, r); } ACTION_P(GetReference, ref_object) { ref_object->get(); } MATCHER_P(ContentsEqual, bl, "") { // TODO fix const-correctness of bufferlist return const_cast<bufferlist &>(arg).contents_equal( const_cast<bufferlist &>(bl)); } namespace rbd { namespace mirror { class TestMockFixture : public TestFixture { public: typedef boost::shared_ptr<librados::TestCluster> TestClusterRef; static void SetUpTestCase(); static void TearDownTestCase(); void TearDown() override; void expect_test_features(librbd::MockImageCtx &mock_image_ctx); librados::MockTestMemCluster& get_mock_cluster(); private: static TestClusterRef s_test_cluster; }; } // namespace mirror } // namespace rbd #endif // CEPH_TEST_RBD_MIRROR_TEST_MOCK_FIXTURE_H
1,628
21.315068
70
h
null
ceph-main/src/test/rbd_mirror/image_deleter/test_mock_SnapshotPurgeRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/rbd_mirror/test_mock_fixture.h" #include "librbd/ExclusiveLock.h" #include "librbd/ImageCtx.h" #include "librbd/ImageState.h" #include "librbd/Operations.h" #include "tools/rbd_mirror/Threads.h" #include "tools/rbd_mirror/image_deleter/SnapshotPurgeRequest.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "test/librbd/mock/MockExclusiveLock.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/librbd/mock/MockImageState.h" #include "test/librbd/mock/MockOperations.h" namespace librbd { namespace { struct MockTestImageCtx : public librbd::MockImageCtx { static MockTestImageCtx *s_instance; static MockTestImageCtx *create(const std::string &image_name, const std::string &image_id, const char *snap, librados::IoCtx& p, bool read_only) { ceph_assert(s_instance != nullptr); return s_instance; } MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { s_instance = this; } }; MockTestImageCtx *MockTestImageCtx::s_instance = nullptr; } // anonymous namespace } // namespace librbd #include "tools/rbd_mirror/image_deleter/SnapshotPurgeRequest.cc" namespace rbd { namespace mirror { namespace image_deleter { using ::testing::_; using ::testing::Invoke; using ::testing::InSequence; using ::testing::WithArg; class TestMockImageDeleterSnapshotPurgeRequest : public TestMockFixture { public: typedef SnapshotPurgeRequest<librbd::MockTestImageCtx> MockSnapshotPurgeRequest; void SetUp() override { TestMockFixture::SetUp(); librbd::RBD rbd; ASSERT_EQ(0, create_image(rbd, m_local_io_ctx, m_image_name, m_image_size)); ASSERT_EQ(0, open_image(m_local_io_ctx, m_image_name, &m_local_image_ctx)); } void expect_set_journal_policy(librbd::MockTestImageCtx &mock_image_ctx) { EXPECT_CALL(mock_image_ctx, set_journal_policy(_)) .WillOnce(Invoke([](librbd::journal::Policy* policy) { delete policy; })); } void 
expect_open(librbd::MockTestImageCtx &mock_image_ctx, int r) { EXPECT_CALL(*mock_image_ctx.state, open(true, _)) .WillOnce(WithArg<1>(Invoke([this, &mock_image_ctx, r](Context* ctx) { EXPECT_EQ(0U, mock_image_ctx.read_only_mask & librbd::IMAGE_READ_ONLY_FLAG_NON_PRIMARY); m_threads->work_queue->queue(ctx, r); }))); } void expect_close(librbd::MockTestImageCtx &mock_image_ctx, int r) { EXPECT_CALL(*mock_image_ctx.state, close(_)) .WillOnce(Invoke([this, r](Context* ctx) { m_threads->work_queue->queue(ctx, r); })); } void expect_acquire_lock(librbd::MockTestImageCtx &mock_image_ctx, int r) { EXPECT_CALL(*mock_image_ctx.exclusive_lock, acquire_lock(_)) .WillOnce(Invoke([this, r](Context* ctx) { m_threads->work_queue->queue(ctx, r); })); } void expect_get_snap_namespace(librbd::MockTestImageCtx &mock_image_ctx, uint64_t snap_id, const cls::rbd::SnapshotNamespace &snap_namespace, int r) { EXPECT_CALL(mock_image_ctx, get_snap_namespace(snap_id, _)) .WillOnce(WithArg<1>(Invoke([snap_namespace, r](cls::rbd::SnapshotNamespace *ns) { *ns = snap_namespace; return r; }))); } void expect_get_snap_name(librbd::MockTestImageCtx &mock_image_ctx, uint64_t snap_id, const std::string& name, int r) { EXPECT_CALL(mock_image_ctx, get_snap_name(snap_id, _)) .WillOnce(WithArg<1>(Invoke([name, r](std::string *n) { *n = name; return r; }))); } void expect_is_snap_protected(librbd::MockTestImageCtx &mock_image_ctx, uint64_t snap_id, bool is_protected, int r) { EXPECT_CALL(mock_image_ctx, is_snap_protected(snap_id, _)) .WillOnce(WithArg<1>(Invoke([is_protected, r](bool *prot) { *prot = is_protected; return r; }))); } void expect_snap_unprotect(librbd::MockTestImageCtx &mock_image_ctx, const cls::rbd::SnapshotNamespace& ns, const std::string& name, int r) { EXPECT_CALL(*mock_image_ctx.operations, execute_snap_unprotect(ns, name, _)) .WillOnce(WithArg<2>(Invoke([this, r](Context* ctx) { m_threads->work_queue->queue(ctx, r); }))); } void expect_snap_remove(librbd::MockTestImageCtx 
&mock_image_ctx, const cls::rbd::SnapshotNamespace& ns, const std::string& name, int r) { EXPECT_CALL(*mock_image_ctx.operations, execute_snap_remove(ns, name, _)) .WillOnce(WithArg<2>(Invoke([this, r](Context* ctx) { m_threads->work_queue->queue(ctx, r); }))); } void expect_start_op(librbd::MockTestImageCtx &mock_image_ctx, bool success) { EXPECT_CALL(*mock_image_ctx.exclusive_lock, start_op(_)) .WillOnce(Invoke([success](int* r) { auto f = [](int r) {}; if (!success) { *r = -EROFS; return static_cast<LambdaContext<decltype(f)>*>(nullptr); } return new LambdaContext(std::move(f)); })); } librbd::ImageCtx *m_local_image_ctx; }; TEST_F(TestMockImageDeleterSnapshotPurgeRequest, SuccessJournal) { { std::unique_lock image_locker{m_local_image_ctx->image_lock}; m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap1", 1, 0, {}, RBD_PROTECTION_STATUS_PROTECTED, 0, {}); m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap2", 2, 0, {}, RBD_PROTECTION_STATUS_UNPROTECTED, 0, {}); } librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx); librbd::MockExclusiveLock mock_exclusive_lock; mock_image_ctx.exclusive_lock = &mock_exclusive_lock; InSequence seq; expect_set_journal_policy(mock_image_ctx); expect_open(mock_image_ctx, 0); expect_acquire_lock(mock_image_ctx, 0); expect_get_snap_namespace(mock_image_ctx, 2, cls::rbd::UserSnapshotNamespace{}, 0); expect_get_snap_name(mock_image_ctx, 2, "snap2", 0); expect_is_snap_protected(mock_image_ctx, 2, false, 0); expect_start_op(mock_image_ctx, true); expect_snap_remove(mock_image_ctx, cls::rbd::UserSnapshotNamespace{}, "snap2", 0); expect_get_snap_namespace(mock_image_ctx, 1, cls::rbd::UserSnapshotNamespace{}, 0); expect_get_snap_name(mock_image_ctx, 1, "snap1", 0); expect_is_snap_protected(mock_image_ctx, 1, true, 0); expect_start_op(mock_image_ctx, true); expect_snap_unprotect(mock_image_ctx, cls::rbd::UserSnapshotNamespace{}, "snap1", 0); expect_start_op(mock_image_ctx, true); 
expect_snap_remove(mock_image_ctx, cls::rbd::UserSnapshotNamespace{}, "snap1", 0); expect_close(mock_image_ctx, 0); C_SaferCond ctx; auto req = MockSnapshotPurgeRequest::create(m_local_io_ctx, mock_image_ctx.id, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageDeleterSnapshotPurgeRequest, SuccessSnapshot) { { std::unique_lock image_locker{m_local_image_ctx->image_lock}; m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap1", 1, 0, {}, RBD_PROTECTION_STATUS_PROTECTED, 0, {}); m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap2", 2, 0, {}, RBD_PROTECTION_STATUS_UNPROTECTED, 0, {}); } librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx); InSequence seq; expect_set_journal_policy(mock_image_ctx); expect_open(mock_image_ctx, 0); expect_get_snap_namespace(mock_image_ctx, 2, cls::rbd::UserSnapshotNamespace{}, 0); expect_get_snap_name(mock_image_ctx, 2, "snap2", 0); expect_is_snap_protected(mock_image_ctx, 2, false, 0); expect_snap_remove(mock_image_ctx, cls::rbd::UserSnapshotNamespace{}, "snap2", 0); expect_get_snap_namespace(mock_image_ctx, 1, cls::rbd::UserSnapshotNamespace{}, 0); expect_get_snap_name(mock_image_ctx, 1, "snap1", 0); expect_is_snap_protected(mock_image_ctx, 1, true, 0); expect_snap_unprotect(mock_image_ctx, cls::rbd::UserSnapshotNamespace{}, "snap1", 0); expect_snap_remove(mock_image_ctx, cls::rbd::UserSnapshotNamespace{}, "snap1", 0); expect_close(mock_image_ctx, 0); C_SaferCond ctx; auto req = MockSnapshotPurgeRequest::create(m_local_io_ctx, mock_image_ctx.id, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageDeleterSnapshotPurgeRequest, OpenError) { { std::unique_lock image_locker{m_local_image_ctx->image_lock}; m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap1", 1, 0, {}, RBD_PROTECTION_STATUS_UNPROTECTED, 0, {}); } librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx); librbd::MockExclusiveLock mock_exclusive_lock; mock_image_ctx.exclusive_lock = 
&mock_exclusive_lock; InSequence seq; expect_set_journal_policy(mock_image_ctx); expect_open(mock_image_ctx, -EPERM); C_SaferCond ctx; auto req = MockSnapshotPurgeRequest::create(m_local_io_ctx, mock_image_ctx.id, &ctx); req->send(); ASSERT_EQ(-EPERM, ctx.wait()); } TEST_F(TestMockImageDeleterSnapshotPurgeRequest, AcquireLockError) { { std::unique_lock image_locker{m_local_image_ctx->image_lock}; m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap1", 1, 0, {}, RBD_PROTECTION_STATUS_UNPROTECTED, 0, {}); } librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx); librbd::MockExclusiveLock mock_exclusive_lock; mock_image_ctx.exclusive_lock = &mock_exclusive_lock; InSequence seq; expect_set_journal_policy(mock_image_ctx); expect_open(mock_image_ctx, 0); expect_acquire_lock(mock_image_ctx, -EPERM); expect_close(mock_image_ctx, -EINVAL); C_SaferCond ctx; auto req = MockSnapshotPurgeRequest::create(m_local_io_ctx, mock_image_ctx.id, &ctx); req->send(); ASSERT_EQ(-EPERM, ctx.wait()); } TEST_F(TestMockImageDeleterSnapshotPurgeRequest, SnapUnprotectBusy) { { std::unique_lock image_locker{m_local_image_ctx->image_lock}; m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap1", 1, 0, {}, RBD_PROTECTION_STATUS_PROTECTED, 0, {}); } librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx); librbd::MockExclusiveLock mock_exclusive_lock; mock_image_ctx.exclusive_lock = &mock_exclusive_lock; InSequence seq; expect_set_journal_policy(mock_image_ctx); expect_open(mock_image_ctx, 0); expect_acquire_lock(mock_image_ctx, 0); expect_get_snap_namespace(mock_image_ctx, 1, cls::rbd::UserSnapshotNamespace{}, 0); expect_get_snap_name(mock_image_ctx, 1, "snap1", 0); expect_is_snap_protected(mock_image_ctx, 1, true, 0); expect_start_op(mock_image_ctx, true); expect_snap_unprotect(mock_image_ctx, cls::rbd::UserSnapshotNamespace{}, "snap1", -EBUSY); expect_close(mock_image_ctx, -EINVAL); C_SaferCond ctx; auto req = 
MockSnapshotPurgeRequest::create(m_local_io_ctx, mock_image_ctx.id, &ctx); req->send(); ASSERT_EQ(-EBUSY, ctx.wait()); } TEST_F(TestMockImageDeleterSnapshotPurgeRequest, SnapUnprotectError) { { std::unique_lock image_locker{m_local_image_ctx->image_lock}; m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap1", 1, 0, {}, RBD_PROTECTION_STATUS_PROTECTED, 0, {}); } librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx); librbd::MockExclusiveLock mock_exclusive_lock; mock_image_ctx.exclusive_lock = &mock_exclusive_lock; InSequence seq; expect_set_journal_policy(mock_image_ctx); expect_open(mock_image_ctx, 0); expect_acquire_lock(mock_image_ctx, 0); expect_get_snap_namespace(mock_image_ctx, 1, cls::rbd::UserSnapshotNamespace{}, 0); expect_get_snap_name(mock_image_ctx, 1, "snap1", 0); expect_is_snap_protected(mock_image_ctx, 1, true, 0); expect_start_op(mock_image_ctx, true); expect_snap_unprotect(mock_image_ctx, cls::rbd::UserSnapshotNamespace{}, "snap1", -EPERM); expect_close(mock_image_ctx, -EINVAL); C_SaferCond ctx; auto req = MockSnapshotPurgeRequest::create(m_local_io_ctx, mock_image_ctx.id, &ctx); req->send(); ASSERT_EQ(-EPERM, ctx.wait()); } TEST_F(TestMockImageDeleterSnapshotPurgeRequest, SnapRemoveError) { { std::unique_lock image_locker{m_local_image_ctx->image_lock}; m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap1", 1, 0, {}, RBD_PROTECTION_STATUS_UNPROTECTED, 0, {}); } librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx); librbd::MockExclusiveLock mock_exclusive_lock; mock_image_ctx.exclusive_lock = &mock_exclusive_lock; InSequence seq; expect_set_journal_policy(mock_image_ctx); expect_open(mock_image_ctx, 0); expect_acquire_lock(mock_image_ctx, 0); expect_get_snap_namespace(mock_image_ctx, 1, cls::rbd::UserSnapshotNamespace{}, 0); expect_get_snap_name(mock_image_ctx, 1, "snap1", 0); expect_is_snap_protected(mock_image_ctx, 1, false, 0); expect_start_op(mock_image_ctx, true); 
expect_snap_remove(mock_image_ctx, cls::rbd::UserSnapshotNamespace{}, "snap1", -EINVAL); expect_close(mock_image_ctx, -EPERM); C_SaferCond ctx; auto req = MockSnapshotPurgeRequest::create(m_local_io_ctx, mock_image_ctx.id, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageDeleterSnapshotPurgeRequest, CloseError) { { std::unique_lock image_locker{m_local_image_ctx->image_lock}; m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap1", 1, 0, {}, RBD_PROTECTION_STATUS_UNPROTECTED, 0, {}); } librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx); librbd::MockExclusiveLock mock_exclusive_lock; mock_image_ctx.exclusive_lock = &mock_exclusive_lock; InSequence seq; expect_set_journal_policy(mock_image_ctx); expect_open(mock_image_ctx, 0); expect_acquire_lock(mock_image_ctx, 0); expect_get_snap_namespace(mock_image_ctx, 1, cls::rbd::UserSnapshotNamespace{}, 0); expect_get_snap_name(mock_image_ctx, 1, "snap1", 0); expect_is_snap_protected(mock_image_ctx, 1, false, 0); expect_start_op(mock_image_ctx, true); expect_snap_remove(mock_image_ctx, cls::rbd::UserSnapshotNamespace{}, "snap1", 0); expect_close(mock_image_ctx, -EINVAL); C_SaferCond ctx; auto req = MockSnapshotPurgeRequest::create(m_local_io_ctx, mock_image_ctx.id, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } } // namespace image_deleter } // namespace mirror } // namespace rbd
16,334
36.900232
88
cc
null
ceph-main/src/test/rbd_mirror/image_deleter/test_mock_TrashMoveRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/rbd_mirror/test_mock_fixture.h" #include "librbd/ExclusiveLock.h" #include "librbd/ImageCtx.h" #include "librbd/ImageState.h" #include "librbd/Operations.h" #include "librbd/TrashWatcher.h" #include "librbd/journal/ResetRequest.h" #include "librbd/mirror/GetInfoRequest.h" #include "librbd/mirror/ImageRemoveRequest.h" #include "librbd/trash/MoveRequest.h" #include "tools/rbd_mirror/Threads.h" #include "tools/rbd_mirror/image_deleter/TrashMoveRequest.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "test/librbd/mock/MockExclusiveLock.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/librbd/mock/MockImageState.h" #include "test/librbd/mock/MockOperations.h" namespace librbd { namespace { struct MockTestImageCtx : public librbd::MockImageCtx { static MockTestImageCtx *s_instance; static MockTestImageCtx *create(const std::string &image_name, const std::string &image_id, const char *snap, librados::IoCtx& p, bool read_only) { ceph_assert(s_instance != nullptr); return s_instance; } MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { s_instance = this; } }; MockTestImageCtx *MockTestImageCtx::s_instance = nullptr; } // anonymous namespace template<> struct TrashWatcher<MockTestImageCtx> { static TrashWatcher* s_instance; static void notify_image_added(librados::IoCtx&, const std::string& image_id, const cls::rbd::TrashImageSpec& spec, Context *ctx) { ceph_assert(s_instance != nullptr); s_instance->notify_image_added(image_id, spec, ctx); } MOCK_METHOD3(notify_image_added, void(const std::string&, const cls::rbd::TrashImageSpec&, Context*)); TrashWatcher() { s_instance = this; } }; TrashWatcher<MockTestImageCtx>* TrashWatcher<MockTestImageCtx>::s_instance = nullptr; namespace journal { template <> struct ResetRequest<MockTestImageCtx> { static ResetRequest* s_instance; Context* on_finish = nullptr; 
static ResetRequest* create(librados::IoCtx &io_ctx, const std::string &image_id, const std::string &client_id, const std::string &mirror_uuid, ContextWQ *op_work_queue, Context *on_finish) { ceph_assert(s_instance != nullptr); EXPECT_EQ(librbd::Journal<>::LOCAL_MIRROR_UUID, mirror_uuid); s_instance->on_finish = on_finish; return s_instance; } MOCK_METHOD0(send, void()); ResetRequest() { s_instance = this; } }; ResetRequest<MockTestImageCtx>* ResetRequest<MockTestImageCtx>::s_instance = nullptr; } // namespace journal namespace mirror { template<> struct GetInfoRequest<librbd::MockTestImageCtx> { static GetInfoRequest* s_instance; cls::rbd::MirrorImage *mirror_image; PromotionState *promotion_state; std::string *primary_mirror_uuid; Context *on_finish = nullptr; static GetInfoRequest* create(librados::IoCtx& io_ctx, librbd::asio::ContextWQ* context_wq, const std::string& image_id, cls::rbd::MirrorImage *mirror_image, PromotionState *promotion_state, std::string* primary_mirror_uuid, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->mirror_image = mirror_image; s_instance->promotion_state = promotion_state; s_instance->primary_mirror_uuid = primary_mirror_uuid; s_instance->on_finish = on_finish; return s_instance; } GetInfoRequest() { ceph_assert(s_instance == nullptr); s_instance = this; } ~GetInfoRequest() { s_instance = nullptr; } MOCK_METHOD0(send, void()); }; GetInfoRequest<librbd::MockTestImageCtx>* GetInfoRequest<librbd::MockTestImageCtx>::s_instance = nullptr; template<> struct ImageRemoveRequest<librbd::MockTestImageCtx> { static ImageRemoveRequest* s_instance; std::string global_image_id; std::string image_id; Context* on_finish; static ImageRemoveRequest *create(librados::IoCtx& io_ctx, const std::string& global_image_id, const std::string& image_id, Context* on_finish) { ceph_assert(s_instance != nullptr); s_instance->global_image_id = global_image_id; s_instance->image_id = image_id; s_instance->on_finish = on_finish; return 
s_instance; } ImageRemoveRequest() { ceph_assert(s_instance == nullptr); s_instance = this; } ~ImageRemoveRequest() { s_instance = nullptr; } MOCK_METHOD0(send, void()); }; ImageRemoveRequest<librbd::MockTestImageCtx>* ImageRemoveRequest<librbd::MockTestImageCtx>::s_instance = nullptr; } // namespace mirror namespace trash { template <> struct MoveRequest<MockTestImageCtx> { static MoveRequest* s_instance; Context* on_finish = nullptr; typedef boost::optional<utime_t> DefermentEndTime; static MoveRequest* create(librados::IoCtx& io_ctx, const std::string& image_id, const cls::rbd::TrashImageSpec& trash_image_spec, Context* on_finish) { ceph_assert(s_instance != nullptr); s_instance->construct(image_id, trash_image_spec); s_instance->on_finish = on_finish; return s_instance; } MOCK_METHOD2(construct, void(const std::string&, const cls::rbd::TrashImageSpec&)); MOCK_METHOD0(send, void()); MoveRequest() { s_instance = this; } }; MoveRequest<MockTestImageCtx>* MoveRequest<MockTestImageCtx>::s_instance = nullptr; } // namespace trash } // namespace librbd #include "tools/rbd_mirror/image_deleter/TrashMoveRequest.cc" namespace rbd { namespace mirror { namespace image_deleter { using ::testing::_; using ::testing::DoAll; using ::testing::InSequence; using ::testing::Invoke; using ::testing::Return; using ::testing::StrEq; using ::testing::WithArg; using ::testing::WithArgs; class TestMockImageDeleterTrashMoveRequest : public TestMockFixture { public: typedef TrashMoveRequest<librbd::MockTestImageCtx> MockTrashMoveRequest; typedef librbd::journal::ResetRequest<librbd::MockTestImageCtx> MockJournalResetRequest; typedef librbd::mirror::GetInfoRequest<librbd::MockTestImageCtx> MockGetMirrorInfoRequest; typedef librbd::mirror::ImageRemoveRequest<librbd::MockTestImageCtx> MockImageRemoveRequest; typedef librbd::trash::MoveRequest<librbd::MockTestImageCtx> MockLibrbdTrashMoveRequest; typedef librbd::TrashWatcher<librbd::MockTestImageCtx> MockTrashWatcher; void SetUp() override { 
TestMockFixture::SetUp(); librbd::RBD rbd; ASSERT_EQ(0, create_image(rbd, m_local_io_ctx, m_image_name, m_image_size)); ASSERT_EQ(0, open_image(m_local_io_ctx, m_image_name, &m_local_image_ctx)); } void expect_mirror_image_get_image_id(const std::string& image_id, int r) { bufferlist bl; encode(image_id, bl); EXPECT_CALL(get_mock_io_ctx(m_local_io_ctx), exec(RBD_MIRRORING, _, StrEq("rbd"), StrEq("mirror_image_get_image_id"), _, _, _, _)) .WillOnce(DoAll(WithArg<5>(Invoke([bl](bufferlist *out_bl) { *out_bl = bl; })), Return(r))); } void expect_get_mirror_info( MockGetMirrorInfoRequest &mock_get_mirror_info_request, const cls::rbd::MirrorImage &mirror_image, librbd::mirror::PromotionState promotion_state, const std::string& primary_mirror_uuid, int r) { EXPECT_CALL(mock_get_mirror_info_request, send()) .WillOnce(Invoke([this, &mock_get_mirror_info_request, mirror_image, promotion_state, primary_mirror_uuid, r]() { *mock_get_mirror_info_request.mirror_image = mirror_image; *mock_get_mirror_info_request.promotion_state = promotion_state; *mock_get_mirror_info_request.primary_mirror_uuid = primary_mirror_uuid; m_threads->work_queue->queue( mock_get_mirror_info_request.on_finish, r); })); } void expect_set_journal_policy(librbd::MockTestImageCtx &mock_image_ctx) { EXPECT_CALL(mock_image_ctx, set_journal_policy(_)) .WillOnce(Invoke([](librbd::journal::Policy* policy) { delete policy; })); } void expect_open(librbd::MockTestImageCtx &mock_image_ctx, int r) { EXPECT_CALL(*mock_image_ctx.state, open(true, _)) .WillOnce(WithArg<1>(Invoke([this, &mock_image_ctx, r](Context* ctx) { EXPECT_EQ(0U, mock_image_ctx.read_only_mask & librbd::IMAGE_READ_ONLY_FLAG_NON_PRIMARY); m_threads->work_queue->queue(ctx, r); }))); } void expect_close(librbd::MockTestImageCtx &mock_image_ctx, int r) { EXPECT_CALL(*mock_image_ctx.state, close(_)) .WillOnce(Invoke([this, r](Context* ctx) { m_threads->work_queue->queue(ctx, r); })); } void expect_block_requests(librbd::MockTestImageCtx 
&mock_image_ctx) { EXPECT_CALL(*mock_image_ctx.exclusive_lock, block_requests(0)).Times(1); } void expect_acquire_lock(librbd::MockTestImageCtx &mock_image_ctx, int r) { EXPECT_CALL(*mock_image_ctx.exclusive_lock, acquire_lock(_)) .WillOnce(Invoke([this, r](Context* ctx) { m_threads->work_queue->queue(ctx, r); })); } void expect_mirror_image_set(const std::string& image_id, const cls::rbd::MirrorImage& mirror_image, int r) { bufferlist bl; encode(image_id, bl); encode(mirror_image, bl); EXPECT_CALL(get_mock_io_ctx(m_local_io_ctx), exec(RBD_MIRRORING, _, StrEq("rbd"), StrEq("mirror_image_set"), ContentsEqual(bl), _, _, _)) .WillOnce(Return(r)); } void expect_mirror_image_remove_request( MockImageRemoveRequest& mock_image_remove_request, int r) { EXPECT_CALL(mock_image_remove_request, send()) .WillOnce(Invoke([this, &mock_image_remove_request, r]() { m_threads->work_queue->queue(mock_image_remove_request.on_finish, r); })); } void expect_journal_reset(MockJournalResetRequest& mock_journal_reset_request, int r) { EXPECT_CALL(mock_journal_reset_request, send()) .WillOnce(Invoke([this, &mock_journal_reset_request, r]() { m_threads->work_queue->queue(mock_journal_reset_request.on_finish, r); })); } void expect_trash_move(MockLibrbdTrashMoveRequest& mock_trash_move_request, const std::string& image_name, const std::string& image_id, const boost::optional<uint32_t>& delay, int r) { EXPECT_CALL(mock_trash_move_request, construct(image_id, _)) .WillOnce(WithArg<1>(Invoke([image_name, delay](const cls::rbd::TrashImageSpec& spec) { ASSERT_EQ(cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, spec.source); ASSERT_EQ(image_name, spec.name); if (delay) { utime_t time{spec.deletion_time}; time += *delay; ASSERT_TRUE(time == spec.deferment_end_time); } else { ASSERT_EQ(spec.deletion_time, spec.deferment_end_time); } }))); EXPECT_CALL(mock_trash_move_request, send()) .WillOnce(Invoke([this, &mock_trash_move_request, r]() { m_threads->work_queue->queue(mock_trash_move_request.on_finish, r); 
})); } void expect_notify_image_added(MockTrashWatcher& mock_trash_watcher, const std::string& image_id) { EXPECT_CALL(mock_trash_watcher, notify_image_added(image_id, _, _)) .WillOnce(WithArg<2>(Invoke([this](Context *ctx) { m_threads->work_queue->queue(ctx, 0); }))); } librbd::ImageCtx *m_local_image_ctx; }; TEST_F(TestMockImageDeleterTrashMoveRequest, SuccessJournal) { librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx); librbd::MockExclusiveLock mock_exclusive_lock; mock_image_ctx.exclusive_lock = &mock_exclusive_lock; InSequence seq; expect_mirror_image_get_image_id("image id", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, librbd::mirror::PROMOTION_STATE_ORPHAN, "remote mirror uuid", 0); expect_mirror_image_set("image id", {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, 0); expect_set_journal_policy(mock_image_ctx); expect_open(mock_image_ctx, 0); MockJournalResetRequest mock_journal_reset_request; expect_journal_reset(mock_journal_reset_request, 0); expect_block_requests(mock_image_ctx); expect_acquire_lock(mock_image_ctx, 0); MockLibrbdTrashMoveRequest mock_librbd_trash_move_request; expect_trash_move(mock_librbd_trash_move_request, m_image_name, "image id", {}, 0); MockImageRemoveRequest mock_image_remove_request; expect_mirror_image_remove_request(mock_image_remove_request, 0); expect_close(mock_image_ctx, 0); MockTrashWatcher mock_trash_watcher; expect_notify_image_added(mock_trash_watcher, "image id"); C_SaferCond ctx; auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id", true, m_local_image_ctx->op_work_queue, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageDeleterTrashMoveRequest, SuccessSnapshot) { librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx); InSequence seq; 
expect_mirror_image_get_image_id("image id", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, "global image id", cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, librbd::mirror::PROMOTION_STATE_NON_PRIMARY, "remote mirror uuid", 0); expect_mirror_image_set("image id", {cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, "global image id", cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, 0); expect_set_journal_policy(mock_image_ctx); expect_open(mock_image_ctx, 0); MockLibrbdTrashMoveRequest mock_librbd_trash_move_request; expect_trash_move(mock_librbd_trash_move_request, m_image_name, "image id", {}, 0); MockImageRemoveRequest mock_image_remove_request; expect_mirror_image_remove_request(mock_image_remove_request, 0); expect_close(mock_image_ctx, 0); MockTrashWatcher mock_trash_watcher; expect_notify_image_added(mock_trash_watcher, "image id"); C_SaferCond ctx; auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id", false, m_local_image_ctx->op_work_queue, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageDeleterTrashMoveRequest, GetImageIdDNE) { InSequence seq; expect_mirror_image_get_image_id("image id", -ENOENT); C_SaferCond ctx; auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id", true, m_local_image_ctx->op_work_queue, &ctx); req->send(); ASSERT_EQ(-ENOENT, ctx.wait()); } TEST_F(TestMockImageDeleterTrashMoveRequest, GetImageIdError) { InSequence seq; expect_mirror_image_get_image_id("image id", -EINVAL); C_SaferCond ctx; auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id", true, m_local_image_ctx->op_work_queue, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageDeleterTrashMoveRequest, GetMirrorInfoLocalPrimary) { InSequence seq; expect_mirror_image_get_image_id("image id", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, 
{cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, librbd::mirror::PROMOTION_STATE_PRIMARY, "remote mirror uuid", 0); C_SaferCond ctx; auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id", true, m_local_image_ctx->op_work_queue, &ctx); req->send(); ASSERT_EQ(-EPERM, ctx.wait()); } TEST_F(TestMockImageDeleterTrashMoveRequest, GetMirrorInfoOrphan) { InSequence seq; expect_mirror_image_get_image_id("image id", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, librbd::mirror::PROMOTION_STATE_ORPHAN, "remote mirror uuid", 0); C_SaferCond ctx; auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id", false, m_local_image_ctx->op_work_queue, &ctx); req->send(); ASSERT_EQ(-EPERM, ctx.wait()); } TEST_F(TestMockImageDeleterTrashMoveRequest, GetMirrorInfoDNE) { librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx); librbd::MockExclusiveLock mock_exclusive_lock; mock_image_ctx.exclusive_lock = &mock_exclusive_lock; InSequence seq; expect_mirror_image_get_image_id("image id", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, librbd::mirror::PROMOTION_STATE_ORPHAN, "remote mirror uuid", -ENOENT); C_SaferCond ctx; auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id", true, m_local_image_ctx->op_work_queue, &ctx); req->send(); ASSERT_EQ(-ENOENT, ctx.wait()); } TEST_F(TestMockImageDeleterTrashMoveRequest, GetMirrorInfoError) { InSequence seq; expect_mirror_image_get_image_id("image id", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", 
cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, librbd::mirror::PROMOTION_STATE_ORPHAN, "remote mirror uuid", -EINVAL); C_SaferCond ctx; auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id", true, m_local_image_ctx->op_work_queue, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageDeleterTrashMoveRequest, DisableMirrorImageError) { InSequence seq; expect_mirror_image_get_image_id("image id", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, librbd::mirror::PROMOTION_STATE_ORPHAN, "remote mirror uuid", 0); expect_mirror_image_set("image id", {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, -EINVAL); C_SaferCond ctx; auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id", true, m_local_image_ctx->op_work_queue, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageDeleterTrashMoveRequest, OpenImageError) { librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx); librbd::MockExclusiveLock mock_exclusive_lock; mock_image_ctx.exclusive_lock = &mock_exclusive_lock; InSequence seq; expect_mirror_image_get_image_id("image id", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, librbd::mirror::PROMOTION_STATE_ORPHAN, "remote mirror uuid", 0); expect_mirror_image_set("image id", {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, 0); expect_set_journal_policy(mock_image_ctx); expect_open(mock_image_ctx, -EINVAL); C_SaferCond ctx; auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id", true, m_local_image_ctx->op_work_queue, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } 
TEST_F(TestMockImageDeleterTrashMoveRequest, ResetJournalError) { librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx); librbd::MockExclusiveLock mock_exclusive_lock; mock_image_ctx.exclusive_lock = &mock_exclusive_lock; InSequence seq; expect_mirror_image_get_image_id("image id", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, librbd::mirror::PROMOTION_STATE_ORPHAN, "remote mirror uuid", 0); expect_mirror_image_set("image id", {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, 0); expect_set_journal_policy(mock_image_ctx); expect_open(mock_image_ctx, 0); MockJournalResetRequest mock_journal_reset_request; expect_journal_reset(mock_journal_reset_request, -EINVAL); expect_close(mock_image_ctx, 0); C_SaferCond ctx; auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id", true, m_local_image_ctx->op_work_queue, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageDeleterTrashMoveRequest, AcquireLockError) { librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx); librbd::MockExclusiveLock mock_exclusive_lock; mock_image_ctx.exclusive_lock = &mock_exclusive_lock; InSequence seq; expect_mirror_image_get_image_id("image id", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, librbd::mirror::PROMOTION_STATE_ORPHAN, "remote mirror uuid", 0); expect_mirror_image_set("image id", {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, 0); expect_set_journal_policy(mock_image_ctx); expect_open(mock_image_ctx, 0); MockJournalResetRequest mock_journal_reset_request; expect_journal_reset(mock_journal_reset_request, 0); 
expect_block_requests(mock_image_ctx); expect_acquire_lock(mock_image_ctx, -EINVAL); expect_close(mock_image_ctx, 0); C_SaferCond ctx; auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id", true, m_local_image_ctx->op_work_queue, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageDeleterTrashMoveRequest, TrashMoveError) { librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx); librbd::MockExclusiveLock mock_exclusive_lock; mock_image_ctx.exclusive_lock = &mock_exclusive_lock; InSequence seq; expect_mirror_image_get_image_id("image id", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, librbd::mirror::PROMOTION_STATE_ORPHAN, "remote mirror uuid", 0); expect_mirror_image_set("image id", {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, 0); expect_set_journal_policy(mock_image_ctx); expect_open(mock_image_ctx, 0); MockJournalResetRequest mock_journal_reset_request; expect_journal_reset(mock_journal_reset_request, 0); expect_block_requests(mock_image_ctx); expect_acquire_lock(mock_image_ctx, 0); MockLibrbdTrashMoveRequest mock_librbd_trash_move_request; expect_trash_move(mock_librbd_trash_move_request, m_image_name, "image id", {}, -EINVAL); expect_close(mock_image_ctx, 0); C_SaferCond ctx; auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id", true, m_local_image_ctx->op_work_queue, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageDeleterTrashMoveRequest, RemoveMirrorImageError) { librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx); librbd::MockExclusiveLock mock_exclusive_lock; mock_image_ctx.exclusive_lock = &mock_exclusive_lock; InSequence seq; expect_mirror_image_get_image_id("image id", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; 
expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, librbd::mirror::PROMOTION_STATE_ORPHAN, "remote mirror uuid", 0); expect_mirror_image_set("image id", {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, 0); expect_set_journal_policy(mock_image_ctx); expect_open(mock_image_ctx, 0); MockJournalResetRequest mock_journal_reset_request; expect_journal_reset(mock_journal_reset_request, 0); expect_block_requests(mock_image_ctx); expect_acquire_lock(mock_image_ctx, 0); MockLibrbdTrashMoveRequest mock_librbd_trash_move_request; expect_trash_move(mock_librbd_trash_move_request, m_image_name, "image id", {}, 0); MockImageRemoveRequest mock_image_remove_request; expect_mirror_image_remove_request(mock_image_remove_request, -EINVAL); expect_close(mock_image_ctx, 0); MockTrashWatcher mock_trash_watcher; expect_notify_image_added(mock_trash_watcher, "image id"); C_SaferCond ctx; auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id", true, m_local_image_ctx->op_work_queue, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageDeleterTrashMoveRequest, CloseImageError) { librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx); librbd::MockExclusiveLock mock_exclusive_lock; mock_image_ctx.exclusive_lock = &mock_exclusive_lock; InSequence seq; expect_mirror_image_get_image_id("image id", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, librbd::mirror::PROMOTION_STATE_ORPHAN, "remote mirror uuid", 0); expect_mirror_image_set("image id", {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, 0); expect_set_journal_policy(mock_image_ctx); expect_open(mock_image_ctx, 0); MockJournalResetRequest 
mock_journal_reset_request; expect_journal_reset(mock_journal_reset_request, 0); expect_block_requests(mock_image_ctx); expect_acquire_lock(mock_image_ctx, 0); MockLibrbdTrashMoveRequest mock_librbd_trash_move_request; expect_trash_move(mock_librbd_trash_move_request, m_image_name, "image id", {}, 0); MockImageRemoveRequest mock_image_remove_request; expect_mirror_image_remove_request(mock_image_remove_request, 0); expect_close(mock_image_ctx, -EINVAL); MockTrashWatcher mock_trash_watcher; expect_notify_image_added(mock_trash_watcher, "image id"); C_SaferCond ctx; auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id", true, m_local_image_ctx->op_work_queue, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageDeleterTrashMoveRequest, DelayedDelation) { librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx); librbd::MockExclusiveLock mock_exclusive_lock; mock_image_ctx.config.set_val("rbd_mirroring_delete_delay", "600"); mock_image_ctx.exclusive_lock = &mock_exclusive_lock; InSequence seq; expect_mirror_image_get_image_id("image id", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, librbd::mirror::PROMOTION_STATE_NON_PRIMARY, "remote mirror uuid", 0); expect_mirror_image_set("image id", {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, 0); expect_set_journal_policy(mock_image_ctx); expect_open(mock_image_ctx, 0); MockJournalResetRequest mock_journal_reset_request; expect_journal_reset(mock_journal_reset_request, 0); expect_block_requests(mock_image_ctx); expect_acquire_lock(mock_image_ctx, 0); MockLibrbdTrashMoveRequest mock_librbd_trash_move_request; expect_trash_move(mock_librbd_trash_move_request, m_image_name, "image id", 600, 0); MockImageRemoveRequest mock_image_remove_request; 
expect_mirror_image_remove_request(mock_image_remove_request, 0); expect_close(mock_image_ctx, 0); MockTrashWatcher mock_trash_watcher; expect_notify_image_added(mock_trash_watcher, "image id"); C_SaferCond ctx; auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id", true, m_local_image_ctx->op_work_queue, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } } // namespace image_deleter } // namespace mirror } // namespace rbd
34,172
36.885809
113
cc
null
ceph-main/src/test/rbd_mirror/image_deleter/test_mock_TrashRemoveRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/rbd_mirror/test_mock_fixture.h" #include "cls/rbd/cls_rbd_types.h" #include "librbd/ImageCtx.h" #include "librbd/TrashWatcher.h" #include "librbd/Utils.h" #include "librbd/trash/RemoveRequest.h" #include "tools/rbd_mirror/Threads.h" #include "tools/rbd_mirror/image_deleter/SnapshotPurgeRequest.h" #include "tools/rbd_mirror/image_deleter/TrashRemoveRequest.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "test/librbd/mock/MockImageCtx.h" namespace librbd { namespace { struct MockTestImageCtx : public librbd::MockImageCtx { MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace template<> struct TrashWatcher<MockTestImageCtx> { static TrashWatcher* s_instance; static void notify_image_removed(librados::IoCtx&, const std::string& image_id, Context *ctx) { ceph_assert(s_instance != nullptr); s_instance->notify_image_removed(image_id, ctx); } MOCK_METHOD2(notify_image_removed, void(const std::string&, Context*)); TrashWatcher() { s_instance = this; } }; TrashWatcher<MockTestImageCtx>* TrashWatcher<MockTestImageCtx>::s_instance = nullptr; namespace trash { template <> struct RemoveRequest<librbd::MockTestImageCtx> { static RemoveRequest *s_instance; Context *on_finish = nullptr; static RemoveRequest *create(librados::IoCtx &io_ctx, const std::string &image_id, librbd::asio::ContextWQ *work_queue, bool force, librbd::ProgressContext &progress_ctx, Context *on_finish) { ceph_assert(s_instance != nullptr); EXPECT_TRUE(force); s_instance->construct(image_id); s_instance->on_finish = on_finish; return s_instance; } MOCK_METHOD1(construct, void(const std::string&)); MOCK_METHOD0(send, void()); RemoveRequest() { s_instance = this; } }; RemoveRequest<librbd::MockTestImageCtx>* RemoveRequest<librbd::MockTestImageCtx>::s_instance = nullptr; } // namespace trash } // namespace librbd namespace 
rbd { namespace mirror { namespace image_deleter { template <> struct SnapshotPurgeRequest<librbd::MockTestImageCtx> { static SnapshotPurgeRequest *s_instance; Context *on_finish = nullptr; static SnapshotPurgeRequest *create(librados::IoCtx &io_ctx, const std::string &image_id, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->construct(image_id); s_instance->on_finish = on_finish; return s_instance; } MOCK_METHOD1(construct, void(const std::string&)); MOCK_METHOD0(send, void()); SnapshotPurgeRequest() { s_instance = this; } }; SnapshotPurgeRequest<librbd::MockTestImageCtx>* SnapshotPurgeRequest<librbd::MockTestImageCtx>::s_instance = nullptr; } // namespace image_deleter } // namespace mirror } // namespace rbd #include "tools/rbd_mirror/image_deleter/TrashRemoveRequest.cc" namespace rbd { namespace mirror { namespace image_deleter { using ::testing::_; using ::testing::DoAll; using ::testing::Invoke; using ::testing::InSequence; using ::testing::Return; using ::testing::StrEq; using ::testing::WithArg; using ::testing::WithArgs; class TestMockImageDeleterTrashRemoveRequest : public TestMockFixture { public: typedef TrashRemoveRequest<librbd::MockTestImageCtx> MockTrashRemoveRequest; typedef SnapshotPurgeRequest<librbd::MockTestImageCtx> MockSnapshotPurgeRequest; typedef librbd::TrashWatcher<librbd::MockTestImageCtx> MockTrashWatcher; typedef librbd::trash::RemoveRequest<librbd::MockTestImageCtx> MockLibrbdTrashRemoveRequest; void expect_trash_get(const cls::rbd::TrashImageSpec& trash_spec, int r) { using ceph::encode; EXPECT_CALL(get_mock_io_ctx(m_local_io_ctx), exec(StrEq(RBD_TRASH), _, StrEq("rbd"), StrEq("trash_get"), _, _, _, _)) .WillOnce(WithArg<5>(Invoke([trash_spec, r](bufferlist* bl) { encode(trash_spec, *bl); return r; }))); } void expect_trash_state_set(const std::string& image_id, int r) { bufferlist in_bl; encode(image_id, in_bl); encode(cls::rbd::TRASH_IMAGE_STATE_REMOVING, in_bl); encode(cls::rbd::TRASH_IMAGE_STATE_NORMAL, 
in_bl); EXPECT_CALL(get_mock_io_ctx(m_local_io_ctx), exec(StrEq(RBD_TRASH), _, StrEq("rbd"), StrEq("trash_state_set"), ContentsEqual(in_bl), _, _, _)) .WillOnce(Return(r)); } void expect_get_snapcontext(const std::string& image_id, const ::SnapContext &snapc, int r) { bufferlist bl; encode(snapc, bl); EXPECT_CALL(get_mock_io_ctx(m_local_io_ctx), exec(librbd::util::header_name(image_id), _, StrEq("rbd"), StrEq("get_snapcontext"), _, _, _, _)) .WillOnce(DoAll(WithArg<5>(Invoke([bl](bufferlist *out_bl) { *out_bl = bl; })), Return(r))); } void expect_snapshot_purge(MockSnapshotPurgeRequest &snapshot_purge_request, const std::string &image_id, int r) { EXPECT_CALL(snapshot_purge_request, construct(image_id)); EXPECT_CALL(snapshot_purge_request, send()) .WillOnce(Invoke([this, &snapshot_purge_request, r]() { m_threads->work_queue->queue( snapshot_purge_request.on_finish, r); })); } void expect_image_remove(MockLibrbdTrashRemoveRequest &image_remove_request, const std::string &image_id, int r) { EXPECT_CALL(image_remove_request, construct(image_id)); EXPECT_CALL(image_remove_request, send()) .WillOnce(Invoke([this, &image_remove_request, r]() { m_threads->work_queue->queue( image_remove_request.on_finish, r); })); } void expect_notify_image_removed(MockTrashWatcher& mock_trash_watcher, const std::string& image_id) { EXPECT_CALL(mock_trash_watcher, notify_image_removed(image_id, _)) .WillOnce(WithArg<1>(Invoke([this](Context *ctx) { m_threads->work_queue->queue(ctx, 0); }))); } }; TEST_F(TestMockImageDeleterTrashRemoveRequest, Success) { InSequence seq; cls::rbd::TrashImageSpec trash_image_spec{ cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "image name", {}, {}}; expect_trash_get(trash_image_spec, 0); expect_trash_state_set("image id", 0); expect_get_snapcontext("image id", {1, {1}}, 0); MockSnapshotPurgeRequest mock_snapshot_purge_request; expect_snapshot_purge(mock_snapshot_purge_request, "image id", 0); MockLibrbdTrashRemoveRequest mock_image_remove_request; 
expect_image_remove(mock_image_remove_request, "image id", 0); MockTrashWatcher mock_trash_watcher; expect_notify_image_removed(mock_trash_watcher, "image id"); C_SaferCond ctx; ErrorResult error_result; auto req = MockTrashRemoveRequest::create(m_local_io_ctx, "image id", &error_result, m_threads->work_queue, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageDeleterTrashRemoveRequest, TrashDNE) { InSequence seq; cls::rbd::TrashImageSpec trash_image_spec{ cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "image name", {}, {}}; expect_trash_get(trash_image_spec, -ENOENT); C_SaferCond ctx; ErrorResult error_result; auto req = MockTrashRemoveRequest::create(m_local_io_ctx, "image id", &error_result, m_threads->work_queue, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageDeleterTrashRemoveRequest, TrashError) { InSequence seq; cls::rbd::TrashImageSpec trash_image_spec{ cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "image name", {}, {}}; expect_trash_get(trash_image_spec, -EPERM); C_SaferCond ctx; ErrorResult error_result; auto req = MockTrashRemoveRequest::create(m_local_io_ctx, "image id", &error_result, m_threads->work_queue, &ctx); req->send(); ASSERT_EQ(-EPERM, ctx.wait()); } TEST_F(TestMockImageDeleterTrashRemoveRequest, TrashSourceIncorrect) { InSequence seq; cls::rbd::TrashImageSpec trash_image_spec{ cls::rbd::TRASH_IMAGE_SOURCE_USER, "image name", {}, {}}; expect_trash_get(trash_image_spec, 0); C_SaferCond ctx; ErrorResult error_result; auto req = MockTrashRemoveRequest::create(m_local_io_ctx, "image id", &error_result, m_threads->work_queue, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageDeleterTrashRemoveRequest, TrashStateIncorrect) { InSequence seq; cls::rbd::TrashImageSpec trash_image_spec{ cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "image name", {}, {}}; trash_image_spec.state = cls::rbd::TRASH_IMAGE_STATE_RESTORING; expect_trash_get(trash_image_spec, 0); C_SaferCond ctx; ErrorResult error_result; auto req = 
MockTrashRemoveRequest::create(m_local_io_ctx, "image id", &error_result, m_threads->work_queue, &ctx); req->send(); ASSERT_EQ(-EBUSY, ctx.wait()); ASSERT_EQ(ERROR_RESULT_RETRY_IMMEDIATELY, error_result); } TEST_F(TestMockImageDeleterTrashRemoveRequest, TrashSetStateDNE) { InSequence seq; cls::rbd::TrashImageSpec trash_image_spec{ cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "image name", {}, {}}; expect_trash_get(trash_image_spec, 0); expect_trash_state_set("image id", -ENOENT); C_SaferCond ctx; ErrorResult error_result; auto req = MockTrashRemoveRequest::create(m_local_io_ctx, "image id", &error_result, m_threads->work_queue, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageDeleterTrashRemoveRequest, TrashSetStateError) { InSequence seq; cls::rbd::TrashImageSpec trash_image_spec{ cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "image name", {}, {}}; expect_trash_get(trash_image_spec, 0); expect_trash_state_set("image id", -EPERM); C_SaferCond ctx; ErrorResult error_result; auto req = MockTrashRemoveRequest::create(m_local_io_ctx, "image id", &error_result, m_threads->work_queue, &ctx); req->send(); ASSERT_EQ(-EPERM, ctx.wait()); } TEST_F(TestMockImageDeleterTrashRemoveRequest, GetSnapContextDNE) { InSequence seq; cls::rbd::TrashImageSpec trash_image_spec{ cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "image name", {}, {}}; expect_trash_get(trash_image_spec, 0); expect_trash_state_set("image id", 0); expect_get_snapcontext("image id", {1, {1}}, -ENOENT); MockLibrbdTrashRemoveRequest mock_image_remove_request; expect_image_remove(mock_image_remove_request, "image id", 0); MockTrashWatcher mock_trash_watcher; expect_notify_image_removed(mock_trash_watcher, "image id"); C_SaferCond ctx; ErrorResult error_result; auto req = MockTrashRemoveRequest::create(m_local_io_ctx, "image id", &error_result, m_threads->work_queue, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageDeleterTrashRemoveRequest, GetSnapContextError) { InSequence seq; 
cls::rbd::TrashImageSpec trash_image_spec{ cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "image name", {}, {}}; expect_trash_get(trash_image_spec, 0); expect_trash_state_set("image id", 0); expect_get_snapcontext("image id", {1, {1}}, -EINVAL); C_SaferCond ctx; ErrorResult error_result; auto req = MockTrashRemoveRequest::create(m_local_io_ctx, "image id", &error_result, m_threads->work_queue, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageDeleterTrashRemoveRequest, PurgeSnapshotBusy) { InSequence seq; cls::rbd::TrashImageSpec trash_image_spec{ cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "image name", {}, {}}; expect_trash_get(trash_image_spec, 0); expect_trash_state_set("image id", 0); expect_get_snapcontext("image id", {1, {1}}, 0); MockSnapshotPurgeRequest mock_snapshot_purge_request; expect_snapshot_purge(mock_snapshot_purge_request, "image id", -EBUSY); C_SaferCond ctx; ErrorResult error_result; auto req = MockTrashRemoveRequest::create(m_local_io_ctx, "image id", &error_result, m_threads->work_queue, &ctx); req->send(); ASSERT_EQ(-EBUSY, ctx.wait()); ASSERT_EQ(ERROR_RESULT_RETRY_IMMEDIATELY, error_result); } TEST_F(TestMockImageDeleterTrashRemoveRequest, PurgeSnapshotError) { InSequence seq; cls::rbd::TrashImageSpec trash_image_spec{ cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "image name", {}, {}}; expect_trash_get(trash_image_spec, 0); expect_trash_state_set("image id", 0); expect_get_snapcontext("image id", {1, {1}}, 0); MockSnapshotPurgeRequest mock_snapshot_purge_request; expect_snapshot_purge(mock_snapshot_purge_request, "image id", -EINVAL); C_SaferCond ctx; ErrorResult error_result; auto req = MockTrashRemoveRequest::create(m_local_io_ctx, "image id", &error_result, m_threads->work_queue, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageDeleterTrashRemoveRequest, RemoveError) { InSequence seq; cls::rbd::TrashImageSpec trash_image_spec{ cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "image name", {}, {}}; 
expect_trash_get(trash_image_spec, 0); expect_trash_state_set("image id", 0); expect_get_snapcontext("image id", {1, {1}}, 0); MockSnapshotPurgeRequest mock_snapshot_purge_request; expect_snapshot_purge(mock_snapshot_purge_request, "image id", 0); MockLibrbdTrashRemoveRequest mock_image_remove_request; expect_image_remove(mock_image_remove_request, "image id", -EINVAL); C_SaferCond ctx; ErrorResult error_result; auto req = MockTrashRemoveRequest::create(m_local_io_ctx, "image id", &error_result, m_threads->work_queue, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } } // namespace image_deleter } // namespace mirror } // namespace rbd
15,149
32.370044
117
cc
null
ceph-main/src/test/rbd_mirror/image_deleter/test_mock_TrashWatcher.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/rbd_mirror/test_mock_fixture.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "test/librados_test_stub/MockTestMemRadosClient.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/rbd_mirror/mock/MockContextWQ.h" #include "test/rbd_mirror/mock/MockSafeTimer.h" #include "librbd/TrashWatcher.h" #include "tools/rbd_mirror/Threads.h" #include "tools/rbd_mirror/image_deleter/TrashWatcher.h" namespace librbd { namespace { struct MockTestImageCtx : public librbd::MockImageCtx { MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace struct MockTrashWatcher { static MockTrashWatcher *s_instance; static MockTrashWatcher &get_instance() { ceph_assert(s_instance != nullptr); return *s_instance; } MockTrashWatcher() { s_instance = this; } MOCK_CONST_METHOD0(is_unregistered, bool()); MOCK_METHOD1(register_watch, void(Context*)); MOCK_METHOD1(unregister_watch, void(Context*)); }; template <> struct TrashWatcher<MockTestImageCtx> { static TrashWatcher *s_instance; TrashWatcher(librados::IoCtx &io_ctx, ::MockContextWQ *work_queue) { s_instance = this; } virtual ~TrashWatcher() { } static TrashWatcher<MockTestImageCtx> &get_instance() { ceph_assert(s_instance != nullptr); return *s_instance; } virtual void handle_rewatch_complete(int r) = 0; virtual void handle_image_added(const std::string &image_id, const cls::rbd::TrashImageSpec& spec) = 0; virtual void handle_image_removed(const std::string &image_id) = 0; bool is_unregistered() const { return MockTrashWatcher::get_instance().is_unregistered(); } void register_watch(Context *ctx) { MockTrashWatcher::get_instance().register_watch(ctx); } void unregister_watch(Context *ctx) { MockTrashWatcher::get_instance().unregister_watch(ctx); } }; MockTrashWatcher *MockTrashWatcher::s_instance = nullptr; TrashWatcher<MockTestImageCtx> 
*TrashWatcher<MockTestImageCtx>::s_instance = nullptr; } // namespace librbd namespace rbd { namespace mirror { template <> struct Threads<librbd::MockTestImageCtx> { MockSafeTimer *timer; ceph::mutex &timer_lock; MockContextWQ *work_queue; Threads(Threads<librbd::ImageCtx> *threads) : timer(new MockSafeTimer()), timer_lock(threads->timer_lock), work_queue(new MockContextWQ()) { } ~Threads() { delete timer; delete work_queue; } }; } // namespace mirror } // namespace rbd #include "tools/rbd_mirror/image_deleter/TrashWatcher.cc" namespace rbd { namespace mirror { namespace image_deleter { using ::testing::_; using ::testing::DoAll; using ::testing::InSequence; using ::testing::Invoke; using ::testing::Return; using ::testing::ReturnArg; using ::testing::StrEq; using ::testing::WithArg; class TestMockImageDeleterTrashWatcher : public TestMockFixture { public: typedef TrashWatcher<librbd::MockTestImageCtx> MockTrashWatcher; typedef Threads<librbd::MockTestImageCtx> MockThreads; typedef librbd::MockTrashWatcher MockLibrbdTrashWatcher; typedef librbd::TrashWatcher<librbd::MockTestImageCtx> LibrbdTrashWatcher; struct MockListener : TrashListener { MOCK_METHOD2(handle_trash_image, void(const std::string&, const ceph::real_clock::time_point&)); }; void expect_work_queue(MockThreads &mock_threads) { EXPECT_CALL(*mock_threads.work_queue, queue(_, _)) .WillRepeatedly(Invoke([this](Context *ctx, int r) { m_threads->work_queue->queue(ctx, r); })); } void expect_trash_watcher_is_unregistered(MockLibrbdTrashWatcher &mock_trash_watcher, bool unregistered) { EXPECT_CALL(mock_trash_watcher, is_unregistered()) .WillOnce(Return(unregistered)); } void expect_trash_watcher_register(MockLibrbdTrashWatcher &mock_trash_watcher, int r) { EXPECT_CALL(mock_trash_watcher, register_watch(_)) .WillOnce(CompleteContext(r)); } void expect_trash_watcher_unregister(MockLibrbdTrashWatcher &mock_trash_watcher, int r) { EXPECT_CALL(mock_trash_watcher, unregister_watch(_)) .WillOnce(CompleteContext(r)); 
} void expect_create_trash(librados::IoCtx &io_ctx, int r) { EXPECT_CALL(get_mock_io_ctx(io_ctx), create(RBD_TRASH, false, _)) .WillOnce(Return(r)); } void expect_trash_list(librados::IoCtx &io_ctx, const std::string& last_image_id, std::map<std::string, cls::rbd::TrashImageSpec>&& images, int r) { bufferlist bl; encode(last_image_id, bl); encode(static_cast<size_t>(1024), bl); bufferlist out_bl; encode(images, out_bl); EXPECT_CALL(get_mock_io_ctx(io_ctx), exec(RBD_TRASH, _, StrEq("rbd"), StrEq("trash_list"), ContentsEqual(bl), _, _, _)) .WillOnce(DoAll(WithArg<5>(Invoke([out_bl](bufferlist *bl) { *bl = out_bl; })), Return(r))); } void expect_timer_add_event(MockThreads &mock_threads) { EXPECT_CALL(*mock_threads.timer, add_event_after(_, _)) .WillOnce(DoAll(WithArg<1>(Invoke([this](Context *ctx) { auto wrapped_ctx = new LambdaContext([this, ctx](int r) { std::lock_guard timer_locker{m_threads->timer_lock}; ctx->complete(r); }); m_threads->work_queue->queue(wrapped_ctx, 0); })), ReturnArg<1>())); } void expect_handle_trash_image(MockListener& mock_listener, const std::string& global_image_id) { EXPECT_CALL(mock_listener, handle_trash_image(global_image_id, _)); } int when_shut_down(MockTrashWatcher &mock_trash_watcher) { C_SaferCond ctx; mock_trash_watcher.shut_down(&ctx); return ctx.wait(); } }; TEST_F(TestMockImageDeleterTrashWatcher, EmptyPool) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; expect_create_trash(m_local_io_ctx, 0); MockLibrbdTrashWatcher mock_librbd_trash_watcher; expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, true); expect_trash_watcher_register(mock_librbd_trash_watcher, 0); expect_trash_list(m_local_io_ctx, "", {}, 0); MockListener mock_listener; MockTrashWatcher mock_trash_watcher(m_local_io_ctx, &mock_threads, mock_listener); C_SaferCond ctx; mock_trash_watcher.init(&ctx); ASSERT_EQ(0, ctx.wait()); expect_trash_watcher_unregister(mock_librbd_trash_watcher, 0); ASSERT_EQ(0, 
when_shut_down(mock_trash_watcher)); } TEST_F(TestMockImageDeleterTrashWatcher, NonEmptyPool) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); MockListener mock_listener; expect_handle_trash_image(mock_listener, "image0"); InSequence seq; expect_create_trash(m_local_io_ctx, 0); MockLibrbdTrashWatcher mock_librbd_trash_watcher; expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, true); expect_trash_watcher_register(mock_librbd_trash_watcher, 0); std::map<std::string, cls::rbd::TrashImageSpec> images; images["image0"] = {cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "name", {}, {}}; for (auto idx = 1; idx < 1024; ++idx) { images["image" + stringify(idx)] = {}; } expect_trash_list(m_local_io_ctx, "", std::move(images), 0); images.clear(); for (auto idx = 1024; idx < 2000; ++idx) { images["image" + stringify(idx)] = {}; } expect_trash_list(m_local_io_ctx, "image999", std::move(images), 0); MockTrashWatcher mock_trash_watcher(m_local_io_ctx, &mock_threads, mock_listener); C_SaferCond ctx; mock_trash_watcher.init(&ctx); ASSERT_EQ(0, ctx.wait()); m_threads->work_queue->drain(); expect_trash_watcher_unregister(mock_librbd_trash_watcher, 0); ASSERT_EQ(0, when_shut_down(mock_trash_watcher)); } TEST_F(TestMockImageDeleterTrashWatcher, Notify) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); MockListener mock_listener; expect_handle_trash_image(mock_listener, "image1"); InSequence seq; expect_create_trash(m_local_io_ctx, 0); MockLibrbdTrashWatcher mock_librbd_trash_watcher; expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, true); expect_trash_watcher_register(mock_librbd_trash_watcher, 0); expect_trash_list(m_local_io_ctx, "", {}, 0); MockTrashWatcher mock_trash_watcher(m_local_io_ctx, &mock_threads, mock_listener); C_SaferCond ctx; mock_trash_watcher.init(&ctx); ASSERT_EQ(0, ctx.wait()); LibrbdTrashWatcher::get_instance().handle_image_added( "image1", {cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "name", {}, {}}); 
m_threads->work_queue->drain(); expect_trash_watcher_unregister(mock_librbd_trash_watcher, 0); ASSERT_EQ(0, when_shut_down(mock_trash_watcher)); } TEST_F(TestMockImageDeleterTrashWatcher, CreateBlocklist) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; expect_create_trash(m_local_io_ctx, -EBLOCKLISTED); MockListener mock_listener; MockTrashWatcher mock_trash_watcher(m_local_io_ctx, &mock_threads, mock_listener); C_SaferCond ctx; mock_trash_watcher.init(&ctx); ASSERT_EQ(-EBLOCKLISTED, ctx.wait()); MockLibrbdTrashWatcher mock_librbd_trash_watcher; expect_trash_watcher_unregister(mock_librbd_trash_watcher, 0); ASSERT_EQ(0, when_shut_down(mock_trash_watcher)); } TEST_F(TestMockImageDeleterTrashWatcher, CreateDNE) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; expect_create_trash(m_local_io_ctx, -ENOENT); MockListener mock_listener; MockTrashWatcher mock_trash_watcher(m_local_io_ctx, &mock_threads, mock_listener); C_SaferCond ctx; mock_trash_watcher.init(&ctx); ASSERT_EQ(-ENOENT, ctx.wait()); MockLibrbdTrashWatcher mock_librbd_trash_watcher; expect_trash_watcher_unregister(mock_librbd_trash_watcher, 0); ASSERT_EQ(0, when_shut_down(mock_trash_watcher)); } TEST_F(TestMockImageDeleterTrashWatcher, CreateError) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; expect_create_trash(m_local_io_ctx, -EINVAL); expect_timer_add_event(mock_threads); expect_create_trash(m_local_io_ctx, 0); MockLibrbdTrashWatcher mock_librbd_trash_watcher; expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, true); expect_trash_watcher_register(mock_librbd_trash_watcher, 0); MockListener mock_listener; MockTrashWatcher mock_trash_watcher(m_local_io_ctx, &mock_threads, mock_listener); C_SaferCond ctx; mock_trash_watcher.init(&ctx); ASSERT_EQ(0, ctx.wait()); expect_trash_watcher_unregister(mock_librbd_trash_watcher, 0); ASSERT_EQ(0, when_shut_down(mock_trash_watcher)); } 
TEST_F(TestMockImageDeleterTrashWatcher, RegisterWatcherBlocklist) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; expect_create_trash(m_local_io_ctx, 0); MockLibrbdTrashWatcher mock_librbd_trash_watcher; expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, true); expect_trash_watcher_register(mock_librbd_trash_watcher, -EBLOCKLISTED); MockListener mock_listener; MockTrashWatcher mock_trash_watcher(m_local_io_ctx, &mock_threads, mock_listener); C_SaferCond ctx; mock_trash_watcher.init(&ctx); ASSERT_EQ(-EBLOCKLISTED, ctx.wait()); expect_trash_watcher_unregister(mock_librbd_trash_watcher, 0); ASSERT_EQ(0, when_shut_down(mock_trash_watcher)); } TEST_F(TestMockImageDeleterTrashWatcher, RegisterWatcherError) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; expect_create_trash(m_local_io_ctx, 0); MockLibrbdTrashWatcher mock_librbd_trash_watcher; expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, true); expect_trash_watcher_register(mock_librbd_trash_watcher, -EINVAL); expect_timer_add_event(mock_threads); expect_create_trash(m_local_io_ctx, 0); expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, true); expect_trash_watcher_register(mock_librbd_trash_watcher, 0); MockListener mock_listener; MockTrashWatcher mock_trash_watcher(m_local_io_ctx, &mock_threads, mock_listener); C_SaferCond ctx; mock_trash_watcher.init(&ctx); ASSERT_EQ(0, ctx.wait()); expect_trash_watcher_unregister(mock_librbd_trash_watcher, 0); ASSERT_EQ(0, when_shut_down(mock_trash_watcher)); } TEST_F(TestMockImageDeleterTrashWatcher, TrashListBlocklist) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; expect_create_trash(m_local_io_ctx, 0); MockLibrbdTrashWatcher mock_librbd_trash_watcher; expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, true); expect_trash_watcher_register(mock_librbd_trash_watcher, 0); expect_trash_list(m_local_io_ctx, "", 
{}, -EBLOCKLISTED); MockListener mock_listener; MockTrashWatcher mock_trash_watcher(m_local_io_ctx, &mock_threads, mock_listener); C_SaferCond ctx; mock_trash_watcher.init(&ctx); ASSERT_EQ(-EBLOCKLISTED, ctx.wait()); expect_trash_watcher_unregister(mock_librbd_trash_watcher, 0); ASSERT_EQ(0, when_shut_down(mock_trash_watcher)); } TEST_F(TestMockImageDeleterTrashWatcher, TrashListError) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; expect_create_trash(m_local_io_ctx, 0); MockLibrbdTrashWatcher mock_librbd_trash_watcher; expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, true); expect_trash_watcher_register(mock_librbd_trash_watcher, 0); expect_trash_list(m_local_io_ctx, "", {}, -EINVAL); expect_timer_add_event(mock_threads); expect_create_trash(m_local_io_ctx, 0); expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, false); expect_trash_list(m_local_io_ctx, "", {}, 0); MockListener mock_listener; MockTrashWatcher mock_trash_watcher(m_local_io_ctx, &mock_threads, mock_listener); C_SaferCond ctx; mock_trash_watcher.init(&ctx); ASSERT_EQ(0, ctx.wait()); expect_trash_watcher_unregister(mock_librbd_trash_watcher, 0); ASSERT_EQ(0, when_shut_down(mock_trash_watcher)); } TEST_F(TestMockImageDeleterTrashWatcher, Rewatch) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; expect_create_trash(m_local_io_ctx, 0); MockLibrbdTrashWatcher mock_librbd_trash_watcher; expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, true); expect_trash_watcher_register(mock_librbd_trash_watcher, 0); expect_trash_list(m_local_io_ctx, "", {}, 0); MockListener mock_listener; MockTrashWatcher mock_trash_watcher(m_local_io_ctx, &mock_threads, mock_listener); C_SaferCond ctx; mock_trash_watcher.init(&ctx); ASSERT_EQ(0, ctx.wait()); expect_timer_add_event(mock_threads); expect_create_trash(m_local_io_ctx, 0); expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, false); 
expect_trash_list(m_local_io_ctx, "", {}, 0); LibrbdTrashWatcher::get_instance().handle_rewatch_complete(0); m_threads->work_queue->drain(); expect_trash_watcher_unregister(mock_librbd_trash_watcher, 0); ASSERT_EQ(0, when_shut_down(mock_trash_watcher)); } TEST_F(TestMockImageDeleterTrashWatcher, RewatchBlocklist) { MockThreads mock_threads(m_threads); expect_work_queue(mock_threads); InSequence seq; expect_create_trash(m_local_io_ctx, 0); MockLibrbdTrashWatcher mock_librbd_trash_watcher; expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, true); expect_trash_watcher_register(mock_librbd_trash_watcher, 0); expect_trash_list(m_local_io_ctx, "", {}, 0); MockListener mock_listener; MockTrashWatcher mock_trash_watcher(m_local_io_ctx, &mock_threads, mock_listener); C_SaferCond ctx; mock_trash_watcher.init(&ctx); ASSERT_EQ(0, ctx.wait()); LibrbdTrashWatcher::get_instance().handle_rewatch_complete(-EBLOCKLISTED); m_threads->work_queue->drain(); expect_trash_watcher_unregister(mock_librbd_trash_watcher, 0); ASSERT_EQ(0, when_shut_down(mock_trash_watcher)); } } // namespace image_deleter } // namespace mirror } // namespace rbd
16,868
31.440385
87
cc
null
ceph-main/src/test/rbd_mirror/image_map/test_Policy.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "include/Context.h"
#include "test/rbd_mirror/test_fixture.h"
#include "tools/rbd_mirror/image_map/Types.h"
#include "tools/rbd_mirror/image_map/SimplePolicy.h"
#include "include/stringify.h"
#include "common/Thread.h"

// Registration hook invoked by the test binary; intentionally empty.
void register_test_image_policy() {
}

namespace rbd {
namespace mirror {
namespace image_map {

// Fixture that exercises the image-map Policy action state machine
// (SimplePolicy implementation): adding/removing images and shuffling
// them when mirror-daemon instances come and go.
//
// Convention visible throughout these tests: start_action() returns the
// next scheduled ACTION_TYPE_* for an image, and finish_action() returns
// true while further actions remain queued for that image (the test then
// calls start_action() again) and false once the sequence is complete.
class TestImageMapPolicy : public TestFixture {
public:
  void SetUp() override {
    TestFixture::SetUp();

    // Disable migration throttling so shuffle actions are scheduled
    // immediately instead of being rate limited.
    EXPECT_EQ(0, _rados->conf_set("rbd_mirror_image_policy_migration_throttle",
                                  "0"));

    CephContext *cct = reinterpret_cast<CephContext *>(m_local_io_ctx.cct());
    std::string policy_type = cct->_conf.get_val<std::string>(
      "rbd_mirror_image_policy_type");

    // Only the simple policy is covered here; abort on anything else so
    // a misconfigured run fails loudly rather than testing the wrong type.
    if (policy_type == "none" || policy_type == "simple") {
      m_policy = image_map::SimplePolicy::create(m_local_io_ctx);
    } else {
      ceph_abort();
    }

    // Start each test from an empty image map.
    m_policy->init({});
  }

  void TearDown() override {
    TestFixture::TearDown();
    delete m_policy;
  }

  // Add an image and drive it through the full mapping sequence:
  // MAP_UPDATE then ACQUIRE, after which no further actions are queued.
  void map_image(const std::string &global_image_id) {
    ASSERT_TRUE(m_policy->add_image(global_image_id));

    ASSERT_EQ(ACTION_TYPE_MAP_UPDATE,
              m_policy->start_action(global_image_id));
    ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));

    ASSERT_EQ(ACTION_TYPE_ACQUIRE, m_policy->start_action(global_image_id));
    ASSERT_FALSE(m_policy->finish_action(global_image_id, 0));
  }

  // Remove an image and drive the teardown sequence:
  // RELEASE then MAP_REMOVE, after which no further actions are queued.
  void unmap_image(const std::string &global_image_id) {
    ASSERT_TRUE(m_policy->remove_image(global_image_id));

    ASSERT_EQ(ACTION_TYPE_RELEASE, m_policy->start_action(global_image_id));
    ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));

    ASSERT_EQ(ACTION_TYPE_MAP_REMOVE,
              m_policy->start_action(global_image_id));
    ASSERT_FALSE(m_policy->finish_action(global_image_id, 0));
  }

  // Drive the re-mapping sequence used when an image moves between
  // instances: RELEASE, MAP_UPDATE, then ACQUIRE.
  void shuffle_image(const std::string &global_image_id) {
    ASSERT_EQ(ACTION_TYPE_RELEASE, m_policy->start_action(global_image_id));
    ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));

    ASSERT_EQ(ACTION_TYPE_MAP_UPDATE,
              m_policy->start_action(global_image_id));
    ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));

    ASSERT_EQ(ACTION_TYPE_ACQUIRE, m_policy->start_action(global_image_id));
    ASSERT_FALSE(m_policy->finish_action(global_image_id, 0));
  }

  // Owned raw pointer: created in SetUp(), deleted in TearDown().
  Policy *m_policy;
};

// Looking up an image that was never added yields the unmapped sentinel.
TEST_F(TestImageMapPolicy, NegativeLookup) {
  const std::string global_image_id = "global id 1";

  LookupInfo info = m_policy->lookup(global_image_id);
  ASSERT_TRUE(info.instance_id == UNMAPPED_INSTANCE_ID);
}

// init() with a pre-existing mapping (instance "9876") schedules an
// ACQUIRE for that image straight away.
TEST_F(TestImageMapPolicy, Init) {
  const std::string global_image_id = "global id 1";

  m_policy->init({{global_image_id, {"9876", {}, {}}}});

  ASSERT_EQ(ACTION_TYPE_ACQUIRE, m_policy->start_action(global_image_id));
  ASSERT_FALSE(m_policy->finish_action(global_image_id, 0));
}

// After the full map sequence the image resolves to a real instance.
TEST_F(TestImageMapPolicy, MapImage) {
  const std::string global_image_id = "global id 1";

  map_image(global_image_id);

  LookupInfo info = m_policy->lookup(global_image_id);
  ASSERT_TRUE(info.instance_id != UNMAPPED_INSTANCE_ID);
}

// Mapping then unmapping returns the image to the unmapped state.
TEST_F(TestImageMapPolicy, UnmapImage) {
  const std::string global_image_id = "global id 1";

  // map image
  map_image(global_image_id);

  LookupInfo info = m_policy->lookup(global_image_id);
  ASSERT_TRUE(info.instance_id != UNMAPPED_INSTANCE_ID);

  // unmap image
  unmap_image(global_image_id);

  info = m_policy->lookup(global_image_id);
  ASSERT_TRUE(info.instance_id == UNMAPPED_INSTANCE_ID);
}

// Adding a new instance ("9876") causes the policy to nominate some of
// the mapped images for shuffling; each shuffled image stays mapped.
TEST_F(TestImageMapPolicy, ShuffleImageAddInstance) {
  std::set<std::string> global_image_ids {
    "global id 1", "global id 2", "global id 3", "global id 4",
    "global id 5", "global id 6"
  };

  for (auto const &global_image_id : global_image_ids) {
    // map image
    map_image(global_image_id);

    LookupInfo info = m_policy->lookup(global_image_id);
    ASSERT_TRUE(info.instance_id != UNMAPPED_INSTANCE_ID);
  }

  std::set<std::string> shuffle_global_image_ids;
  m_policy->add_instances({"9876"}, &shuffle_global_image_ids);

  for (auto const &global_image_id : shuffle_global_image_ids) {
    shuffle_image(global_image_id);

    LookupInfo info = m_policy->lookup(global_image_id);
    ASSERT_TRUE(info.instance_id != UNMAPPED_INSTANCE_ID);
  }
}

// Removing an instance must nominate exactly the images that had been
// migrated onto it, and those images get shuffled back.
TEST_F(TestImageMapPolicy, ShuffleImageRemoveInstance) {
  std::set<std::string> global_image_ids {
    "global id 1", "global id 2", "global id 3", "global id 4",
    "global id 5"
  };

  std::set<std::string> shuffle_global_image_ids;
  m_policy->add_instances({stringify(m_local_io_ctx.get_instance_id())},
                          &shuffle_global_image_ids);

  for (auto const &global_image_id : global_image_ids) {
    // map image
    map_image(global_image_id);

    LookupInfo info = m_policy->lookup(global_image_id);
    ASSERT_TRUE(info.instance_id != UNMAPPED_INSTANCE_ID);
  }

  m_policy->add_instances({"9876"}, &shuffle_global_image_ids);

  for (auto const &global_image_id : shuffle_global_image_ids) {
    shuffle_image(global_image_id);

    LookupInfo info = m_policy->lookup(global_image_id);
    ASSERT_TRUE(info.instance_id != UNMAPPED_INSTANCE_ID);
  }

  // record which of the images got migrated to the new instance
  std::set<std::string> remapped_global_image_ids;
  for (auto const &global_image_id: shuffle_global_image_ids) {
    LookupInfo info = m_policy->lookup(global_image_id);
    if (info.instance_id == "9876") {
      remapped_global_image_ids.emplace(global_image_id);
    }
  }

  shuffle_global_image_ids.clear();
  m_policy->remove_instances({"9876"}, &shuffle_global_image_ids);

  // exactly the images on the removed instance are re-nominated
  ASSERT_TRUE(shuffle_global_image_ids == remapped_global_image_ids);

  for (auto const &global_image_id : shuffle_global_image_ids) {
    shuffle_image(global_image_id);

    LookupInfo info = m_policy->lookup(global_image_id);
    ASSERT_TRUE(info.instance_id != UNMAPPED_INSTANCE_ID);
  }
}

// A failed MAP_UPDATE (-EIO) is re-scheduled until it succeeds, and the
// image still ends up mapped.
TEST_F(TestImageMapPolicy, RetryMapUpdate) {
  const std::string global_image_id = "global id 1";

  ASSERT_TRUE(m_policy->add_image(global_image_id));
  ASSERT_EQ(ACTION_TYPE_MAP_UPDATE,
            m_policy->start_action(global_image_id));
  // on-disk map update failed
  ASSERT_TRUE(m_policy->finish_action(global_image_id, -EIO));

  // the same action is retried and now succeeds
  ASSERT_EQ(ACTION_TYPE_MAP_UPDATE,
            m_policy->start_action(global_image_id));
  ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));

  ASSERT_EQ(ACTION_TYPE_ACQUIRE, m_policy->start_action(global_image_id));
  ASSERT_FALSE(m_policy->finish_action(global_image_id, 0));

  LookupInfo info = m_policy->lookup(global_image_id);
  ASSERT_TRUE(info.instance_id != UNMAPPED_INSTANCE_ID);
}

// ACQUIRE failing with -EBLOCKLISTED while the instance set changes under
// it: the image is released, re-mapped, re-acquired, and can then be
// removed cleanly.
TEST_F(TestImageMapPolicy, MapFailureAndUnmap) {
  const std::string global_image_id = "global id 1";

  ASSERT_TRUE(m_policy->add_image(global_image_id));
  ASSERT_EQ(ACTION_TYPE_MAP_UPDATE,
            m_policy->start_action(global_image_id));
  ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));

  ASSERT_EQ(ACTION_TYPE_ACQUIRE, m_policy->start_action(global_image_id));

  // instance churn while the ACQUIRE is in flight must not nominate the
  // image for shuffling
  std::set<std::string> shuffle_global_image_ids;
  m_policy->add_instances({"9876"}, &shuffle_global_image_ids);
  ASSERT_TRUE(shuffle_global_image_ids.empty());
  m_policy->remove_instances({stringify(m_local_io_ctx.get_instance_id())},
                             &shuffle_global_image_ids);
  ASSERT_TRUE(shuffle_global_image_ids.empty());

  ASSERT_TRUE(m_policy->finish_action(global_image_id, -EBLOCKLISTED));

  // recovery: release (even if -ENOENT), re-map, re-acquire
  ASSERT_EQ(ACTION_TYPE_RELEASE, m_policy->start_action(global_image_id));
  ASSERT_TRUE(m_policy->finish_action(global_image_id, -ENOENT));

  ASSERT_EQ(ACTION_TYPE_MAP_UPDATE,
            m_policy->start_action(global_image_id));
  ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));

  ASSERT_EQ(ACTION_TYPE_ACQUIRE, m_policy->start_action(global_image_id));
  ASSERT_FALSE(m_policy->finish_action(global_image_id, 0));

  // now the image can be removed via the normal RELEASE/MAP_REMOVE path
  ASSERT_TRUE(m_policy->remove_image(global_image_id));

  ASSERT_EQ(ACTION_TYPE_RELEASE, m_policy->start_action(global_image_id));
  ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));

  ASSERT_EQ(ACTION_TYPE_MAP_REMOVE,
            m_policy->start_action(global_image_id));
  ASSERT_FALSE(m_policy->finish_action(global_image_id, 0));
}

// A shuffle whose ACQUIRE fails (-EBLOCKLISTED) after the target peer
// disappears is re-driven through RELEASE/MAP_UPDATE/ACQUIRE to a
// surviving instance.
TEST_F(TestImageMapPolicy, ReshuffleWithMapFailure) {
  std::set<std::string> global_image_ids {
    "global id 1", "global id 2", "global id 3", "global id 4",
    "global id 5", "global id 6"
  };

  std::set<std::string> shuffle_global_image_ids;
  m_policy->add_instances({stringify(m_local_io_ctx.get_instance_id())},
                          &shuffle_global_image_ids);

  for (auto const &global_image_id : global_image_ids) {
    // map image
    map_image(global_image_id);

    LookupInfo info = m_policy->lookup(global_image_id);
    ASSERT_TRUE(info.instance_id != UNMAPPED_INSTANCE_ID);
  }

  m_policy->add_instances({"9876"}, &shuffle_global_image_ids);
  ASSERT_FALSE(shuffle_global_image_ids.empty());

  // pick one nominated image and begin shuffling it
  const std::string global_image_id = *(shuffle_global_image_ids.begin());
  shuffle_global_image_ids.clear();

  ASSERT_EQ(ACTION_TYPE_RELEASE, m_policy->start_action(global_image_id));
  ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));

  ASSERT_EQ(ACTION_TYPE_MAP_UPDATE,
            m_policy->start_action(global_image_id));
  ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));

  ASSERT_EQ(ACTION_TYPE_ACQUIRE, m_policy->start_action(global_image_id));

  // peer unavailable
  m_policy->remove_instances({"9876"}, &shuffle_global_image_ids);
  ASSERT_TRUE(shuffle_global_image_ids.empty());

  ASSERT_TRUE(m_policy->finish_action(global_image_id, -EBLOCKLISTED));

  // the image is re-shuffled to a remaining instance
  ASSERT_EQ(ACTION_TYPE_RELEASE, m_policy->start_action(global_image_id));
  ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));

  ASSERT_EQ(ACTION_TYPE_MAP_UPDATE,
            m_policy->start_action(global_image_id));
  ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));

  ASSERT_EQ(ACTION_TYPE_ACQUIRE, m_policy->start_action(global_image_id));
  ASSERT_FALSE(m_policy->finish_action(global_image_id, 0));
}

// Same failure path as above, but the image is then removed; it must end
// up unmapped.
TEST_F(TestImageMapPolicy, ShuffleFailureAndRemove) {
  std::set<std::string> global_image_ids {
    "global id 1", "global id 2", "global id 3", "global id 4",
    "global id 5", "global id 6"
  };

  std::set<std::string> shuffle_global_image_ids;
  m_policy->add_instances({stringify(m_local_io_ctx.get_instance_id())},
                          &shuffle_global_image_ids);

  for (auto const &global_image_id : global_image_ids) {
    // map image
    map_image(global_image_id);

    LookupInfo info = m_policy->lookup(global_image_id);
    ASSERT_TRUE(info.instance_id != UNMAPPED_INSTANCE_ID);
  }

  m_policy->add_instances({"9876"}, &shuffle_global_image_ids);
  ASSERT_FALSE(shuffle_global_image_ids.empty());

  // pick one nominated image and begin shuffling it
  std::string global_image_id = *(shuffle_global_image_ids.begin());
  shuffle_global_image_ids.clear();

  ASSERT_EQ(ACTION_TYPE_RELEASE, m_policy->start_action(global_image_id));
  ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));

  ASSERT_EQ(ACTION_TYPE_MAP_UPDATE,
            m_policy->start_action(global_image_id));
  ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));

  ASSERT_EQ(ACTION_TYPE_ACQUIRE, m_policy->start_action(global_image_id));

  // peer unavailable
  m_policy->remove_instances({"9876"}, &shuffle_global_image_ids);
  ASSERT_TRUE(shuffle_global_image_ids.empty());

  ASSERT_TRUE(m_policy->finish_action(global_image_id, -EBLOCKLISTED));

  // recover onto a remaining instance
  ASSERT_EQ(ACTION_TYPE_RELEASE, m_policy->start_action(global_image_id));
  ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));

  ASSERT_EQ(ACTION_TYPE_MAP_UPDATE,
            m_policy->start_action(global_image_id));
  ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));

  ASSERT_EQ(ACTION_TYPE_ACQUIRE, m_policy->start_action(global_image_id));
  ASSERT_FALSE(m_policy->finish_action(global_image_id, 0));

  // remove the image and verify it ends up unmapped
  ASSERT_TRUE(m_policy->remove_image(global_image_id));

  ASSERT_EQ(ACTION_TYPE_RELEASE, m_policy->start_action(global_image_id));
  ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));

  ASSERT_EQ(ACTION_TYPE_MAP_REMOVE,
            m_policy->start_action(global_image_id));
  ASSERT_FALSE(m_policy->finish_action(global_image_id, 0));

  LookupInfo info = m_policy->lookup(global_image_id);
  ASSERT_TRUE(info.instance_id == UNMAPPED_INSTANCE_ID);
}

// An image initialized as mapped to a dead instance ("9876") whose
// ACQUIRE fails with -ENOENT is re-mapped to the newly added local
// instance (which itself triggers no shuffle nominations).
TEST_F(TestImageMapPolicy, InitialInstanceUpdate) {
  const std::string global_image_id = "global id 1";

  m_policy->init({{global_image_id, {"9876", {}, {}}}});

  ASSERT_EQ(ACTION_TYPE_ACQUIRE, m_policy->start_action(global_image_id));

  auto instance_id = stringify(m_local_io_ctx.get_instance_id());
  std::set<std::string> shuffle_global_image_ids;
  m_policy->add_instances({instance_id}, &shuffle_global_image_ids);
  ASSERT_EQ(0U, shuffle_global_image_ids.size());

  ASSERT_TRUE(m_policy->finish_action(global_image_id, -ENOENT));

  ASSERT_EQ(ACTION_TYPE_RELEASE, m_policy->start_action(global_image_id));
  ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));

  ASSERT_EQ(ACTION_TYPE_MAP_UPDATE,
            m_policy->start_action(global_image_id));
  ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));

  ASSERT_EQ(ACTION_TYPE_ACQUIRE, m_policy->start_action(global_image_id));
  ASSERT_FALSE(m_policy->finish_action(global_image_id, 0));
}

} // namespace image_map
} // namespace mirror
} // namespace rbd
13,298
34.18254
94
cc
null
ceph-main/src/test/rbd_mirror/image_replayer/test_mock_BootstrapRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/rbd_mirror/test_mock_fixture.h" #include "librbd/journal/TypeTraits.h" #include "tools/rbd_mirror/BaseRequest.h" #include "tools/rbd_mirror/InstanceWatcher.h" #include "tools/rbd_mirror/Threads.h" #include "tools/rbd_mirror/image_replayer/BootstrapRequest.h" #include "tools/rbd_mirror/image_replayer/OpenImageRequest.h" #include "tools/rbd_mirror/image_replayer/OpenLocalImageRequest.h" #include "tools/rbd_mirror/image_replayer/PrepareLocalImageRequest.h" #include "tools/rbd_mirror/image_replayer/PrepareRemoteImageRequest.h" #include "tools/rbd_mirror/image_replayer/StateBuilder.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/rbd_mirror/mock/image_sync/MockSyncPointHandler.h" #include "test/rbd_mirror/mock/MockBaseRequest.h" namespace librbd { namespace { struct MockTestImageCtx : public librbd::MockImageCtx { MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace } // namespace librbd namespace rbd { namespace mirror { class ProgressContext; template <> struct Threads<librbd::MockTestImageCtx> { ceph::mutex &timer_lock; SafeTimer *timer; librbd::asio::ContextWQ *work_queue; Threads(Threads<librbd::ImageCtx> *threads) : timer_lock(threads->timer_lock), timer(threads->timer), work_queue(threads->work_queue) { } }; template<> struct ImageSync<librbd::MockTestImageCtx> { static ImageSync* s_instance; Context *on_finish = nullptr; static ImageSync* create( Threads<librbd::MockTestImageCtx>* threads, librbd::MockTestImageCtx *local_image_ctx, librbd::MockTestImageCtx *remote_image_ctx, const std::string &local_mirror_uuid, image_sync::SyncPointHandler* sync_point_handler, InstanceWatcher<librbd::MockTestImageCtx> *instance_watcher, ProgressContext *progress_ctx, Context *on_finish) { ceph_assert(s_instance != nullptr); 
s_instance->on_finish = on_finish; return s_instance; } ImageSync() { ceph_assert(s_instance == nullptr); s_instance = this; } ~ImageSync() { s_instance = nullptr; } MOCK_METHOD0(get, void()); MOCK_METHOD0(put, void()); MOCK_METHOD0(send, void()); MOCK_METHOD0(cancel, void()); }; ImageSync<librbd::MockTestImageCtx>* ImageSync<librbd::MockTestImageCtx>::s_instance = nullptr; template<> struct InstanceWatcher<librbd::MockTestImageCtx> { }; namespace image_replayer { template<> struct OpenImageRequest<librbd::MockTestImageCtx> { static OpenImageRequest* s_instance; librbd::MockTestImageCtx **image_ctx = nullptr; Context *on_finish = nullptr; static OpenImageRequest* create(librados::IoCtx &io_ctx, librbd::MockTestImageCtx **image_ctx, const std::string &image_id, bool read_only, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->image_ctx = image_ctx; s_instance->on_finish = on_finish; s_instance->construct(io_ctx, image_id); return s_instance; } OpenImageRequest() { ceph_assert(s_instance == nullptr); s_instance = this; } ~OpenImageRequest() { s_instance = nullptr; } MOCK_METHOD2(construct, void(librados::IoCtx &io_ctx, const std::string &image_id)); MOCK_METHOD0(send, void()); }; template<> struct OpenLocalImageRequest<librbd::MockTestImageCtx> { static OpenLocalImageRequest* s_instance; librbd::MockTestImageCtx **image_ctx = nullptr; Context *on_finish = nullptr; static OpenLocalImageRequest* create(librados::IoCtx &local_io_ctx, librbd::MockTestImageCtx **local_image_ctx, const std::string &local_image_id, librbd::asio::ContextWQ *work_queue, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->image_ctx = local_image_ctx; s_instance->on_finish = on_finish; s_instance->construct(local_io_ctx, local_image_id); return s_instance; } OpenLocalImageRequest() { ceph_assert(s_instance == nullptr); s_instance = this; } ~OpenLocalImageRequest() { s_instance = nullptr; } MOCK_METHOD2(construct, void(librados::IoCtx &io_ctx, const 
std::string &image_id)); MOCK_METHOD0(send, void()); }; template<> struct PrepareLocalImageRequest<librbd::MockTestImageCtx> { static PrepareLocalImageRequest* s_instance; std::string *local_image_name = nullptr; StateBuilder<librbd::MockTestImageCtx>** state_builder = nullptr; Context *on_finish = nullptr; static PrepareLocalImageRequest* create(librados::IoCtx &, const std::string &global_image_id, std::string *local_image_name, StateBuilder<librbd::MockTestImageCtx>** state_builder, librbd::asio::ContextWQ *work_queue, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->local_image_name = local_image_name; s_instance->state_builder = state_builder; s_instance->on_finish = on_finish; return s_instance; } PrepareLocalImageRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; template<> struct PrepareRemoteImageRequest<librbd::MockTestImageCtx> { static PrepareRemoteImageRequest* s_instance; StateBuilder<librbd::MockTestImageCtx>** state_builder = nullptr; Context *on_finish = nullptr; static PrepareRemoteImageRequest* create(Threads<librbd::MockTestImageCtx> *threads, librados::IoCtx &, librados::IoCtx &, const std::string &global_image_id, const std::string &local_mirror_uuid, const RemotePoolMeta& remote_pool_meta, ::journal::CacheManagerHandler *cache_manager_handler, StateBuilder<librbd::MockTestImageCtx>** state_builder, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->state_builder = state_builder; s_instance->on_finish = on_finish; return s_instance; } PrepareRemoteImageRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; template<> struct StateBuilder<librbd::MockTestImageCtx> { static StateBuilder* s_instance; image_sync::MockSyncPointHandler mock_sync_point_handler; MockBaseRequest mock_base_request; librbd::MockTestImageCtx* local_image_ctx = nullptr; librbd::MockTestImageCtx* remote_image_ctx = nullptr; std::string local_image_id; std::string remote_mirror_uuid; std::string remote_image_id; 
static StateBuilder* create(const std::string&) { ceph_assert(s_instance != nullptr); return s_instance; } image_sync::MockSyncPointHandler* create_sync_point_handler() { return &mock_sync_point_handler; } StateBuilder() { s_instance = this; } MOCK_CONST_METHOD0(is_disconnected, bool()); MOCK_CONST_METHOD0(is_local_primary, bool()); MOCK_CONST_METHOD0(is_remote_primary, bool()); MOCK_CONST_METHOD0(is_linked, bool()); MOCK_CONST_METHOD0(replay_requires_remote_image, bool()); MOCK_METHOD1(close_remote_image, void(Context*)); MOCK_METHOD6(create_local_image_request, BaseRequest*(Threads<librbd::MockTestImageCtx>*, librados::IoCtx&, const std::string&, PoolMetaCache*, ProgressContext*, Context*)); MOCK_METHOD5(create_prepare_replay_request, BaseRequest*(const std::string&, ProgressContext*, bool*, bool*, Context*)); void destroy_sync_point_handler() { } void destroy() { } }; OpenImageRequest<librbd::MockTestImageCtx>* OpenImageRequest<librbd::MockTestImageCtx>::s_instance = nullptr; OpenLocalImageRequest<librbd::MockTestImageCtx>* OpenLocalImageRequest<librbd::MockTestImageCtx>::s_instance = nullptr; PrepareLocalImageRequest<librbd::MockTestImageCtx>* PrepareLocalImageRequest<librbd::MockTestImageCtx>::s_instance = nullptr; PrepareRemoteImageRequest<librbd::MockTestImageCtx>* PrepareRemoteImageRequest<librbd::MockTestImageCtx>::s_instance = nullptr; StateBuilder<librbd::MockTestImageCtx>* StateBuilder<librbd::MockTestImageCtx>::s_instance = nullptr; } // namespace image_replayer } // namespace mirror } // namespace rbd // template definitions #include "tools/rbd_mirror/image_replayer/BootstrapRequest.cc" namespace rbd { namespace mirror { namespace image_replayer { using ::testing::_; using ::testing::DoAll; using ::testing::InSequence; using ::testing::Invoke; using ::testing::Return; using ::testing::SetArgPointee; using ::testing::StrEq; using ::testing::WithArg; using ::testing::WithArgs; MATCHER_P(IsSameIoCtx, io_ctx, "") { return &get_mock_io_ctx(arg) == 
&get_mock_io_ctx(*io_ctx); } class TestMockImageReplayerBootstrapRequest : public TestMockFixture { public: typedef Threads<librbd::MockTestImageCtx> MockThreads; typedef BootstrapRequest<librbd::MockTestImageCtx> MockBootstrapRequest; typedef ImageSync<librbd::MockTestImageCtx> MockImageSync; typedef InstanceWatcher<librbd::MockTestImageCtx> MockInstanceWatcher; typedef OpenImageRequest<librbd::MockTestImageCtx> MockOpenImageRequest; typedef OpenLocalImageRequest<librbd::MockTestImageCtx> MockOpenLocalImageRequest; typedef PrepareLocalImageRequest<librbd::MockTestImageCtx> MockPrepareLocalImageRequest; typedef PrepareRemoteImageRequest<librbd::MockTestImageCtx> MockPrepareRemoteImageRequest; typedef StateBuilder<librbd::MockTestImageCtx> MockStateBuilder; typedef std::list<cls::journal::Tag> Tags; void SetUp() override { TestMockFixture::SetUp(); librbd::RBD rbd; ASSERT_EQ(0, create_image(rbd, m_remote_io_ctx, m_image_name, m_image_size)); ASSERT_EQ(0, open_image(m_remote_io_ctx, m_image_name, &m_remote_image_ctx)); ASSERT_EQ(0, create_image(rbd, m_local_io_ctx, m_image_name, m_image_size)); ASSERT_EQ(0, open_image(m_local_io_ctx, m_image_name, &m_local_image_ctx)); } void expect_send(MockPrepareLocalImageRequest &mock_request, MockStateBuilder& mock_state_builder, const std::string& local_image_id, const std::string& local_image_name, int r) { EXPECT_CALL(mock_request, send()) .WillOnce(Invoke([&mock_request, &mock_state_builder, local_image_id, local_image_name, r]() { if (r == 0) { *mock_request.state_builder = &mock_state_builder; mock_state_builder.local_image_id = local_image_id; *mock_request.local_image_name = local_image_name; } mock_request.on_finish->complete(r); })); } void expect_send(MockPrepareRemoteImageRequest& mock_request, MockStateBuilder& mock_state_builder, const std::string& remote_mirror_uuid, const std::string& remote_image_id, int r) { EXPECT_CALL(mock_request, send()) .WillOnce(Invoke([&mock_request, &mock_state_builder, 
remote_mirror_uuid, remote_image_id, r]() { if (r >= 0) { *mock_request.state_builder = &mock_state_builder; mock_state_builder.remote_image_id = remote_image_id; } mock_state_builder.remote_mirror_uuid = remote_mirror_uuid; mock_request.on_finish->complete(r); })); } void expect_is_local_primary(MockStateBuilder& mock_state_builder, bool is_primary) { EXPECT_CALL(mock_state_builder, is_local_primary()) .WillOnce(Return(is_primary)); } void expect_is_remote_primary(MockStateBuilder& mock_state_builder, bool is_primary) { EXPECT_CALL(mock_state_builder, is_remote_primary()) .WillOnce(Return(is_primary)); } void expect_is_linked(MockStateBuilder& mock_state_builder, bool is_linked) { EXPECT_CALL(mock_state_builder, is_linked()) .WillOnce(Return(is_linked)); } void expect_is_disconnected(MockStateBuilder& mock_state_builder, bool is_disconnected) { EXPECT_CALL(mock_state_builder, is_disconnected()) .WillOnce(Return(is_disconnected)); } void expect_replay_requires_remote_image(MockStateBuilder& mock_state_builder, bool requires_image) { EXPECT_CALL(mock_state_builder, replay_requires_remote_image()) .WillOnce(Return(requires_image)); } void expect_open_image(MockOpenImageRequest &mock_open_image_request, librados::IoCtx &io_ctx, const std::string &image_id, librbd::MockTestImageCtx &mock_image_ctx, int r) { EXPECT_CALL(mock_open_image_request, construct(IsSameIoCtx(&io_ctx), image_id)); EXPECT_CALL(mock_open_image_request, send()) .WillOnce(Invoke([this, &mock_open_image_request, &mock_image_ctx, r]() { *mock_open_image_request.image_ctx = &mock_image_ctx; m_threads->work_queue->queue(mock_open_image_request.on_finish, r); })); } void expect_open_local_image(MockOpenLocalImageRequest &mock_open_local_image_request, librados::IoCtx &io_ctx, const std::string &image_id, librbd::MockTestImageCtx *mock_image_ctx, int r) { EXPECT_CALL(mock_open_local_image_request, construct(IsSameIoCtx(&io_ctx), image_id)); EXPECT_CALL(mock_open_local_image_request, send()) 
.WillOnce(Invoke([this, &mock_open_local_image_request, mock_image_ctx, r]() { if (r >= 0) { *mock_open_local_image_request.image_ctx = mock_image_ctx; } m_threads->work_queue->queue(mock_open_local_image_request.on_finish, r); })); } void expect_close_remote_image( MockStateBuilder& mock_state_builder, int r) { EXPECT_CALL(mock_state_builder, close_remote_image(_)) .WillOnce(Invoke([&mock_state_builder, r] (Context* on_finish) { mock_state_builder.remote_image_ctx = nullptr; on_finish->complete(r); })); } void expect_create_local_image(MockStateBuilder& mock_state_builder, const std::string& local_image_id, int r) { EXPECT_CALL(mock_state_builder, create_local_image_request(_, _, _, _, _, _)) .WillOnce(WithArg<5>( Invoke([&mock_state_builder, local_image_id, r](Context* ctx) { if (r >= 0) { mock_state_builder.local_image_id = local_image_id; } mock_state_builder.mock_base_request.on_finish = ctx; return &mock_state_builder.mock_base_request; }))); EXPECT_CALL(mock_state_builder.mock_base_request, send()) .WillOnce(Invoke([this, &mock_state_builder, r]() { m_threads->work_queue->queue( mock_state_builder.mock_base_request.on_finish, r); })); } void expect_prepare_replay(MockStateBuilder& mock_state_builder, bool resync_requested, bool syncing, int r) { EXPECT_CALL(mock_state_builder, create_prepare_replay_request(_, _, _, _, _)) .WillOnce(WithArgs<2, 3, 4>( Invoke([&mock_state_builder, resync_requested, syncing, r] (bool* resync, bool* sync, Context* ctx) { if (r >= 0) { *resync = resync_requested; *sync = syncing; } mock_state_builder.mock_base_request.on_finish = ctx; return &mock_state_builder.mock_base_request; }))); EXPECT_CALL(mock_state_builder.mock_base_request, send()) .WillOnce(Invoke([this, &mock_state_builder, r]() { m_threads->work_queue->queue( mock_state_builder.mock_base_request.on_finish, r); })); } void expect_image_sync(MockImageSync &mock_image_sync, int r) { EXPECT_CALL(mock_image_sync, get()); EXPECT_CALL(mock_image_sync, send()) 
.WillOnce(Invoke([this, &mock_image_sync, r]() { m_threads->work_queue->queue(mock_image_sync.on_finish, r); })); EXPECT_CALL(mock_image_sync, put()); } MockBootstrapRequest *create_request(MockThreads* mock_threads, MockInstanceWatcher *mock_instance_watcher, const std::string &global_image_id, const std::string &local_mirror_uuid, Context *on_finish) { return new MockBootstrapRequest(mock_threads, m_local_io_ctx, m_remote_io_ctx, mock_instance_watcher, global_image_id, local_mirror_uuid, {"remote mirror uuid", "remote mirror peer uuid"}, nullptr, nullptr, nullptr, &m_mock_state_builder, &m_do_resync, on_finish); } librbd::ImageCtx *m_remote_image_ctx; librbd::ImageCtx *m_local_image_ctx = nullptr; MockStateBuilder* m_mock_state_builder = nullptr; bool m_do_resync = false; }; TEST_F(TestMockImageReplayerBootstrapRequest, Success) { InSequence seq; // prepare local image MockStateBuilder mock_state_builder; MockPrepareLocalImageRequest mock_prepare_local_image_request; expect_send(mock_prepare_local_image_request, mock_state_builder, m_local_image_ctx->id, m_local_image_ctx->name, 0); // prepare remote image MockPrepareRemoteImageRequest mock_prepare_remote_image_request; expect_send(mock_prepare_remote_image_request, mock_state_builder, "remote mirror uuid", m_remote_image_ctx->id, 0); expect_is_local_primary(mock_state_builder, false); expect_is_remote_primary(mock_state_builder, true); // open the remote image librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockOpenImageRequest mock_open_image_request; expect_open_image(mock_open_image_request, m_remote_io_ctx, mock_remote_image_ctx.id, mock_remote_image_ctx, 0); // open the local image librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockOpenLocalImageRequest mock_open_local_image_request; expect_open_local_image(mock_open_local_image_request, m_local_io_ctx, mock_local_image_ctx.id, &mock_local_image_ctx, 0); // prepare replay expect_prepare_replay(mock_state_builder, 
false, false, 0); expect_is_disconnected(mock_state_builder, false); // close remote image expect_replay_requires_remote_image(mock_state_builder, false); expect_close_remote_image(mock_state_builder, 0); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockInstanceWatcher mock_instance_watcher; MockBootstrapRequest *request = create_request( &mock_threads, &mock_instance_watcher, "global image id", "local mirror uuid", &ctx); request->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageReplayerBootstrapRequest, PrepareRemoteImageNotPrimaryLocalDNE) { InSequence seq; // prepare local image MockStateBuilder mock_state_builder; MockPrepareLocalImageRequest mock_prepare_local_image_request; expect_send(mock_prepare_local_image_request, mock_state_builder, m_local_image_ctx->id, m_local_image_ctx->name, -ENOENT); // prepare remote image MockPrepareRemoteImageRequest mock_prepare_remote_image_request; expect_send(mock_prepare_remote_image_request, mock_state_builder, "remote mirror uuid", m_remote_image_ctx->id, 0); expect_is_local_primary(mock_state_builder, false); expect_is_remote_primary(mock_state_builder, false); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockInstanceWatcher mock_instance_watcher; MockBootstrapRequest *request = create_request( &mock_threads, &mock_instance_watcher, "global image id", "local mirror uuid", &ctx); request->send(); ASSERT_EQ(-EREMOTEIO, ctx.wait()); } TEST_F(TestMockImageReplayerBootstrapRequest, PrepareRemoteImageNotPrimaryLocalUnlinked) { InSequence seq; // prepare local image MockStateBuilder mock_state_builder; MockPrepareLocalImageRequest mock_prepare_local_image_request; expect_send(mock_prepare_local_image_request, mock_state_builder, m_local_image_ctx->id, m_local_image_ctx->name, 0); // prepare remote image MockPrepareRemoteImageRequest mock_prepare_remote_image_request; expect_send(mock_prepare_remote_image_request, mock_state_builder, "remote mirror uuid", m_remote_image_ctx->id, 0); 
expect_is_local_primary(mock_state_builder, false); expect_is_remote_primary(mock_state_builder, false); expect_is_linked(mock_state_builder, false); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockInstanceWatcher mock_instance_watcher; MockBootstrapRequest *request = create_request( &mock_threads, &mock_instance_watcher, "global image id", "local mirror uuid", &ctx); request->send(); ASSERT_EQ(-EREMOTEIO, ctx.wait()); } TEST_F(TestMockImageReplayerBootstrapRequest, PrepareRemoteImageNotPrimaryLocalLinked) { InSequence seq; // prepare local image MockStateBuilder mock_state_builder; MockPrepareLocalImageRequest mock_prepare_local_image_request; expect_send(mock_prepare_local_image_request, mock_state_builder, m_local_image_ctx->id, m_local_image_ctx->name, 0); // prepare remote image MockPrepareRemoteImageRequest mock_prepare_remote_image_request; expect_send(mock_prepare_remote_image_request, mock_state_builder, "remote mirror uuid", m_remote_image_ctx->id, 0); expect_is_local_primary(mock_state_builder, false); expect_is_remote_primary(mock_state_builder, false); expect_is_linked(mock_state_builder, true); // open the remote image librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockOpenImageRequest mock_open_image_request; expect_open_image(mock_open_image_request, m_remote_io_ctx, mock_remote_image_ctx.id, mock_remote_image_ctx, 0); // open the local image librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockOpenLocalImageRequest mock_open_local_image_request; expect_open_local_image(mock_open_local_image_request, m_local_io_ctx, mock_local_image_ctx.id, &mock_local_image_ctx, 0); // prepare replay expect_prepare_replay(mock_state_builder, false, false, 0); expect_is_disconnected(mock_state_builder, false); // close remote image expect_replay_requires_remote_image(mock_state_builder, false); expect_close_remote_image(mock_state_builder, 0); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockInstanceWatcher 
mock_instance_watcher; MockBootstrapRequest *request = create_request( &mock_threads, &mock_instance_watcher, "global image id", "local mirror uuid", &ctx); request->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageReplayerBootstrapRequest, OpenLocalImageError) { InSequence seq; // prepare local image MockPrepareLocalImageRequest mock_prepare_local_image_request; MockStateBuilder mock_state_builder; expect_send(mock_prepare_local_image_request, mock_state_builder, m_local_image_ctx->id, m_local_image_ctx->name, 0); // prepare remote image MockPrepareRemoteImageRequest mock_prepare_remote_image_request; expect_send(mock_prepare_remote_image_request, mock_state_builder, "remote mirror uuid", m_remote_image_ctx->id, 0); expect_is_local_primary(mock_state_builder, false); expect_is_remote_primary(mock_state_builder, true); // open the remote image librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockOpenImageRequest mock_open_image_request; expect_open_image(mock_open_image_request, m_remote_io_ctx, mock_remote_image_ctx.id, mock_remote_image_ctx, 0); // open the local image librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockOpenLocalImageRequest mock_open_local_image_request; expect_open_local_image(mock_open_local_image_request, m_local_io_ctx, mock_local_image_ctx.id, &mock_local_image_ctx, -EINVAL); // close remote image expect_replay_requires_remote_image(mock_state_builder, false); expect_close_remote_image(mock_state_builder, 0); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockInstanceWatcher mock_instance_watcher; MockBootstrapRequest *request = create_request( &mock_threads, &mock_instance_watcher, "global image id", "local mirror uuid", &ctx); request->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerBootstrapRequest, OpenLocalImageDNE) { InSequence seq; // prepare local image MockPrepareLocalImageRequest mock_prepare_local_image_request; MockStateBuilder mock_state_builder; 
expect_send(mock_prepare_local_image_request, mock_state_builder, m_local_image_ctx->id, m_local_image_ctx->name, 0); // prepare remote image MockPrepareRemoteImageRequest mock_prepare_remote_image_request; expect_send(mock_prepare_remote_image_request, mock_state_builder, "remote mirror uuid", m_remote_image_ctx->id, 0); expect_is_local_primary(mock_state_builder, false); expect_is_remote_primary(mock_state_builder, true); // open the remote image librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockOpenImageRequest mock_open_image_request; expect_open_image(mock_open_image_request, m_remote_io_ctx, mock_remote_image_ctx.id, mock_remote_image_ctx, 0); // open the local image librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockOpenLocalImageRequest mock_open_local_image_request; expect_open_local_image(mock_open_local_image_request, m_local_io_ctx, mock_local_image_ctx.id, &mock_local_image_ctx, -ENOENT); // create local image expect_create_local_image(mock_state_builder, "local image id", 0); // re-open the local image expect_open_local_image(mock_open_local_image_request, m_local_io_ctx, "local image id", &mock_local_image_ctx, 0); // prepare replay expect_prepare_replay(mock_state_builder, false, false, 0); expect_is_disconnected(mock_state_builder, false); // close remote image expect_replay_requires_remote_image(mock_state_builder, false); expect_close_remote_image(mock_state_builder, 0); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockInstanceWatcher mock_instance_watcher; MockBootstrapRequest *request = create_request( &mock_threads, &mock_instance_watcher, "global image id", "local mirror uuid", &ctx); request->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageReplayerBootstrapRequest, OpenLocalImagePrimary) { InSequence seq; // prepare local image MockPrepareLocalImageRequest mock_prepare_local_image_request; MockStateBuilder mock_state_builder; expect_send(mock_prepare_local_image_request, 
mock_state_builder, m_local_image_ctx->id, m_local_image_ctx->name, 0); // prepare remote image MockPrepareRemoteImageRequest mock_prepare_remote_image_request; expect_send(mock_prepare_remote_image_request, mock_state_builder, "remote mirror uuid", m_remote_image_ctx->id, 0); expect_is_local_primary(mock_state_builder, false); expect_is_remote_primary(mock_state_builder, true); // open the remote image librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockOpenImageRequest mock_open_image_request; expect_open_image(mock_open_image_request, m_remote_io_ctx, mock_remote_image_ctx.id, mock_remote_image_ctx, 0); // open the local image librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockOpenLocalImageRequest mock_open_local_image_request; expect_open_local_image(mock_open_local_image_request, m_local_io_ctx, mock_local_image_ctx.id, &mock_local_image_ctx, -EREMOTEIO); // close remote image expect_replay_requires_remote_image(mock_state_builder, false); expect_close_remote_image(mock_state_builder, 0); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockInstanceWatcher mock_instance_watcher; MockBootstrapRequest *request = create_request( &mock_threads, &mock_instance_watcher, "global image id", "local mirror uuid", &ctx); request->send(); ASSERT_EQ(-EREMOTEIO, ctx.wait()); } TEST_F(TestMockImageReplayerBootstrapRequest, CreateLocalImageError) { InSequence seq; // prepare local image MockPrepareLocalImageRequest mock_prepare_local_image_request; MockStateBuilder mock_state_builder; expect_send(mock_prepare_local_image_request, mock_state_builder, "", "", -ENOENT); // prepare remote image MockPrepareRemoteImageRequest mock_prepare_remote_image_request; expect_send(mock_prepare_remote_image_request, mock_state_builder, "remote mirror uuid", m_remote_image_ctx->id, 0); expect_is_local_primary(mock_state_builder, false); expect_is_remote_primary(mock_state_builder, true); // open the remote image librbd::MockTestImageCtx 
mock_remote_image_ctx(*m_remote_image_ctx); MockOpenImageRequest mock_open_image_request; expect_open_image(mock_open_image_request, m_remote_io_ctx, mock_remote_image_ctx.id, mock_remote_image_ctx, 0); // create local image expect_create_local_image(mock_state_builder, "local image id", -EINVAL); // close remote image expect_replay_requires_remote_image(mock_state_builder, false); expect_close_remote_image(mock_state_builder, 0); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockInstanceWatcher mock_instance_watcher; MockBootstrapRequest *request = create_request( &mock_threads, &mock_instance_watcher, "global image id", "local mirror uuid", &ctx); request->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerBootstrapRequest, PrepareReplayError) { InSequence seq; // prepare local image MockPrepareLocalImageRequest mock_prepare_local_image_request; MockStateBuilder mock_state_builder; expect_send(mock_prepare_local_image_request, mock_state_builder, m_local_image_ctx->id, m_local_image_ctx->name, 0); // prepare remote image MockPrepareRemoteImageRequest mock_prepare_remote_image_request; expect_send(mock_prepare_remote_image_request, mock_state_builder, "remote mirror uuid", m_remote_image_ctx->id, 0); expect_is_local_primary(mock_state_builder, false); expect_is_remote_primary(mock_state_builder, true); // open the remote image librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockOpenImageRequest mock_open_image_request; expect_open_image(mock_open_image_request, m_remote_io_ctx, mock_remote_image_ctx.id, mock_remote_image_ctx, 0); // open the local image librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockOpenLocalImageRequest mock_open_local_image_request; expect_open_local_image(mock_open_local_image_request, m_local_io_ctx, mock_local_image_ctx.id, &mock_local_image_ctx, 0); // prepare replay expect_prepare_replay(mock_state_builder, false, false, -EINVAL); // close remote image 
expect_replay_requires_remote_image(mock_state_builder, false); expect_close_remote_image(mock_state_builder, 0); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockInstanceWatcher mock_instance_watcher; MockBootstrapRequest *request = create_request( &mock_threads, &mock_instance_watcher, "global image id", "local mirror uuid", &ctx); request->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerBootstrapRequest, PrepareReplayResyncRequested) { InSequence seq; // prepare local image MockPrepareLocalImageRequest mock_prepare_local_image_request; MockStateBuilder mock_state_builder; expect_send(mock_prepare_local_image_request, mock_state_builder, m_local_image_ctx->id, m_local_image_ctx->name, 0); // prepare remote image MockPrepareRemoteImageRequest mock_prepare_remote_image_request; expect_send(mock_prepare_remote_image_request, mock_state_builder, "remote mirror uuid", m_remote_image_ctx->id, 0); expect_is_local_primary(mock_state_builder, false); expect_is_remote_primary(mock_state_builder, true); // open the remote image librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockOpenImageRequest mock_open_image_request; expect_open_image(mock_open_image_request, m_remote_io_ctx, mock_remote_image_ctx.id, mock_remote_image_ctx, 0); // open the local image librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockOpenLocalImageRequest mock_open_local_image_request; expect_open_local_image(mock_open_local_image_request, m_local_io_ctx, mock_local_image_ctx.id, &mock_local_image_ctx, 0); // prepare replay expect_prepare_replay(mock_state_builder, true, false, 0); // close remote image expect_replay_requires_remote_image(mock_state_builder, false); expect_close_remote_image(mock_state_builder, 0); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockInstanceWatcher mock_instance_watcher; MockBootstrapRequest *request = create_request( &mock_threads, &mock_instance_watcher, "global image id", "local mirror 
uuid", &ctx); request->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_TRUE(m_do_resync); } TEST_F(TestMockImageReplayerBootstrapRequest, PrepareReplaySyncing) { InSequence seq; // prepare local image MockPrepareLocalImageRequest mock_prepare_local_image_request; MockStateBuilder mock_state_builder; expect_send(mock_prepare_local_image_request, mock_state_builder, m_local_image_ctx->id, m_local_image_ctx->name, 0); // prepare remote image MockPrepareRemoteImageRequest mock_prepare_remote_image_request; expect_send(mock_prepare_remote_image_request, mock_state_builder, "remote mirror uuid", m_remote_image_ctx->id, 0); expect_is_local_primary(mock_state_builder, false); expect_is_remote_primary(mock_state_builder, true); // open the remote image librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockOpenImageRequest mock_open_image_request; expect_open_image(mock_open_image_request, m_remote_io_ctx, mock_remote_image_ctx.id, mock_remote_image_ctx, 0); // open the local image librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockOpenLocalImageRequest mock_open_local_image_request; expect_open_local_image(mock_open_local_image_request, m_local_io_ctx, mock_local_image_ctx.id, &mock_local_image_ctx, 0); // prepare replay expect_prepare_replay(mock_state_builder, false, true, 0); expect_is_disconnected(mock_state_builder, false); // image sync MockImageSync mock_image_sync; expect_image_sync(mock_image_sync, 0); // close remote image expect_replay_requires_remote_image(mock_state_builder, false); expect_close_remote_image(mock_state_builder, 0); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockInstanceWatcher mock_instance_watcher; MockBootstrapRequest *request = create_request( &mock_threads, &mock_instance_watcher, "global image id", "local mirror uuid", &ctx); request->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageReplayerBootstrapRequest, PrepareReplayDisconnected) { InSequence seq; // prepare local image 
MockPrepareLocalImageRequest mock_prepare_local_image_request; MockStateBuilder mock_state_builder; expect_send(mock_prepare_local_image_request, mock_state_builder, m_local_image_ctx->id, m_local_image_ctx->name, 0); // prepare remote image MockPrepareRemoteImageRequest mock_prepare_remote_image_request; expect_send(mock_prepare_remote_image_request, mock_state_builder, "remote mirror uuid", m_remote_image_ctx->id, 0); expect_is_local_primary(mock_state_builder, false); expect_is_remote_primary(mock_state_builder, true); // open the remote image librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockOpenImageRequest mock_open_image_request; expect_open_image(mock_open_image_request, m_remote_io_ctx, mock_remote_image_ctx.id, mock_remote_image_ctx, 0); // open the local image librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockOpenLocalImageRequest mock_open_local_image_request; expect_open_local_image(mock_open_local_image_request, m_local_io_ctx, mock_local_image_ctx.id, &mock_local_image_ctx, 0); // prepare replay expect_prepare_replay(mock_state_builder, false, false, 0); expect_is_disconnected(mock_state_builder, true); // close remote image expect_replay_requires_remote_image(mock_state_builder, false); expect_close_remote_image(mock_state_builder, 0); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockInstanceWatcher mock_instance_watcher; MockBootstrapRequest *request = create_request( &mock_threads, &mock_instance_watcher, "global image id", "local mirror uuid", &ctx); request->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageReplayerBootstrapRequest, ImageSyncError) { InSequence seq; // prepare local image MockPrepareLocalImageRequest mock_prepare_local_image_request; MockStateBuilder mock_state_builder; expect_send(mock_prepare_local_image_request, mock_state_builder, m_local_image_ctx->id, m_local_image_ctx->name, 0); // prepare remote image MockPrepareRemoteImageRequest 
mock_prepare_remote_image_request; expect_send(mock_prepare_remote_image_request, mock_state_builder, "remote mirror uuid", m_remote_image_ctx->id, 0); expect_is_local_primary(mock_state_builder, false); expect_is_remote_primary(mock_state_builder, true); // open the remote image librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockOpenImageRequest mock_open_image_request; expect_open_image(mock_open_image_request, m_remote_io_ctx, mock_remote_image_ctx.id, mock_remote_image_ctx, 0); // open the local image librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockOpenLocalImageRequest mock_open_local_image_request; expect_open_local_image(mock_open_local_image_request, m_local_io_ctx, mock_local_image_ctx.id, &mock_local_image_ctx, 0); // prepare replay expect_prepare_replay(mock_state_builder, false, true, 0); expect_is_disconnected(mock_state_builder, false); // image sync MockImageSync mock_image_sync; expect_image_sync(mock_image_sync, -EINVAL); // close remote image expect_replay_requires_remote_image(mock_state_builder, false); expect_close_remote_image(mock_state_builder, 0); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockInstanceWatcher mock_instance_watcher; MockBootstrapRequest *request = create_request( &mock_threads, &mock_instance_watcher, "global image id", "local mirror uuid", &ctx); request->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerBootstrapRequest, ImageSyncCanceled) { InSequence seq; // prepare local image MockPrepareLocalImageRequest mock_prepare_local_image_request; MockStateBuilder mock_state_builder; expect_send(mock_prepare_local_image_request, mock_state_builder, m_local_image_ctx->id, m_local_image_ctx->name, 0); // prepare remote image MockPrepareRemoteImageRequest mock_prepare_remote_image_request; expect_send(mock_prepare_remote_image_request, mock_state_builder, "remote mirror uuid", m_remote_image_ctx->id, 0); expect_is_local_primary(mock_state_builder, false); 
expect_is_remote_primary(mock_state_builder, true); // open the remote image librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockOpenImageRequest mock_open_image_request; expect_open_image(mock_open_image_request, m_remote_io_ctx, mock_remote_image_ctx.id, mock_remote_image_ctx, 0); // open the local image librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockOpenLocalImageRequest mock_open_local_image_request; expect_open_local_image(mock_open_local_image_request, m_local_io_ctx, mock_local_image_ctx.id, &mock_local_image_ctx, 0); // prepare replay expect_prepare_replay(mock_state_builder, false, true, 0); expect_is_disconnected(mock_state_builder, false); // close remote image expect_replay_requires_remote_image(mock_state_builder, false); expect_close_remote_image(mock_state_builder, 0); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockInstanceWatcher mock_instance_watcher; MockBootstrapRequest *request = create_request( &mock_threads, &mock_instance_watcher, "global image id", "local mirror uuid", &ctx); request->cancel(); request->send(); ASSERT_EQ(-ECANCELED, ctx.wait()); } TEST_F(TestMockImageReplayerBootstrapRequest, CloseRemoteImageError) { InSequence seq; // prepare local image MockPrepareLocalImageRequest mock_prepare_local_image_request; MockStateBuilder mock_state_builder; expect_send(mock_prepare_local_image_request, mock_state_builder, m_local_image_ctx->id, m_local_image_ctx->name, 0); // prepare remote image MockPrepareRemoteImageRequest mock_prepare_remote_image_request; expect_send(mock_prepare_remote_image_request, mock_state_builder, "remote mirror uuid", m_remote_image_ctx->id, 0); expect_is_local_primary(mock_state_builder, false); expect_is_remote_primary(mock_state_builder, true); // open the remote image librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockOpenImageRequest mock_open_image_request; expect_open_image(mock_open_image_request, m_remote_io_ctx, 
mock_remote_image_ctx.id, mock_remote_image_ctx, 0); // open the local image librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockOpenLocalImageRequest mock_open_local_image_request; expect_open_local_image(mock_open_local_image_request, m_local_io_ctx, mock_local_image_ctx.id, &mock_local_image_ctx, 0); // prepare replay expect_prepare_replay(mock_state_builder, false, false, 0); expect_is_disconnected(mock_state_builder, false); // attempt to close remote image expect_replay_requires_remote_image(mock_state_builder, false); expect_close_remote_image(mock_state_builder, -EINVAL); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockInstanceWatcher mock_instance_watcher; MockBootstrapRequest *request = create_request( &mock_threads, &mock_instance_watcher, "global image id", "local mirror uuid", &ctx); request->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageReplayerBootstrapRequest, ReplayRequiresRemoteImage) { InSequence seq; // prepare local image MockPrepareLocalImageRequest mock_prepare_local_image_request; MockStateBuilder mock_state_builder; expect_send(mock_prepare_local_image_request, mock_state_builder, m_local_image_ctx->id, m_local_image_ctx->name, 0); // prepare remote image MockPrepareRemoteImageRequest mock_prepare_remote_image_request; expect_send(mock_prepare_remote_image_request, mock_state_builder, "remote mirror uuid", m_remote_image_ctx->id, 0); expect_is_local_primary(mock_state_builder, false); expect_is_remote_primary(mock_state_builder, true); // open the remote image librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockOpenImageRequest mock_open_image_request; expect_open_image(mock_open_image_request, m_remote_io_ctx, mock_remote_image_ctx.id, mock_remote_image_ctx, 0); // open the local image librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); MockOpenLocalImageRequest mock_open_local_image_request; expect_open_local_image(mock_open_local_image_request, m_local_io_ctx, 
mock_local_image_ctx.id, &mock_local_image_ctx, 0); // prepare replay expect_prepare_replay(mock_state_builder, false, false, 0); expect_is_disconnected(mock_state_builder, false); // remote image is left open expect_replay_requires_remote_image(mock_state_builder, true); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockInstanceWatcher mock_instance_watcher; MockBootstrapRequest *request = create_request( &mock_threads, &mock_instance_watcher, "global image id", "local mirror uuid", &ctx); request->send(); ASSERT_EQ(0, ctx.wait()); } } // namespace image_replayer } // namespace mirror } // namespace rbd
46,238
37.661371
98
cc
null
ceph-main/src/test/rbd_mirror/image_replayer/test_mock_CreateImageRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "test/rbd_mirror/test_mock_fixture.h"
#include "include/rbd/librbd.hpp"
#include "librbd/ImageState.h"
#include "librbd/Operations.h"
#include "test/librados_test_stub/MockTestMemIoCtxImpl.h"
#include "test/librados_test_stub/MockTestMemRadosClient.h"
#include "test/librbd/mock/MockImageCtx.h"
#include "tools/rbd_mirror/PoolMetaCache.h"
#include "tools/rbd_mirror/image_replayer/CreateImageRequest.h"
#include "tools/rbd_mirror/image_replayer/CloseImageRequest.h"
#include "tools/rbd_mirror/image_replayer/OpenImageRequest.h"
#include "tools/rbd_mirror/image_replayer/OpenLocalImageRequest.h"
#include "librbd/image/CreateRequest.h"
#include "librbd/image/CloneRequest.h"
#include "tools/rbd_mirror/Threads.h"

namespace librbd {
namespace {

// Test-only image context type used to select the mock template
// specializations below.
struct MockTestImageCtx : public librbd::MockImageCtx {
  explicit MockTestImageCtx(librbd::ImageCtx &image_ctx)
    : librbd::MockImageCtx(image_ctx) {
  }
};

} // anonymous namespace

namespace image {

// Mock specialization of librbd::image::CreateRequest.  The singleton
// s_instance is installed by the constructor so that the static create()
// factory can route calls back to the gmock expectations.
template<>
struct CreateRequest<librbd::MockTestImageCtx> {
  static CreateRequest *s_instance;
  Context *on_finish = nullptr;

  static CreateRequest *create(const ConfigProxy& config, IoCtx &ioctx,
                               const std::string &imgname,
                               const std::string &imageid, uint64_t size,
                               const librbd::ImageOptions &image_options,
                               bool skip_mirror_enable,
                               cls::rbd::MirrorImageMode mode,
                               const std::string &non_primary_global_image_id,
                               const std::string &primary_mirror_uuid,
                               MockContextWQ *op_work_queue,
                               Context *on_finish) {
    ceph_assert(s_instance != nullptr);
    // validate mirror-related arguments supplied by the code under test
    EXPECT_FALSE(non_primary_global_image_id.empty());
    EXPECT_FALSE(primary_mirror_uuid.empty());
    EXPECT_FALSE(skip_mirror_enable);
    s_instance->on_finish = on_finish;
    s_instance->construct(ioctx);
    return s_instance;
  }

  CreateRequest() {
    s_instance = this;
  }
  ~CreateRequest() {
    s_instance = nullptr;
  }

  MOCK_METHOD0(send, void());
  MOCK_METHOD1(construct, void(librados::IoCtx &ioctx));
};

CreateRequest<librbd::MockTestImageCtx>*
  CreateRequest<librbd::MockTestImageCtx>::s_instance = nullptr;

// Mock specialization of librbd::image::CloneRequest (same singleton
// factory pattern as CreateRequest above).
template<>
struct CloneRequest<librbd::MockTestImageCtx> {
  static CloneRequest *s_instance;
  Context *on_finish = nullptr;

  static CloneRequest *create(ConfigProxy& config, IoCtx &p_ioctx,
                              const std::string &p_id,
                              const std::string &p_snap_name,
                              const cls::rbd::SnapshotNamespace& snap_ns,
                              uint64_t p_snap_id,
                              IoCtx &c_ioctx, const std::string &c_name,
                              const std::string &c_id, ImageOptions c_options,
                              cls::rbd::MirrorImageMode mode,
                              const std::string &non_primary_global_image_id,
                              const std::string &primary_mirror_uuid,
                              MockContextWQ *op_work_queue,
                              Context *on_finish) {
    ceph_assert(s_instance != nullptr);
    s_instance->on_finish = on_finish;
    s_instance->construct();
    return s_instance;
  }

  CloneRequest() {
    s_instance = this;
  }
  ~CloneRequest() {
    s_instance = nullptr;
  }

  MOCK_METHOD0(send, void());
  MOCK_METHOD0(construct, void());
};

CloneRequest<librbd::MockTestImageCtx>*
  CloneRequest<librbd::MockTestImageCtx>::s_instance = nullptr;

} // namespace image
} // namespace librbd

namespace rbd {
namespace mirror {

// Thin Threads adapter exposing the real fixture's timer/work-queue to
// code templated on MockTestImageCtx.
template <>
struct Threads<librbd::MockTestImageCtx> {
  ceph::mutex &timer_lock;
  SafeTimer *timer;
  librbd::asio::ContextWQ *work_queue;

  Threads(Threads<librbd::ImageCtx> *threads)
    : timer_lock(threads->timer_lock), timer(threads->timer),
      work_queue(threads->work_queue) {
  }
};

namespace image_replayer {

// Mock specialization of CloseImageRequest; its ctor asserts no other
// instance is live, so at most one close request exists at a time.
template<>
struct CloseImageRequest<librbd::MockTestImageCtx> {
  static CloseImageRequest* s_instance;
  Context *on_finish = nullptr;

  static CloseImageRequest* create(librbd::MockTestImageCtx **image_ctx,
                                   Context *on_finish) {
    ceph_assert(s_instance != nullptr);
    s_instance->construct(*image_ctx);
    s_instance->on_finish = on_finish;
    return s_instance;
  }

  CloseImageRequest() {
    ceph_assert(s_instance == nullptr);
    s_instance = this;
  }
  ~CloseImageRequest() {
    s_instance = nullptr;
  }

  MOCK_METHOD1(construct, void(librbd::MockTestImageCtx *image_ctx));
  MOCK_METHOD0(send, void());
};
template<> struct OpenImageRequest<librbd::MockTestImageCtx> { static OpenImageRequest* s_instance; librbd::MockTestImageCtx **image_ctx = nullptr; Context *on_finish = nullptr; static OpenImageRequest* create(librados::IoCtx &io_ctx, librbd::MockTestImageCtx **image_ctx, const std::string &image_id, bool read_only, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->image_ctx = image_ctx; s_instance->on_finish = on_finish; s_instance->construct(io_ctx, image_id); return s_instance; } OpenImageRequest() { ceph_assert(s_instance == nullptr); s_instance = this; } ~OpenImageRequest() { s_instance = nullptr; } MOCK_METHOD2(construct, void(librados::IoCtx &io_ctx, const std::string &image_id)); MOCK_METHOD0(send, void()); }; CloseImageRequest<librbd::MockTestImageCtx>* CloseImageRequest<librbd::MockTestImageCtx>::s_instance = nullptr; OpenImageRequest<librbd::MockTestImageCtx>* OpenImageRequest<librbd::MockTestImageCtx>::s_instance = nullptr; } // namespace image_replayer } // namespace mirror } // namespace rbd // template definitions #include "tools/rbd_mirror/image_replayer/CreateImageRequest.cc" template class rbd::mirror::image_replayer::CreateImageRequest<librbd::MockTestImageCtx>; namespace rbd { namespace mirror { namespace image_replayer { using ::testing::_; using ::testing::DoAll; using ::testing::InSequence; using ::testing::Invoke; using ::testing::Return; using ::testing::StrEq; using ::testing::WithArg; MATCHER_P(IsSameIoCtx, io_ctx, "") { return &get_mock_io_ctx(arg) == &get_mock_io_ctx(*io_ctx); } class TestMockImageReplayerCreateImageRequest : public TestMockFixture { public: typedef Threads<librbd::MockTestImageCtx> MockThreads; typedef librbd::image::CreateRequest<librbd::MockTestImageCtx> MockCreateRequest; typedef librbd::image::CloneRequest<librbd::MockTestImageCtx> MockCloneRequest; typedef CreateImageRequest<librbd::MockTestImageCtx> MockCreateImageRequest; typedef OpenImageRequest<librbd::MockTestImageCtx> 
MockOpenImageRequest; typedef CloseImageRequest<librbd::MockTestImageCtx> MockCloseImageRequest; void SetUp() override { TestMockFixture::SetUp(); librbd::RBD rbd; ASSERT_EQ(0, create_image(rbd, m_remote_io_ctx, m_image_name, m_image_size)); ASSERT_EQ(0, open_image(m_remote_io_ctx, m_image_name, &m_remote_image_ctx)); } void snap_create(librbd::ImageCtx *image_ctx, const std::string &snap_name) { librbd::NoOpProgressContext prog_ctx; ASSERT_EQ(0, image_ctx->operations->snap_create(cls::rbd::UserSnapshotNamespace(), snap_name, 0, prog_ctx)); ASSERT_EQ(0, image_ctx->operations->snap_protect(cls::rbd::UserSnapshotNamespace(), snap_name)); ASSERT_EQ(0, image_ctx->state->refresh()); } int clone_image(librbd::ImageCtx *parent_image_ctx, const std::string &snap_name, const std::string &clone_name) { snap_create(parent_image_ctx, snap_name); int order = 0; return librbd::clone(m_remote_io_ctx, parent_image_ctx->name.c_str(), snap_name.c_str(), m_remote_io_ctx, clone_name.c_str(), parent_image_ctx->features, &order, 0, 0); } void expect_create_image(MockCreateRequest &mock_create_request, librados::IoCtx &ioctx, int r) { EXPECT_CALL(mock_create_request, construct(IsSameIoCtx(&ioctx))); EXPECT_CALL(mock_create_request, send()) .WillOnce(Invoke([this, &mock_create_request, r]() { m_threads->work_queue->queue(mock_create_request.on_finish, r); })); } void expect_ioctx_create(librados::IoCtx &io_ctx) { librados::MockTestMemIoCtxImpl &io_ctx_impl = get_mock_io_ctx(io_ctx); EXPECT_CALL(*get_mock_io_ctx(io_ctx).get_mock_rados_client(), create_ioctx(_, _)) .WillOnce(DoAll(GetReference(&io_ctx_impl), Return(&get_mock_io_ctx(io_ctx)))); } void expect_get_parent_global_image_id(librados::IoCtx &io_ctx, const std::string &global_id, int r) { cls::rbd::MirrorImage mirror_image; mirror_image.global_image_id = global_id; bufferlist bl; encode(mirror_image, bl); EXPECT_CALL(get_mock_io_ctx(io_ctx), exec(RBD_MIRRORING, _, StrEq("rbd"), StrEq("mirror_image_get"), _, _, _, _)) 
.WillOnce(DoAll(WithArg<5>(Invoke([bl](bufferlist *out_bl) { *out_bl = bl; })), Return(r))); } void expect_mirror_image_get_image_id(librados::IoCtx &io_ctx, const std::string &image_id, int r) { bufferlist bl; encode(image_id, bl); EXPECT_CALL(get_mock_io_ctx(io_ctx), exec(RBD_MIRRORING, _, StrEq("rbd"), StrEq("mirror_image_get_image_id"), _, _, _, _)) .WillOnce(DoAll(WithArg<5>(Invoke([bl](bufferlist *out_bl) { *out_bl = bl; })), Return(r))); } void expect_open_image(MockOpenImageRequest &mock_open_image_request, librados::IoCtx &io_ctx, const std::string &image_id, librbd::MockTestImageCtx &mock_image_ctx, int r) { EXPECT_CALL(mock_open_image_request, construct(IsSameIoCtx(&io_ctx), image_id)); EXPECT_CALL(mock_open_image_request, send()) .WillOnce(Invoke([this, &mock_open_image_request, &mock_image_ctx, r]() { *mock_open_image_request.image_ctx = &mock_image_ctx; m_threads->work_queue->queue(mock_open_image_request.on_finish, r); })); } void expect_test_op_features(librbd::MockTestImageCtx& mock_image_ctx, bool enabled) { EXPECT_CALL(mock_image_ctx, test_op_features(RBD_OPERATION_FEATURE_CLONE_CHILD)) .WillOnce(Return(enabled)); } void expect_clone_image(MockCloneRequest &mock_clone_request, int r) { EXPECT_CALL(mock_clone_request, construct()); EXPECT_CALL(mock_clone_request, send()) .WillOnce(Invoke([this, &mock_clone_request, r]() { m_threads->work_queue->queue(mock_clone_request.on_finish, r); })); } void expect_close_image(MockCloseImageRequest &mock_close_image_request, librbd::MockTestImageCtx &mock_image_ctx, int r) { EXPECT_CALL(mock_close_image_request, construct(&mock_image_ctx)); EXPECT_CALL(mock_close_image_request, send()) .WillOnce(Invoke([this, &mock_close_image_request, r]() { m_threads->work_queue->queue(mock_close_image_request.on_finish, r); })); } MockCreateImageRequest *create_request(MockThreads* mock_threads, const std::string &global_image_id, const std::string &remote_mirror_uuid, const std::string &local_image_name, const std::string 
&local_image_id, librbd::MockTestImageCtx &mock_remote_image_ctx, Context *on_finish) { return new MockCreateImageRequest(mock_threads, m_local_io_ctx, global_image_id, remote_mirror_uuid, local_image_name, local_image_id, &mock_remote_image_ctx, &m_pool_meta_cache, cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, on_finish); } PoolMetaCache m_pool_meta_cache{g_ceph_context}; librbd::ImageCtx *m_remote_image_ctx; }; TEST_F(TestMockImageReplayerCreateImageRequest, Create) { librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockCreateRequest mock_create_request; InSequence seq; expect_create_image(mock_create_request, m_local_io_ctx, 0); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockCreateImageRequest *request = create_request(&mock_threads, "global uuid", "remote uuid", "image name", "101241a7c4c9", mock_remote_image_ctx, &ctx); request->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageReplayerCreateImageRequest, CreateError) { librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockCreateRequest mock_create_request; InSequence seq; expect_create_image(mock_create_request, m_local_io_ctx, -EINVAL); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockCreateImageRequest *request = create_request(&mock_threads, "global uuid", "remote uuid", "image name", "101241a7c4c9", mock_remote_image_ctx, &ctx); request->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerCreateImageRequest, CloneGetGlobalImageIdError) { std::string clone_image_name = get_temp_image_name(); ASSERT_EQ(0, clone_image(m_remote_image_ctx, "snap", clone_image_name)); librbd::ImageCtx *remote_clone_image_ctx; ASSERT_EQ(0, open_image(m_remote_io_ctx, clone_image_name, &remote_clone_image_ctx)); librbd::MockTestImageCtx mock_remote_clone_image_ctx(*remote_clone_image_ctx); InSequence seq; expect_ioctx_create(m_remote_io_ctx); expect_ioctx_create(m_local_io_ctx); expect_get_parent_global_image_id(m_remote_io_ctx, "global uuid", -ENOENT); 
C_SaferCond ctx; MockThreads mock_threads(m_threads); MockCreateImageRequest *request = create_request(&mock_threads, "global uuid", "remote uuid", "image name", "101241a7c4c9", mock_remote_clone_image_ctx, &ctx); request->send(); ASSERT_EQ(-ENOENT, ctx.wait()); } TEST_F(TestMockImageReplayerCreateImageRequest, CloneGetLocalParentImageIdError) { std::string clone_image_name = get_temp_image_name(); ASSERT_EQ(0, clone_image(m_remote_image_ctx, "snap", clone_image_name)); librbd::ImageCtx *remote_clone_image_ctx; ASSERT_EQ(0, open_image(m_remote_io_ctx, clone_image_name, &remote_clone_image_ctx)); librbd::MockTestImageCtx mock_remote_clone_image_ctx(*remote_clone_image_ctx); InSequence seq; expect_ioctx_create(m_remote_io_ctx); expect_ioctx_create(m_local_io_ctx); expect_get_parent_global_image_id(m_remote_io_ctx, "global uuid", 0); expect_mirror_image_get_image_id(m_local_io_ctx, "local parent id", -ENOENT); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockCreateImageRequest *request = create_request(&mock_threads, "global uuid", "remote uuid", "image name", "101241a7c4c9", mock_remote_clone_image_ctx, &ctx); request->send(); ASSERT_EQ(-ENOENT, ctx.wait()); } TEST_F(TestMockImageReplayerCreateImageRequest, CloneOpenRemoteParentError) { std::string clone_image_name = get_temp_image_name(); ASSERT_EQ(0, clone_image(m_remote_image_ctx, "snap", clone_image_name)); librbd::ImageCtx *remote_clone_image_ctx; ASSERT_EQ(0, open_image(m_remote_io_ctx, clone_image_name, &remote_clone_image_ctx)); librbd::MockTestImageCtx mock_remote_parent_image_ctx(*m_remote_image_ctx); librbd::MockTestImageCtx mock_remote_clone_image_ctx(*remote_clone_image_ctx); MockOpenImageRequest mock_open_image_request; InSequence seq; expect_ioctx_create(m_remote_io_ctx); expect_ioctx_create(m_local_io_ctx); expect_get_parent_global_image_id(m_remote_io_ctx, "global uuid", 0); expect_mirror_image_get_image_id(m_local_io_ctx, "local parent id", 0); expect_open_image(mock_open_image_request, 
m_remote_io_ctx, m_remote_image_ctx->id, mock_remote_parent_image_ctx, -ENOENT); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockCreateImageRequest *request = create_request(&mock_threads, "global uuid", "remote uuid", "image name", "101241a7c4c9", mock_remote_clone_image_ctx, &ctx); request->send(); ASSERT_EQ(-ENOENT, ctx.wait()); } TEST_F(TestMockImageReplayerCreateImageRequest, CloneParentImageSyncing) { librbd::RBD rbd; librbd::ImageCtx *local_image_ctx; ASSERT_EQ(0, create_image(rbd, m_local_io_ctx, m_image_name, m_image_size)); ASSERT_EQ(0, open_image(m_local_io_ctx, m_image_name, &local_image_ctx)); snap_create(local_image_ctx, "snap"); snap_create(m_remote_image_ctx, ".rbd-mirror.local parent uuid.1234"); std::string clone_image_name = get_temp_image_name(); ASSERT_EQ(0, clone_image(m_remote_image_ctx, "snap", clone_image_name)); librbd::ImageCtx *remote_clone_image_ctx; ASSERT_EQ(0, open_image(m_remote_io_ctx, clone_image_name, &remote_clone_image_ctx)); m_pool_meta_cache.set_local_pool_meta( m_local_io_ctx.get_id(), {"local parent uuid"}); librbd::MockTestImageCtx mock_remote_parent_image_ctx(*m_remote_image_ctx); librbd::MockTestImageCtx mock_remote_clone_image_ctx(*remote_clone_image_ctx); MockOpenImageRequest mock_open_image_request; MockCloseImageRequest mock_close_image_request; InSequence seq; expect_ioctx_create(m_remote_io_ctx); expect_ioctx_create(m_local_io_ctx); expect_get_parent_global_image_id(m_remote_io_ctx, "global uuid", 0); expect_mirror_image_get_image_id(m_local_io_ctx, "local parent id", 0); expect_open_image(mock_open_image_request, m_remote_io_ctx, m_remote_image_ctx->id, mock_remote_parent_image_ctx, 0); expect_close_image(mock_close_image_request, mock_remote_parent_image_ctx, 0); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockCreateImageRequest *request = create_request(&mock_threads, "global uuid", "remote uuid", "image name", "101241a7c4c9", mock_remote_clone_image_ctx, &ctx); request->send(); 
ASSERT_EQ(-ENOENT, ctx.wait()); } TEST_F(TestMockImageReplayerCreateImageRequest, CloneError) { librbd::RBD rbd; librbd::ImageCtx *local_image_ctx; ASSERT_EQ(0, create_image(rbd, m_local_io_ctx, m_image_name, m_image_size)); ASSERT_EQ(0, open_image(m_local_io_ctx, m_image_name, &local_image_ctx)); snap_create(local_image_ctx, "snap"); std::string clone_image_name = get_temp_image_name(); ASSERT_EQ(0, clone_image(m_remote_image_ctx, "snap", clone_image_name)); librbd::ImageCtx *remote_clone_image_ctx; ASSERT_EQ(0, open_image(m_remote_io_ctx, clone_image_name, &remote_clone_image_ctx)); m_pool_meta_cache.set_local_pool_meta( m_local_io_ctx.get_id(), {"local parent uuid"}); librbd::MockTestImageCtx mock_remote_parent_image_ctx(*m_remote_image_ctx); librbd::MockTestImageCtx mock_remote_clone_image_ctx(*remote_clone_image_ctx); MockCloneRequest mock_clone_request; MockOpenImageRequest mock_open_image_request; MockCloseImageRequest mock_close_image_request; InSequence seq; expect_ioctx_create(m_remote_io_ctx); expect_ioctx_create(m_local_io_ctx); expect_get_parent_global_image_id(m_remote_io_ctx, "global uuid", 0); expect_mirror_image_get_image_id(m_local_io_ctx, "local parent id", 0); expect_open_image(mock_open_image_request, m_remote_io_ctx, m_remote_image_ctx->id, mock_remote_parent_image_ctx, 0); expect_test_op_features(mock_remote_clone_image_ctx, false); expect_clone_image(mock_clone_request, -EINVAL); expect_close_image(mock_close_image_request, mock_remote_parent_image_ctx, 0); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockCreateImageRequest *request = create_request(&mock_threads, "global uuid", "remote uuid", "image name", "101241a7c4c9", mock_remote_clone_image_ctx, &ctx); request->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerCreateImageRequest, CloneRemoteParentCloseError) { librbd::RBD rbd; librbd::ImageCtx *local_image_ctx; ASSERT_EQ(0, create_image(rbd, m_local_io_ctx, m_image_name, m_image_size)); ASSERT_EQ(0, 
open_image(m_local_io_ctx, m_image_name, &local_image_ctx)); snap_create(local_image_ctx, "snap"); std::string clone_image_name = get_temp_image_name(); ASSERT_EQ(0, clone_image(m_remote_image_ctx, "snap", clone_image_name)); librbd::ImageCtx *remote_clone_image_ctx; ASSERT_EQ(0, open_image(m_remote_io_ctx, clone_image_name, &remote_clone_image_ctx)); m_pool_meta_cache.set_local_pool_meta( m_local_io_ctx.get_id(), {"local parent uuid"}); librbd::MockTestImageCtx mock_remote_parent_image_ctx(*m_remote_image_ctx); librbd::MockTestImageCtx mock_remote_clone_image_ctx(*remote_clone_image_ctx); MockCloneRequest mock_clone_request; MockOpenImageRequest mock_open_image_request; MockCloseImageRequest mock_close_image_request; InSequence seq; expect_ioctx_create(m_remote_io_ctx); expect_ioctx_create(m_local_io_ctx); expect_get_parent_global_image_id(m_remote_io_ctx, "global uuid", 0); expect_mirror_image_get_image_id(m_local_io_ctx, "local parent id", 0); expect_open_image(mock_open_image_request, m_remote_io_ctx, m_remote_image_ctx->id, mock_remote_parent_image_ctx, 0); expect_test_op_features(mock_remote_clone_image_ctx, false); expect_clone_image(mock_clone_request, 0); expect_close_image(mock_close_image_request, mock_remote_parent_image_ctx, -EINVAL); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockCreateImageRequest *request = create_request(&mock_threads, "global uuid", "remote uuid", "image name", "101241a7c4c9", mock_remote_clone_image_ctx, &ctx); request->send(); ASSERT_EQ(0, ctx.wait()); } } // namespace image_replayer } // namespace mirror } // namespace rbd
24,010
38.042276
89
cc
null
ceph-main/src/test/rbd_mirror/image_replayer/test_mock_GetMirrorImageIdRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/rbd_mirror/test_mock_fixture.h" #include "cls/rbd/cls_rbd_types.h" #include "librbd/journal/TypeTraits.h" #include "tools/rbd_mirror/image_replayer/GetMirrorImageIdRequest.h" #include "test/journal/mock/MockJournaler.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/librbd/mock/MockJournal.h" namespace librbd { namespace { struct MockTestImageCtx : public librbd::MockImageCtx { MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace } // namespace librbd // template definitions #include "tools/rbd_mirror/image_replayer/GetMirrorImageIdRequest.cc" namespace rbd { namespace mirror { namespace image_replayer { using ::testing::_; using ::testing::DoAll; using ::testing::InSequence; using ::testing::Invoke; using ::testing::Return; using ::testing::StrEq; using ::testing::WithArg; using ::testing::WithArgs; class TestMockImageReplayerGetMirrorImageIdRequest : public TestMockFixture { public: typedef GetMirrorImageIdRequest<librbd::MockTestImageCtx> MockGetMirrorImageIdRequest; void expect_mirror_image_get_image_id(librados::IoCtx &io_ctx, const std::string &image_id, int r) { bufferlist bl; encode(image_id, bl); EXPECT_CALL(get_mock_io_ctx(io_ctx), exec(RBD_MIRRORING, _, StrEq("rbd"), StrEq("mirror_image_get_image_id"), _, _, _, _)) .WillOnce(DoAll(WithArg<5>(Invoke([bl](bufferlist *out_bl) { *out_bl = bl; })), Return(r))); } }; TEST_F(TestMockImageReplayerGetMirrorImageIdRequest, Success) { InSequence seq; expect_mirror_image_get_image_id(m_local_io_ctx, "image id", 0); std::string image_id; C_SaferCond ctx; auto req = MockGetMirrorImageIdRequest::create(m_local_io_ctx, "global image id", &image_id, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_EQ(std::string("image id"), image_id); } 
TEST_F(TestMockImageReplayerGetMirrorImageIdRequest, MirrorImageIdDNE) { InSequence seq; expect_mirror_image_get_image_id(m_local_io_ctx, "", -ENOENT); std::string image_id; C_SaferCond ctx; auto req = MockGetMirrorImageIdRequest::create(m_local_io_ctx, "global image id", &image_id, &ctx); req->send(); ASSERT_EQ(-ENOENT, ctx.wait()); } TEST_F(TestMockImageReplayerGetMirrorImageIdRequest, MirrorImageIdError) { InSequence seq; expect_mirror_image_get_image_id(m_local_io_ctx, "", -EINVAL); std::string image_id; C_SaferCond ctx; auto req = MockGetMirrorImageIdRequest::create(m_local_io_ctx, "global image id", &image_id, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } } // namespace image_replayer } // namespace mirror } // namespace rbd
3,278
29.361111
88
cc
null
ceph-main/src/test/rbd_mirror/image_replayer/test_mock_PrepareLocalImageRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/rbd_mirror/test_mock_fixture.h" #include "cls/rbd/cls_rbd_types.h" #include "librbd/journal/TypeTraits.h" #include "librbd/mirror/GetInfoRequest.h" #include "tools/rbd_mirror/ImageDeleter.h" #include "tools/rbd_mirror/image_replayer/GetMirrorImageIdRequest.h" #include "tools/rbd_mirror/image_replayer/PrepareLocalImageRequest.h" #include "tools/rbd_mirror/image_replayer/StateBuilder.h" #include "tools/rbd_mirror/image_replayer/journal/StateBuilder.h" #include "tools/rbd_mirror/image_replayer/snapshot/StateBuilder.h" #include "test/journal/mock/MockJournaler.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/librbd/mock/MockJournal.h" namespace librbd { namespace { struct MockTestImageCtx : public librbd::MockImageCtx { MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace namespace mirror { template<> struct GetInfoRequest<librbd::MockTestImageCtx> { static GetInfoRequest* s_instance; cls::rbd::MirrorImage *mirror_image; PromotionState *promotion_state; std::string *primary_mirror_uuid; Context *on_finish = nullptr; static GetInfoRequest* create(librados::IoCtx& io_ctx, librbd::asio::ContextWQ* context_wq, const std::string& image_id, cls::rbd::MirrorImage *mirror_image, PromotionState *promotion_state, std::string* primary_mirror_uuid, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->mirror_image = mirror_image; s_instance->promotion_state = promotion_state; s_instance->primary_mirror_uuid = primary_mirror_uuid; s_instance->on_finish = on_finish; return s_instance; } GetInfoRequest() { ceph_assert(s_instance == nullptr); s_instance = this; } ~GetInfoRequest() { s_instance = nullptr; } MOCK_METHOD0(send, void()); }; GetInfoRequest<librbd::MockTestImageCtx>* GetInfoRequest<librbd::MockTestImageCtx>::s_instance = 
nullptr; } // namespace mirror } // namespace librbd namespace rbd { namespace mirror { template <> struct ImageDeleter<librbd::MockTestImageCtx> { static ImageDeleter* s_instance; static void trash_move(librados::IoCtx& local_io_ctx, const std::string& global_image_id, bool resync, librbd::asio::ContextWQ* work_queue, Context* on_finish) { ceph_assert(s_instance != nullptr); s_instance->trash_move(global_image_id, resync, on_finish); } MOCK_METHOD3(trash_move, void(const std::string&, bool, Context*)); ImageDeleter() { s_instance = this; } }; ImageDeleter<librbd::MockTestImageCtx>* ImageDeleter<librbd::MockTestImageCtx>::s_instance = nullptr; namespace image_replayer { template <> struct GetMirrorImageIdRequest<librbd::MockTestImageCtx> { static GetMirrorImageIdRequest* s_instance; std::string* image_id = nullptr; Context* on_finish = nullptr; static GetMirrorImageIdRequest* create(librados::IoCtx& io_ctx, const std::string& global_image_id, std::string* image_id, Context* on_finish) { ceph_assert(s_instance != nullptr); s_instance->image_id = image_id; s_instance->on_finish = on_finish; return s_instance; } GetMirrorImageIdRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; template<> struct StateBuilder<librbd::MockTestImageCtx> { virtual ~StateBuilder() {} std::string local_image_id; librbd::mirror::PromotionState local_promotion_state; }; GetMirrorImageIdRequest<librbd::MockTestImageCtx>* GetMirrorImageIdRequest<librbd::MockTestImageCtx>::s_instance = nullptr; namespace journal { template<> struct StateBuilder<librbd::MockTestImageCtx> : public image_replayer::StateBuilder<librbd::MockTestImageCtx> { static StateBuilder* s_instance; cls::rbd::MirrorImageMode mirror_image_mode = cls::rbd::MIRROR_IMAGE_MODE_JOURNAL; std::string local_primary_mirror_uuid; static StateBuilder* create(const std::string&) { ceph_assert(s_instance != nullptr); return s_instance; } StateBuilder() { s_instance = this; } }; StateBuilder<librbd::MockTestImageCtx>* 
StateBuilder<librbd::MockTestImageCtx>::s_instance = nullptr; } // namespace journal namespace snapshot { template<> struct StateBuilder<librbd::MockTestImageCtx> : public image_replayer::StateBuilder<librbd::MockTestImageCtx> { static StateBuilder* s_instance; cls::rbd::MirrorImageMode mirror_image_mode = cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT; static StateBuilder* create(const std::string&) { ceph_assert(s_instance != nullptr); return s_instance; } StateBuilder() { s_instance = this; } }; StateBuilder<librbd::MockTestImageCtx>* StateBuilder<librbd::MockTestImageCtx>::s_instance = nullptr; } // namespace snapshot } // namespace image_replayer } // namespace mirror } // namespace rbd // template definitions #include "tools/rbd_mirror/image_replayer/PrepareLocalImageRequest.cc" namespace rbd { namespace mirror { namespace image_replayer { using ::testing::_; using ::testing::DoAll; using ::testing::InSequence; using ::testing::Invoke; using ::testing::Return; using ::testing::StrEq; using ::testing::WithArg; using ::testing::WithArgs; class TestMockImageReplayerPrepareLocalImageRequest : public TestMockFixture { public: typedef ImageDeleter<librbd::MockTestImageCtx> MockImageDeleter; typedef PrepareLocalImageRequest<librbd::MockTestImageCtx> MockPrepareLocalImageRequest; typedef GetMirrorImageIdRequest<librbd::MockTestImageCtx> MockGetMirrorImageIdRequest; typedef StateBuilder<librbd::MockTestImageCtx> MockStateBuilder; typedef journal::StateBuilder<librbd::MockTestImageCtx> MockJournalStateBuilder; typedef snapshot::StateBuilder<librbd::MockTestImageCtx> MockSnapshotStateBuilder; typedef librbd::mirror::GetInfoRequest<librbd::MockTestImageCtx> MockGetMirrorInfoRequest; void expect_get_mirror_image_id(MockGetMirrorImageIdRequest& mock_get_mirror_image_id_request, const std::string& image_id, int r) { EXPECT_CALL(mock_get_mirror_image_id_request, send()) .WillOnce(Invoke([&mock_get_mirror_image_id_request, image_id, r]() { *mock_get_mirror_image_id_request.image_id = 
image_id; mock_get_mirror_image_id_request.on_finish->complete(r); })); } void expect_dir_get_name(librados::IoCtx &io_ctx, const std::string &image_name, int r) { bufferlist bl; encode(image_name, bl); EXPECT_CALL(get_mock_io_ctx(io_ctx), exec(RBD_DIRECTORY, _, StrEq("rbd"), StrEq("dir_get_name"), _, _, _, _)) .WillOnce(DoAll(WithArg<5>(Invoke([bl](bufferlist *out_bl) { *out_bl = bl; })), Return(r))); } void expect_get_mirror_info( MockGetMirrorInfoRequest &mock_get_mirror_info_request, const cls::rbd::MirrorImage &mirror_image, librbd::mirror::PromotionState promotion_state, const std::string& primary_mirror_uuid, int r) { EXPECT_CALL(mock_get_mirror_info_request, send()) .WillOnce(Invoke([this, &mock_get_mirror_info_request, mirror_image, promotion_state, primary_mirror_uuid, r]() { *mock_get_mirror_info_request.mirror_image = mirror_image; *mock_get_mirror_info_request.promotion_state = promotion_state; *mock_get_mirror_info_request.primary_mirror_uuid = primary_mirror_uuid; m_threads->work_queue->queue( mock_get_mirror_info_request.on_finish, r); })); } void expect_trash_move(MockImageDeleter& mock_image_deleter, const std::string& global_image_id, bool ignore_orphan, int r) { EXPECT_CALL(mock_image_deleter, trash_move(global_image_id, ignore_orphan, _)) .WillOnce(WithArg<2>(Invoke([this, r](Context* ctx) { m_threads->work_queue->queue(ctx, r); }))); } }; TEST_F(TestMockImageReplayerPrepareLocalImageRequest, SuccessJournal) { InSequence seq; MockGetMirrorImageIdRequest mock_get_mirror_image_id_request; expect_get_mirror_image_id(mock_get_mirror_image_id_request, "local image id", 0); expect_dir_get_name(m_local_io_ctx, "local image name", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, librbd::mirror::PROMOTION_STATE_NON_PRIMARY, "remote mirror uuid", 0); MockJournalStateBuilder 
mock_journal_state_builder; MockStateBuilder* mock_state_builder = nullptr; std::string local_image_name; C_SaferCond ctx; auto req = MockPrepareLocalImageRequest::create(m_local_io_ctx, "global image id", &local_image_name, &mock_state_builder, m_threads->work_queue, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_TRUE(mock_state_builder != nullptr); ASSERT_EQ(std::string("local image name"), local_image_name); ASSERT_EQ(std::string("local image id"), mock_journal_state_builder.local_image_id); ASSERT_EQ(cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, mock_journal_state_builder.mirror_image_mode); ASSERT_EQ(librbd::mirror::PROMOTION_STATE_NON_PRIMARY, mock_journal_state_builder.local_promotion_state); ASSERT_EQ(std::string("remote mirror uuid"), mock_journal_state_builder.local_primary_mirror_uuid); } TEST_F(TestMockImageReplayerPrepareLocalImageRequest, SuccessSnapshot) { InSequence seq; MockGetMirrorImageIdRequest mock_get_mirror_image_id_request; expect_get_mirror_image_id(mock_get_mirror_image_id_request, "local image id", 0); expect_dir_get_name(m_local_io_ctx, "local image name", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, "global image id", cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, librbd::mirror::PROMOTION_STATE_NON_PRIMARY, "remote mirror uuid", 0); MockSnapshotStateBuilder mock_journal_state_builder; MockStateBuilder* mock_state_builder = nullptr; std::string local_image_name; C_SaferCond ctx; auto req = MockPrepareLocalImageRequest::create(m_local_io_ctx, "global image id", &local_image_name, &mock_state_builder, m_threads->work_queue, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_TRUE(mock_state_builder != nullptr); ASSERT_EQ(std::string("local image name"), local_image_name); ASSERT_EQ(std::string("local image id"), mock_journal_state_builder.local_image_id); ASSERT_EQ(cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, 
mock_journal_state_builder.mirror_image_mode); ASSERT_EQ(librbd::mirror::PROMOTION_STATE_NON_PRIMARY, mock_journal_state_builder.local_promotion_state); } TEST_F(TestMockImageReplayerPrepareLocalImageRequest, MirrorImageIdError) { InSequence seq; MockGetMirrorImageIdRequest mock_get_mirror_image_id_request; expect_get_mirror_image_id(mock_get_mirror_image_id_request, "", -EINVAL); MockStateBuilder* mock_state_builder = nullptr; std::string local_image_name; C_SaferCond ctx; auto req = MockPrepareLocalImageRequest::create(m_local_io_ctx, "global image id", &local_image_name, &mock_state_builder, m_threads->work_queue, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerPrepareLocalImageRequest, DirGetNameDNE) { InSequence seq; MockGetMirrorImageIdRequest mock_get_mirror_image_id_request; expect_get_mirror_image_id(mock_get_mirror_image_id_request, "local image id", 0); expect_dir_get_name(m_local_io_ctx, "", -ENOENT); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, librbd::mirror::PROMOTION_STATE_NON_PRIMARY, "remote mirror uuid", 0); MockJournalStateBuilder mock_journal_state_builder; MockStateBuilder* mock_state_builder = nullptr; std::string local_image_name; C_SaferCond ctx; auto req = MockPrepareLocalImageRequest::create(m_local_io_ctx, "global image id", &local_image_name, &mock_state_builder, m_threads->work_queue, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageReplayerPrepareLocalImageRequest, DirGetNameError) { InSequence seq; MockGetMirrorImageIdRequest mock_get_mirror_image_id_request; expect_get_mirror_image_id(mock_get_mirror_image_id_request, "local image id", 0); expect_dir_get_name(m_local_io_ctx, "", -EPERM); MockStateBuilder* mock_state_builder = nullptr; std::string local_image_name; C_SaferCond ctx; auto req = 
MockPrepareLocalImageRequest::create(m_local_io_ctx, "global image id", &local_image_name, &mock_state_builder, m_threads->work_queue, &ctx); req->send(); ASSERT_EQ(-EPERM, ctx.wait()); } TEST_F(TestMockImageReplayerPrepareLocalImageRequest, MirrorImageInfoError) { InSequence seq; MockGetMirrorImageIdRequest mock_get_mirror_image_id_request; expect_get_mirror_image_id(mock_get_mirror_image_id_request, "local image id", 0); expect_dir_get_name(m_local_io_ctx, "local image name", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, librbd::mirror::PROMOTION_STATE_NON_PRIMARY, "remote mirror uuid", -EINVAL); MockStateBuilder* mock_state_builder = nullptr; std::string local_image_name; C_SaferCond ctx; auto req = MockPrepareLocalImageRequest::create(m_local_io_ctx, "global image id", &local_image_name, &mock_state_builder, m_threads->work_queue, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerPrepareLocalImageRequest, ImageCreating) { InSequence seq; MockGetMirrorImageIdRequest mock_get_mirror_image_id_request; expect_get_mirror_image_id(mock_get_mirror_image_id_request, "local image id", 0); expect_dir_get_name(m_local_io_ctx, "local image name", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, "global image id", cls::rbd::MIRROR_IMAGE_STATE_CREATING}, librbd::mirror::PROMOTION_STATE_NON_PRIMARY, "remote mirror uuid", 0); MockImageDeleter mock_image_deleter; expect_trash_move(mock_image_deleter, "global image id", false, 0); MockSnapshotStateBuilder mock_journal_state_builder; MockStateBuilder* mock_state_builder = nullptr; std::string local_image_name; C_SaferCond ctx; auto req = MockPrepareLocalImageRequest::create(m_local_io_ctx, "global image id", &local_image_name, 
&mock_state_builder, m_threads->work_queue, &ctx); req->send(); ASSERT_EQ(-ENOENT, ctx.wait()); ASSERT_TRUE(mock_state_builder == nullptr); } TEST_F(TestMockImageReplayerPrepareLocalImageRequest, ImageDisabling) { InSequence seq; MockGetMirrorImageIdRequest mock_get_mirror_image_id_request; expect_get_mirror_image_id(mock_get_mirror_image_id_request, "local image id", 0); expect_dir_get_name(m_local_io_ctx, "local image name", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, "global image id", cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, librbd::mirror::PROMOTION_STATE_NON_PRIMARY, "remote mirror uuid", 0); MockSnapshotStateBuilder mock_journal_state_builder; MockStateBuilder* mock_state_builder = nullptr; std::string local_image_name; C_SaferCond ctx; auto req = MockPrepareLocalImageRequest::create(m_local_io_ctx, "global image id", &local_image_name, &mock_state_builder, m_threads->work_queue, &ctx); req->send(); ASSERT_EQ(-ERESTART, ctx.wait()); ASSERT_TRUE(mock_state_builder == nullptr); } } // namespace image_replayer } // namespace mirror } // namespace rbd
19,562
37.662055
123
cc
null
ceph-main/src/test/rbd_mirror/image_replayer/test_mock_PrepareRemoteImageRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/rbd_mirror/test_mock_fixture.h" #include "cls/rbd/cls_rbd_types.h" #include "librbd/journal/TypeTraits.h" #include "librbd/mirror/GetInfoRequest.h" #include "tools/rbd_mirror/Threads.h" #include "tools/rbd_mirror/image_replayer/GetMirrorImageIdRequest.h" #include "tools/rbd_mirror/image_replayer/PrepareRemoteImageRequest.h" #include "tools/rbd_mirror/image_replayer/StateBuilder.h" #include "tools/rbd_mirror/image_replayer/journal/StateBuilder.h" #include "tools/rbd_mirror/image_replayer/snapshot/StateBuilder.h" #include "test/journal/mock/MockJournaler.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "test/librbd/mock/MockImageCtx.h" namespace librbd { namespace { struct MockTestImageCtx : public librbd::MockImageCtx { MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace namespace journal { template <> struct TypeTraits<MockTestImageCtx> { typedef ::journal::MockJournalerProxy Journaler; }; } // namespace journal namespace mirror { template<> struct GetInfoRequest<librbd::MockTestImageCtx> { static GetInfoRequest* s_instance; cls::rbd::MirrorImage *mirror_image; PromotionState *promotion_state; std::string *primary_mirror_uuid; Context *on_finish = nullptr; static GetInfoRequest* create(librados::IoCtx& io_ctx, librbd::asio::ContextWQ* context_wq, const std::string& image_id, cls::rbd::MirrorImage *mirror_image, PromotionState *promotion_state, std::string* primary_mirror_uuid, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->mirror_image = mirror_image; s_instance->promotion_state = promotion_state; s_instance->primary_mirror_uuid = primary_mirror_uuid; s_instance->on_finish = on_finish; return s_instance; } GetInfoRequest() { ceph_assert(s_instance == nullptr); s_instance = this; } ~GetInfoRequest() { s_instance = nullptr; } MOCK_METHOD0(send, void()); 
}; GetInfoRequest<librbd::MockTestImageCtx>* GetInfoRequest<librbd::MockTestImageCtx>::s_instance = nullptr; } // namespace mirror } // namespace librbd namespace rbd { namespace mirror { template <> struct Threads<librbd::MockTestImageCtx> { ceph::mutex &timer_lock; SafeTimer *timer; librbd::asio::ContextWQ *work_queue; Threads(Threads<librbd::ImageCtx> *threads) : timer_lock(threads->timer_lock), timer(threads->timer), work_queue(threads->work_queue) { } }; namespace image_replayer { template <> struct GetMirrorImageIdRequest<librbd::MockTestImageCtx> { static GetMirrorImageIdRequest* s_instance; std::string* image_id = nullptr; Context* on_finish = nullptr; static GetMirrorImageIdRequest* create(librados::IoCtx& io_ctx, const std::string& global_image_id, std::string* image_id, Context* on_finish) { ceph_assert(s_instance != nullptr); s_instance->image_id = image_id; s_instance->on_finish = on_finish; return s_instance; } GetMirrorImageIdRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; template<> struct StateBuilder<librbd::MockTestImageCtx> { std::string local_image_id; librbd::mirror::PromotionState local_promotion_state = librbd::mirror::PROMOTION_STATE_NON_PRIMARY; std::string remote_image_id; std::string remote_mirror_uuid; librbd::mirror::PromotionState remote_promotion_state; virtual ~StateBuilder() {} MOCK_CONST_METHOD0(get_mirror_image_mode, cls::rbd::MirrorImageMode()); }; GetMirrorImageIdRequest<librbd::MockTestImageCtx>* GetMirrorImageIdRequest<librbd::MockTestImageCtx>::s_instance = nullptr; namespace journal { template<> struct StateBuilder<librbd::MockTestImageCtx> : public image_replayer::StateBuilder<librbd::MockTestImageCtx> { static StateBuilder* s_instance; cls::rbd::MirrorImageMode mirror_image_mode = cls::rbd::MIRROR_IMAGE_MODE_JOURNAL; ::journal::MockJournalerProxy* remote_journaler = nullptr; cls::journal::ClientState remote_client_state; librbd::journal::MirrorPeerClientMeta remote_client_meta; static StateBuilder* 
create(const std::string&) { ceph_assert(s_instance != nullptr); return s_instance; } StateBuilder() { s_instance = this; } }; StateBuilder<librbd::MockTestImageCtx>* StateBuilder<librbd::MockTestImageCtx>::s_instance = nullptr; } // namespace journal namespace snapshot { template<> struct StateBuilder<librbd::MockTestImageCtx> : public image_replayer::StateBuilder<librbd::MockTestImageCtx> { static StateBuilder* s_instance; cls::rbd::MirrorImageMode mirror_image_mode = cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT; std::string remote_mirror_peer_uuid; static StateBuilder* create(const std::string&) { ceph_assert(s_instance != nullptr); return s_instance; } StateBuilder() { s_instance = this; } }; StateBuilder<librbd::MockTestImageCtx>* StateBuilder<librbd::MockTestImageCtx>::s_instance = nullptr; } // namespace snapshot } // namespace image_replayer } // namespace mirror } // namespace rbd // template definitions #include "tools/rbd_mirror/image_replayer/PrepareRemoteImageRequest.cc" namespace rbd { namespace mirror { namespace image_replayer { using ::testing::_; using ::testing::DoAll; using ::testing::InSequence; using ::testing::Invoke; using ::testing::Return; using ::testing::StrEq; using ::testing::WithArg; class TestMockImageReplayerPrepareRemoteImageRequest : public TestMockFixture { public: typedef Threads<librbd::MockTestImageCtx> MockThreads; typedef PrepareRemoteImageRequest<librbd::MockTestImageCtx> MockPrepareRemoteImageRequest; typedef GetMirrorImageIdRequest<librbd::MockTestImageCtx> MockGetMirrorImageIdRequest; typedef StateBuilder<librbd::MockTestImageCtx> MockStateBuilder; typedef journal::StateBuilder<librbd::MockTestImageCtx> MockJournalStateBuilder; typedef snapshot::StateBuilder<librbd::MockTestImageCtx> MockSnapshotStateBuilder; typedef librbd::mirror::GetInfoRequest<librbd::MockTestImageCtx> MockGetMirrorInfoRequest; void expect_get_mirror_image_mode(MockStateBuilder& mock_state_builder, cls::rbd::MirrorImageMode mirror_image_mode) { 
EXPECT_CALL(mock_state_builder, get_mirror_image_mode()) .WillOnce(Return(mirror_image_mode)); } void expect_get_mirror_image_id(MockGetMirrorImageIdRequest& mock_get_mirror_image_id_request, const std::string& image_id, int r) { EXPECT_CALL(mock_get_mirror_image_id_request, send()) .WillOnce(Invoke([&mock_get_mirror_image_id_request, image_id, r]() { *mock_get_mirror_image_id_request.image_id = image_id; mock_get_mirror_image_id_request.on_finish->complete(r); })); } void expect_get_mirror_info( MockGetMirrorInfoRequest &mock_get_mirror_info_request, const cls::rbd::MirrorImage &mirror_image, librbd::mirror::PromotionState promotion_state, const std::string& primary_mirror_uuid, int r) { EXPECT_CALL(mock_get_mirror_info_request, send()) .WillOnce(Invoke([this, &mock_get_mirror_info_request, mirror_image, promotion_state, primary_mirror_uuid, r]() { *mock_get_mirror_info_request.mirror_image = mirror_image; *mock_get_mirror_info_request.promotion_state = promotion_state; *mock_get_mirror_info_request.primary_mirror_uuid = primary_mirror_uuid; m_threads->work_queue->queue( mock_get_mirror_info_request.on_finish, r); })); } void expect_journaler_get_client(::journal::MockJournaler &mock_journaler, const std::string &client_id, cls::journal::Client &client, int r) { EXPECT_CALL(mock_journaler, get_client(StrEq(client_id), _, _)) .WillOnce(DoAll(WithArg<1>(Invoke([client](cls::journal::Client *out_client) { *out_client = client; })), WithArg<2>(Invoke([this, r](Context *on_finish) { m_threads->work_queue->queue(on_finish, r); })))); } void expect_journaler_register_client(::journal::MockJournaler &mock_journaler, const librbd::journal::ClientData &client_data, int r) { bufferlist bl; encode(client_data, bl); EXPECT_CALL(mock_journaler, register_client(ContentsEqual(bl), _)) .WillOnce(WithArg<1>(Invoke([this, r](Context *on_finish) { m_threads->work_queue->queue(on_finish, r); }))); } }; TEST_F(TestMockImageReplayerPrepareRemoteImageRequest, SuccessJournal) { 
::journal::MockJournaler mock_remote_journaler; MockThreads mock_threads(m_threads); InSequence seq; MockGetMirrorImageIdRequest mock_get_mirror_image_id_request; expect_get_mirror_image_id(mock_get_mirror_image_id_request, "remote image id", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, librbd::mirror::PROMOTION_STATE_PRIMARY, "remote mirror uuid", 0); EXPECT_CALL(mock_remote_journaler, construct()); librbd::journal::MirrorPeerClientMeta mirror_peer_client_meta; mirror_peer_client_meta.image_id = "local image id"; mirror_peer_client_meta.state = librbd::journal::MIRROR_PEER_STATE_SYNCING; librbd::journal::ClientData client_data{mirror_peer_client_meta}; cls::journal::Client client; client.state = cls::journal::CLIENT_STATE_DISCONNECTED; encode(client_data, client.data); expect_journaler_get_client(mock_remote_journaler, "local mirror uuid", client, 0); MockJournalStateBuilder mock_journal_state_builder; MockStateBuilder* mock_state_builder = nullptr; C_SaferCond ctx; auto req = MockPrepareRemoteImageRequest::create(&mock_threads, m_local_io_ctx, m_remote_io_ctx, "global image id", "local mirror uuid", {"remote mirror uuid", ""}, nullptr, &mock_state_builder, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_TRUE(mock_state_builder != nullptr); ASSERT_EQ(cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, mock_journal_state_builder.mirror_image_mode); ASSERT_EQ(std::string("remote mirror uuid"), mock_journal_state_builder.remote_mirror_uuid); ASSERT_EQ(std::string("remote image id"), mock_journal_state_builder.remote_image_id); ASSERT_EQ(librbd::mirror::PROMOTION_STATE_PRIMARY, mock_journal_state_builder.remote_promotion_state); ASSERT_TRUE(mock_journal_state_builder.remote_journaler != nullptr); ASSERT_EQ(cls::journal::CLIENT_STATE_DISCONNECTED, mock_journal_state_builder.remote_client_state); } 
TEST_F(TestMockImageReplayerPrepareRemoteImageRequest, SuccessSnapshot) { MockThreads mock_threads(m_threads); InSequence seq; MockGetMirrorImageIdRequest mock_get_mirror_image_id_request; expect_get_mirror_image_id(mock_get_mirror_image_id_request, "remote image id", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, "global image id", cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, librbd::mirror::PROMOTION_STATE_PRIMARY, "remote mirror uuid", 0); MockSnapshotStateBuilder mock_snapshot_state_builder; MockStateBuilder* mock_state_builder = nullptr; C_SaferCond ctx; auto req = MockPrepareRemoteImageRequest::create(&mock_threads, m_local_io_ctx, m_remote_io_ctx, "global image id", "local mirror uuid", {"remote mirror uuid", "remote mirror peer uuid"}, nullptr, &mock_state_builder, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_TRUE(mock_state_builder != nullptr); ASSERT_EQ(cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, mock_snapshot_state_builder.mirror_image_mode); ASSERT_EQ(std::string("remote mirror uuid"), mock_snapshot_state_builder.remote_mirror_uuid); ASSERT_EQ(std::string("remote mirror peer uuid"), mock_snapshot_state_builder.remote_mirror_peer_uuid); ASSERT_EQ(std::string("remote image id"), mock_snapshot_state_builder.remote_image_id); ASSERT_EQ(librbd::mirror::PROMOTION_STATE_PRIMARY, mock_snapshot_state_builder.remote_promotion_state); } TEST_F(TestMockImageReplayerPrepareRemoteImageRequest, SuccessNotRegistered) { ::journal::MockJournaler mock_remote_journaler; MockThreads mock_threads(m_threads); InSequence seq; MockGetMirrorImageIdRequest mock_get_mirror_image_id_request; expect_get_mirror_image_id(mock_get_mirror_image_id_request, "remote image id", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, 
librbd::mirror::PROMOTION_STATE_PRIMARY, "remote mirror uuid", 0); MockJournalStateBuilder mock_journal_state_builder; expect_get_mirror_image_mode(mock_journal_state_builder, cls::rbd::MIRROR_IMAGE_MODE_JOURNAL); EXPECT_CALL(mock_remote_journaler, construct()); cls::journal::Client client; expect_journaler_get_client(mock_remote_journaler, "local mirror uuid", client, -ENOENT); librbd::journal::MirrorPeerClientMeta mirror_peer_client_meta; mirror_peer_client_meta.image_id = "local image id"; mirror_peer_client_meta.state = librbd::journal::MIRROR_PEER_STATE_REPLAYING; librbd::journal::ClientData client_data{mirror_peer_client_meta}; expect_journaler_register_client(mock_remote_journaler, client_data, 0); mock_journal_state_builder.local_image_id = "local image id"; MockStateBuilder* mock_state_builder = &mock_journal_state_builder; C_SaferCond ctx; auto req = MockPrepareRemoteImageRequest::create(&mock_threads, m_local_io_ctx, m_remote_io_ctx, "global image id", "local mirror uuid", {"remote mirror uuid", ""}, nullptr, &mock_state_builder, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_TRUE(mock_state_builder != nullptr); ASSERT_EQ(std::string("remote image id"), mock_journal_state_builder.remote_image_id); ASSERT_EQ(librbd::mirror::PROMOTION_STATE_PRIMARY, mock_journal_state_builder.remote_promotion_state); ASSERT_TRUE(mock_journal_state_builder.remote_journaler != nullptr); ASSERT_EQ(cls::journal::CLIENT_STATE_CONNECTED, mock_journal_state_builder.remote_client_state); } TEST_F(TestMockImageReplayerPrepareRemoteImageRequest, GetMirrorImageIdError) { MockThreads mock_threads(m_threads); InSequence seq; MockGetMirrorImageIdRequest mock_get_mirror_image_id_request; expect_get_mirror_image_id(mock_get_mirror_image_id_request, "", -EINVAL); MockJournalStateBuilder mock_journal_state_builder; MockStateBuilder* mock_state_builder = &mock_journal_state_builder; C_SaferCond ctx; auto req = MockPrepareRemoteImageRequest::create(&mock_threads, m_local_io_ctx, 
m_remote_io_ctx, "global image id", "local mirror uuid", {"remote mirror uuid", ""}, nullptr, &mock_state_builder, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); ASSERT_TRUE(mock_journal_state_builder.remote_journaler == nullptr); } TEST_F(TestMockImageReplayerPrepareRemoteImageRequest, GetMirrorInfoError) { MockThreads mock_threads(m_threads); InSequence seq; MockGetMirrorImageIdRequest mock_get_mirror_image_id_request; expect_get_mirror_image_id(mock_get_mirror_image_id_request, "remote image id", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, librbd::mirror::PROMOTION_STATE_PRIMARY, "remote mirror uuid", -EINVAL); MockJournalStateBuilder mock_journal_state_builder; MockStateBuilder* mock_state_builder = nullptr; C_SaferCond ctx; auto req = MockPrepareRemoteImageRequest::create(&mock_threads, m_local_io_ctx, m_remote_io_ctx, "global image id", "local mirror uuid", {"remote mirror uuid", ""}, nullptr, &mock_state_builder, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); ASSERT_TRUE(mock_state_builder == nullptr); } TEST_F(TestMockImageReplayerPrepareRemoteImageRequest, GetClientError) { ::journal::MockJournaler mock_remote_journaler; MockThreads mock_threads(m_threads); InSequence seq; MockGetMirrorImageIdRequest mock_get_mirror_image_id_request; expect_get_mirror_image_id(mock_get_mirror_image_id_request, "remote image id", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, librbd::mirror::PROMOTION_STATE_PRIMARY, "remote mirror uuid", 0); EXPECT_CALL(mock_remote_journaler, construct()); cls::journal::Client client; expect_journaler_get_client(mock_remote_journaler, "local mirror uuid", client, -EINVAL); MockJournalStateBuilder 
mock_journal_state_builder; MockStateBuilder* mock_state_builder = nullptr; C_SaferCond ctx; auto req = MockPrepareRemoteImageRequest::create(&mock_threads, m_local_io_ctx, m_remote_io_ctx, "global image id", "local mirror uuid", {"remote mirror uuid", ""}, nullptr, &mock_state_builder, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); ASSERT_TRUE(mock_state_builder == nullptr); } TEST_F(TestMockImageReplayerPrepareRemoteImageRequest, RegisterClientError) { ::journal::MockJournaler mock_remote_journaler; MockThreads mock_threads(m_threads); InSequence seq; MockGetMirrorImageIdRequest mock_get_mirror_image_id_request; expect_get_mirror_image_id(mock_get_mirror_image_id_request, "remote image id", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, librbd::mirror::PROMOTION_STATE_PRIMARY, "remote mirror uuid", 0); MockJournalStateBuilder mock_journal_state_builder; expect_get_mirror_image_mode(mock_journal_state_builder, cls::rbd::MIRROR_IMAGE_MODE_JOURNAL); EXPECT_CALL(mock_remote_journaler, construct()); cls::journal::Client client; expect_journaler_get_client(mock_remote_journaler, "local mirror uuid", client, -ENOENT); librbd::journal::MirrorPeerClientMeta mirror_peer_client_meta; mirror_peer_client_meta.image_id = "local image id"; mirror_peer_client_meta.state = librbd::journal::MIRROR_PEER_STATE_REPLAYING; librbd::journal::ClientData client_data{mirror_peer_client_meta}; expect_journaler_register_client(mock_remote_journaler, client_data, -EINVAL); mock_journal_state_builder.local_image_id = "local image id"; MockStateBuilder* mock_state_builder = &mock_journal_state_builder; C_SaferCond ctx; auto req = MockPrepareRemoteImageRequest::create(&mock_threads, m_local_io_ctx, m_remote_io_ctx, "global image id", "local mirror uuid", {"remote mirror uuid", ""}, nullptr, &mock_state_builder, &ctx); req->send(); 
ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerPrepareRemoteImageRequest, MirrorImageIdDNEJournal) { MockThreads mock_threads(m_threads); InSequence seq; MockGetMirrorImageIdRequest mock_get_mirror_image_id_request; expect_get_mirror_image_id(mock_get_mirror_image_id_request, "", -ENOENT); MockJournalStateBuilder mock_journal_state_builder; MockStateBuilder* mock_state_builder = &mock_journal_state_builder; C_SaferCond ctx; auto req = MockPrepareRemoteImageRequest::create(&mock_threads, m_local_io_ctx, m_remote_io_ctx, "global image id", "local mirror uuid", {"remote mirror uuid", ""}, nullptr, &mock_state_builder, &ctx); req->send(); ASSERT_EQ(-ENOENT, ctx.wait()); ASSERT_EQ(cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, mock_journal_state_builder.mirror_image_mode); ASSERT_EQ("remote mirror uuid", mock_journal_state_builder.remote_mirror_uuid); ASSERT_EQ("", mock_journal_state_builder.remote_image_id); } TEST_F(TestMockImageReplayerPrepareRemoteImageRequest, MirrorImageIdDNESnapshot) { MockThreads mock_threads(m_threads); InSequence seq; MockGetMirrorImageIdRequest mock_get_mirror_image_id_request; expect_get_mirror_image_id(mock_get_mirror_image_id_request, "", -ENOENT); MockSnapshotStateBuilder mock_snapshot_state_builder; MockStateBuilder* mock_state_builder = &mock_snapshot_state_builder; C_SaferCond ctx; auto req = MockPrepareRemoteImageRequest::create(&mock_threads, m_local_io_ctx, m_remote_io_ctx, "global image id", "local mirror uuid", {"remote mirror uuid", "remote mirror peer uuid"}, nullptr, &mock_state_builder, &ctx); req->send(); ASSERT_EQ(-ENOENT, ctx.wait()); ASSERT_EQ(cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, mock_snapshot_state_builder.mirror_image_mode); ASSERT_EQ("remote mirror uuid", mock_snapshot_state_builder.remote_mirror_uuid); ASSERT_EQ("remote mirror peer uuid", mock_snapshot_state_builder.remote_mirror_peer_uuid); ASSERT_EQ("", mock_snapshot_state_builder.remote_image_id); } TEST_F(TestMockImageReplayerPrepareRemoteImageRequest, 
MirrorInfoDNEJournal) { MockThreads mock_threads(m_threads); InSequence seq; MockGetMirrorImageIdRequest mock_get_mirror_image_id_request; expect_get_mirror_image_id(mock_get_mirror_image_id_request, "remote image id", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, librbd::mirror::PROMOTION_STATE_PRIMARY, "remote mirror uuid", -ENOENT); MockJournalStateBuilder mock_journal_state_builder; MockStateBuilder* mock_state_builder = &mock_journal_state_builder; C_SaferCond ctx; auto req = MockPrepareRemoteImageRequest::create(&mock_threads, m_local_io_ctx, m_remote_io_ctx, "global image id", "local mirror uuid", {"remote mirror uuid", ""}, nullptr, &mock_state_builder, &ctx); req->send(); ASSERT_EQ(-ENOENT, ctx.wait()); ASSERT_EQ(cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, mock_journal_state_builder.mirror_image_mode); ASSERT_EQ("remote mirror uuid", mock_journal_state_builder.remote_mirror_uuid); ASSERT_EQ("", mock_journal_state_builder.remote_image_id); } TEST_F(TestMockImageReplayerPrepareRemoteImageRequest, MirrorInfoDNESnapshot) { MockThreads mock_threads(m_threads); InSequence seq; MockGetMirrorImageIdRequest mock_get_mirror_image_id_request; expect_get_mirror_image_id(mock_get_mirror_image_id_request, "remote image id", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, "global image id", cls::rbd::MIRROR_IMAGE_STATE_ENABLED}, librbd::mirror::PROMOTION_STATE_PRIMARY, "remote mirror uuid", -ENOENT); MockSnapshotStateBuilder mock_snapshot_state_builder; MockStateBuilder* mock_state_builder = &mock_snapshot_state_builder; C_SaferCond ctx; auto req = MockPrepareRemoteImageRequest::create(&mock_threads, m_local_io_ctx, m_remote_io_ctx, "global image id", "local mirror uuid", {"remote mirror uuid", "remote mirror peer 
uuid"}, nullptr, &mock_state_builder, &ctx); req->send(); ASSERT_EQ(-ENOENT, ctx.wait()); ASSERT_EQ(cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, mock_snapshot_state_builder.mirror_image_mode); ASSERT_EQ("remote mirror uuid", mock_snapshot_state_builder.remote_mirror_uuid); ASSERT_EQ("remote mirror peer uuid", mock_snapshot_state_builder.remote_mirror_peer_uuid); ASSERT_EQ("", mock_snapshot_state_builder.remote_image_id); } TEST_F(TestMockImageReplayerPrepareRemoteImageRequest, MirrorInfoDisablingJournal) { MockThreads mock_threads(m_threads); InSequence seq; MockGetMirrorImageIdRequest mock_get_mirror_image_id_request; expect_get_mirror_image_id(mock_get_mirror_image_id_request, "remote image id", 0); MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "global image id", cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, librbd::mirror::PROMOTION_STATE_PRIMARY, "remote mirror uuid", 0); MockJournalStateBuilder mock_journal_state_builder; expect_get_mirror_image_mode(mock_journal_state_builder, cls::rbd::MIRROR_IMAGE_MODE_JOURNAL); MockStateBuilder* mock_state_builder = &mock_journal_state_builder; C_SaferCond ctx; auto req = MockPrepareRemoteImageRequest::create(&mock_threads, m_local_io_ctx, m_remote_io_ctx, "global image id", "local mirror uuid", {"remote mirror uuid", ""}, nullptr, &mock_state_builder, &ctx); req->send(); ASSERT_EQ(-ENOENT, ctx.wait()); ASSERT_EQ(cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, mock_journal_state_builder.mirror_image_mode); ASSERT_EQ("remote mirror uuid", mock_journal_state_builder.remote_mirror_uuid); ASSERT_EQ("", mock_journal_state_builder.remote_image_id); } TEST_F(TestMockImageReplayerPrepareRemoteImageRequest, MirrorInfoDisablingSnapshot) { MockThreads mock_threads(m_threads); InSequence seq; MockGetMirrorImageIdRequest mock_get_mirror_image_id_request; expect_get_mirror_image_id(mock_get_mirror_image_id_request, "remote image id", 0); 
MockGetMirrorInfoRequest mock_get_mirror_info_request; expect_get_mirror_info(mock_get_mirror_info_request, {cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, "global image id", cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, librbd::mirror::PROMOTION_STATE_PRIMARY, "remote mirror uuid", 0); MockSnapshotStateBuilder mock_snapshot_state_builder; expect_get_mirror_image_mode(mock_snapshot_state_builder, cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT); MockStateBuilder* mock_state_builder = &mock_snapshot_state_builder; C_SaferCond ctx; auto req = MockPrepareRemoteImageRequest::create(&mock_threads, m_local_io_ctx, m_remote_io_ctx, "global image id", "local mirror uuid", {"remote mirror uuid", "remote mirror peer uuid"}, nullptr, &mock_state_builder, &ctx); req->send(); ASSERT_EQ(-ENOENT, ctx.wait()); ASSERT_EQ(cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, mock_snapshot_state_builder.mirror_image_mode); ASSERT_EQ("remote mirror uuid", mock_snapshot_state_builder.remote_mirror_uuid); ASSERT_EQ("remote mirror peer uuid", mock_snapshot_state_builder.remote_mirror_peer_uuid); ASSERT_EQ("", mock_snapshot_state_builder.remote_image_id); } } // namespace image_replayer } // namespace mirror } // namespace rbd
34,554
41.555419
123
cc
null
ceph-main/src/test/rbd_mirror/image_replayer/journal/test_mock_CreateLocalImageRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/rbd_mirror/test_mock_fixture.h" #include "librbd/journal/Types.h" #include "librbd/journal/TypeTraits.h" #include "tools/rbd_mirror/Threads.h" #include "tools/rbd_mirror/image_replayer/CreateImageRequest.h" #include "tools/rbd_mirror/image_replayer/journal/CreateLocalImageRequest.h" #include "tools/rbd_mirror/image_replayer/journal/StateBuilder.h" #include "test/journal/mock/MockJournaler.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/rbd_mirror/mock/MockContextWQ.h" #include "test/rbd_mirror/mock/MockSafeTimer.h" #include <boost/intrusive_ptr.hpp> namespace librbd { namespace { struct MockTestImageCtx : public librbd::MockImageCtx { explicit MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace namespace journal { template <> struct TypeTraits<librbd::MockTestImageCtx> { typedef ::journal::MockJournaler Journaler; }; } // namespace journal namespace util { static std::string s_image_id; template <> std::string generate_image_id<MockTestImageCtx>(librados::IoCtx&) { ceph_assert(!s_image_id.empty()); return s_image_id; } } // namespace util } // namespace librbd namespace rbd { namespace mirror { template <> struct Threads<librbd::MockTestImageCtx> { }; namespace image_replayer { template<> struct CreateImageRequest<librbd::MockTestImageCtx> { static CreateImageRequest* s_instance; Context *on_finish = nullptr; static CreateImageRequest* create(Threads<librbd::MockTestImageCtx>* threads, librados::IoCtx &local_io_ctx, const std::string &global_image_id, const std::string &remote_mirror_uuid, const std::string &local_image_name, const std::string &local_image_id, librbd::MockTestImageCtx *remote_image_ctx, PoolMetaCache* pool_meta_cache, cls::rbd::MirrorImageMode mirror_image_mode, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->on_finish = on_finish; 
s_instance->construct(local_image_id); return s_instance; } CreateImageRequest() { ceph_assert(s_instance == nullptr); s_instance = this; } ~CreateImageRequest() { s_instance = nullptr; } MOCK_METHOD1(construct, void(const std::string&)); MOCK_METHOD0(send, void()); }; CreateImageRequest<librbd::MockTestImageCtx>* CreateImageRequest<librbd::MockTestImageCtx>::s_instance = nullptr; namespace journal { template<> struct StateBuilder<librbd::MockTestImageCtx> { std::string local_image_id; std::string remote_mirror_uuid; ::journal::MockJournalerProxy* remote_journaler = nullptr; cls::journal::ClientState remote_client_state; librbd::journal::MirrorPeerClientMeta remote_client_meta; }; } // namespace journal } // namespace image_replayer } // namespace mirror } // namespace rbd #include "tools/rbd_mirror/image_replayer/journal/CreateLocalImageRequest.cc" using ::testing::_; using ::testing::InSequence; using ::testing::Invoke; using ::testing::WithArg; namespace rbd { namespace mirror { namespace image_replayer { namespace journal { class TestMockImageReplayerJournalCreateLocalImageRequest : public TestMockFixture { public: typedef CreateLocalImageRequest<librbd::MockTestImageCtx> MockCreateLocalImageRequest; typedef Threads<librbd::MockTestImageCtx> MockThreads; typedef CreateImageRequest<librbd::MockTestImageCtx> MockCreateImageRequest; typedef StateBuilder<librbd::MockTestImageCtx> MockStateBuilder; void SetUp() override { TestMockFixture::SetUp(); librbd::RBD rbd; ASSERT_EQ(0, create_image(rbd, m_remote_io_ctx, m_image_name, m_image_size)); ASSERT_EQ(0, open_image(m_remote_io_ctx, m_image_name, &m_remote_image_ctx)); m_mock_remote_image_ctx = new librbd::MockTestImageCtx(*m_remote_image_ctx); } void TearDown() override { delete m_mock_remote_image_ctx; TestMockFixture::TearDown(); } void expect_journaler_register_client( ::journal::MockJournaler& mock_journaler, const librbd::journal::ClientData& client_data, int r) { bufferlist bl; encode(client_data, bl); 
EXPECT_CALL(mock_journaler, register_client(ContentsEqual(bl), _)) .WillOnce(WithArg<1>(Invoke([this, r](Context *on_finish) { m_threads->work_queue->queue(on_finish, r); }))); } void expect_journaler_unregister_client( ::journal::MockJournaler& mock_journaler, int r) { EXPECT_CALL(mock_journaler, unregister_client(_)) .WillOnce(Invoke([this, r](Context *on_finish) { m_threads->work_queue->queue(on_finish, r); })); } void expect_journaler_update_client( ::journal::MockJournaler& mock_journaler, const librbd::journal::ClientData& client_data, int r) { bufferlist bl; encode(client_data, bl); EXPECT_CALL(mock_journaler, update_client(ContentsEqual(bl), _)) .WillOnce(WithArg<1>(Invoke([this, r](Context *on_finish) { m_threads->work_queue->queue(on_finish, r); }))); } void expect_create_image(MockCreateImageRequest& mock_create_image_request, const std::string& image_id, int r) { EXPECT_CALL(mock_create_image_request, construct(image_id)); EXPECT_CALL(mock_create_image_request, send()) .WillOnce(Invoke([this, &mock_create_image_request, r]() { m_threads->work_queue->queue(mock_create_image_request.on_finish, r); })); } MockCreateLocalImageRequest* create_request( MockThreads& mock_threads, MockStateBuilder& mock_state_builder, const std::string& global_image_id, Context* on_finish) { return new MockCreateLocalImageRequest( &mock_threads, m_local_io_ctx, m_mock_remote_image_ctx, global_image_id, nullptr, nullptr, &mock_state_builder, on_finish); } librbd::ImageCtx *m_remote_image_ctx; librbd::MockTestImageCtx *m_mock_remote_image_ctx = nullptr; }; TEST_F(TestMockImageReplayerJournalCreateLocalImageRequest, Success) { InSequence seq; // re-register the client ::journal::MockJournaler mock_journaler; expect_journaler_unregister_client(mock_journaler, 0); librbd::journal::MirrorPeerClientMeta mirror_peer_client_meta; librbd::util::s_image_id = "local image id"; mirror_peer_client_meta.image_id = "local image id"; mirror_peer_client_meta.state = 
librbd::journal::MIRROR_PEER_STATE_SYNCING; librbd::journal::ClientData client_data; client_data.client_meta = mirror_peer_client_meta; expect_journaler_register_client(mock_journaler, client_data, 0); // create the missing local image MockCreateImageRequest mock_create_image_request; expect_create_image(mock_create_image_request, "local image id", 0); C_SaferCond ctx; MockThreads mock_threads; MockStateBuilder mock_state_builder; auto request = create_request( mock_threads, mock_state_builder, "global image id", &ctx); request->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_EQ("local image id", mock_state_builder.local_image_id); ASSERT_EQ("local image id", mock_state_builder.remote_client_meta.image_id); ASSERT_EQ(librbd::journal::MIRROR_PEER_STATE_SYNCING, mock_state_builder.remote_client_meta.state); } TEST_F(TestMockImageReplayerJournalCreateLocalImageRequest, UnregisterError) { InSequence seq; // re-register the client ::journal::MockJournaler mock_journaler; expect_journaler_unregister_client(mock_journaler, -EINVAL); C_SaferCond ctx; MockThreads mock_threads; MockStateBuilder mock_state_builder; auto request = create_request( mock_threads, mock_state_builder, "global image id", &ctx); request->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerJournalCreateLocalImageRequest, RegisterError) { InSequence seq; // re-register the client ::journal::MockJournaler mock_journaler; expect_journaler_unregister_client(mock_journaler, 0); librbd::journal::MirrorPeerClientMeta mirror_peer_client_meta; librbd::util::s_image_id = "local image id"; mirror_peer_client_meta.image_id = "local image id"; mirror_peer_client_meta.state = librbd::journal::MIRROR_PEER_STATE_SYNCING; librbd::journal::ClientData client_data; client_data.client_meta = mirror_peer_client_meta; expect_journaler_register_client(mock_journaler, client_data, -EINVAL); C_SaferCond ctx; MockThreads mock_threads; MockStateBuilder mock_state_builder; auto request = create_request( mock_threads, 
mock_state_builder, "global image id", &ctx); request->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerJournalCreateLocalImageRequest, CreateImageError) { InSequence seq; // re-register the client ::journal::MockJournaler mock_journaler; expect_journaler_unregister_client(mock_journaler, 0); librbd::journal::MirrorPeerClientMeta mirror_peer_client_meta; librbd::util::s_image_id = "local image id"; mirror_peer_client_meta.image_id = "local image id"; mirror_peer_client_meta.state = librbd::journal::MIRROR_PEER_STATE_SYNCING; librbd::journal::ClientData client_data; client_data.client_meta = mirror_peer_client_meta; expect_journaler_register_client(mock_journaler, client_data, 0); // create the missing local image MockCreateImageRequest mock_create_image_request; expect_create_image(mock_create_image_request, "local image id", -EINVAL); C_SaferCond ctx; MockThreads mock_threads; MockStateBuilder mock_state_builder; auto request = create_request( mock_threads, mock_state_builder, "global image id", &ctx); request->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerJournalCreateLocalImageRequest, CreateImageDuplicate) { InSequence seq; // re-register the client ::journal::MockJournaler mock_journaler; expect_journaler_unregister_client(mock_journaler, 0); librbd::journal::MirrorPeerClientMeta mirror_peer_client_meta; librbd::util::s_image_id = "local image id"; mirror_peer_client_meta.image_id = "local image id"; mirror_peer_client_meta.state = librbd::journal::MIRROR_PEER_STATE_SYNCING; librbd::journal::ClientData client_data; client_data.client_meta = mirror_peer_client_meta; expect_journaler_register_client(mock_journaler, client_data, 0); // create the missing local image MockCreateImageRequest mock_create_image_request; expect_create_image(mock_create_image_request, "local image id", -EBADF); // re-register the client expect_journaler_unregister_client(mock_journaler, 0); expect_journaler_register_client(mock_journaler, 
client_data, 0); // re-create the local image expect_create_image(mock_create_image_request, "local image id", 0); C_SaferCond ctx; MockThreads mock_threads; MockStateBuilder mock_state_builder; auto request = create_request( mock_threads, mock_state_builder, "global image id", &ctx); request->send(); ASSERT_EQ(0, ctx.wait()); } } // namespace journal } // namespace image_replayer } // namespace mirror } // namespace rbd
11,447
32.473684
88
cc
null
ceph-main/src/test/rbd_mirror/image_replayer/journal/test_mock_EventPreprocessor.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/rbd_mirror/test_mock_fixture.h" #include "librbd/journal/Types.h" #include "librbd/journal/TypeTraits.h" #include "tools/rbd_mirror/Threads.h" #include "tools/rbd_mirror/image_replayer/journal/EventPreprocessor.h" #include "test/journal/mock/MockJournaler.h" #include "test/librbd/mock/MockImageCtx.h" namespace librbd { namespace { struct MockTestImageCtx : public librbd::MockImageCtx { explicit MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace namespace journal { template <> struct TypeTraits<librbd::MockTestImageCtx> { typedef ::journal::MockJournaler Journaler; }; } // namespace journal } // namespace librbd // template definitions #include "tools/rbd_mirror/image_replayer/journal/EventPreprocessor.cc" namespace rbd { namespace mirror { namespace image_replayer { namespace journal { using testing::_; using testing::WithArg; class TestMockImageReplayerJournalEventPreprocessor : public TestMockFixture { public: typedef EventPreprocessor<librbd::MockTestImageCtx> MockEventPreprocessor; void SetUp() override { TestMockFixture::SetUp(); librbd::RBD rbd; ASSERT_EQ(0, create_image(rbd, m_local_io_ctx, m_image_name, m_image_size)); ASSERT_EQ(0, open_image(m_local_io_ctx, m_image_name, &m_local_image_ctx)); } void expect_image_refresh(librbd::MockTestImageCtx &mock_remote_image_ctx, int r) { EXPECT_CALL(*mock_remote_image_ctx.state, refresh(_)) .WillOnce(CompleteContext(r)); } void expect_update_client(::journal::MockJournaler &mock_journaler, int r) { EXPECT_CALL(mock_journaler, update_client(_, _)) .WillOnce(WithArg<1>(CompleteContext(r))); } librbd::ImageCtx *m_local_image_ctx; librbd::journal::MirrorPeerClientMeta m_client_meta; }; TEST_F(TestMockImageReplayerJournalEventPreprocessor, IsNotRequired) { librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); ::journal::MockJournaler 
mock_remote_journaler; MockEventPreprocessor event_preprocessor(mock_local_image_ctx, mock_remote_journaler, "local mirror uuid", &m_client_meta, m_threads->work_queue); librbd::journal::EventEntry event_entry{librbd::journal::RenameEvent{}}; ASSERT_FALSE(event_preprocessor.is_required(event_entry)); } TEST_F(TestMockImageReplayerJournalEventPreprocessor, IsRequiredSnapMapPrune) { librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); ::journal::MockJournaler mock_remote_journaler; m_client_meta.snap_seqs = {{1, 2}, {3, 4}}; MockEventPreprocessor event_preprocessor(mock_local_image_ctx, mock_remote_journaler, "local mirror uuid", &m_client_meta, m_threads->work_queue); librbd::journal::EventEntry event_entry{librbd::journal::RenameEvent{}}; ASSERT_TRUE(event_preprocessor.is_required(event_entry)); } TEST_F(TestMockImageReplayerJournalEventPreprocessor, IsRequiredSnapRename) { librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); ::journal::MockJournaler mock_remote_journaler; MockEventPreprocessor event_preprocessor(mock_local_image_ctx, mock_remote_journaler, "local mirror uuid", &m_client_meta, m_threads->work_queue); librbd::journal::EventEntry event_entry{librbd::journal::SnapRenameEvent{}}; ASSERT_TRUE(event_preprocessor.is_required(event_entry)); } TEST_F(TestMockImageReplayerJournalEventPreprocessor, PreprocessSnapMapPrune) { librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); ::journal::MockJournaler mock_remote_journaler; expect_image_refresh(mock_local_image_ctx, 0); expect_update_client(mock_remote_journaler, 0); mock_local_image_ctx.snap_info = { {6, librbd::SnapInfo{"snap", cls::rbd::UserSnapshotNamespace(), 0U, {}, 0U, 0U, utime_t()}}}; m_client_meta.snap_seqs = {{1, 2}, {3, 4}, {5, 6}}; MockEventPreprocessor event_preprocessor(mock_local_image_ctx, mock_remote_journaler, "local mirror uuid", &m_client_meta, m_threads->work_queue); librbd::journal::EventEntry event_entry{librbd::journal::RenameEvent{}}; 
C_SaferCond ctx; event_preprocessor.preprocess(&event_entry, &ctx); ASSERT_EQ(0, ctx.wait()); librbd::SnapSeqs expected_snap_seqs = {{5, 6}}; ASSERT_EQ(expected_snap_seqs, m_client_meta.snap_seqs); } TEST_F(TestMockImageReplayerJournalEventPreprocessor, PreprocessSnapRename) { librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); ::journal::MockJournaler mock_remote_journaler; expect_image_refresh(mock_local_image_ctx, 0); expect_update_client(mock_remote_journaler, 0); mock_local_image_ctx.snap_ids = {{{cls::rbd::UserSnapshotNamespace(), "snap"}, 6}}; mock_local_image_ctx.snap_info = { {6, librbd::SnapInfo{"snap", cls::rbd::UserSnapshotNamespace(), 0U, {}, 0U, 0U, utime_t()}}}; MockEventPreprocessor event_preprocessor(mock_local_image_ctx, mock_remote_journaler, "local mirror uuid", &m_client_meta, m_threads->work_queue); librbd::journal::EventEntry event_entry{ librbd::journal::SnapRenameEvent{0, 5, "snap", "new_snap"}}; C_SaferCond ctx; event_preprocessor.preprocess(&event_entry, &ctx); ASSERT_EQ(0, ctx.wait()); librbd::SnapSeqs expected_snap_seqs = {{5, 6}}; ASSERT_EQ(expected_snap_seqs, m_client_meta.snap_seqs); librbd::journal::SnapRenameEvent *event = boost::get<librbd::journal::SnapRenameEvent>(&event_entry.event); ASSERT_EQ(6U, event->snap_id); } TEST_F(TestMockImageReplayerJournalEventPreprocessor, PreprocessSnapRenameMissing) { librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); ::journal::MockJournaler mock_remote_journaler; expect_image_refresh(mock_local_image_ctx, 0); MockEventPreprocessor event_preprocessor(mock_local_image_ctx, mock_remote_journaler, "local mirror uuid", &m_client_meta, m_threads->work_queue); librbd::journal::EventEntry event_entry{ librbd::journal::SnapRenameEvent{0, 5, "snap", "new_snap"}}; C_SaferCond ctx; event_preprocessor.preprocess(&event_entry, &ctx); ASSERT_EQ(-ENOENT, ctx.wait()); librbd::journal::SnapRenameEvent *event = boost::get<librbd::journal::SnapRenameEvent>(&event_entry.event); 
ASSERT_EQ(CEPH_NOSNAP, event->snap_id); } TEST_F(TestMockImageReplayerJournalEventPreprocessor, PreprocessSnapRenameKnown) { librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); ::journal::MockJournaler mock_remote_journaler; expect_image_refresh(mock_local_image_ctx, 0); mock_local_image_ctx.snap_info = { {6, librbd::SnapInfo{"snap", cls::rbd::UserSnapshotNamespace(), 0U, {}, 0U, 0U, utime_t()}}}; m_client_meta.snap_seqs = {{5, 6}}; MockEventPreprocessor event_preprocessor(mock_local_image_ctx, mock_remote_journaler, "local mirror uuid", &m_client_meta, m_threads->work_queue); librbd::journal::EventEntry event_entry{ librbd::journal::SnapRenameEvent{0, 5, "snap", "new_snap"}}; C_SaferCond ctx; event_preprocessor.preprocess(&event_entry, &ctx); ASSERT_EQ(0, ctx.wait()); librbd::SnapSeqs expected_snap_seqs = {{5, 6}}; ASSERT_EQ(expected_snap_seqs, m_client_meta.snap_seqs); librbd::journal::SnapRenameEvent *event = boost::get<librbd::journal::SnapRenameEvent>(&event_entry.event); ASSERT_EQ(6U, event->snap_id); } TEST_F(TestMockImageReplayerJournalEventPreprocessor, PreprocessRefreshError) { librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); ::journal::MockJournaler mock_remote_journaler; expect_image_refresh(mock_local_image_ctx, -EINVAL); MockEventPreprocessor event_preprocessor(mock_local_image_ctx, mock_remote_journaler, "local mirror uuid", &m_client_meta, m_threads->work_queue); librbd::journal::EventEntry event_entry{librbd::journal::RenameEvent{}}; C_SaferCond ctx; event_preprocessor.preprocess(&event_entry, &ctx); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerJournalEventPreprocessor, PreprocessClientUpdateError) { librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); ::journal::MockJournaler mock_remote_journaler; expect_image_refresh(mock_local_image_ctx, 0); expect_update_client(mock_remote_journaler, -EINVAL); mock_local_image_ctx.snap_ids = {{{cls::rbd::UserSnapshotNamespace(), "snap"}, 6}}; 
mock_local_image_ctx.snap_info = { {6, librbd::SnapInfo{"snap", cls::rbd::UserSnapshotNamespace(), 0U, {}, 0U, 0U, utime_t()}}}; MockEventPreprocessor event_preprocessor(mock_local_image_ctx, mock_remote_journaler, "local mirror uuid", &m_client_meta, m_threads->work_queue); librbd::journal::EventEntry event_entry{ librbd::journal::SnapRenameEvent{0, 5, "snap", "new_snap"}}; C_SaferCond ctx; event_preprocessor.preprocess(&event_entry, &ctx); ASSERT_EQ(-EINVAL, ctx.wait()); } } // namespace journal } // namespace image_replayer } // namespace mirror } // namespace rbd
10,436
38.089888
97
cc
null
ceph-main/src/test/rbd_mirror/image_replayer/journal/test_mock_PrepareReplayRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/rbd_mirror/test_mock_fixture.h" #include "librbd/journal/Types.h" #include "librbd/journal/TypeTraits.h" #include "tools/rbd_mirror/Threads.h" #include "tools/rbd_mirror/image_replayer/journal/PrepareReplayRequest.h" #include "tools/rbd_mirror/image_replayer/journal/StateBuilder.h" #include "test/journal/mock/MockJournaler.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/librbd/mock/MockJournal.h" namespace librbd { namespace { struct MockTestImageCtx : public librbd::MockImageCtx { MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace namespace journal { template <> struct TypeTraits<librbd::MockTestImageCtx> { typedef ::journal::MockJournaler Journaler; }; } // namespace journal } // namespace rbd namespace rbd { namespace mirror { namespace image_replayer { namespace journal { template<> struct StateBuilder<librbd::MockTestImageCtx> { StateBuilder(librbd::MockTestImageCtx& local_image_ctx, ::journal::MockJournaler& remote_journaler, const librbd::journal::MirrorPeerClientMeta& remote_client_meta) : local_image_ctx(&local_image_ctx), local_image_id(local_image_ctx.id), remote_journaler(&remote_journaler), remote_client_meta(remote_client_meta) { } librbd::MockTestImageCtx* local_image_ctx; std::string local_image_id; std::string remote_mirror_uuid = "remote mirror uuid"; ::journal::MockJournaler* remote_journaler = nullptr; librbd::journal::MirrorPeerClientMeta remote_client_meta; }; } // namespace journal } // namespace image_replayer } // namespace mirror } // namespace rbd // template definitions #include "tools/rbd_mirror/image_replayer/journal/PrepareReplayRequest.cc" namespace rbd { namespace mirror { namespace image_replayer { namespace journal { using ::testing::_; using ::testing::DoAll; using ::testing::InSequence; using ::testing::Invoke; using ::testing::Return; using 
::testing::SetArgPointee; using ::testing::StrEq; using ::testing::WithArg; class TestMockImageReplayerJournalPrepareReplayRequest : public TestMockFixture { public: typedef PrepareReplayRequest<librbd::MockTestImageCtx> MockPrepareReplayRequest; typedef StateBuilder<librbd::MockTestImageCtx> MockStateBuilder; typedef std::list<cls::journal::Tag> Tags; void SetUp() override { TestMockFixture::SetUp(); librbd::RBD rbd; ASSERT_EQ(0, create_image(rbd, m_local_io_ctx, m_image_name, m_image_size)); ASSERT_EQ(0, open_image(m_local_io_ctx, m_image_name, &m_local_image_ctx)); } void expect_journaler_get_client(::journal::MockJournaler &mock_journaler, const std::string &client_id, cls::journal::Client &client, int r) { EXPECT_CALL(mock_journaler, get_client(StrEq(client_id), _, _)) .WillOnce(DoAll(WithArg<1>(Invoke([client](cls::journal::Client *out_client) { *out_client = client; })), WithArg<2>(Invoke([this, r](Context *on_finish) { m_threads->work_queue->queue(on_finish, r); })))); } void expect_journaler_update_client(::journal::MockJournaler &mock_journaler, const librbd::journal::ClientData &client_data, int r) { bufferlist bl; encode(client_data, bl); EXPECT_CALL(mock_journaler, update_client(ContentsEqual(bl), _)) .WillOnce(WithArg<1>(Invoke([this, r](Context *on_finish) { m_threads->work_queue->queue(on_finish, r); }))); } void expect_journaler_get_tags(::journal::MockJournaler &mock_journaler, uint64_t tag_class, const Tags& tags, int r) { EXPECT_CALL(mock_journaler, get_tags(tag_class, _, _)) .WillOnce(DoAll(WithArg<1>(Invoke([tags](Tags *out_tags) { *out_tags = tags; })), WithArg<2>(Invoke([this, r](Context *on_finish) { m_threads->work_queue->queue(on_finish, r); })))); } void expect_journal_get_tag_tid(librbd::MockJournal &mock_journal, uint64_t tag_tid) { EXPECT_CALL(mock_journal, get_tag_tid()).WillOnce(Return(tag_tid)); } void expect_journal_get_tag_data(librbd::MockJournal &mock_journal, const librbd::journal::TagData &tag_data) { 
EXPECT_CALL(mock_journal, get_tag_data()).WillOnce(Return(tag_data)); } void expect_is_resync_requested(librbd::MockJournal &mock_journal, bool do_resync, int r) { EXPECT_CALL(mock_journal, is_resync_requested(_)) .WillOnce(DoAll(SetArgPointee<0>(do_resync), Return(r))); } bufferlist encode_tag_data(const librbd::journal::TagData &tag_data) { bufferlist bl; encode(tag_data, bl); return bl; } MockPrepareReplayRequest* create_request( MockStateBuilder& mock_state_builder, const std::string& local_mirror_uuid, bool* resync_requested, bool* syncing, Context* on_finish) { return new MockPrepareReplayRequest( local_mirror_uuid, nullptr, &mock_state_builder, resync_requested, syncing, on_finish); } librbd::ImageCtx *m_local_image_ctx = nullptr; }; TEST_F(TestMockImageReplayerJournalPrepareReplayRequest, Success) { InSequence seq; librbd::MockJournal mock_journal; librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); mock_local_image_ctx.journal = &mock_journal; // check initial state expect_is_resync_requested(mock_journal, false, 0); expect_journal_get_tag_tid(mock_journal, 345); expect_journal_get_tag_data(mock_journal, {"remote mirror uuid"}); // lookup remote image tag class librbd::journal::ClientData client_data{ librbd::journal::ImageClientMeta{123}}; cls::journal::Client client; encode(client_data, client.data); ::journal::MockJournaler mock_remote_journaler; expect_journaler_get_client(mock_remote_journaler, librbd::Journal<>::IMAGE_CLIENT_ID, client, 0); // single promotion event Tags tags = { {2, 123, encode_tag_data({librbd::Journal<>::LOCAL_MIRROR_UUID, librbd::Journal<>::LOCAL_MIRROR_UUID, true, 344, 99})}, }; expect_journaler_get_tags(mock_remote_journaler, 123, tags, 0); C_SaferCond ctx; librbd::journal::MirrorPeerClientMeta mirror_peer_client_meta; mirror_peer_client_meta.state = librbd::journal::MIRROR_PEER_STATE_REPLAYING; mirror_peer_client_meta.image_id = mock_local_image_ctx.id; MockStateBuilder mock_state_builder(mock_local_image_ctx, 
mock_remote_journaler, mirror_peer_client_meta); bool resync_requested; bool syncing; auto request = create_request(mock_state_builder, "local mirror uuid", &resync_requested, &syncing, &ctx); request->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_FALSE(resync_requested); ASSERT_FALSE(syncing); } TEST_F(TestMockImageReplayerJournalPrepareReplayRequest, NoLocalJournal) { InSequence seq; librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); C_SaferCond ctx; ::journal::MockJournaler mock_remote_journaler; librbd::journal::MirrorPeerClientMeta mirror_peer_client_meta; mirror_peer_client_meta.state = librbd::journal::MIRROR_PEER_STATE_REPLAYING; mirror_peer_client_meta.image_id = mock_local_image_ctx.id; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_journaler, mirror_peer_client_meta); bool resync_requested; bool syncing; auto request = create_request(mock_state_builder, "local mirror uuid", &resync_requested, &syncing, &ctx); request->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerJournalPrepareReplayRequest, ResyncRequested) { InSequence seq; librbd::MockJournal mock_journal; librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); mock_local_image_ctx.journal = &mock_journal; // check initial state expect_is_resync_requested(mock_journal, true, 0); expect_journal_get_tag_tid(mock_journal, 345); expect_journal_get_tag_data(mock_journal, {"remote mirror uuid"}); C_SaferCond ctx; ::journal::MockJournaler mock_remote_journaler; librbd::journal::MirrorPeerClientMeta mirror_peer_client_meta; mirror_peer_client_meta.state = librbd::journal::MIRROR_PEER_STATE_REPLAYING; mirror_peer_client_meta.image_id = mock_local_image_ctx.id; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_journaler, mirror_peer_client_meta); bool resync_requested; bool syncing; auto request = create_request(mock_state_builder, "local mirror uuid", &resync_requested, &syncing, &ctx); request->send(); ASSERT_EQ(0, 
ctx.wait()); ASSERT_TRUE(resync_requested); ASSERT_FALSE(syncing); } TEST_F(TestMockImageReplayerJournalPrepareReplayRequest, ResyncRequestedError) { InSequence seq; librbd::MockJournal mock_journal; librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); mock_local_image_ctx.journal = &mock_journal; // check initial state expect_is_resync_requested(mock_journal, false, -EINVAL); C_SaferCond ctx; ::journal::MockJournaler mock_remote_journaler; librbd::journal::MirrorPeerClientMeta mirror_peer_client_meta; mirror_peer_client_meta.state = librbd::journal::MIRROR_PEER_STATE_REPLAYING; mirror_peer_client_meta.image_id = mock_local_image_ctx.id; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_journaler, mirror_peer_client_meta); bool resync_requested; bool syncing; auto request = create_request(mock_state_builder, "local mirror uuid", &resync_requested, &syncing, &ctx); request->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerJournalPrepareReplayRequest, Syncing) { InSequence seq; librbd::MockJournal mock_journal; librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); mock_local_image_ctx.journal = &mock_journal; // check initial state expect_is_resync_requested(mock_journal, false, 0); expect_journal_get_tag_tid(mock_journal, 345); expect_journal_get_tag_data(mock_journal, {"remote mirror uuid"}); C_SaferCond ctx; ::journal::MockJournaler mock_remote_journaler; librbd::journal::MirrorPeerClientMeta mirror_peer_client_meta; mirror_peer_client_meta.state = librbd::journal::MIRROR_PEER_STATE_SYNCING; mirror_peer_client_meta.image_id = mock_local_image_ctx.id; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_journaler, mirror_peer_client_meta); bool resync_requested; bool syncing; auto request = create_request(mock_state_builder, "local mirror uuid", &resync_requested, &syncing, &ctx); request->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_FALSE(resync_requested); ASSERT_TRUE(syncing); } 
TEST_F(TestMockImageReplayerJournalPrepareReplayRequest, GetRemoteTagClassError) { InSequence seq; librbd::MockJournal mock_journal; librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); mock_local_image_ctx.journal = &mock_journal; // check initial state expect_is_resync_requested(mock_journal, false, 0); expect_journal_get_tag_tid(mock_journal, 345); expect_journal_get_tag_data(mock_journal, {"remote mirror uuid"}); // lookup remote image tag class librbd::journal::ClientData client_data{ librbd::journal::ImageClientMeta{123}}; cls::journal::Client client; encode(client_data, client.data); ::journal::MockJournaler mock_remote_journaler; expect_journaler_get_client(mock_remote_journaler, librbd::Journal<>::IMAGE_CLIENT_ID, client, -EINVAL); C_SaferCond ctx; librbd::journal::MirrorPeerClientMeta mirror_peer_client_meta; mirror_peer_client_meta.state = librbd::journal::MIRROR_PEER_STATE_REPLAYING; mirror_peer_client_meta.image_id = mock_local_image_ctx.id; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_journaler, mirror_peer_client_meta); bool resync_requested; bool syncing; auto request = create_request(mock_state_builder, "local mirror uuid", &resync_requested, &syncing, &ctx); request->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerJournalPrepareReplayRequest, GetRemoteTagsError) { InSequence seq; librbd::MockJournal mock_journal; librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); mock_local_image_ctx.journal = &mock_journal; // check initial state expect_is_resync_requested(mock_journal, false, 0); expect_journal_get_tag_tid(mock_journal, 345); expect_journal_get_tag_data(mock_journal, {"remote mirror uuid"}); // lookup remote image tag class librbd::journal::ClientData client_data{ librbd::journal::ImageClientMeta{123}}; cls::journal::Client client; encode(client_data, client.data); ::journal::MockJournaler mock_remote_journaler; expect_journaler_get_client(mock_remote_journaler, 
librbd::Journal<>::IMAGE_CLIENT_ID, client, 0); // single promotion event Tags tags = { {2, 123, encode_tag_data({librbd::Journal<>::LOCAL_MIRROR_UUID, librbd::Journal<>::LOCAL_MIRROR_UUID, true, 344, 99})}, }; expect_journaler_get_tags(mock_remote_journaler, 123, tags, -EINVAL); C_SaferCond ctx; librbd::journal::MirrorPeerClientMeta mirror_peer_client_meta; mirror_peer_client_meta.state = librbd::journal::MIRROR_PEER_STATE_REPLAYING; mirror_peer_client_meta.image_id = mock_local_image_ctx.id; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_journaler, mirror_peer_client_meta); bool resync_requested; bool syncing; auto request = create_request(mock_state_builder, "local mirror uuid", &resync_requested, &syncing, &ctx); request->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerJournalPrepareReplayRequest, LocalDemotedRemoteSyncingState) { InSequence seq; librbd::MockJournal mock_journal; librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); mock_local_image_ctx.journal = &mock_journal; // check initial state expect_is_resync_requested(mock_journal, false, 0); expect_journal_get_tag_tid(mock_journal, 345); expect_journal_get_tag_data(mock_journal, {librbd::Journal<>::ORPHAN_MIRROR_UUID, "remote mirror uuid", true, 4, 1}); // update client state librbd::journal::MirrorPeerClientMeta mirror_peer_client_meta{ mock_local_image_ctx.id}; mirror_peer_client_meta.state = librbd::journal::MIRROR_PEER_STATE_REPLAYING; librbd::journal::ClientData client_data; client_data.client_meta = mirror_peer_client_meta; ::journal::MockJournaler mock_remote_journaler; expect_journaler_update_client(mock_remote_journaler, client_data, 0); // lookup remote image tag class client_data = {librbd::journal::ImageClientMeta{123}}; cls::journal::Client client; encode(client_data, client.data); expect_journaler_get_client(mock_remote_journaler, librbd::Journal<>::IMAGE_CLIENT_ID, client, 0); // remote demotion / promotion event Tags tags = { 
{2, 123, encode_tag_data({librbd::Journal<>::LOCAL_MIRROR_UUID, librbd::Journal<>::LOCAL_MIRROR_UUID, true, 1, 99})}, {3, 123, encode_tag_data({librbd::Journal<>::ORPHAN_MIRROR_UUID, librbd::Journal<>::LOCAL_MIRROR_UUID, true, 2, 1})}, {4, 123, encode_tag_data({librbd::Journal<>::LOCAL_MIRROR_UUID, librbd::Journal<>::ORPHAN_MIRROR_UUID, true, 3, 1})}, {5, 123, encode_tag_data({librbd::Journal<>::ORPHAN_MIRROR_UUID, librbd::Journal<>::LOCAL_MIRROR_UUID, true, 4, 1})}, {6, 123, encode_tag_data({librbd::Journal<>::LOCAL_MIRROR_UUID, librbd::Journal<>::ORPHAN_MIRROR_UUID, true, 5, 1})}, {7, 123, encode_tag_data({librbd::Journal<>::ORPHAN_MIRROR_UUID, librbd::Journal<>::LOCAL_MIRROR_UUID, true, 6, 1})}, {8, 123, encode_tag_data({librbd::Journal<>::LOCAL_MIRROR_UUID, librbd::Journal<>::ORPHAN_MIRROR_UUID, true, 7, 1})} }; expect_journaler_get_tags(mock_remote_journaler, 123, tags, 0); C_SaferCond ctx; mirror_peer_client_meta.state = librbd::journal::MIRROR_PEER_STATE_SYNCING; mirror_peer_client_meta.image_id = mock_local_image_ctx.id; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_journaler, mirror_peer_client_meta); bool resync_requested; bool syncing; auto request = create_request(mock_state_builder, "local mirror uuid", &resync_requested, &syncing, &ctx); request->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_FALSE(resync_requested); ASSERT_FALSE(syncing); } TEST_F(TestMockImageReplayerJournalPrepareReplayRequest, UpdateClientError) { InSequence seq; librbd::MockJournal mock_journal; librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); mock_local_image_ctx.journal = &mock_journal; // check initial state expect_is_resync_requested(mock_journal, false, 0); expect_journal_get_tag_tid(mock_journal, 345); expect_journal_get_tag_data(mock_journal, {"remote mirror uuid"}); // lookup remote image tag class librbd::journal::ClientData client_data{ librbd::journal::ImageClientMeta{123}}; cls::journal::Client client; encode(client_data, 
client.data); ::journal::MockJournaler mock_remote_journaler; expect_journaler_get_client(mock_remote_journaler, librbd::Journal<>::IMAGE_CLIENT_ID, client, 0); // single promotion event Tags tags = { {2, 123, encode_tag_data({librbd::Journal<>::LOCAL_MIRROR_UUID, librbd::Journal<>::LOCAL_MIRROR_UUID, true, 344, 99})}, }; expect_journaler_get_tags(mock_remote_journaler, 123, tags, 0); C_SaferCond ctx; librbd::journal::MirrorPeerClientMeta mirror_peer_client_meta; mirror_peer_client_meta.state = librbd::journal::MIRROR_PEER_STATE_REPLAYING; mirror_peer_client_meta.image_id = mock_local_image_ctx.id; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_journaler, mirror_peer_client_meta); bool resync_requested; bool syncing; auto request = create_request(mock_state_builder, "local mirror uuid", &resync_requested, &syncing, &ctx); request->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_FALSE(resync_requested); ASSERT_FALSE(syncing); } TEST_F(TestMockImageReplayerJournalPrepareReplayRequest, RemoteDemotePromote) { InSequence seq; librbd::MockJournal mock_journal; librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); mock_local_image_ctx.journal = &mock_journal; // check initial state expect_is_resync_requested(mock_journal, false, 0); expect_journal_get_tag_tid(mock_journal, 345); expect_journal_get_tag_data(mock_journal, {"remote mirror uuid"}); // lookup remote image tag class librbd::journal::ClientData client_data{ librbd::journal::ImageClientMeta{123}}; cls::journal::Client client; encode(client_data, client.data); ::journal::MockJournaler mock_remote_journaler; expect_journaler_get_client(mock_remote_journaler, librbd::Journal<>::IMAGE_CLIENT_ID, client, 0); // remote demotion / promotion event Tags tags = { {2, 123, encode_tag_data({librbd::Journal<>::LOCAL_MIRROR_UUID, librbd::Journal<>::LOCAL_MIRROR_UUID, true, 1, 99})}, {3, 123, encode_tag_data({librbd::Journal<>::ORPHAN_MIRROR_UUID, librbd::Journal<>::LOCAL_MIRROR_UUID, true, 2, 1})}, 
{4, 123, encode_tag_data({librbd::Journal<>::LOCAL_MIRROR_UUID, librbd::Journal<>::ORPHAN_MIRROR_UUID, true, 2, 1})}, {5, 123, encode_tag_data({librbd::Journal<>::LOCAL_MIRROR_UUID, librbd::Journal<>::ORPHAN_MIRROR_UUID, true, 4, 369})} }; expect_journaler_get_tags(mock_remote_journaler, 123, tags, 0); C_SaferCond ctx; librbd::journal::MirrorPeerClientMeta mirror_peer_client_meta; mirror_peer_client_meta.state = librbd::journal::MIRROR_PEER_STATE_REPLAYING; mirror_peer_client_meta.image_id = mock_local_image_ctx.id; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_journaler, mirror_peer_client_meta); bool resync_requested; bool syncing; auto request = create_request(mock_state_builder, "local mirror uuid", &resync_requested, &syncing, &ctx); request->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_FALSE(resync_requested); ASSERT_FALSE(syncing); } TEST_F(TestMockImageReplayerJournalPrepareReplayRequest, MultipleRemoteDemotePromotes) { InSequence seq; librbd::MockJournal mock_journal; librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); mock_local_image_ctx.journal = &mock_journal; // check initial state expect_is_resync_requested(mock_journal, false, 0); expect_journal_get_tag_tid(mock_journal, 345); expect_journal_get_tag_data(mock_journal, {librbd::Journal<>::ORPHAN_MIRROR_UUID, "remote mirror uuid", true, 4, 1}); // lookup remote image tag class librbd::journal::ClientData client_data{ librbd::journal::ImageClientMeta{123}}; cls::journal::Client client; encode(client_data, client.data); ::journal::MockJournaler mock_remote_journaler; expect_journaler_get_client(mock_remote_journaler, librbd::Journal<>::IMAGE_CLIENT_ID, client, 0); // remote demotion / promotion event Tags tags = { {2, 123, encode_tag_data({librbd::Journal<>::LOCAL_MIRROR_UUID, librbd::Journal<>::LOCAL_MIRROR_UUID, true, 1, 99})}, {3, 123, encode_tag_data({librbd::Journal<>::ORPHAN_MIRROR_UUID, librbd::Journal<>::LOCAL_MIRROR_UUID, true, 2, 1})}, {4, 123, 
encode_tag_data({librbd::Journal<>::LOCAL_MIRROR_UUID, librbd::Journal<>::ORPHAN_MIRROR_UUID, true, 3, 1})}, {5, 123, encode_tag_data({librbd::Journal<>::ORPHAN_MIRROR_UUID, librbd::Journal<>::LOCAL_MIRROR_UUID, true, 4, 1})}, {6, 123, encode_tag_data({librbd::Journal<>::LOCAL_MIRROR_UUID, librbd::Journal<>::ORPHAN_MIRROR_UUID, true, 5, 1})}, {7, 123, encode_tag_data({librbd::Journal<>::ORPHAN_MIRROR_UUID, librbd::Journal<>::LOCAL_MIRROR_UUID, true, 6, 1})}, {8, 123, encode_tag_data({librbd::Journal<>::LOCAL_MIRROR_UUID, librbd::Journal<>::ORPHAN_MIRROR_UUID, true, 7, 1})} }; expect_journaler_get_tags(mock_remote_journaler, 123, tags, 0); C_SaferCond ctx; librbd::journal::MirrorPeerClientMeta mirror_peer_client_meta; mirror_peer_client_meta.state = librbd::journal::MIRROR_PEER_STATE_REPLAYING; mirror_peer_client_meta.image_id = mock_local_image_ctx.id; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_journaler, mirror_peer_client_meta); bool resync_requested; bool syncing; auto request = create_request(mock_state_builder, "local mirror uuid", &resync_requested, &syncing, &ctx); request->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_FALSE(resync_requested); ASSERT_FALSE(syncing); } TEST_F(TestMockImageReplayerJournalPrepareReplayRequest, LocalDemoteRemotePromote) { InSequence seq; librbd::MockJournal mock_journal; librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); mock_local_image_ctx.journal = &mock_journal; // check initial state expect_is_resync_requested(mock_journal, false, 0); expect_journal_get_tag_tid(mock_journal, 346); expect_journal_get_tag_data(mock_journal, {librbd::Journal<>::ORPHAN_MIRROR_UUID, librbd::Journal<>::LOCAL_MIRROR_UUID, true, 345, 1}); // lookup remote image tag class librbd::journal::ClientData client_data{ librbd::journal::ImageClientMeta{123}}; cls::journal::Client client; encode(client_data, client.data); ::journal::MockJournaler mock_remote_journaler; expect_journaler_get_client(mock_remote_journaler, 
librbd::Journal<>::IMAGE_CLIENT_ID, client, 0); // remote demotion / promotion event Tags tags = { {2, 123, encode_tag_data({"local mirror uuid", "local mirror uuid", true, 344, 99})}, {3, 123, encode_tag_data({librbd::Journal<>::ORPHAN_MIRROR_UUID, "local mirror uuid", true, 345, 1})}, {4, 123, encode_tag_data({librbd::Journal<>::LOCAL_MIRROR_UUID, librbd::Journal<>::ORPHAN_MIRROR_UUID, true, 3, 1})} }; expect_journaler_get_tags(mock_remote_journaler, 123, tags, 0); C_SaferCond ctx; librbd::journal::MirrorPeerClientMeta mirror_peer_client_meta; mirror_peer_client_meta.state = librbd::journal::MIRROR_PEER_STATE_REPLAYING; mirror_peer_client_meta.image_id = mock_local_image_ctx.id; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_journaler, mirror_peer_client_meta); bool resync_requested; bool syncing; auto request = create_request(mock_state_builder, "local mirror uuid", &resync_requested, &syncing, &ctx); request->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_FALSE(resync_requested); ASSERT_FALSE(syncing); } TEST_F(TestMockImageReplayerJournalPrepareReplayRequest, SplitBrainForcePromote) { InSequence seq; librbd::MockJournal mock_journal; librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx); mock_local_image_ctx.journal = &mock_journal; // check initial state expect_is_resync_requested(mock_journal, false, 0); expect_journal_get_tag_tid(mock_journal, 345); expect_journal_get_tag_data(mock_journal, {librbd::Journal<>::LOCAL_MIRROR_UUID, librbd::Journal<>::ORPHAN_MIRROR_UUID, true, 344, 0}); // lookup remote image tag class librbd::journal::ClientData client_data{ librbd::journal::ImageClientMeta{123}}; cls::journal::Client client; encode(client_data, client.data); ::journal::MockJournaler mock_remote_journaler; expect_journaler_get_client(mock_remote_journaler, librbd::Journal<>::IMAGE_CLIENT_ID, client, 0); // remote demotion / promotion event Tags tags = { {2, 123, encode_tag_data({librbd::Journal<>::LOCAL_MIRROR_UUID, 
librbd::Journal<>::LOCAL_MIRROR_UUID, true, 1, 99})}, {3, 123, encode_tag_data({librbd::Journal<>::ORPHAN_MIRROR_UUID, librbd::Journal<>::LOCAL_MIRROR_UUID, true, 2, 1})} }; expect_journaler_get_tags(mock_remote_journaler, 123, tags, 0); C_SaferCond ctx; librbd::journal::MirrorPeerClientMeta mirror_peer_client_meta; mirror_peer_client_meta.state = librbd::journal::MIRROR_PEER_STATE_REPLAYING; mirror_peer_client_meta.image_id = mock_local_image_ctx.id; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_journaler, mirror_peer_client_meta); bool resync_requested; bool syncing; auto request = create_request(mock_state_builder, "local mirror uuid", &resync_requested, &syncing, &ctx); request->send(); ASSERT_EQ(-EEXIST, ctx.wait()); } } // namespace journal } // namespace image_replayer } // namespace mirror } // namespace rbd
30,401
39.428191
90
cc
null
ceph-main/src/test/rbd_mirror/image_replayer/journal/test_mock_Replayer.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/rbd_mirror/test_mock_fixture.h" #include "librbd/journal/Types.h" #include "librbd/journal/TypeTraits.h" #include "tools/rbd_mirror/Threads.h" #include "tools/rbd_mirror/image_replayer/CloseImageRequest.h" #include "tools/rbd_mirror/image_replayer/ReplayerListener.h" #include "tools/rbd_mirror/image_replayer/Utils.h" #include "tools/rbd_mirror/image_replayer/journal/Replayer.h" #include "tools/rbd_mirror/image_replayer/journal/EventPreprocessor.h" #include "tools/rbd_mirror/image_replayer/journal/ReplayStatusFormatter.h" #include "tools/rbd_mirror/image_replayer/journal/StateBuilder.h" #include "test/journal/mock/MockJournaler.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/rbd_mirror/mock/MockContextWQ.h" #include "test/rbd_mirror/mock/MockSafeTimer.h" #include <boost/intrusive_ptr.hpp> using namespace std::chrono_literals; namespace librbd { namespace { struct MockTestJournal; struct MockTestImageCtx : public librbd::MockImageCtx { explicit MockTestImageCtx(librbd::ImageCtx &image_ctx, MockTestJournal& mock_test_journal) : librbd::MockImageCtx(image_ctx), journal(&mock_test_journal) { } MockTestJournal* journal = nullptr; }; struct MockTestJournal : public MockJournal { MOCK_METHOD2(start_external_replay, void(journal::Replay<MockTestImageCtx> **, Context *on_start)); MOCK_METHOD0(stop_external_replay, void()); }; } // anonymous namespace namespace journal { template <> struct TypeTraits<librbd::MockTestImageCtx> { typedef ::journal::MockJournaler Journaler; typedef ::journal::MockReplayEntryProxy ReplayEntry; }; template<> struct Replay<MockTestImageCtx> { MOCK_METHOD2(decode, int(bufferlist::const_iterator *, EventEntry *)); MOCK_METHOD3(process, void(const EventEntry &, Context *, Context *)); MOCK_METHOD1(flush, void(Context*)); MOCK_METHOD2(shut_down, void(bool, Context*)); }; } // namespace journal } // namespace librbd namespace 
boost { template<> struct intrusive_ptr<librbd::MockTestJournal> { intrusive_ptr() { } intrusive_ptr(librbd::MockTestJournal* mock_test_journal) : mock_test_journal(mock_test_journal) { } librbd::MockTestJournal* operator->() { return mock_test_journal; } void reset() { mock_test_journal = nullptr; } const librbd::MockTestJournal* get() const { return mock_test_journal; } template<typename T> bool operator==(T* t) const { return (mock_test_journal == t); } librbd::MockTestJournal* mock_test_journal = nullptr; }; } // namespace boost namespace rbd { namespace mirror { template <> struct Threads<librbd::MockTestImageCtx> { MockSafeTimer *timer; ceph::mutex &timer_lock; MockContextWQ *work_queue; Threads(Threads<librbd::ImageCtx>* threads) : timer(new MockSafeTimer()), timer_lock(threads->timer_lock), work_queue(new MockContextWQ()) { } ~Threads() { delete timer; delete work_queue; } }; namespace { struct MockReplayerListener : public image_replayer::ReplayerListener { MOCK_METHOD0(handle_notification, void()); }; } // anonymous namespace namespace image_replayer { template<> struct CloseImageRequest<librbd::MockTestImageCtx> { static CloseImageRequest* s_instance; librbd::MockTestImageCtx **image_ctx = nullptr; Context *on_finish = nullptr; static CloseImageRequest* create(librbd::MockTestImageCtx **image_ctx, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->image_ctx = image_ctx; s_instance->on_finish = on_finish; return s_instance; } CloseImageRequest() { ceph_assert(s_instance == nullptr); s_instance = this; } ~CloseImageRequest() { ceph_assert(s_instance == this); s_instance = nullptr; } MOCK_METHOD0(send, void()); }; CloseImageRequest<librbd::MockTestImageCtx>* CloseImageRequest<librbd::MockTestImageCtx>::s_instance = nullptr; namespace journal { template <> struct EventPreprocessor<librbd::MockTestImageCtx> { static EventPreprocessor *s_instance; static EventPreprocessor *create(librbd::MockTestImageCtx &local_image_ctx, 
::journal::MockJournaler &remote_journaler, const std::string &local_mirror_uuid, librbd::journal::MirrorPeerClientMeta *client_meta, MockContextWQ *work_queue) { ceph_assert(s_instance != nullptr); return s_instance; } static void destroy(EventPreprocessor* processor) { } EventPreprocessor() { ceph_assert(s_instance == nullptr); s_instance = this; } ~EventPreprocessor() { ceph_assert(s_instance == this); s_instance = nullptr; } MOCK_METHOD1(is_required, bool(const librbd::journal::EventEntry &)); MOCK_METHOD2(preprocess, void(librbd::journal::EventEntry *, Context *)); }; template<> struct ReplayStatusFormatter<librbd::MockTestImageCtx> { static ReplayStatusFormatter* s_instance; static ReplayStatusFormatter* create(::journal::MockJournaler *journaler, const std::string &mirror_uuid) { ceph_assert(s_instance != nullptr); return s_instance; } static void destroy(ReplayStatusFormatter* formatter) { } ReplayStatusFormatter() { ceph_assert(s_instance == nullptr); s_instance = this; } ~ReplayStatusFormatter() { ceph_assert(s_instance == this); s_instance = nullptr; } MOCK_METHOD1(handle_entry_processed, void(uint64_t)); MOCK_METHOD2(get_or_send_update, bool(std::string *description, Context *on_finish)); }; template<> struct StateBuilder<librbd::MockTestImageCtx> { StateBuilder(librbd::MockTestImageCtx& local_image_ctx, ::journal::MockJournaler& remote_journaler, const librbd::journal::MirrorPeerClientMeta& remote_client_meta) : local_image_ctx(&local_image_ctx), remote_journaler(&remote_journaler), remote_client_meta(remote_client_meta) { } librbd::MockTestImageCtx* local_image_ctx; std::string remote_mirror_uuid = "remote mirror uuid"; ::journal::MockJournaler* remote_journaler = nullptr; librbd::journal::MirrorPeerClientMeta remote_client_meta; }; EventPreprocessor<librbd::MockTestImageCtx>* EventPreprocessor<librbd::MockTestImageCtx>::s_instance = nullptr; ReplayStatusFormatter<librbd::MockTestImageCtx>* ReplayStatusFormatter<librbd::MockTestImageCtx>::s_instance = 
nullptr; } // namespace journal } // namespace image_replayer } // namespace mirror } // namespace rbd #include "tools/rbd_mirror/image_replayer/journal/Replayer.cc" namespace rbd { namespace mirror { namespace image_replayer { namespace journal { using ::testing::_; using ::testing::AtLeast; using ::testing::DoAll; using ::testing::InSequence; using ::testing::Invoke; using ::testing::MatcherCast; using ::testing::Return; using ::testing::ReturnArg; using ::testing::SaveArg; using ::testing::SetArgPointee; using ::testing::WithArg; class TestMockImageReplayerJournalReplayer : public TestMockFixture { public: typedef Replayer<librbd::MockTestImageCtx> MockReplayer; typedef EventPreprocessor<librbd::MockTestImageCtx> MockEventPreprocessor; typedef ReplayStatusFormatter<librbd::MockTestImageCtx> MockReplayStatusFormatter; typedef StateBuilder<librbd::MockTestImageCtx> MockStateBuilder; typedef Threads<librbd::MockTestImageCtx> MockThreads; typedef CloseImageRequest<librbd::MockTestImageCtx> MockCloseImageRequest; typedef librbd::journal::Replay<librbd::MockTestImageCtx> MockReplay; void SetUp() override { TestMockFixture::SetUp(); librbd::RBD rbd; ASSERT_EQ(0, create_image(rbd, m_local_io_ctx, m_image_name, m_image_size)); ASSERT_EQ(0, open_image(m_local_io_ctx, m_image_name, &m_local_image_ctx)); } bufferlist encode_tag_data(const librbd::journal::TagData &tag_data) { bufferlist bl; encode(tag_data, bl); return bl; } void expect_work_queue_repeatedly(MockThreads &mock_threads) { EXPECT_CALL(*mock_threads.work_queue, queue(_, _)) .WillRepeatedly(Invoke([this](Context *ctx, int r) { m_threads->work_queue->queue(ctx, r); })); } void expect_add_event_after_repeatedly(MockThreads &mock_threads) { EXPECT_CALL(*mock_threads.timer, add_event_after(_, _)) .WillRepeatedly( DoAll(Invoke([this](double seconds, Context *ctx) { m_threads->timer->add_event_after(seconds, ctx); }), ReturnArg<1>())); EXPECT_CALL(*mock_threads.timer, cancel_event(_)) .WillRepeatedly( 
Invoke([this](Context *ctx) { return m_threads->timer->cancel_event(ctx); })); } void expect_init(::journal::MockJournaler &mock_journaler, int r) { EXPECT_CALL(mock_journaler, init(_)) .WillOnce(CompleteContext(m_threads->work_queue, r)); } void expect_stop_replay(::journal::MockJournaler &mock_journaler, int r) { EXPECT_CALL(mock_journaler, stop_replay(_)) .WillOnce(CompleteContext(r)); } void expect_shut_down(MockReplay &mock_replay, bool cancel_ops, int r) { EXPECT_CALL(mock_replay, shut_down(cancel_ops, _)) .WillOnce(WithArg<1>(CompleteContext(m_threads->work_queue, r))); } void expect_get_cached_client(::journal::MockJournaler &mock_journaler, const std::string& client_id, const cls::journal::Client& client, const librbd::journal::ClientMeta& client_meta, int r) { librbd::journal::ClientData client_data; client_data.client_meta = client_meta; cls::journal::Client client_copy{client}; encode(client_data, client_copy.data); EXPECT_CALL(mock_journaler, get_cached_client(client_id, _)) .WillOnce(DoAll(SetArgPointee<1>(client_copy), Return(r))); } void expect_start_external_replay(librbd::MockTestJournal &mock_journal, MockReplay *mock_replay, int r) { EXPECT_CALL(mock_journal, start_external_replay(_, _)) .WillOnce(DoAll(SetArgPointee<0>(mock_replay), WithArg<1>(CompleteContext(m_threads->work_queue, r)))); } void expect_is_tag_owner(librbd::MockTestJournal &mock_journal, bool is_owner) { EXPECT_CALL(mock_journal, is_tag_owner()).WillOnce(Return(is_owner)); } void expect_is_resync_requested(librbd::MockTestJournal &mock_journal, int r, bool resync_requested) { EXPECT_CALL(mock_journal, is_resync_requested(_)).WillOnce( DoAll(SetArgPointee<0>(resync_requested), Return(r))); } void expect_get_commit_tid_in_debug( ::journal::MockReplayEntry &mock_replay_entry) { // It is used in debug messages and depends on debug level EXPECT_CALL(mock_replay_entry, get_commit_tid()) .Times(AtLeast(0)) .WillRepeatedly(Return(0)); } void 
expect_get_tag_tid_in_debug(librbd::MockTestJournal &mock_journal) { // It is used in debug messages and depends on debug level EXPECT_CALL(mock_journal, get_tag_tid()).Times(AtLeast(0)) .WillRepeatedly(Return(0)); } void expect_committed(::journal::MockReplayEntry &mock_replay_entry, ::journal::MockJournaler &mock_journaler, int times) { EXPECT_CALL(mock_replay_entry, get_data()).Times(times); EXPECT_CALL(mock_journaler, committed( MatcherCast<const ::journal::MockReplayEntryProxy&>(_))) .Times(times); } void expect_try_pop_front(::journal::MockJournaler &mock_journaler, uint64_t replay_tag_tid, bool entries_available) { EXPECT_CALL(mock_journaler, try_pop_front(_, _)) .WillOnce(DoAll(SetArgPointee<0>(::journal::MockReplayEntryProxy()), SetArgPointee<1>(replay_tag_tid), Return(entries_available))); } void expect_try_pop_front_return_no_entries( ::journal::MockJournaler &mock_journaler, Context *on_finish) { EXPECT_CALL(mock_journaler, try_pop_front(_, _)) .WillOnce(DoAll(Invoke([on_finish](::journal::MockReplayEntryProxy *e, uint64_t *t) { on_finish->complete(0); }), Return(false))); } void expect_get_tag(::journal::MockJournaler &mock_journaler, const cls::journal::Tag &tag, int r) { EXPECT_CALL(mock_journaler, get_tag(_, _, _)) .WillOnce(DoAll(SetArgPointee<1>(tag), WithArg<2>(CompleteContext(r)))); } void expect_allocate_tag(librbd::MockTestJournal &mock_journal, int r) { EXPECT_CALL(mock_journal, allocate_tag(_, _, _)) .WillOnce(WithArg<2>(CompleteContext(r))); } void expect_preprocess(MockEventPreprocessor &mock_event_preprocessor, bool required, int r) { EXPECT_CALL(mock_event_preprocessor, is_required(_)) .WillOnce(Return(required)); if (required) { EXPECT_CALL(mock_event_preprocessor, preprocess(_, _)) .WillOnce(WithArg<1>(CompleteContext(r))); } } void expect_process(MockReplay &mock_replay, int on_ready_r, int on_commit_r) { EXPECT_CALL(mock_replay, process(_, _, _)) .WillOnce(DoAll(WithArg<1>(CompleteContext(on_ready_r)), 
WithArg<2>(CompleteContext(on_commit_r)))); } void expect_flush(MockReplay& mock_replay, int r) { EXPECT_CALL(mock_replay, flush(_)) .WillOnce(CompleteContext(m_threads->work_queue, r)); } void expect_flush_commit_position(::journal::MockJournaler& mock_journal, int r) { EXPECT_CALL(mock_journal, flush_commit_position(_)) .WillOnce(CompleteContext(m_threads->work_queue, r)); } void expect_get_tag_data(librbd::MockTestJournal& mock_local_journal, const librbd::journal::TagData& tag_data) { EXPECT_CALL(mock_local_journal, get_tag_data()) .WillOnce(Return(tag_data)); } void expect_send(MockCloseImageRequest &mock_close_image_request, int r) { EXPECT_CALL(mock_close_image_request, send()) .WillOnce(Invoke([this, &mock_close_image_request, r]() { *mock_close_image_request.image_ctx = nullptr; m_threads->work_queue->queue(mock_close_image_request.on_finish, r); })); } void expect_notification(MockThreads& mock_threads, MockReplayerListener& mock_replayer_listener) { EXPECT_CALL(mock_replayer_listener, handle_notification()) .WillOnce(Invoke([this]() { std::unique_lock locker{m_lock}; m_notified = true; m_cond.notify_all(); })); } int wait_for_notification() { std::unique_lock locker{m_lock}; while (!m_notified) { if (m_cond.wait_for(locker, 10s) == std::cv_status::timeout) { return -ETIMEDOUT; } } m_notified = false; return 0; } void expect_local_journal_add_listener( librbd::MockTestJournal& mock_local_journal, librbd::journal::Listener** local_journal_listener) { EXPECT_CALL(mock_local_journal, add_listener(_)) .WillOnce(SaveArg<0>(local_journal_listener)); expect_is_tag_owner(mock_local_journal, false); expect_is_resync_requested(mock_local_journal, 0, false); } int init_entry_replayer(MockReplayer& mock_replayer, MockThreads& mock_threads, MockReplayerListener& mock_replayer_listener, librbd::MockTestJournal& mock_local_journal, ::journal::MockJournaler& mock_remote_journaler, MockReplay& mock_local_journal_replay, librbd::journal::Listener** local_journal_listener, 
::journal::ReplayHandler** remote_replay_handler, ::journal::JournalMetadataListener** remote_journal_listener) { expect_init(mock_remote_journaler, 0); EXPECT_CALL(mock_remote_journaler, add_listener(_)) .WillOnce(SaveArg<0>(remote_journal_listener)); expect_get_cached_client(mock_remote_journaler, "local mirror uuid", {}, {librbd::journal::MirrorPeerClientMeta{}}, 0); expect_start_external_replay(mock_local_journal, &mock_local_journal_replay, 0); expect_local_journal_add_listener(mock_local_journal, local_journal_listener); EXPECT_CALL(mock_remote_journaler, start_live_replay(_, _)) .WillOnce(SaveArg<0>(remote_replay_handler)); expect_notification(mock_threads, mock_replayer_listener); C_SaferCond init_ctx; mock_replayer.init(&init_ctx); int r = init_ctx.wait(); if (r < 0) { return r; } return wait_for_notification(); } int shut_down_entry_replayer(MockReplayer& mock_replayer, MockThreads& mock_threads, librbd::MockTestJournal& mock_local_journal, ::journal::MockJournaler& mock_remote_journaler, MockReplay& mock_local_journal_replay) { expect_shut_down(mock_local_journal_replay, true, 0); EXPECT_CALL(mock_local_journal, remove_listener(_)); EXPECT_CALL(mock_local_journal, stop_external_replay()); MockCloseImageRequest mock_close_image_request; expect_send(mock_close_image_request, 0); expect_stop_replay(mock_remote_journaler, 0); EXPECT_CALL(mock_remote_journaler, remove_listener(_)); C_SaferCond shutdown_ctx; mock_replayer.shut_down(&shutdown_ctx); return shutdown_ctx.wait(); } librbd::ImageCtx* m_local_image_ctx = nullptr; ceph::mutex m_lock = ceph::make_mutex( "TestMockImageReplayerJournalReplayer"); ceph::condition_variable m_cond; bool m_notified = false; }; TEST_F(TestMockImageReplayerJournalReplayer, InitShutDown) { librbd::MockTestJournal mock_local_journal; librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx, mock_local_journal}; ::journal::MockJournaler mock_remote_journaler; MockReplayerListener mock_replayer_listener; MockThreads 
mock_threads{m_threads}; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_journaler, {}); MockReplayer mock_replayer{ &mock_threads, "local mirror uuid", &mock_state_builder, &mock_replayer_listener}; expect_work_queue_repeatedly(mock_threads); InSequence seq; MockReplay mock_local_journal_replay; MockEventPreprocessor mock_event_preprocessor; MockReplayStatusFormatter mock_replay_status_formatter; librbd::journal::Listener* local_journal_listener = nullptr; ::journal::ReplayHandler* remote_replay_handler = nullptr; ::journal::JournalMetadataListener* remote_journaler_listener = nullptr; ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads, mock_replayer_listener, mock_local_journal, mock_remote_journaler, mock_local_journal_replay, &local_journal_listener, &remote_replay_handler, &remote_journaler_listener)); ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads, mock_local_journal, mock_remote_journaler, mock_local_journal_replay)); } TEST_F(TestMockImageReplayerJournalReplayer, InitRemoteJournalerError) { librbd::MockTestJournal mock_local_journal; librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx, mock_local_journal}; ::journal::MockJournaler mock_remote_journaler; MockReplayerListener mock_replayer_listener; MockThreads mock_threads{m_threads}; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_journaler, {}); MockReplayer mock_replayer{ &mock_threads, "local mirror uuid", &mock_state_builder, &mock_replayer_listener}; expect_work_queue_repeatedly(mock_threads); InSequence seq; expect_init(mock_remote_journaler, -EINVAL); MockCloseImageRequest mock_close_image_request; expect_send(mock_close_image_request, 0); C_SaferCond init_ctx; mock_replayer.init(&init_ctx); ASSERT_EQ(-EINVAL, init_ctx.wait()); } TEST_F(TestMockImageReplayerJournalReplayer, InitRemoteJournalerGetClientError) { librbd::MockTestJournal mock_local_journal; librbd::MockTestImageCtx 
mock_local_image_ctx{*m_local_image_ctx, mock_local_journal}; ::journal::MockJournaler mock_remote_journaler; MockReplayerListener mock_replayer_listener; MockThreads mock_threads{m_threads}; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_journaler, {}); MockReplayer mock_replayer{ &mock_threads, "local mirror uuid", &mock_state_builder, &mock_replayer_listener}; expect_work_queue_repeatedly(mock_threads); InSequence seq; expect_init(mock_remote_journaler, 0); EXPECT_CALL(mock_remote_journaler, add_listener(_)); expect_get_cached_client(mock_remote_journaler, "local mirror uuid", {}, {librbd::journal::MirrorPeerClientMeta{}}, -EINVAL); MockCloseImageRequest mock_close_image_request; expect_send(mock_close_image_request, 0); EXPECT_CALL(mock_remote_journaler, remove_listener(_)); C_SaferCond init_ctx; mock_replayer.init(&init_ctx); ASSERT_EQ(-EINVAL, init_ctx.wait()); } TEST_F(TestMockImageReplayerJournalReplayer, InitNoLocalJournal) { librbd::MockTestJournal mock_local_journal; librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx, mock_local_journal}; ::journal::MockJournaler mock_remote_journaler; MockReplayerListener mock_replayer_listener; MockThreads mock_threads{m_threads}; mock_local_image_ctx.journal = nullptr; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_journaler, {}); MockReplayer mock_replayer{ &mock_threads, "local mirror uuid", &mock_state_builder, &mock_replayer_listener}; expect_work_queue_repeatedly(mock_threads); InSequence seq; expect_init(mock_remote_journaler, 0); EXPECT_CALL(mock_remote_journaler, add_listener(_)); expect_get_cached_client(mock_remote_journaler, "local mirror uuid", {}, {librbd::journal::MirrorPeerClientMeta{}}, 0); MockCloseImageRequest mock_close_image_request; expect_send(mock_close_image_request, 0); EXPECT_CALL(mock_remote_journaler, remove_listener(_)); C_SaferCond init_ctx; mock_replayer.init(&init_ctx); ASSERT_EQ(-EINVAL, init_ctx.wait()); } 
TEST_F(TestMockImageReplayerJournalReplayer, InitLocalJournalStartExternalReplayError) { librbd::MockTestJournal mock_local_journal; librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx, mock_local_journal}; ::journal::MockJournaler mock_remote_journaler; MockReplayerListener mock_replayer_listener; MockThreads mock_threads{m_threads}; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_journaler, {}); MockReplayer mock_replayer{ &mock_threads, "local mirror uuid", &mock_state_builder, &mock_replayer_listener}; expect_work_queue_repeatedly(mock_threads); InSequence seq; expect_init(mock_remote_journaler, 0); EXPECT_CALL(mock_remote_journaler, add_listener(_)); expect_get_cached_client(mock_remote_journaler, "local mirror uuid", {}, {librbd::journal::MirrorPeerClientMeta{}}, 0); expect_start_external_replay(mock_local_journal, nullptr, -EINVAL); MockCloseImageRequest mock_close_image_request; expect_send(mock_close_image_request, 0); EXPECT_CALL(mock_remote_journaler, remove_listener(_)); C_SaferCond init_ctx; mock_replayer.init(&init_ctx); ASSERT_EQ(-EINVAL, init_ctx.wait()); } TEST_F(TestMockImageReplayerJournalReplayer, InitIsPromoted) { librbd::MockTestJournal mock_local_journal; librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx, mock_local_journal}; ::journal::MockJournaler mock_remote_journaler; MockReplayerListener mock_replayer_listener; MockThreads mock_threads{m_threads}; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_journaler, {}); MockReplayer mock_replayer{ &mock_threads, "local mirror uuid", &mock_state_builder, &mock_replayer_listener}; expect_work_queue_repeatedly(mock_threads); InSequence seq; expect_init(mock_remote_journaler, 0); EXPECT_CALL(mock_remote_journaler, add_listener(_)); expect_get_cached_client(mock_remote_journaler, "local mirror uuid", {}, {librbd::journal::MirrorPeerClientMeta{}}, 0); MockReplay mock_local_journal_replay; 
// (continuation of InitIsPromoted)
  expect_start_external_replay(mock_local_journal, &mock_local_journal_replay,
                               0);
  EXPECT_CALL(mock_local_journal, add_listener(_));
  // local image is the tag owner, i.e. it has been promoted
  expect_is_tag_owner(mock_local_journal, true);
  expect_notification(mock_threads, mock_replayer_listener);

  C_SaferCond init_ctx;
  mock_replayer.init(&init_ctx);
  ASSERT_EQ(0, init_ctx.wait());
  ASSERT_EQ(0, wait_for_notification());

  // tear down
  expect_shut_down(mock_local_journal_replay, true, 0);
  EXPECT_CALL(mock_local_journal, remove_listener(_));
  EXPECT_CALL(mock_local_journal, stop_external_replay());
  MockCloseImageRequest mock_close_image_request;
  expect_send(mock_close_image_request, 0);
  EXPECT_CALL(mock_remote_journaler, remove_listener(_));

  C_SaferCond shutdown_ctx;
  mock_replayer.shut_down(&shutdown_ctx);
  ASSERT_EQ(0, shutdown_ctx.wait());
}

// A remote client registration in DISCONNECTED state fails init() with
// -ENOTCONN; resync-after-disconnect is disabled for this test.
// (test body continues on the next source line)
TEST_F(TestMockImageReplayerJournalReplayer, InitDisconnected) {
  librbd::MockTestJournal mock_local_journal;
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx,
                                                mock_local_journal};
  mock_local_image_ctx.config.set_val("rbd_mirroring_resync_after_disconnect",
                                      "false");
  ::journal::MockJournaler mock_remote_journaler;
  MockReplayerListener mock_replayer_listener;
  MockThreads mock_threads{m_threads};
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_journaler, {});
  MockReplayer mock_replayer{
    &mock_threads, "local mirror uuid", &mock_state_builder,
    &mock_replayer_listener};

  expect_work_queue_repeatedly(mock_threads);

  InSequence seq;
  expect_init(mock_remote_journaler, 0);
  EXPECT_CALL(mock_remote_journaler, add_listener(_));
  expect_get_cached_client(mock_remote_journaler, "local mirror uuid",
                           {{}, {}, {},
                            cls::journal::CLIENT_STATE_DISCONNECTED},
                           {librbd::journal::MirrorPeerClientMeta{
                              mock_local_image_ctx.id}}, 0);

  MockCloseImageRequest mock_close_image_request;
  expect_send(mock_close_image_request, 0);
  EXPECT_CALL(mock_remote_journaler, remove_listener(_));

  C_SaferCond init_ctx;
  mock_replayer.init(&init_ctx);
  ASSERT_EQ(-ENOTCONN, init_ctx.wait());
// (continuation of InitDisconnected: resync must NOT be requested since
// rbd_mirroring_resync_after_disconnect is "false")
  ASSERT_FALSE(mock_replayer.is_resync_requested());
}

// Same DISCONNECTED failure, but with resync-after-disconnect enabled the
// replayer must additionally flag that a resync was requested.
TEST_F(TestMockImageReplayerJournalReplayer, InitDisconnectedResync) {
  librbd::MockTestJournal mock_local_journal;
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx,
                                                mock_local_journal};
  mock_local_image_ctx.config.set_val("rbd_mirroring_resync_after_disconnect",
                                      "true");
  ::journal::MockJournaler mock_remote_journaler;
  MockReplayerListener mock_replayer_listener;
  MockThreads mock_threads{m_threads};
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_journaler, {});
  MockReplayer mock_replayer{
    &mock_threads, "local mirror uuid", &mock_state_builder,
    &mock_replayer_listener};

  expect_work_queue_repeatedly(mock_threads);

  InSequence seq;
  expect_init(mock_remote_journaler, 0);
  EXPECT_CALL(mock_remote_journaler, add_listener(_));
  expect_get_cached_client(mock_remote_journaler, "local mirror uuid",
                           {{}, {}, {},
                            cls::journal::CLIENT_STATE_DISCONNECTED},
                           {librbd::journal::MirrorPeerClientMeta{
                              mock_local_image_ctx.id}}, 0);

  MockCloseImageRequest mock_close_image_request;
  expect_send(mock_close_image_request, 0);
  EXPECT_CALL(mock_remote_journaler, remove_listener(_));

  C_SaferCond init_ctx;
  mock_replayer.init(&init_ctx);
  ASSERT_EQ(-ENOTCONN, init_ctx.wait());
  ASSERT_TRUE(mock_replayer.is_resync_requested());
}

// init() succeeds while the local journal has a pending resync request;
// the listener must be notified.  (body continues on the next source line)
TEST_F(TestMockImageReplayerJournalReplayer, InitResyncRequested) {
  librbd::MockTestJournal mock_local_journal;
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx,
                                                mock_local_journal};
  ::journal::MockJournaler mock_remote_journaler;
  MockReplayerListener mock_replayer_listener;
  MockThreads mock_threads{m_threads};
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_journaler, {});
  MockReplayer mock_replayer{
    &mock_threads, "local mirror uuid", &mock_state_builder,
    &mock_replayer_listener};

  expect_work_queue_repeatedly(mock_threads);

  InSequence seq;
  expect_init(mock_remote_journaler, 0);
  EXPECT_CALL(mock_remote_journaler, add_listener(_));
// (continuation of InitResyncRequested)
  expect_get_cached_client(mock_remote_journaler, "local mirror uuid", {},
                           {librbd::journal::MirrorPeerClientMeta{}}, 0);
  MockReplay mock_local_journal_replay;
  expect_start_external_replay(mock_local_journal, &mock_local_journal_replay,
                               0);
  EXPECT_CALL(mock_local_journal, add_listener(_));
  expect_is_tag_owner(mock_local_journal, false);
  // local journal reports a pending resync request
  expect_is_resync_requested(mock_local_journal, 0, true);
  expect_notification(mock_threads, mock_replayer_listener);

  C_SaferCond init_ctx;
  mock_replayer.init(&init_ctx);
  ASSERT_EQ(0, init_ctx.wait());
  ASSERT_EQ(0, wait_for_notification());

  // tear down
  expect_shut_down(mock_local_journal_replay, true, 0);
  EXPECT_CALL(mock_local_journal, remove_listener(_));
  EXPECT_CALL(mock_local_journal, stop_external_replay());
  MockCloseImageRequest mock_close_image_request;
  expect_send(mock_close_image_request, 0);
  EXPECT_CALL(mock_remote_journaler, remove_listener(_));

  C_SaferCond shutdown_ctx;
  mock_replayer.shut_down(&shutdown_ctx);
  ASSERT_EQ(0, shutdown_ctx.wait());
}

// Querying the resync flag fails (-EINVAL): init() still completes but the
// error code must be surfaced through get_error_code() after the listener
// notification.  (body continues on the next source line)
TEST_F(TestMockImageReplayerJournalReplayer, InitResyncRequestedError) {
  librbd::MockTestJournal mock_local_journal;
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx,
                                                mock_local_journal};
  ::journal::MockJournaler mock_remote_journaler;
  MockReplayerListener mock_replayer_listener;
  MockThreads mock_threads{m_threads};
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_journaler, {});
  MockReplayer mock_replayer{
    &mock_threads, "local mirror uuid", &mock_state_builder,
    &mock_replayer_listener};

  expect_work_queue_repeatedly(mock_threads);

  InSequence seq;
  expect_init(mock_remote_journaler, 0);
  EXPECT_CALL(mock_remote_journaler, add_listener(_));
  expect_get_cached_client(mock_remote_journaler, "local mirror uuid", {},
                           {librbd::journal::MirrorPeerClientMeta{}}, 0);
  MockReplay mock_local_journal_replay;
  expect_start_external_replay(mock_local_journal, &mock_local_journal_replay,
                               0);
  EXPECT_CALL(mock_local_journal, add_listener(_));
// (continuation of InitResyncRequestedError)
  expect_is_tag_owner(mock_local_journal, false);
  // the resync query itself fails with -EINVAL
  expect_is_resync_requested(mock_local_journal, -EINVAL, false);
  expect_notification(mock_threads, mock_replayer_listener);

  C_SaferCond init_ctx;
  mock_replayer.init(&init_ctx);
  ASSERT_EQ(0, init_ctx.wait());
  ASSERT_EQ(0, wait_for_notification());
  ASSERT_EQ(-EINVAL, mock_replayer.get_error_code());

  // tear down
  expect_shut_down(mock_local_journal_replay, true, 0);
  EXPECT_CALL(mock_local_journal, remove_listener(_));
  EXPECT_CALL(mock_local_journal, stop_external_replay());
  MockCloseImageRequest mock_close_image_request;
  expect_send(mock_close_image_request, 0);
  EXPECT_CALL(mock_remote_journaler, remove_listener(_));

  C_SaferCond shutdown_ctx;
  mock_replayer.shut_down(&shutdown_ctx);
  ASSERT_EQ(0, shutdown_ctx.wait());
}

// shut_down() must propagate an error (-EINVAL) from shutting down the
// local journal replay.  (body continues on the next source line)
TEST_F(TestMockImageReplayerJournalReplayer, ShutDownLocalJournalReplayError) {
  librbd::MockTestJournal mock_local_journal;
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx,
                                                mock_local_journal};
  ::journal::MockJournaler mock_remote_journaler;
  MockReplayerListener mock_replayer_listener;
  MockThreads mock_threads{m_threads};
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_journaler, {});
  MockReplayer mock_replayer{
    &mock_threads, "local mirror uuid", &mock_state_builder,
    &mock_replayer_listener};

  expect_work_queue_repeatedly(mock_threads);

  InSequence seq;
  MockReplay mock_local_journal_replay;
  MockEventPreprocessor mock_event_preprocessor;
  MockReplayStatusFormatter mock_replay_status_formatter;
  librbd::journal::Listener* local_journal_listener = nullptr;
  ::journal::ReplayHandler* remote_replay_handler = nullptr;
  ::journal::JournalMetadataListener* remote_journaler_listener = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_replayer_listener, mock_local_journal,
                                   mock_remote_journaler,
                                   mock_local_journal_replay,
                                   &local_journal_listener,
                                   &remote_replay_handler,
                                   &remote_journaler_listener));

  expect_shut_down(mock_local_journal_replay, true, -EINVAL);
// (continuation of ShutDownLocalJournalReplayError)
  EXPECT_CALL(mock_local_journal, remove_listener(_));
  EXPECT_CALL(mock_local_journal, stop_external_replay());
  MockCloseImageRequest mock_close_image_request;
  expect_send(mock_close_image_request, 0);
  expect_stop_replay(mock_remote_journaler, 0);
  EXPECT_CALL(mock_remote_journaler, remove_listener(_));

  C_SaferCond shutdown_ctx;
  mock_replayer.shut_down(&shutdown_ctx);
  ASSERT_EQ(-EINVAL, shutdown_ctx.wait());
}

// shut_down() must propagate an error (-EINVAL) from closing the local
// image.  (body continues on the next source line)
TEST_F(TestMockImageReplayerJournalReplayer, CloseLocalImageError) {
  librbd::MockTestJournal mock_local_journal;
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx,
                                                mock_local_journal};
  ::journal::MockJournaler mock_remote_journaler;
  MockReplayerListener mock_replayer_listener;
  MockThreads mock_threads{m_threads};
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_journaler, {});
  MockReplayer mock_replayer{
    &mock_threads, "local mirror uuid", &mock_state_builder,
    &mock_replayer_listener};

  expect_work_queue_repeatedly(mock_threads);

  InSequence seq;
  MockReplay mock_local_journal_replay;
  MockEventPreprocessor mock_event_preprocessor;
  MockReplayStatusFormatter mock_replay_status_formatter;
  librbd::journal::Listener* local_journal_listener = nullptr;
  ::journal::ReplayHandler* remote_replay_handler = nullptr;
  ::journal::JournalMetadataListener* remote_journaler_listener = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_replayer_listener, mock_local_journal,
                                   mock_remote_journaler,
                                   mock_local_journal_replay,
                                   &local_journal_listener,
                                   &remote_replay_handler,
                                   &remote_journaler_listener));

  expect_shut_down(mock_local_journal_replay, true, 0);
  EXPECT_CALL(mock_local_journal, remove_listener(_));
  EXPECT_CALL(mock_local_journal, stop_external_replay());
  MockCloseImageRequest mock_close_image_request;
  expect_send(mock_close_image_request, -EINVAL);
  expect_stop_replay(mock_remote_journaler, 0);
  EXPECT_CALL(mock_remote_journaler, remove_listener(_));

  C_SaferCond shutdown_ctx;
  mock_replayer.shut_down(&shutdown_ctx);
// (continuation of CloseLocalImageError)
  ASSERT_EQ(-EINVAL, shutdown_ctx.wait());
}

// shut_down() must propagate an error (-EPERM) from stopping replay on the
// remote journaler.
TEST_F(TestMockImageReplayerJournalReplayer, StopRemoteJournalerError) {
  librbd::MockTestJournal mock_local_journal;
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx,
                                                mock_local_journal};
  ::journal::MockJournaler mock_remote_journaler;
  MockReplayerListener mock_replayer_listener;
  MockThreads mock_threads{m_threads};
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_journaler, {});
  MockReplayer mock_replayer{
    &mock_threads, "local mirror uuid", &mock_state_builder,
    &mock_replayer_listener};

  expect_work_queue_repeatedly(mock_threads);

  InSequence seq;
  MockReplay mock_local_journal_replay;
  MockEventPreprocessor mock_event_preprocessor;
  MockReplayStatusFormatter mock_replay_status_formatter;
  librbd::journal::Listener* local_journal_listener = nullptr;
  ::journal::ReplayHandler* remote_replay_handler = nullptr;
  ::journal::JournalMetadataListener* remote_journaler_listener = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_replayer_listener, mock_local_journal,
                                   mock_remote_journaler,
                                   mock_local_journal_replay,
                                   &local_journal_listener,
                                   &remote_replay_handler,
                                   &remote_journaler_listener));

  expect_shut_down(mock_local_journal_replay, true, 0);
  EXPECT_CALL(mock_local_journal, remove_listener(_));
  EXPECT_CALL(mock_local_journal, stop_external_replay());
  MockCloseImageRequest mock_close_image_request;
  expect_send(mock_close_image_request, 0);
  expect_stop_replay(mock_remote_journaler, -EPERM);
  EXPECT_CALL(mock_remote_journaler, remove_listener(_));

  C_SaferCond shutdown_ctx;
  mock_replayer.shut_down(&shutdown_ctx);
  ASSERT_EQ(-EPERM, shutdown_ctx.wait());
}

// Happy-path replay: two remote entries are popped, decoded, preprocessed,
// processed and committed.  (body continues on the next source line)
TEST_F(TestMockImageReplayerJournalReplayer, Replay) {
  librbd::MockTestJournal mock_local_journal;
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx,
                                                mock_local_journal};
  ::journal::MockJournaler mock_remote_journaler;
  MockReplayerListener mock_replayer_listener;
  MockThreads mock_threads{m_threads};
// (continuation of Replay)
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_journaler, {});
  MockReplayer mock_replayer{
    &mock_threads, "local mirror uuid", &mock_state_builder,
    &mock_replayer_listener};

  ::journal::MockReplayEntry mock_replay_entry;
  expect_work_queue_repeatedly(mock_threads);
  expect_add_event_after_repeatedly(mock_threads);
  expect_get_commit_tid_in_debug(mock_replay_entry);
  expect_get_tag_tid_in_debug(mock_local_journal);
  // both replayed entries are expected to be committed remotely
  expect_committed(mock_replay_entry, mock_remote_journaler, 2);

  InSequence seq;
  MockReplay mock_local_journal_replay;
  MockEventPreprocessor mock_event_preprocessor;
  MockReplayStatusFormatter mock_replay_status_formatter;
  librbd::journal::Listener* local_journal_listener = nullptr;
  ::journal::ReplayHandler* remote_replay_handler = nullptr;
  ::journal::JournalMetadataListener* remote_journaler_listener = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_replayer_listener, mock_local_journal,
                                   mock_remote_journaler,
                                   mock_local_journal_replay,
                                   &local_journal_listener,
                                   &remote_replay_handler,
                                   &remote_journaler_listener));

  cls::journal::Tag tag =
    {1, 0, encode_tag_data({librbd::Journal<>::LOCAL_MIRROR_UUID,
                            librbd::Journal<>::LOCAL_MIRROR_UUID,
                            true, 0, 0})};

  expect_try_pop_front(mock_remote_journaler, tag.tid, true);

  // replay_flush
  expect_shut_down(mock_local_journal_replay, false, 0);
  EXPECT_CALL(mock_local_journal, remove_listener(_));
  EXPECT_CALL(mock_local_journal, stop_external_replay());
  expect_start_external_replay(mock_local_journal, &mock_local_journal_replay,
                               0);
  expect_local_journal_add_listener(mock_local_journal,
                                    &local_journal_listener);
  expect_get_tag(mock_remote_journaler, tag, 0);
  expect_allocate_tag(mock_local_journal, 0);

  // process
  EXPECT_CALL(mock_local_journal_replay, decode(_, _)).WillOnce(Return(0));
  expect_preprocess(mock_event_preprocessor, false, 0);
  expect_process(mock_local_journal_replay, 0, 0);
  EXPECT_CALL(mock_replay_status_formatter, handle_entry_processed(_));

  // the next event with preprocess
// preprocess (tail of the comment split from the previous source line)
  expect_try_pop_front(mock_remote_journaler, tag.tid, true);
  EXPECT_CALL(mock_local_journal_replay, decode(_, _)).WillOnce(Return(0));
  expect_preprocess(mock_event_preprocessor, true, 0);
  expect_process(mock_local_journal_replay, 0, 0);
  EXPECT_CALL(mock_replay_status_formatter, handle_entry_processed(_));

  // attempt to process the next event
  C_SaferCond replay_ctx;
  expect_try_pop_front_return_no_entries(mock_remote_journaler, &replay_ctx);

  // fire
  remote_replay_handler->handle_entries_available();
  ASSERT_EQ(0, replay_ctx.wait());

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_journal,
                                        mock_remote_journaler,
                                        mock_local_journal_replay));
}

// A decode failure (-EINVAL) while processing a popped entry must surface
// through the listener and get_error_code().  (body continues on the next
// source line)
TEST_F(TestMockImageReplayerJournalReplayer, DecodeError) {
  librbd::MockTestJournal mock_local_journal;
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx,
                                                mock_local_journal};
  ::journal::MockJournaler mock_remote_journaler;
  MockReplayerListener mock_replayer_listener;
  MockThreads mock_threads{m_threads};
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_journaler, {});
  MockReplayer mock_replayer{
    &mock_threads, "local mirror uuid", &mock_state_builder,
    &mock_replayer_listener};

  ::journal::MockReplayEntry mock_replay_entry;
  expect_work_queue_repeatedly(mock_threads);
  expect_add_event_after_repeatedly(mock_threads);
  expect_get_commit_tid_in_debug(mock_replay_entry);
  expect_get_tag_tid_in_debug(mock_local_journal);

  InSequence seq;
  MockReplay mock_local_journal_replay;
  MockEventPreprocessor mock_event_preprocessor;
  MockReplayStatusFormatter mock_replay_status_formatter;
  librbd::journal::Listener* local_journal_listener = nullptr;
  ::journal::ReplayHandler* remote_replay_handler = nullptr;
  ::journal::JournalMetadataListener* remote_journaler_listener = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_replayer_listener, mock_local_journal,
                                   mock_remote_journaler,
                                   mock_local_journal_replay,
                                   &local_journal_listener,
                                   &remote_replay_handler,
// (continuation of DecodeError -- completes the init_entry_replayer() call)
                                   &remote_journaler_listener));

  cls::journal::Tag tag =
    {1, 0, encode_tag_data({librbd::Journal<>::LOCAL_MIRROR_UUID,
                            librbd::Journal<>::LOCAL_MIRROR_UUID,
                            true, 0, 0})};

  expect_try_pop_front(mock_remote_journaler, tag.tid, true);

  // replay_flush
  expect_shut_down(mock_local_journal_replay, false, 0);
  EXPECT_CALL(mock_local_journal, remove_listener(_));
  EXPECT_CALL(mock_local_journal, stop_external_replay());
  expect_start_external_replay(mock_local_journal, &mock_local_journal_replay,
                               0);
  expect_local_journal_add_listener(mock_local_journal,
                                    &local_journal_listener);
  expect_get_tag(mock_remote_journaler, tag, 0);
  expect_allocate_tag(mock_local_journal, 0);

  // process
  EXPECT_CALL(mock_replay_entry, get_data());
  EXPECT_CALL(mock_local_journal_replay, decode(_, _))
    .WillOnce(Return(-EINVAL));
  expect_notification(mock_threads, mock_replayer_listener);

  // fire
  remote_replay_handler->handle_entries_available();
  wait_for_notification();
  ASSERT_EQ(-EINVAL, mock_replayer.get_error_code());

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_journal,
                                        mock_remote_journaler,
                                        mock_local_journal_replay));
}

// An entry whose event timestamp triggers mirroring_replay_delay must be
// deferred via a timer task before being processed.  (body continues on the
// next source line)
TEST_F(TestMockImageReplayerJournalReplayer, DelayedReplay) {
  librbd::MockTestJournal mock_local_journal;
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx,
                                                mock_local_journal};
  ::journal::MockJournaler mock_remote_journaler;
  MockReplayerListener mock_replayer_listener;
  MockThreads mock_threads{m_threads};
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_journaler, {});
  MockReplayer mock_replayer{
    &mock_threads, "local mirror uuid", &mock_state_builder,
    &mock_replayer_listener};

  ::journal::MockReplayEntry mock_replay_entry;
  expect_work_queue_repeatedly(mock_threads);
  expect_add_event_after_repeatedly(mock_threads);
  expect_get_commit_tid_in_debug(mock_replay_entry);
  expect_get_tag_tid_in_debug(mock_local_journal);
  expect_committed(mock_replay_entry, mock_remote_journaler, 1);

  InSequence seq;
  MockReplay
// (continuation of DelayedReplay -- declarator of the MockReplay above)
    mock_local_journal_replay;
  MockEventPreprocessor mock_event_preprocessor;
  MockReplayStatusFormatter mock_replay_status_formatter;
  librbd::journal::Listener* local_journal_listener = nullptr;
  ::journal::ReplayHandler* remote_replay_handler = nullptr;
  ::journal::JournalMetadataListener* remote_journaler_listener = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_replayer_listener, mock_local_journal,
                                   mock_remote_journaler,
                                   mock_local_journal_replay,
                                   &local_journal_listener,
                                   &remote_replay_handler,
                                   &remote_journaler_listener));

  cls::journal::Tag tag =
    {1, 0, encode_tag_data({librbd::Journal<>::LOCAL_MIRROR_UUID,
                            librbd::Journal<>::LOCAL_MIRROR_UUID,
                            true, 0, 0})};

  expect_try_pop_front(mock_remote_journaler, tag.tid, true);

  // replay_flush
  expect_shut_down(mock_local_journal_replay, false, 0);
  EXPECT_CALL(mock_local_journal, remove_listener(_));
  EXPECT_CALL(mock_local_journal, stop_external_replay());
  expect_start_external_replay(mock_local_journal, &mock_local_journal_replay,
                               0);
  expect_local_journal_add_listener(mock_local_journal,
                                    &local_journal_listener);
  expect_get_tag(mock_remote_journaler, tag, 0);
  expect_allocate_tag(mock_local_journal, 0);

  // process with delay
  EXPECT_CALL(mock_replay_entry, get_data());
  librbd::journal::EventEntry event_entry(
    librbd::journal::AioDiscardEvent(123, 345, 0), ceph_clock_now());
  EXPECT_CALL(mock_local_journal_replay, decode(_, _))
    .WillOnce(DoAll(SetArgPointee<1>(event_entry), Return(0)));

  // capture the timer task so the test can complete it manually
  Context* delayed_task_ctx = nullptr;
  EXPECT_CALL(*mock_threads.timer, add_event_after(_, _))
    .WillOnce(
      DoAll(Invoke([this, &delayed_task_ctx](double seconds, Context *ctx) {
              std::unique_lock locker{m_lock};
              delayed_task_ctx = ctx;
              m_cond.notify_all();
            }),
            ReturnArg<1>()));
  expect_preprocess(mock_event_preprocessor, false, 0);
  expect_process(mock_local_journal_replay, 0, 0);
  EXPECT_CALL(mock_replay_status_formatter, handle_entry_processed(_));

  // attempt to process the next event
  C_SaferCond replay_ctx;
// (continuation of DelayedReplay)
  expect_try_pop_front_return_no_entries(mock_remote_journaler, &replay_ctx);

  // fire
  mock_local_image_ctx.mirroring_replay_delay = 600;
  remote_replay_handler->handle_entries_available();
  {
    // wait until the delayed timer task has been captured
    std::unique_lock locker{m_lock};
    while (delayed_task_ctx == nullptr) {
      if (m_cond.wait_for(locker, 10s) == std::cv_status::timeout) {
        FAIL() << "timed out waiting for task";
        break;
      }
    }
  }
  {
    // complete the delayed task under the timer lock
    std::unique_lock timer_locker{mock_threads.timer_lock};
    delayed_task_ctx->complete(0);
  }
  ASSERT_EQ(0, replay_ctx.wait());

  // add a pending (delayed) entry before stop
  expect_try_pop_front(mock_remote_journaler, tag.tid, true);
  C_SaferCond decode_ctx;
  EXPECT_CALL(mock_local_journal_replay, decode(_, _))
    .WillOnce(DoAll(Invoke([&decode_ctx](bufferlist::const_iterator* it,
                                         librbd::journal::EventEntry *e) {
                      decode_ctx.complete(0);
                    }),
                    Return(0)));
  remote_replay_handler->handle_entries_available();
  ASSERT_EQ(0, decode_ctx.wait());

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_journal,
                                        mock_remote_journaler,
                                        mock_local_journal_replay));
}

// An -ENOMEM completion from the remote replay handler must stop replaying
// and expose the error code.  (body continues on the next source line)
TEST_F(TestMockImageReplayerJournalReplayer, ReplayNoMemoryError) {
  librbd::MockTestJournal mock_local_journal;
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx,
                                                mock_local_journal};
  ::journal::MockJournaler mock_remote_journaler;
  MockReplayerListener mock_replayer_listener;
  MockThreads mock_threads{m_threads};
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_journaler, {});
  MockReplayer mock_replayer{
    &mock_threads, "local mirror uuid", &mock_state_builder,
    &mock_replayer_listener};

  expect_work_queue_repeatedly(mock_threads);

  InSequence seq;
  MockReplay mock_local_journal_replay;
  MockEventPreprocessor mock_event_preprocessor;
  MockReplayStatusFormatter mock_replay_status_formatter;
  librbd::journal::Listener* local_journal_listener = nullptr;
  ::journal::ReplayHandler* remote_replay_handler = nullptr;
  ::journal::JournalMetadataListener* remote_journaler_listener = nullptr;
  ASSERT_EQ(0,
// (continuation of ReplayNoMemoryError -- arguments of the ASSERT_EQ above)
            init_entry_replayer(mock_replayer, mock_threads,
                                mock_replayer_listener, mock_local_journal,
                                mock_remote_journaler,
                                mock_local_journal_replay,
                                &local_journal_listener,
                                &remote_replay_handler,
                                &remote_journaler_listener));

  expect_notification(mock_threads, mock_replayer_listener);

  remote_replay_handler->handle_complete(-ENOMEM);
  wait_for_notification();
  ASSERT_EQ(false, mock_replayer.is_replaying());
  ASSERT_EQ(-ENOMEM, mock_replayer.get_error_code());

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_journal,
                                        mock_remote_journaler,
                                        mock_local_journal_replay));
}

// A force-promotion reported by the local journal listener must trigger a
// replayer-listener notification.  (body continues on the next source line)
TEST_F(TestMockImageReplayerJournalReplayer, LocalJournalForcePromoted) {
  librbd::MockTestJournal mock_local_journal;
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx,
                                                mock_local_journal};
  ::journal::MockJournaler mock_remote_journaler;
  MockReplayerListener mock_replayer_listener;
  MockThreads mock_threads{m_threads};
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_journaler, {});
  MockReplayer mock_replayer{
    &mock_threads, "local mirror uuid", &mock_state_builder,
    &mock_replayer_listener};

  expect_work_queue_repeatedly(mock_threads);

  InSequence seq;
  MockReplay mock_local_journal_replay;
  MockEventPreprocessor mock_event_preprocessor;
  MockReplayStatusFormatter mock_replay_status_formatter;
  librbd::journal::Listener* local_journal_listener = nullptr;
  ::journal::ReplayHandler* remote_replay_handler = nullptr;
  ::journal::JournalMetadataListener* remote_journaler_listener = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_replayer_listener, mock_local_journal,
                                   mock_remote_journaler,
                                   mock_local_journal_replay,
                                   &local_journal_listener,
                                   &remote_replay_handler,
                                   &remote_journaler_listener));

  expect_notification(mock_threads, mock_replayer_listener);

  local_journal_listener->handle_promoted();
  wait_for_notification();

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_journal,
                                        mock_remote_journaler,
// (continuation of LocalJournalForcePromoted -- final argument of
// shut_down_entry_replayer())
                                        mock_local_journal_replay));
}

// A resync request raised by the local journal listener must be reflected
// by is_resync_requested() after the listener notification.
TEST_F(TestMockImageReplayerJournalReplayer, LocalJournalResyncRequested) {
  librbd::MockTestJournal mock_local_journal;
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx,
                                                mock_local_journal};
  ::journal::MockJournaler mock_remote_journaler;
  MockReplayerListener mock_replayer_listener;
  MockThreads mock_threads{m_threads};
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_journaler, {});
  MockReplayer mock_replayer{
    &mock_threads, "local mirror uuid", &mock_state_builder,
    &mock_replayer_listener};

  expect_work_queue_repeatedly(mock_threads);

  InSequence seq;
  MockReplay mock_local_journal_replay;
  MockEventPreprocessor mock_event_preprocessor;
  MockReplayStatusFormatter mock_replay_status_formatter;
  librbd::journal::Listener* local_journal_listener = nullptr;
  ::journal::ReplayHandler* remote_replay_handler = nullptr;
  ::journal::JournalMetadataListener* remote_journaler_listener = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_replayer_listener, mock_local_journal,
                                   mock_remote_journaler,
                                   mock_local_journal_replay,
                                   &local_journal_listener,
                                   &remote_replay_handler,
                                   &remote_journaler_listener));

  expect_notification(mock_threads, mock_replayer_listener);

  local_journal_listener->handle_resync();
  wait_for_notification();

  ASSERT_TRUE(mock_replayer.is_resync_requested());

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_journal,
                                        mock_remote_journaler,
                                        mock_local_journal_replay));
}

// A remote client update reporting DISCONNECTED while replaying must stop
// replay with -ENOTCONN and (resync-after-disconnect enabled) request a
// resync.  (body continues on the next source line)
TEST_F(TestMockImageReplayerJournalReplayer, RemoteJournalDisconnected) {
  librbd::MockTestJournal mock_local_journal;
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx,
                                                mock_local_journal};
  mock_local_image_ctx.config.set_val("rbd_mirroring_resync_after_disconnect",
                                      "true");
  ::journal::MockJournaler mock_remote_journaler;
  MockReplayerListener mock_replayer_listener;
  MockThreads mock_threads{m_threads};
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
// (continuation of RemoteJournalDisconnected -- remaining arguments of the
// MockStateBuilder constructor)
                                      mock_remote_journaler, {});
  MockReplayer mock_replayer{
    &mock_threads, "local mirror uuid", &mock_state_builder,
    &mock_replayer_listener};

  expect_work_queue_repeatedly(mock_threads);

  InSequence seq;
  MockReplay mock_local_journal_replay;
  MockEventPreprocessor mock_event_preprocessor;
  MockReplayStatusFormatter mock_replay_status_formatter;
  librbd::journal::Listener* local_journal_listener = nullptr;
  ::journal::ReplayHandler* remote_replay_handler = nullptr;
  ::journal::JournalMetadataListener* remote_journaler_listener = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_replayer_listener, mock_local_journal,
                                   mock_remote_journaler,
                                   mock_local_journal_replay,
                                   &local_journal_listener,
                                   &remote_replay_handler,
                                   &remote_journaler_listener));

  // the metadata update now reports the client as DISCONNECTED
  expect_get_cached_client(mock_remote_journaler, "local mirror uuid",
                           {{}, {}, {},
                            cls::journal::CLIENT_STATE_DISCONNECTED},
                           {librbd::journal::MirrorPeerClientMeta{
                              mock_local_image_ctx.id}}, 0);
  expect_notification(mock_threads, mock_replayer_listener);

  remote_journaler_listener->handle_update(nullptr);
  wait_for_notification();

  ASSERT_EQ(-ENOTCONN, mock_replayer.get_error_code());
  ASSERT_FALSE(mock_replayer.is_replaying());
  ASSERT_TRUE(mock_replayer.is_resync_requested());

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_journal,
                                        mock_remote_journaler,
                                        mock_local_journal_replay));
}

// flush() flushes the local journal replay and then the remote commit
// position; both succeed here.  (body continues on the next source line)
TEST_F(TestMockImageReplayerJournalReplayer, Flush) {
  librbd::MockTestJournal mock_local_journal;
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx,
                                                mock_local_journal};
  ::journal::MockJournaler mock_remote_journaler;
  MockReplayerListener mock_replayer_listener;
  MockThreads mock_threads{m_threads};
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_journaler, {});
  MockReplayer mock_replayer{
    &mock_threads, "local mirror uuid", &mock_state_builder,
    &mock_replayer_listener};

  expect_work_queue_repeatedly(mock_threads);

  InSequence seq;
  MockReplay mock_local_journal_replay;
// (continuation of Flush)
  MockEventPreprocessor mock_event_preprocessor;
  MockReplayStatusFormatter mock_replay_status_formatter;
  librbd::journal::Listener* local_journal_listener = nullptr;
  ::journal::ReplayHandler* remote_replay_handler = nullptr;
  ::journal::JournalMetadataListener* remote_journaler_listener = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_replayer_listener, mock_local_journal,
                                   mock_remote_journaler,
                                   mock_local_journal_replay,
                                   &local_journal_listener,
                                   &remote_replay_handler,
                                   &remote_journaler_listener));

  expect_flush(mock_local_journal_replay, 0);
  expect_flush_commit_position(mock_remote_journaler, 0);

  C_SaferCond ctx;
  mock_replayer.flush(&ctx);
  ASSERT_EQ(0, ctx.wait());

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_journal,
                                        mock_remote_journaler,
                                        mock_local_journal_replay));
}

// flush() must propagate a local journal replay flush error (-EINVAL).
// (body continues on the next source line)
TEST_F(TestMockImageReplayerJournalReplayer, FlushError) {
  librbd::MockTestJournal mock_local_journal;
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx,
                                                mock_local_journal};
  ::journal::MockJournaler mock_remote_journaler;
  MockReplayerListener mock_replayer_listener;
  MockThreads mock_threads{m_threads};
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_journaler, {});
  MockReplayer mock_replayer{
    &mock_threads, "local mirror uuid", &mock_state_builder,
    &mock_replayer_listener};

  expect_work_queue_repeatedly(mock_threads);

  InSequence seq;
  MockReplay mock_local_journal_replay;
  MockEventPreprocessor mock_event_preprocessor;
  MockReplayStatusFormatter mock_replay_status_formatter;
  librbd::journal::Listener* local_journal_listener = nullptr;
  ::journal::ReplayHandler* remote_replay_handler = nullptr;
  ::journal::JournalMetadataListener* remote_journaler_listener = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_replayer_listener, mock_local_journal,
                                   mock_remote_journaler,
                                   mock_local_journal_replay,
                                   &local_journal_listener,
                                   &remote_replay_handler,
                                   &remote_journaler_listener));
// (continuation of FlushError)
  expect_flush(mock_local_journal_replay, -EINVAL);

  C_SaferCond ctx;
  mock_replayer.flush(&ctx);
  ASSERT_EQ(-EINVAL, ctx.wait());

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_journal,
                                        mock_remote_journaler,
                                        mock_local_journal_replay));
}

// flush() must propagate an error (-EINVAL) from flushing the remote
// commit position after a successful local flush.
TEST_F(TestMockImageReplayerJournalReplayer, FlushCommitPositionError) {
  librbd::MockTestJournal mock_local_journal;
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx,
                                                mock_local_journal};
  ::journal::MockJournaler mock_remote_journaler;
  MockReplayerListener mock_replayer_listener;
  MockThreads mock_threads{m_threads};
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_journaler, {});
  MockReplayer mock_replayer{
    &mock_threads, "local mirror uuid", &mock_state_builder,
    &mock_replayer_listener};

  expect_work_queue_repeatedly(mock_threads);

  InSequence seq;
  MockReplay mock_local_journal_replay;
  MockEventPreprocessor mock_event_preprocessor;
  MockReplayStatusFormatter mock_replay_status_formatter;
  librbd::journal::Listener* local_journal_listener = nullptr;
  ::journal::ReplayHandler* remote_replay_handler = nullptr;
  ::journal::JournalMetadataListener* remote_journaler_listener = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_replayer_listener, mock_local_journal,
                                   mock_remote_journaler,
                                   mock_local_journal_replay,
                                   &local_journal_listener,
                                   &remote_replay_handler,
                                   &remote_journaler_listener));

  expect_flush(mock_local_journal_replay, 0);
  expect_flush_commit_position(mock_remote_journaler, -EINVAL);

  C_SaferCond ctx;
  mock_replayer.flush(&ctx);
  ASSERT_EQ(-EINVAL, ctx.wait());

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_journal,
                                        mock_remote_journaler,
                                        mock_local_journal_replay));
}

// A replay-flush whose local shut-down step fails (-EINVAL) must surface
// the error and still allow a clean shut_down().  (body continues on the
// next source line)
TEST_F(TestMockImageReplayerJournalReplayer, ReplayFlushShutDownError) {
  librbd::MockTestJournal mock_local_journal;
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx,
                                                mock_local_journal};
  ::journal::MockJournaler
// (continuation of ReplayFlushShutDownError -- declarator of the
// ::journal::MockJournaler above)
    mock_remote_journaler;
  MockReplayerListener mock_replayer_listener;
  MockThreads mock_threads{m_threads};
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_journaler, {});
  MockReplayer mock_replayer{
    &mock_threads, "local mirror uuid", &mock_state_builder,
    &mock_replayer_listener};

  ::journal::MockReplayEntry mock_replay_entry;
  expect_get_commit_tid_in_debug(mock_replay_entry);
  expect_work_queue_repeatedly(mock_threads);

  InSequence seq;
  MockReplay mock_local_journal_replay;
  MockEventPreprocessor mock_event_preprocessor;
  MockReplayStatusFormatter mock_replay_status_formatter;
  librbd::journal::Listener* local_journal_listener = nullptr;
  ::journal::ReplayHandler* remote_replay_handler = nullptr;
  ::journal::JournalMetadataListener* remote_journaler_listener = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_replayer_listener, mock_local_journal,
                                   mock_remote_journaler,
                                   mock_local_journal_replay,
                                   &local_journal_listener,
                                   &remote_replay_handler,
                                   &remote_journaler_listener));

  expect_try_pop_front(mock_remote_journaler, 1, true);
  // replay-flush: local replay shut-down fails
  expect_shut_down(mock_local_journal_replay, false, -EINVAL);
  EXPECT_CALL(mock_local_journal, remove_listener(_));
  EXPECT_CALL(mock_local_journal, stop_external_replay());
  expect_notification(mock_threads, mock_replayer_listener);

  remote_replay_handler->handle_entries_available();
  wait_for_notification();

  ASSERT_EQ(-EINVAL, mock_replayer.get_error_code());

  MockCloseImageRequest mock_close_image_request;
  expect_send(mock_close_image_request, 0);
  expect_stop_replay(mock_remote_journaler, 0);
  EXPECT_CALL(mock_remote_journaler, remove_listener(_));

  C_SaferCond shutdown_ctx;
  mock_replayer.shut_down(&shutdown_ctx);
  ASSERT_EQ(0, shutdown_ctx.wait());
}

// A replay-flush whose external-replay restart fails (-EINVAL) must surface
// the error and still allow a clean shut_down().  (body continues on the
// next source line)
TEST_F(TestMockImageReplayerJournalReplayer, ReplayFlushStartError) {
  librbd::MockTestJournal mock_local_journal;
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx,
                                                mock_local_journal};
  ::journal::MockJournaler mock_remote_journaler;
MockReplayerListener mock_replayer_listener; MockThreads mock_threads{m_threads}; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_journaler, {}); MockReplayer mock_replayer{ &mock_threads, "local mirror uuid", &mock_state_builder, &mock_replayer_listener}; ::journal::MockReplayEntry mock_replay_entry; expect_get_commit_tid_in_debug(mock_replay_entry); expect_work_queue_repeatedly(mock_threads); InSequence seq; MockReplay mock_local_journal_replay; MockEventPreprocessor mock_event_preprocessor; MockReplayStatusFormatter mock_replay_status_formatter; librbd::journal::Listener* local_journal_listener = nullptr; ::journal::ReplayHandler* remote_replay_handler = nullptr; ::journal::JournalMetadataListener* remote_journaler_listener = nullptr; ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads, mock_replayer_listener, mock_local_journal, mock_remote_journaler, mock_local_journal_replay, &local_journal_listener, &remote_replay_handler, &remote_journaler_listener)); expect_try_pop_front(mock_remote_journaler, 1, true); expect_shut_down(mock_local_journal_replay, false, 0); EXPECT_CALL(mock_local_journal, remove_listener(_)); EXPECT_CALL(mock_local_journal, stop_external_replay()); expect_start_external_replay(mock_local_journal, nullptr, -EINVAL); expect_notification(mock_threads, mock_replayer_listener); remote_replay_handler->handle_entries_available(); wait_for_notification(); ASSERT_EQ(-EINVAL, mock_replayer.get_error_code()); MockCloseImageRequest mock_close_image_request; expect_send(mock_close_image_request, 0); expect_stop_replay(mock_remote_journaler, 0); EXPECT_CALL(mock_remote_journaler, remove_listener(_)); C_SaferCond shutdown_ctx; mock_replayer.shut_down(&shutdown_ctx); ASSERT_EQ(0, shutdown_ctx.wait()); } TEST_F(TestMockImageReplayerJournalReplayer, GetTagError) { librbd::MockTestJournal mock_local_journal; librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx, mock_local_journal}; ::journal::MockJournaler 
mock_remote_journaler; MockReplayerListener mock_replayer_listener; MockThreads mock_threads{m_threads}; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_journaler, {}); MockReplayer mock_replayer{ &mock_threads, "local mirror uuid", &mock_state_builder, &mock_replayer_listener}; ::journal::MockReplayEntry mock_replay_entry; expect_get_commit_tid_in_debug(mock_replay_entry); expect_work_queue_repeatedly(mock_threads); InSequence seq; MockReplay mock_local_journal_replay; MockEventPreprocessor mock_event_preprocessor; MockReplayStatusFormatter mock_replay_status_formatter; librbd::journal::Listener* local_journal_listener = nullptr; ::journal::ReplayHandler* remote_replay_handler = nullptr; ::journal::JournalMetadataListener* remote_journaler_listener = nullptr; ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads, mock_replayer_listener, mock_local_journal, mock_remote_journaler, mock_local_journal_replay, &local_journal_listener, &remote_replay_handler, &remote_journaler_listener)); cls::journal::Tag tag = {1, 0, encode_tag_data({librbd::Journal<>::LOCAL_MIRROR_UUID, librbd::Journal<>::LOCAL_MIRROR_UUID, true, 0, 0})}; expect_try_pop_front(mock_remote_journaler, tag.tid, true); expect_shut_down(mock_local_journal_replay, false, 0); EXPECT_CALL(mock_local_journal, remove_listener(_)); EXPECT_CALL(mock_local_journal, stop_external_replay()); expect_start_external_replay(mock_local_journal, &mock_local_journal_replay, 0); expect_local_journal_add_listener(mock_local_journal, &local_journal_listener); expect_get_tag(mock_remote_journaler, tag, -EINVAL); expect_notification(mock_threads, mock_replayer_listener); remote_replay_handler->handle_entries_available(); wait_for_notification(); ASSERT_EQ(-EINVAL, mock_replayer.get_error_code()); ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads, mock_local_journal, mock_remote_journaler, mock_local_journal_replay)); } TEST_F(TestMockImageReplayerJournalReplayer, AllocateTagDemotion) 
{ librbd::MockTestJournal mock_local_journal; librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx, mock_local_journal}; ::journal::MockJournaler mock_remote_journaler; MockReplayerListener mock_replayer_listener; MockThreads mock_threads{m_threads}; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_journaler, {}); MockReplayer mock_replayer{ &mock_threads, "local mirror uuid", &mock_state_builder, &mock_replayer_listener}; ::journal::MockReplayEntry mock_replay_entry; expect_work_queue_repeatedly(mock_threads); expect_notification(mock_threads, mock_replayer_listener); expect_get_commit_tid_in_debug(mock_replay_entry); expect_get_tag_tid_in_debug(mock_local_journal); expect_committed(mock_replay_entry, mock_remote_journaler, 1); InSequence seq; MockReplay mock_local_journal_replay; MockEventPreprocessor mock_event_preprocessor; MockReplayStatusFormatter mock_replay_status_formatter; librbd::journal::Listener* local_journal_listener = nullptr; ::journal::ReplayHandler* remote_replay_handler = nullptr; ::journal::JournalMetadataListener* remote_journaler_listener = nullptr; ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads, mock_replayer_listener, mock_local_journal, mock_remote_journaler, mock_local_journal_replay, &local_journal_listener, &remote_replay_handler, &remote_journaler_listener)); cls::journal::Tag tag = {1, 0, encode_tag_data({librbd::Journal<>::ORPHAN_MIRROR_UUID, librbd::Journal<>::LOCAL_MIRROR_UUID, true, 0, 0})}; expect_try_pop_front(mock_remote_journaler, tag.tid, true); expect_shut_down(mock_local_journal_replay, false, 0); EXPECT_CALL(mock_local_journal, remove_listener(_)); EXPECT_CALL(mock_local_journal, stop_external_replay()); expect_start_external_replay(mock_local_journal, &mock_local_journal_replay, 0); expect_local_journal_add_listener(mock_local_journal, &local_journal_listener); expect_get_tag(mock_remote_journaler, tag, 0); expect_get_tag_data(mock_local_journal, {}); 
expect_allocate_tag(mock_local_journal, 0); EXPECT_CALL(mock_local_journal_replay, decode(_, _)).WillOnce(Return(0)); expect_preprocess(mock_event_preprocessor, false, 0); expect_process(mock_local_journal_replay, 0, 0); EXPECT_CALL(mock_replay_status_formatter, handle_entry_processed(_)); remote_replay_handler->handle_entries_available(); wait_for_notification(); ASSERT_FALSE(mock_replayer.is_replaying()); ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads, mock_local_journal, mock_remote_journaler, mock_local_journal_replay)); } TEST_F(TestMockImageReplayerJournalReplayer, AllocateTagError) { librbd::MockTestJournal mock_local_journal; librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx, mock_local_journal}; ::journal::MockJournaler mock_remote_journaler; MockReplayerListener mock_replayer_listener; MockThreads mock_threads{m_threads}; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_journaler, {}); MockReplayer mock_replayer{ &mock_threads, "local mirror uuid", &mock_state_builder, &mock_replayer_listener}; ::journal::MockReplayEntry mock_replay_entry; expect_work_queue_repeatedly(mock_threads); expect_get_commit_tid_in_debug(mock_replay_entry); expect_get_tag_tid_in_debug(mock_local_journal); InSequence seq; MockReplay mock_local_journal_replay; MockEventPreprocessor mock_event_preprocessor; MockReplayStatusFormatter mock_replay_status_formatter; librbd::journal::Listener* local_journal_listener = nullptr; ::journal::ReplayHandler* remote_replay_handler = nullptr; ::journal::JournalMetadataListener* remote_journaler_listener = nullptr; ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads, mock_replayer_listener, mock_local_journal, mock_remote_journaler, mock_local_journal_replay, &local_journal_listener, &remote_replay_handler, &remote_journaler_listener)); cls::journal::Tag tag = {1, 0, encode_tag_data({librbd::Journal<>::LOCAL_MIRROR_UUID, librbd::Journal<>::LOCAL_MIRROR_UUID, true, 0, 0})}; 
expect_try_pop_front(mock_remote_journaler, tag.tid, true); expect_shut_down(mock_local_journal_replay, false, 0); EXPECT_CALL(mock_local_journal, remove_listener(_)); EXPECT_CALL(mock_local_journal, stop_external_replay()); expect_start_external_replay(mock_local_journal, &mock_local_journal_replay, 0); expect_local_journal_add_listener(mock_local_journal, &local_journal_listener); expect_get_tag(mock_remote_journaler, tag, 0); expect_allocate_tag(mock_local_journal, -EINVAL); expect_notification(mock_threads, mock_replayer_listener); remote_replay_handler->handle_entries_available(); wait_for_notification(); ASSERT_FALSE(mock_replayer.is_replaying()); ASSERT_EQ(-EINVAL, mock_replayer.get_error_code()); ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads, mock_local_journal, mock_remote_journaler, mock_local_journal_replay)); } TEST_F(TestMockImageReplayerJournalReplayer, PreprocessError) { librbd::MockTestJournal mock_local_journal; librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx, mock_local_journal}; ::journal::MockJournaler mock_remote_journaler; MockReplayerListener mock_replayer_listener; MockThreads mock_threads{m_threads}; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_journaler, {}); MockReplayer mock_replayer{ &mock_threads, "local mirror uuid", &mock_state_builder, &mock_replayer_listener}; ::journal::MockReplayEntry mock_replay_entry; expect_work_queue_repeatedly(mock_threads); expect_get_commit_tid_in_debug(mock_replay_entry); expect_get_tag_tid_in_debug(mock_local_journal); InSequence seq; MockReplay mock_local_journal_replay; MockEventPreprocessor mock_event_preprocessor; MockReplayStatusFormatter mock_replay_status_formatter; librbd::journal::Listener* local_journal_listener = nullptr; ::journal::ReplayHandler* remote_replay_handler = nullptr; ::journal::JournalMetadataListener* remote_journaler_listener = nullptr; ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads, 
mock_replayer_listener, mock_local_journal, mock_remote_journaler, mock_local_journal_replay, &local_journal_listener, &remote_replay_handler, &remote_journaler_listener)); cls::journal::Tag tag = {1, 0, encode_tag_data({librbd::Journal<>::LOCAL_MIRROR_UUID, librbd::Journal<>::LOCAL_MIRROR_UUID, true, 0, 0})}; expect_try_pop_front(mock_remote_journaler, tag.tid, true); expect_shut_down(mock_local_journal_replay, false, 0); EXPECT_CALL(mock_local_journal, remove_listener(_)); EXPECT_CALL(mock_local_journal, stop_external_replay()); expect_start_external_replay(mock_local_journal, &mock_local_journal_replay, 0); expect_local_journal_add_listener(mock_local_journal, &local_journal_listener); expect_get_tag(mock_remote_journaler, tag, 0); expect_allocate_tag(mock_local_journal, 0); EXPECT_CALL(mock_replay_entry, get_data()); EXPECT_CALL(mock_local_journal_replay, decode(_, _)).WillOnce(Return(0)); expect_preprocess(mock_event_preprocessor, true, -EINVAL); expect_notification(mock_threads, mock_replayer_listener); remote_replay_handler->handle_entries_available(); wait_for_notification(); ASSERT_FALSE(mock_replayer.is_replaying()); ASSERT_EQ(-EINVAL, mock_replayer.get_error_code()); ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads, mock_local_journal, mock_remote_journaler, mock_local_journal_replay)); } TEST_F(TestMockImageReplayerJournalReplayer, ProcessError) { librbd::MockTestJournal mock_local_journal; librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx, mock_local_journal}; ::journal::MockJournaler mock_remote_journaler; MockReplayerListener mock_replayer_listener; MockThreads mock_threads{m_threads}; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_journaler, {}); MockReplayer mock_replayer{ &mock_threads, "local mirror uuid", &mock_state_builder, &mock_replayer_listener}; ::journal::MockReplayEntry mock_replay_entry; expect_work_queue_repeatedly(mock_threads); expect_get_commit_tid_in_debug(mock_replay_entry); 
expect_get_tag_tid_in_debug(mock_local_journal); expect_notification(mock_threads, mock_replayer_listener); InSequence seq; MockReplay mock_local_journal_replay; MockEventPreprocessor mock_event_preprocessor; MockReplayStatusFormatter mock_replay_status_formatter; librbd::journal::Listener* local_journal_listener = nullptr; ::journal::ReplayHandler* remote_replay_handler = nullptr; ::journal::JournalMetadataListener* remote_journaler_listener = nullptr; ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads, mock_replayer_listener, mock_local_journal, mock_remote_journaler, mock_local_journal_replay, &local_journal_listener, &remote_replay_handler, &remote_journaler_listener)); cls::journal::Tag tag = {1, 0, encode_tag_data({librbd::Journal<>::LOCAL_MIRROR_UUID, librbd::Journal<>::LOCAL_MIRROR_UUID, true, 0, 0})}; expect_try_pop_front(mock_remote_journaler, tag.tid, true); expect_shut_down(mock_local_journal_replay, false, 0); EXPECT_CALL(mock_local_journal, remove_listener(_)); EXPECT_CALL(mock_local_journal, stop_external_replay()); expect_start_external_replay(mock_local_journal, &mock_local_journal_replay, 0); expect_local_journal_add_listener(mock_local_journal, &local_journal_listener); expect_get_tag(mock_remote_journaler, tag, 0); expect_allocate_tag(mock_local_journal, 0); EXPECT_CALL(mock_replay_entry, get_data()); EXPECT_CALL(mock_local_journal_replay, decode(_, _)).WillOnce(Return(0)); expect_preprocess(mock_event_preprocessor, false, 0); expect_process(mock_local_journal_replay, 0, -EINVAL); EXPECT_CALL(mock_replay_status_formatter, handle_entry_processed(_)); // attempt to process the next event C_SaferCond replay_ctx; expect_try_pop_front_return_no_entries(mock_remote_journaler, &replay_ctx); remote_replay_handler->handle_entries_available(); wait_for_notification(); ASSERT_FALSE(mock_replayer.is_replaying()); ASSERT_EQ(-EINVAL, mock_replayer.get_error_code()); ASSERT_EQ(0, replay_ctx.wait()); ASSERT_EQ(0, 
shut_down_entry_replayer(mock_replayer, mock_threads, mock_local_journal, mock_remote_journaler, mock_local_journal_replay)); } TEST_F(TestMockImageReplayerJournalReplayer, ImageNameUpdated) { librbd::MockTestJournal mock_local_journal; librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx, mock_local_journal}; ::journal::MockJournaler mock_remote_journaler; MockReplayerListener mock_replayer_listener; MockThreads mock_threads{m_threads}; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_journaler, {}); MockReplayer mock_replayer{ &mock_threads, "local mirror uuid", &mock_state_builder, &mock_replayer_listener}; ::journal::MockReplayEntry mock_replay_entry; expect_work_queue_repeatedly(mock_threads); expect_add_event_after_repeatedly(mock_threads); expect_get_commit_tid_in_debug(mock_replay_entry); expect_get_tag_tid_in_debug(mock_local_journal); expect_committed(mock_replay_entry, mock_remote_journaler, 1); expect_notification(mock_threads, mock_replayer_listener); InSequence seq; MockReplay mock_local_journal_replay; MockEventPreprocessor mock_event_preprocessor; MockReplayStatusFormatter mock_replay_status_formatter; librbd::journal::Listener* local_journal_listener = nullptr; ::journal::ReplayHandler* remote_replay_handler = nullptr; ::journal::JournalMetadataListener* remote_journaler_listener = nullptr; ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads, mock_replayer_listener, mock_local_journal, mock_remote_journaler, mock_local_journal_replay, &local_journal_listener, &remote_replay_handler, &remote_journaler_listener)); mock_local_image_ctx.name = "NEW NAME"; cls::journal::Tag tag = {1, 0, encode_tag_data({librbd::Journal<>::LOCAL_MIRROR_UUID, librbd::Journal<>::LOCAL_MIRROR_UUID, true, 0, 0})}; expect_try_pop_front(mock_remote_journaler, tag.tid, true); expect_shut_down(mock_local_journal_replay, false, 0); EXPECT_CALL(mock_local_journal, remove_listener(_)); EXPECT_CALL(mock_local_journal, 
stop_external_replay()); expect_start_external_replay(mock_local_journal, &mock_local_journal_replay, 0); expect_local_journal_add_listener(mock_local_journal, &local_journal_listener); expect_get_tag(mock_remote_journaler, tag, 0); expect_allocate_tag(mock_local_journal, 0); EXPECT_CALL(mock_local_journal_replay, decode(_, _)).WillOnce(Return(0)); expect_preprocess(mock_event_preprocessor, false, 0); expect_process(mock_local_journal_replay, 0, 0); EXPECT_CALL(mock_replay_status_formatter, handle_entry_processed(_)); // attempt to process the next event C_SaferCond replay_ctx; expect_try_pop_front_return_no_entries(mock_remote_journaler, &replay_ctx); remote_replay_handler->handle_entries_available(); wait_for_notification(); auto image_spec = util::compute_image_spec(m_local_io_ctx, "NEW NAME"); ASSERT_EQ(image_spec, mock_replayer.get_image_spec()); ASSERT_EQ(0, replay_ctx.wait()); ASSERT_TRUE(mock_replayer.is_replaying()); ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads, mock_local_journal, mock_remote_journaler, mock_local_journal_replay)); } } // namespace journal } // namespace image_replayer } // namespace mirror } // namespace rbd
90,150
40.678687
119
cc
null
ceph-main/src/test/rbd_mirror/image_replayer/snapshot/test_mock_ApplyImageStateRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/rbd_mirror/test_mock_fixture.h" #include "librbd/internal.h" #include "librbd/Operations.h" #include "librbd/image/GetMetadataRequest.h" #include "tools/rbd_mirror/Threads.h" #include "tools/rbd_mirror/image_replayer/snapshot/ApplyImageStateRequest.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "test/librbd/mock/MockImageCtx.h" namespace librbd { namespace { struct MockTestImageCtx : public librbd::MockImageCtx { explicit MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace namespace image { template <> struct GetMetadataRequest<MockTestImageCtx> { std::map<std::string, bufferlist>* pairs = nullptr; Context* on_finish = nullptr; static GetMetadataRequest* s_instance; static GetMetadataRequest* create(librados::IoCtx& io_ctx, const std::string& oid, bool filter_internal, const std::string& filter_key_prefix, const std::string& last_key, size_t max_results, std::map<std::string, bufferlist>* pairs, Context* on_finish) { ceph_assert(s_instance != nullptr); s_instance->pairs = pairs; s_instance->on_finish = on_finish; return s_instance; } GetMetadataRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; GetMetadataRequest<MockTestImageCtx>* GetMetadataRequest<MockTestImageCtx>::s_instance = nullptr; } // namespace image } // namespace librbd #include "tools/rbd_mirror/image_replayer/snapshot/ApplyImageStateRequest.cc" using ::testing::_; using ::testing::InSequence; using ::testing::Invoke; using ::testing::Return; using ::testing::StrEq; using ::testing::WithArg; namespace rbd { namespace mirror { namespace image_replayer { namespace snapshot { class TestMockImageReplayerSnapshotApplyImageStateRequest : public TestMockFixture { public: typedef ApplyImageStateRequest<librbd::MockTestImageCtx> MockApplyImageStateRequest; typedef 
librbd::image::GetMetadataRequest<librbd::MockTestImageCtx> MockGetMetadataRequest; void SetUp() override { TestMockFixture::SetUp(); librbd::RBD rbd; ASSERT_EQ(0, create_image(rbd, m_local_io_ctx, m_image_name, m_image_size)); ASSERT_EQ(0, open_image(m_local_io_ctx, m_image_name, &m_local_image_ctx)); ASSERT_EQ(0, create_image(rbd, m_remote_io_ctx, m_image_name, m_image_size)); ASSERT_EQ(0, open_image(m_remote_io_ctx, m_image_name, &m_remote_image_ctx)); m_mock_local_image_ctx = new librbd::MockTestImageCtx(*m_local_image_ctx); m_mock_remote_image_ctx = new librbd::MockTestImageCtx(*m_remote_image_ctx); } void TearDown() override { delete m_mock_remote_image_ctx; delete m_mock_local_image_ctx; TestMockFixture::TearDown(); } void expect_rename_image(const std::string& name, int r) { EXPECT_CALL(*m_mock_local_image_ctx->operations, execute_rename(name, _)) .WillOnce(WithArg<1>(Invoke([this, r](Context* ctx) { m_threads->work_queue->queue(ctx, r); }))); } void expect_update_features(uint64_t features, bool enable, int r) { EXPECT_CALL(*m_mock_local_image_ctx->operations, execute_update_features(features, enable, _, 0U)) .WillOnce(WithArg<2>(Invoke([this, r](Context* ctx) { m_threads->work_queue->queue(ctx, r); }))); } void expect_get_metadata(MockGetMetadataRequest& mock_get_metadata_request, const std::map<std::string, bufferlist>& pairs, int r) { EXPECT_CALL(mock_get_metadata_request, send()) .WillOnce(Invoke([this, &mock_get_metadata_request, pairs, r]() { *mock_get_metadata_request.pairs = pairs; m_threads->work_queue->queue(mock_get_metadata_request.on_finish, r); })); } void expect_update_metadata(const std::vector<std::string>& remove, const std::map<std::string, bufferlist>& pairs, int r) { for (auto& key : remove) { bufferlist bl; ceph::encode(key, bl); EXPECT_CALL(get_mock_io_ctx(m_mock_local_image_ctx->md_ctx), exec(m_mock_local_image_ctx->header_oid, _, StrEq("rbd"), StrEq("metadata_remove"), ContentsEqual(bl), _, _, _)) .WillOnce(Return(r)); if (r < 0) { 
return; } } if (!pairs.empty()) { bufferlist bl; ceph::encode(pairs, bl); EXPECT_CALL(get_mock_io_ctx(m_mock_local_image_ctx->md_ctx), exec(m_mock_local_image_ctx->header_oid, _, StrEq("rbd"), StrEq("metadata_set"), ContentsEqual(bl), _, _, _)) .WillOnce(Return(r)); } } void expect_unprotect_snapshot(const std::string& name, int r) { EXPECT_CALL(*m_mock_local_image_ctx->operations, execute_snap_unprotect({cls::rbd::UserSnapshotNamespace{}}, name, _)) .WillOnce(WithArg<2>(Invoke([this, r](Context* ctx) { m_threads->work_queue->queue(ctx, r); }))); } void expect_remove_snapshot(const std::string& name, int r) { EXPECT_CALL(*m_mock_local_image_ctx->operations, execute_snap_remove({cls::rbd::UserSnapshotNamespace{}}, name, _)) .WillOnce(WithArg<2>(Invoke([this, r](Context* ctx) { m_threads->work_queue->queue(ctx, r); }))); } void expect_protect_snapshot(const std::string& name, int r) { EXPECT_CALL(*m_mock_local_image_ctx->operations, execute_snap_protect({cls::rbd::UserSnapshotNamespace{}}, name, _)) .WillOnce(WithArg<2>(Invoke([this, r](Context* ctx) { m_threads->work_queue->queue(ctx, r); }))); } void expect_rename_snapshot(uint64_t snap_id, const std::string& name, int r) { EXPECT_CALL(*m_mock_local_image_ctx->operations, execute_snap_rename(snap_id, name, _)) .WillOnce(WithArg<2>(Invoke([this, r](Context* ctx) { m_threads->work_queue->queue(ctx, r); }))); } void expect_set_snap_limit(uint64_t limit, int r) { EXPECT_CALL(*m_mock_local_image_ctx->operations, execute_snap_set_limit(limit, _)) .WillOnce(WithArg<1>(Invoke([this, r](Context* ctx) { m_threads->work_queue->queue(ctx, r); }))); } librbd::ImageCtx *m_local_image_ctx; librbd::ImageCtx *m_remote_image_ctx; librbd::MockTestImageCtx *m_mock_local_image_ctx = nullptr; librbd::MockTestImageCtx *m_mock_remote_image_ctx = nullptr; }; TEST_F(TestMockImageReplayerSnapshotApplyImageStateRequest, NoChanges) { InSequence seq; MockGetMetadataRequest mock_get_metadata_request; 
expect_get_metadata(mock_get_metadata_request, {}, 0); expect_set_snap_limit(0, 0); librbd::mirror::snapshot::ImageState image_state; image_state.name = m_image_name; image_state.features = m_remote_image_ctx->features; C_SaferCond ctx; auto req = MockApplyImageStateRequest::create( "local mirror uuid", "remote mirror uuid", m_mock_local_image_ctx, m_mock_remote_image_ctx, image_state, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageReplayerSnapshotApplyImageStateRequest, RenameImage) { InSequence seq; expect_rename_image("new name", 0); MockGetMetadataRequest mock_get_metadata_request; expect_get_metadata(mock_get_metadata_request, {}, 0); expect_set_snap_limit(0, 0); librbd::mirror::snapshot::ImageState image_state; image_state.name = "new name"; image_state.features = m_remote_image_ctx->features; C_SaferCond ctx; auto req = MockApplyImageStateRequest::create( "local mirror uuid", "remote mirror uuid", m_mock_local_image_ctx, m_mock_remote_image_ctx, image_state, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageReplayerSnapshotApplyImageStateRequest, RenameImageError) { InSequence seq; expect_rename_image("new name", -EINVAL); librbd::mirror::snapshot::ImageState image_state; image_state.name = "new name"; image_state.features = m_remote_image_ctx->features; C_SaferCond ctx; auto req = MockApplyImageStateRequest::create( "local mirror uuid", "remote mirror uuid", m_mock_local_image_ctx, m_mock_remote_image_ctx, image_state, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerSnapshotApplyImageStateRequest, UpdateFeatures) { InSequence seq; expect_update_features(RBD_FEATURE_DEEP_FLATTEN, false, 0); expect_update_features(RBD_FEATURE_OBJECT_MAP, true, 0); MockGetMetadataRequest mock_get_metadata_request; expect_get_metadata(mock_get_metadata_request, {}, 0); expect_set_snap_limit(0, 0); librbd::mirror::snapshot::ImageState image_state; image_state.name = m_image_name; image_state.features = 
RBD_FEATURE_EXCLUSIVE_LOCK | RBD_FEATURE_OBJECT_MAP; m_mock_local_image_ctx->features = RBD_FEATURE_EXCLUSIVE_LOCK | RBD_FEATURE_DEEP_FLATTEN; C_SaferCond ctx; auto req = MockApplyImageStateRequest::create( "local mirror uuid", "remote mirror uuid", m_mock_local_image_ctx, m_mock_remote_image_ctx, image_state, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageReplayerSnapshotApplyImageStateRequest, UpdateFeaturesError) { InSequence seq; expect_update_features(RBD_FEATURE_DEEP_FLATTEN, false, -EINVAL); librbd::mirror::snapshot::ImageState image_state; image_state.name = m_image_name; image_state.features = RBD_FEATURE_EXCLUSIVE_LOCK | RBD_FEATURE_OBJECT_MAP; m_mock_local_image_ctx->features = RBD_FEATURE_EXCLUSIVE_LOCK | RBD_FEATURE_DEEP_FLATTEN; C_SaferCond ctx; auto req = MockApplyImageStateRequest::create( "local mirror uuid", "remote mirror uuid", m_mock_local_image_ctx, m_mock_remote_image_ctx, image_state, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerSnapshotApplyImageStateRequest, UpdateImageMeta) { InSequence seq; bufferlist data_bl; ceph::encode("data", data_bl); MockGetMetadataRequest mock_get_metadata_request; expect_get_metadata(mock_get_metadata_request, {{"key1", {}}, {"key2", {}}}, 0); expect_update_metadata({"key2"}, {{"key1", data_bl}}, 0); expect_set_snap_limit(0, 0); librbd::mirror::snapshot::ImageState image_state; image_state.name = m_image_name; image_state.features = m_remote_image_ctx->features; image_state.metadata = {{"key1", data_bl}}; C_SaferCond ctx; auto req = MockApplyImageStateRequest::create( "local mirror uuid", "remote mirror uuid", m_mock_local_image_ctx, m_mock_remote_image_ctx, image_state, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageReplayerSnapshotApplyImageStateRequest, GetImageMetaError) { InSequence seq; bufferlist data_bl; ceph::encode("data", data_bl); MockGetMetadataRequest mock_get_metadata_request; 
expect_get_metadata(mock_get_metadata_request, {{"key1", {}}, {"key2", {}}}, -EINVAL); librbd::mirror::snapshot::ImageState image_state; image_state.name = m_image_name; image_state.features = m_remote_image_ctx->features; image_state.metadata = {{"key1", data_bl}}; C_SaferCond ctx; auto req = MockApplyImageStateRequest::create( "local mirror uuid", "remote mirror uuid", m_mock_local_image_ctx, m_mock_remote_image_ctx, image_state, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerSnapshotApplyImageStateRequest, UpdateImageMetaError) { InSequence seq; bufferlist data_bl; ceph::encode("data", data_bl); MockGetMetadataRequest mock_get_metadata_request; expect_get_metadata(mock_get_metadata_request, {{"key1", {}}, {"key2", {}}}, 0); expect_update_metadata({"key2"}, {{"key1", data_bl}}, -EINVAL); librbd::mirror::snapshot::ImageState image_state; image_state.name = m_image_name; image_state.features = m_remote_image_ctx->features; image_state.metadata = {{"key1", data_bl}}; C_SaferCond ctx; auto req = MockApplyImageStateRequest::create( "local mirror uuid", "remote mirror uuid", m_mock_local_image_ctx, m_mock_remote_image_ctx, image_state, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerSnapshotApplyImageStateRequest, UnprotectSnapshot) { InSequence seq; MockGetMetadataRequest mock_get_metadata_request; expect_get_metadata(mock_get_metadata_request, {}, 0); expect_unprotect_snapshot("snap1", 0); expect_set_snap_limit(0, 0); librbd::mirror::snapshot::ImageState image_state; image_state.name = m_image_name; image_state.features = m_remote_image_ctx->features; image_state.snapshots = { {1U, {cls::rbd::UserSnapshotNamespace{}, "snap1", RBD_PROTECTION_STATUS_UNPROTECTED}}}; m_mock_local_image_ctx->snap_info = { {11U, librbd::SnapInfo{"snap1", cls::rbd::UserSnapshotNamespace{}, 0U, {}, RBD_PROTECTION_STATUS_PROTECTED, 0, {}}}, {12U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{ 
cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {}, "remote mirror uuid", 1, true, 0, {{1, 11}, {2, CEPH_NOSNAP}}}, 0, {}, 0, 0, {}}}}; C_SaferCond ctx; auto req = MockApplyImageStateRequest::create( "local mirror uuid", "remote mirror uuid", m_mock_local_image_ctx, m_mock_remote_image_ctx, image_state, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageReplayerSnapshotApplyImageStateRequest, UnprotectSnapshotError) { InSequence seq; MockGetMetadataRequest mock_get_metadata_request; expect_get_metadata(mock_get_metadata_request, {}, 0); expect_unprotect_snapshot("snap1", -EINVAL); librbd::mirror::snapshot::ImageState image_state; image_state.name = m_image_name; image_state.features = m_remote_image_ctx->features; image_state.snapshots = { {1U, {cls::rbd::UserSnapshotNamespace{}, "snap1", RBD_PROTECTION_STATUS_UNPROTECTED}}}; m_mock_local_image_ctx->snap_info = { {11U, librbd::SnapInfo{"snap1", cls::rbd::UserSnapshotNamespace{}, 0U, {}, RBD_PROTECTION_STATUS_PROTECTED, 0, {}}}, {12U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{ cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {}, "remote mirror uuid", 1, true, 0, {{1, 11}, {2, CEPH_NOSNAP}}}, 0, {}, 0, 0, {}}}}; C_SaferCond ctx; auto req = MockApplyImageStateRequest::create( "local mirror uuid", "remote mirror uuid", m_mock_local_image_ctx, m_mock_remote_image_ctx, image_state, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerSnapshotApplyImageStateRequest, RemoveSnapshot) { InSequence seq; MockGetMetadataRequest mock_get_metadata_request; expect_get_metadata(mock_get_metadata_request, {}, 0); expect_remove_snapshot("snap1", 0); expect_set_snap_limit(0, 0); librbd::mirror::snapshot::ImageState image_state; image_state.name = m_image_name; image_state.features = m_remote_image_ctx->features; m_mock_local_image_ctx->snap_info = { {11U, librbd::SnapInfo{"snap1", cls::rbd::UserSnapshotNamespace{}, 0U, {}, RBD_PROTECTION_STATUS_UNPROTECTED, 0, {}}}, {12U, 
librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{ cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {}, "remote mirror uuid", 1, true, 0, {{1, 11}, {2, CEPH_NOSNAP}}}, 0, {}, 0, 0, {}}}}; C_SaferCond ctx; auto req = MockApplyImageStateRequest::create( "local mirror uuid", "remote mirror uuid", m_mock_local_image_ctx, m_mock_remote_image_ctx, image_state, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageReplayerSnapshotApplyImageStateRequest, RemoveSnapshotError) { InSequence seq; MockGetMetadataRequest mock_get_metadata_request; expect_get_metadata(mock_get_metadata_request, {}, 0); expect_remove_snapshot("snap1", -EINVAL); librbd::mirror::snapshot::ImageState image_state; image_state.name = m_image_name; image_state.features = m_remote_image_ctx->features; m_mock_local_image_ctx->snap_info = { {11U, librbd::SnapInfo{"snap1", cls::rbd::UserSnapshotNamespace{}, 0U, {}, RBD_PROTECTION_STATUS_UNPROTECTED, 0, {}}}, {12U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{ cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {}, "remote mirror uuid", 1, true, 0, {{1, 11}, {2, CEPH_NOSNAP}}}, 0, {}, 0, 0, {}}}}; C_SaferCond ctx; auto req = MockApplyImageStateRequest::create( "local mirror uuid", "remote mirror uuid", m_mock_local_image_ctx, m_mock_remote_image_ctx, image_state, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerSnapshotApplyImageStateRequest, ProtectSnapshot) { InSequence seq; MockGetMetadataRequest mock_get_metadata_request; expect_get_metadata(mock_get_metadata_request, {}, 0); expect_protect_snapshot("snap1", 0); expect_set_snap_limit(0, 0); librbd::mirror::snapshot::ImageState image_state; image_state.name = m_image_name; image_state.features = m_remote_image_ctx->features; image_state.snapshots = { {1U, {cls::rbd::UserSnapshotNamespace{}, "snap1", RBD_PROTECTION_STATUS_PROTECTED}}}; m_mock_local_image_ctx->snap_info = { {11U, librbd::SnapInfo{"snap1", cls::rbd::UserSnapshotNamespace{}, 0U, {}, 
RBD_PROTECTION_STATUS_UNPROTECTED, 0, {}}}, {12U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{ cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {}, "remote mirror uuid", 1, true, 0, {{1, 11}, {2, CEPH_NOSNAP}}}, 0, {}, 0, 0, {}}}}; C_SaferCond ctx; auto req = MockApplyImageStateRequest::create( "local mirror uuid", "remote mirror uuid", m_mock_local_image_ctx, m_mock_remote_image_ctx, image_state, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageReplayerSnapshotApplyImageStateRequest, ProtectSnapshotError) { InSequence seq; MockGetMetadataRequest mock_get_metadata_request; expect_get_metadata(mock_get_metadata_request, {}, 0); expect_protect_snapshot("snap1", -EINVAL); librbd::mirror::snapshot::ImageState image_state; image_state.name = m_image_name; image_state.features = m_remote_image_ctx->features; image_state.snapshots = { {1U, {cls::rbd::UserSnapshotNamespace{}, "snap1", RBD_PROTECTION_STATUS_PROTECTED}}}; m_mock_local_image_ctx->snap_info = { {11U, librbd::SnapInfo{"snap1", cls::rbd::UserSnapshotNamespace{}, 0U, {}, RBD_PROTECTION_STATUS_UNPROTECTED, 0, {}}}, {12U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{ cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {}, "remote mirror uuid", 1, true, 0, {{1, 11}, {2, CEPH_NOSNAP}}}, 0, {}, 0, 0, {}}}}; C_SaferCond ctx; auto req = MockApplyImageStateRequest::create( "local mirror uuid", "remote mirror uuid", m_mock_local_image_ctx, m_mock_remote_image_ctx, image_state, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerSnapshotApplyImageStateRequest, RenameSnapshot) { InSequence seq; MockGetMetadataRequest mock_get_metadata_request; expect_get_metadata(mock_get_metadata_request, {}, 0); expect_rename_snapshot(11, "snap1-renamed", 0); expect_set_snap_limit(0, 0); librbd::mirror::snapshot::ImageState image_state; image_state.name = m_image_name; image_state.features = m_remote_image_ctx->features; image_state.snapshots = { {1U, 
{cls::rbd::UserSnapshotNamespace{}, "snap1-renamed", RBD_PROTECTION_STATUS_PROTECTED}}}; m_mock_local_image_ctx->snap_info = { {11U, librbd::SnapInfo{"snap1", cls::rbd::UserSnapshotNamespace{}, 0U, {}, RBD_PROTECTION_STATUS_PROTECTED, 0, {}}}, {12U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{ cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {}, "remote mirror uuid", 1, true, 0, {{1, 11}, {2, CEPH_NOSNAP}}}, 0, {}, 0, 0, {}}}}; C_SaferCond ctx; auto req = MockApplyImageStateRequest::create( "local mirror uuid", "remote mirror uuid", m_mock_local_image_ctx, m_mock_remote_image_ctx, image_state, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockImageReplayerSnapshotApplyImageStateRequest, RenameSnapshotError) { InSequence seq; MockGetMetadataRequest mock_get_metadata_request; expect_get_metadata(mock_get_metadata_request, {}, 0); expect_rename_snapshot(11, "snap1-renamed", -EINVAL); librbd::mirror::snapshot::ImageState image_state; image_state.name = m_image_name; image_state.features = m_remote_image_ctx->features; image_state.snapshots = { {1U, {cls::rbd::UserSnapshotNamespace{}, "snap1-renamed", RBD_PROTECTION_STATUS_PROTECTED}}}; m_mock_local_image_ctx->snap_info = { {11U, librbd::SnapInfo{"snap1", cls::rbd::UserSnapshotNamespace{}, 0U, {}, RBD_PROTECTION_STATUS_PROTECTED, 0, {}}}, {12U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{ cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {}, "remote mirror uuid", 1, true, 0, {{1, 11}, {2, CEPH_NOSNAP}}}, 0, {}, 0, 0, {}}}}; C_SaferCond ctx; auto req = MockApplyImageStateRequest::create( "local mirror uuid", "remote mirror uuid", m_mock_local_image_ctx, m_mock_remote_image_ctx, image_state, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerSnapshotApplyImageStateRequest, SetSnapshotLimitError) { InSequence seq; MockGetMetadataRequest mock_get_metadata_request; expect_get_metadata(mock_get_metadata_request, {}, 0); expect_set_snap_limit(0, -EINVAL); 
librbd::mirror::snapshot::ImageState image_state; image_state.name = m_image_name; image_state.features = m_remote_image_ctx->features; C_SaferCond ctx; auto req = MockApplyImageStateRequest::create( "local mirror uuid", "remote mirror uuid", m_mock_local_image_ctx, m_mock_remote_image_ctx, image_state, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } } // namespace journal } // namespace image_replayer } // namespace mirror } // namespace rbd
22,802
34.518692
97
cc
null
ceph-main/src/test/rbd_mirror/image_replayer/snapshot/test_mock_CreateLocalImageRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/rbd_mirror/test_mock_fixture.h" #include "librbd/internal.h" #include "librbd/ImageState.h" #include "librbd/Operations.h" #include "tools/rbd_mirror/PoolMetaCache.h" #include "tools/rbd_mirror/Threads.h" #include "tools/rbd_mirror/image_replayer/CreateImageRequest.h" #include "tools/rbd_mirror/image_replayer/snapshot/CreateLocalImageRequest.h" #include "tools/rbd_mirror/image_replayer/snapshot/StateBuilder.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/rbd_mirror/mock/MockContextWQ.h" #include "test/rbd_mirror/mock/MockSafeTimer.h" #include <boost/intrusive_ptr.hpp> namespace librbd { namespace { struct MockTestImageCtx : public librbd::MockImageCtx { explicit MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace namespace util { static std::string s_image_id; template <> std::string generate_image_id<MockTestImageCtx>(librados::IoCtx&) { ceph_assert(!s_image_id.empty()); return s_image_id; } } // namespace util } // namespace librbd namespace rbd { namespace mirror { template <> struct Threads<librbd::MockTestImageCtx> { ceph::mutex &timer_lock; SafeTimer *timer; librbd::asio::ContextWQ *work_queue; Threads(Threads<librbd::ImageCtx> *threads) : timer_lock(threads->timer_lock), timer(threads->timer), work_queue(threads->work_queue) { } }; namespace image_replayer { template<> struct CreateImageRequest<librbd::MockTestImageCtx> { static CreateImageRequest* s_instance; Context *on_finish = nullptr; static CreateImageRequest* create(Threads<librbd::MockTestImageCtx>* threads, librados::IoCtx &local_io_ctx, const std::string &global_image_id, const std::string &remote_mirror_uuid, const std::string &local_image_name, const std::string &local_image_id, librbd::MockTestImageCtx *remote_image_ctx, PoolMetaCache* pool_meta_cache, 
cls::rbd::MirrorImageMode mirror_image_mode, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->on_finish = on_finish; s_instance->construct(local_image_id); return s_instance; } CreateImageRequest() { ceph_assert(s_instance == nullptr); s_instance = this; } ~CreateImageRequest() { s_instance = nullptr; } MOCK_METHOD1(construct, void(const std::string&)); MOCK_METHOD0(send, void()); }; CreateImageRequest<librbd::MockTestImageCtx>* CreateImageRequest<librbd::MockTestImageCtx>::s_instance = nullptr; namespace snapshot { template<> struct StateBuilder<librbd::MockTestImageCtx> { std::string local_image_id; std::string remote_mirror_uuid; }; } // namespace snapshot } // namespace image_replayer } // namespace mirror } // namespace rbd #include "tools/rbd_mirror/image_replayer/snapshot/CreateLocalImageRequest.cc" using ::testing::_; using ::testing::InSequence; using ::testing::Invoke; using ::testing::Return; using ::testing::StrEq; namespace rbd { namespace mirror { namespace image_replayer { namespace snapshot { class TestMockImageReplayerSnapshotCreateLocalImageRequest : public TestMockFixture { public: typedef CreateLocalImageRequest<librbd::MockTestImageCtx> MockCreateLocalImageRequest; typedef Threads<librbd::MockTestImageCtx> MockThreads; typedef CreateImageRequest<librbd::MockTestImageCtx> MockCreateImageRequest; typedef StateBuilder<librbd::MockTestImageCtx> MockStateBuilder; void SetUp() override { TestMockFixture::SetUp(); librbd::RBD rbd; ASSERT_EQ(0, create_image(rbd, m_remote_io_ctx, m_image_name, m_image_size)); ASSERT_EQ(0, open_image(m_remote_io_ctx, m_image_name, &m_remote_image_ctx)); m_mock_remote_image_ctx = new librbd::MockTestImageCtx(*m_remote_image_ctx); } void TearDown() override { delete m_mock_remote_image_ctx; TestMockFixture::TearDown(); } void snap_create(librbd::ImageCtx *image_ctx, const std::string &snap_name) { librbd::NoOpProgressContext prog_ctx; ASSERT_EQ(0, 
image_ctx->operations->snap_create(cls::rbd::UserSnapshotNamespace(), snap_name, 0, prog_ctx)); ASSERT_EQ(0, image_ctx->operations->snap_protect(cls::rbd::UserSnapshotNamespace(), snap_name)); ASSERT_EQ(0, image_ctx->state->refresh()); } int clone_image(librbd::ImageCtx *parent_image_ctx, const std::string &snap_name, const std::string &clone_name) { snap_create(parent_image_ctx, snap_name); int order = 0; return librbd::clone(m_remote_io_ctx, parent_image_ctx->name.c_str(), snap_name.c_str(), m_remote_io_ctx, clone_name.c_str(), parent_image_ctx->features, &order, 0, 0); } void expect_mirror_image_set(const std::string& image_id, const cls::rbd::MirrorImage& mirror_image, int r) { bufferlist bl; encode(image_id, bl); encode(mirror_image, bl); EXPECT_CALL(get_mock_io_ctx(m_local_io_ctx), exec(RBD_MIRRORING, _, StrEq("rbd"), StrEq("mirror_image_set"), ContentsEqual(bl), _, _, _)) .WillOnce(Return(r)); } void expect_mirror_image_remove(const std::string& image_id, int r) { bufferlist bl; encode(image_id, bl); EXPECT_CALL(get_mock_io_ctx(m_local_io_ctx), exec(StrEq("rbd_mirroring"), _, StrEq("rbd"), StrEq("mirror_image_remove"), ContentsEqual(bl), _, _, _)) .WillOnce(Return(r)); } void expect_create_image(MockCreateImageRequest& mock_create_image_request, const std::string& image_id, int r) { EXPECT_CALL(mock_create_image_request, construct(image_id)); EXPECT_CALL(mock_create_image_request, send()) .WillOnce(Invoke([this, &mock_create_image_request, r]() { m_threads->work_queue->queue(mock_create_image_request.on_finish, r); })); } MockCreateLocalImageRequest* create_request( MockThreads& mock_threads, MockStateBuilder& mock_state_builder, const std::string& global_image_id, Context* on_finish) { return new MockCreateLocalImageRequest( &mock_threads, m_local_io_ctx, m_mock_remote_image_ctx, global_image_id, &m_pool_meta_cache, nullptr, &mock_state_builder, on_finish); } PoolMetaCache m_pool_meta_cache{g_ceph_context}; librbd::ImageCtx *m_remote_image_ctx; 
librbd::MockTestImageCtx *m_mock_remote_image_ctx = nullptr; }; TEST_F(TestMockImageReplayerSnapshotCreateLocalImageRequest, Success) { InSequence seq; librbd::util::s_image_id = "local image id"; expect_mirror_image_set("local image id", {cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, "global image id", cls::rbd::MIRROR_IMAGE_STATE_CREATING}, 0); MockCreateImageRequest mock_create_image_request; expect_create_image(mock_create_image_request, "local image id", 0); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockStateBuilder mock_state_builder; auto request = create_request( mock_threads, mock_state_builder, "global image id", &ctx); request->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_EQ("local image id", mock_state_builder.local_image_id); } TEST_F(TestMockImageReplayerSnapshotCreateLocalImageRequest, AddMirrorImageError) { InSequence seq; librbd::util::s_image_id = "local image id"; expect_mirror_image_set("local image id", {cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, "global image id", cls::rbd::MIRROR_IMAGE_STATE_CREATING}, -EINVAL); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockStateBuilder mock_state_builder; auto request = create_request( mock_threads, mock_state_builder, "global image id", &ctx); request->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerSnapshotCreateLocalImageRequest, CreateImageError) { InSequence seq; librbd::util::s_image_id = "local image id"; expect_mirror_image_set("local image id", {cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, "global image id", cls::rbd::MIRROR_IMAGE_STATE_CREATING}, 0); MockCreateImageRequest mock_create_image_request; expect_create_image(mock_create_image_request, "local image id", -EINVAL); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockStateBuilder mock_state_builder; auto request = create_request( mock_threads, mock_state_builder, "global image id", &ctx); request->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerSnapshotCreateLocalImageRequest, 
CreateImageDuplicate) { InSequence seq; librbd::util::s_image_id = "local image id"; expect_mirror_image_set("local image id", {cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, "global image id", cls::rbd::MIRROR_IMAGE_STATE_CREATING}, 0); MockCreateImageRequest mock_create_image_request; expect_create_image(mock_create_image_request, "local image id", -EBADF); expect_mirror_image_set("local image id", {cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, "global image id", cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, 0); expect_mirror_image_remove("local image id", 0); expect_mirror_image_set("local image id", {cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, "global image id", cls::rbd::MIRROR_IMAGE_STATE_CREATING}, 0); expect_create_image(mock_create_image_request, "local image id", 0); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockStateBuilder mock_state_builder; auto request = create_request( mock_threads, mock_state_builder, "global image id", &ctx); request->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_EQ("local image id", mock_state_builder.local_image_id); } TEST_F(TestMockImageReplayerSnapshotCreateLocalImageRequest, DisableMirrorImageError) { InSequence seq; librbd::util::s_image_id = "local image id"; expect_mirror_image_set("local image id", {cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, "global image id", cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, -EINVAL); C_SaferCond ctx; MockThreads mock_threads(m_threads); MockStateBuilder mock_state_builder; mock_state_builder.local_image_id = "local image id"; auto request = create_request( mock_threads, mock_state_builder, "global image id", &ctx); request->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockImageReplayerSnapshotCreateLocalImageRequest, RemoveMirrorImageError) { InSequence seq; librbd::util::s_image_id = "local image id"; expect_mirror_image_set("local image id", {cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, "global image id", cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, 0); expect_mirror_image_remove("local image id", -EINVAL); C_SaferCond 
ctx; MockThreads mock_threads(m_threads); MockStateBuilder mock_state_builder; mock_state_builder.local_image_id = "local image id"; auto request = create_request( mock_threads, mock_state_builder, "global image id", &ctx); request->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } } // namespace journal } // namespace image_replayer } // namespace mirror } // namespace rbd
12,055
32.770308
88
cc
null
ceph-main/src/test/rbd_mirror/image_replayer/snapshot/test_mock_Replayer.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/rbd_mirror/test_mock_fixture.h" #include "librbd/deep_copy/ImageCopyRequest.h" #include "librbd/deep_copy/SnapshotCopyRequest.h" #include "librbd/mirror/ImageStateUpdateRequest.h" #include "librbd/mirror/snapshot/CreateNonPrimaryRequest.h" #include "librbd/mirror/snapshot/GetImageStateRequest.h" #include "librbd/mirror/snapshot/ImageMeta.h" #include "librbd/mirror/snapshot/UnlinkPeerRequest.h" #include "tools/rbd_mirror/InstanceWatcher.h" #include "tools/rbd_mirror/Threads.h" #include "tools/rbd_mirror/image_replayer/CloseImageRequest.h" #include "tools/rbd_mirror/image_replayer/ReplayerListener.h" #include "tools/rbd_mirror/image_replayer/Utils.h" #include "tools/rbd_mirror/image_replayer/snapshot/ApplyImageStateRequest.h" #include "tools/rbd_mirror/image_replayer/snapshot/Replayer.h" #include "tools/rbd_mirror/image_replayer/snapshot/StateBuilder.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/librbd/mock/MockOperations.h" #include "test/rbd_mirror/mock/MockContextWQ.h" #include "test/rbd_mirror/mock/MockSafeTimer.h" using namespace std::chrono_literals; namespace librbd { namespace { struct MockTestImageCtx : public librbd::MockImageCtx { explicit MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace namespace deep_copy { template <> struct ImageCopyRequest<MockTestImageCtx> { uint64_t src_snap_id_start; uint64_t src_snap_id_end; uint64_t dst_snap_id_start; librbd::deep_copy::ObjectNumber object_number; librbd::SnapSeqs snap_seqs; static ImageCopyRequest* s_instance; static ImageCopyRequest* create(MockTestImageCtx *src_image_ctx, MockTestImageCtx *dst_image_ctx, librados::snap_t src_snap_id_start, librados::snap_t src_snap_id_end, librados::snap_t dst_snap_id_start, bool flatten, const ObjectNumber &object_number, const 
SnapSeqs &snap_seqs, Handler *handler, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->src_snap_id_start = src_snap_id_start; s_instance->src_snap_id_end = src_snap_id_end; s_instance->dst_snap_id_start = dst_snap_id_start; s_instance->object_number = object_number; s_instance->snap_seqs = snap_seqs; s_instance->on_finish = on_finish; return s_instance; } Context* on_finish = nullptr; ImageCopyRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; template <> struct SnapshotCopyRequest<MockTestImageCtx> { librados::snap_t src_snap_id_start; librados::snap_t src_snap_id_end; librados::snap_t dst_snap_id_start; SnapSeqs* snap_seqs = nullptr; static SnapshotCopyRequest* s_instance; static SnapshotCopyRequest* create(MockTestImageCtx *src_image_ctx, MockTestImageCtx *dst_image_ctx, librados::snap_t src_snap_id_start, librados::snap_t src_snap_id_end, librados::snap_t dst_snap_id_start, bool flatten, ::MockContextWQ *work_queue, SnapSeqs *snap_seqs, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->src_snap_id_start = src_snap_id_start; s_instance->src_snap_id_end = src_snap_id_end; s_instance->dst_snap_id_start = dst_snap_id_start; s_instance->snap_seqs = snap_seqs; s_instance->on_finish = on_finish; return s_instance; } Context* on_finish = nullptr; SnapshotCopyRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; ImageCopyRequest<MockTestImageCtx>* ImageCopyRequest<MockTestImageCtx>::s_instance = nullptr; SnapshotCopyRequest<MockTestImageCtx>* SnapshotCopyRequest<MockTestImageCtx>::s_instance = nullptr; } // namespace deep_copy namespace mirror { template <> struct ImageStateUpdateRequest<MockTestImageCtx> { static ImageStateUpdateRequest* s_instance; static ImageStateUpdateRequest* create( librados::IoCtx& io_ctx, const std::string& image_id, cls::rbd::MirrorImageState mirror_image_state, const cls::rbd::MirrorImage& mirror_image, Context* on_finish) { ceph_assert(s_instance != nullptr); 
EXPECT_EQ(cls::rbd::MIRROR_IMAGE_STATE_ENABLED, mirror_image_state); EXPECT_EQ(cls::rbd::MirrorImage{}, mirror_image); s_instance->on_finish = on_finish; return s_instance; } Context* on_finish = nullptr; ImageStateUpdateRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; ImageStateUpdateRequest<MockTestImageCtx>* ImageStateUpdateRequest<MockTestImageCtx>::s_instance = nullptr; namespace snapshot { template <> struct CreateNonPrimaryRequest<MockTestImageCtx> { bool demoted = false; std::string primary_mirror_uuid; uint64_t primary_snap_id; SnapSeqs snap_seqs; uint64_t* snap_id = nullptr; static CreateNonPrimaryRequest* s_instance; static CreateNonPrimaryRequest* create(MockTestImageCtx *image_ctx, bool demoted, const std::string &primary_mirror_uuid, uint64_t primary_snap_id, const SnapSeqs& snap_seqs, const ImageState &image_state, uint64_t *snap_id, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->demoted = demoted; s_instance->primary_mirror_uuid = primary_mirror_uuid; s_instance->primary_snap_id = primary_snap_id; s_instance->snap_seqs = snap_seqs; s_instance->snap_id = snap_id; s_instance->on_finish = on_finish; return s_instance; } Context* on_finish = nullptr; CreateNonPrimaryRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; template <> struct GetImageStateRequest<MockTestImageCtx> { uint64_t snap_id = CEPH_NOSNAP; static GetImageStateRequest* s_instance; static GetImageStateRequest* create(MockTestImageCtx *image_ctx, uint64_t snap_id, ImageState *image_state, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->snap_id = snap_id; s_instance->on_finish = on_finish; return s_instance; } Context* on_finish = nullptr; GetImageStateRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; template <> struct ImageMeta<MockTestImageCtx> { MOCK_METHOD1(load, void(Context*)); bool resync_requested = false; }; template <> struct UnlinkPeerRequest<MockTestImageCtx> { uint64_t snap_id; std::string 
mirror_peer_uuid; bool allow_remove; static UnlinkPeerRequest* s_instance; static UnlinkPeerRequest*create (MockTestImageCtx *image_ctx, uint64_t snap_id, const std::string &mirror_peer_uuid, bool allow_remove, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->snap_id = snap_id; s_instance->mirror_peer_uuid = mirror_peer_uuid; s_instance->allow_remove = allow_remove; s_instance->on_finish = on_finish; return s_instance; } Context* on_finish = nullptr; UnlinkPeerRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; CreateNonPrimaryRequest<MockTestImageCtx>* CreateNonPrimaryRequest<MockTestImageCtx>::s_instance = nullptr; GetImageStateRequest<MockTestImageCtx>* GetImageStateRequest<MockTestImageCtx>::s_instance = nullptr; UnlinkPeerRequest<MockTestImageCtx>* UnlinkPeerRequest<MockTestImageCtx>::s_instance = nullptr; } // namespace snapshot } // namespace mirror } // namespace librbd namespace rbd { namespace mirror { template <> struct InstanceWatcher<librbd::MockTestImageCtx> { MOCK_METHOD1(cancel_sync_request, void(const std::string&)); MOCK_METHOD2(notify_sync_request, void(const std::string&, Context*)); MOCK_METHOD1(notify_sync_complete, void(const std::string&)); }; template <> struct Threads<librbd::MockTestImageCtx> { MockSafeTimer *timer; ceph::mutex &timer_lock; MockContextWQ *work_queue; Threads(Threads<librbd::ImageCtx>* threads) : timer(new MockSafeTimer()), timer_lock(threads->timer_lock), work_queue(new MockContextWQ()) { } ~Threads() { delete timer; delete work_queue; } }; namespace { struct MockReplayerListener : public image_replayer::ReplayerListener { MOCK_METHOD0(handle_notification, void()); }; } // anonymous namespace namespace image_replayer { template<> struct CloseImageRequest<librbd::MockTestImageCtx> { static CloseImageRequest* s_instance; librbd::MockTestImageCtx **image_ctx = nullptr; Context *on_finish = nullptr; static CloseImageRequest* create(librbd::MockTestImageCtx **image_ctx, Context *on_finish) { 
ceph_assert(s_instance != nullptr); s_instance->image_ctx = image_ctx; s_instance->on_finish = on_finish; return s_instance; } CloseImageRequest() { ceph_assert(s_instance == nullptr); s_instance = this; } ~CloseImageRequest() { ceph_assert(s_instance == this); s_instance = nullptr; } MOCK_METHOD0(send, void()); }; CloseImageRequest<librbd::MockTestImageCtx>* CloseImageRequest<librbd::MockTestImageCtx>::s_instance = nullptr; namespace snapshot { template <> struct ApplyImageStateRequest<librbd::MockTestImageCtx> { Context* on_finish = nullptr; static ApplyImageStateRequest* s_instance; static ApplyImageStateRequest* create( const std::string& local_mirror_uuid, const std::string& remote_mirror_uuid, librbd::MockTestImageCtx* local_image_ctx, librbd::MockTestImageCtx* remote_image_ctx, const librbd::mirror::snapshot::ImageState& image_state, Context* on_finish) { ceph_assert(s_instance != nullptr); s_instance->on_finish = on_finish; return s_instance; } ApplyImageStateRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; template<> struct StateBuilder<librbd::MockTestImageCtx> { StateBuilder(librbd::MockTestImageCtx& local_image_ctx, librbd::MockTestImageCtx& remote_image_ctx, librbd::mirror::snapshot::ImageMeta<librbd::MockTestImageCtx>& local_image_meta) : local_image_ctx(&local_image_ctx), remote_image_ctx(&remote_image_ctx), local_image_meta(&local_image_meta) { } librbd::MockTestImageCtx* local_image_ctx; librbd::MockTestImageCtx* remote_image_ctx; std::string remote_mirror_uuid = "remote mirror uuid"; librbd::mirror::snapshot::ImageMeta<librbd::MockTestImageCtx>* local_image_meta = nullptr; }; ApplyImageStateRequest<librbd::MockTestImageCtx>* ApplyImageStateRequest<librbd::MockTestImageCtx>::s_instance = nullptr; } // namespace snapshot } // namespace image_replayer } // namespace mirror } // namespace rbd #include "tools/rbd_mirror/image_replayer/snapshot/Replayer.cc" namespace rbd { namespace mirror { namespace image_replayer { namespace snapshot { 
using ::testing::_;
using ::testing::DoAll;
using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::Return;
using ::testing::ReturnArg;
using ::testing::StrEq;
using ::testing::WithArg;

// Test fixture for the snapshot-based image replayer. Creates and opens one
// local and one remote test image, and provides gmock expectation helpers
// (expect_*) that script the replayer's interactions with its collaborators.
// Most helpers complete their mocked async operation by queueing the captured
// Context on the real work queue, so callbacks still run asynchronously.
class TestMockImageReplayerSnapshotReplayer : public TestMockFixture {
public:
  typedef Replayer<librbd::MockTestImageCtx> MockReplayer;
  typedef ApplyImageStateRequest<librbd::MockTestImageCtx> MockApplyImageStateRequest;
  typedef StateBuilder<librbd::MockTestImageCtx> MockStateBuilder;
  typedef InstanceWatcher<librbd::MockTestImageCtx> MockInstanceWatcher;
  typedef Threads<librbd::MockTestImageCtx> MockThreads;
  typedef CloseImageRequest<librbd::MockTestImageCtx> MockCloseImageRequest;
  typedef librbd::deep_copy::ImageCopyRequest<librbd::MockTestImageCtx> MockImageCopyRequest;
  typedef librbd::deep_copy::SnapshotCopyRequest<librbd::MockTestImageCtx> MockSnapshotCopyRequest;
  typedef librbd::mirror::ImageStateUpdateRequest<librbd::MockTestImageCtx> MockImageStateUpdateRequest;
  typedef librbd::mirror::snapshot::CreateNonPrimaryRequest<librbd::MockTestImageCtx> MockCreateNonPrimaryRequest;
  typedef librbd::mirror::snapshot::GetImageStateRequest<librbd::MockTestImageCtx> MockGetImageStateRequest;
  typedef librbd::mirror::snapshot::ImageMeta<librbd::MockTestImageCtx> MockImageMeta;
  typedef librbd::mirror::snapshot::UnlinkPeerRequest<librbd::MockTestImageCtx> MockUnlinkPeerRequest;

  // Create and open a local and a remote image with the same name/size so
  // each test can wrap them in MockTestImageCtx instances.
  void SetUp() override {
    TestMockFixture::SetUp();

    librbd::RBD rbd;
    ASSERT_EQ(0, create_image(rbd, m_local_io_ctx, m_image_name, m_image_size));
    ASSERT_EQ(0, open_image(m_local_io_ctx, m_image_name, &m_local_image_ctx));

    ASSERT_EQ(0, create_image(rbd, m_remote_io_ctx, m_image_name,
                              m_image_size));
    ASSERT_EQ(0, open_image(m_remote_io_ctx, m_image_name,
                            &m_remote_image_ctx));
  }

  // Forward any work queued on the mock work queue to the real work queue.
  void expect_work_queue_repeatedly(MockThreads &mock_threads) {
    EXPECT_CALL(*mock_threads.work_queue, queue(_, _))
      .WillRepeatedly(Invoke([this](Context *ctx, int r) {
          m_threads->work_queue->queue(ctx, r);
        }));
  }

  // Forward mock timer add/cancel calls to the real timer.
  void
expect_add_event_after_repeatedly(MockThreads &mock_threads) {
    EXPECT_CALL(*mock_threads.timer, add_event_after(_, _))
      .WillRepeatedly(
        DoAll(Invoke([this](double seconds, Context *ctx) {
            m_threads->timer->add_event_after(seconds, ctx);
          }),
          ReturnArg<1>()));
    EXPECT_CALL(*mock_threads.timer, cancel_event(_))
      .WillRepeatedly(
        Invoke([this](Context *ctx) {
          return m_threads->timer->cancel_event(ctx);
        }));
  }

  // Expect an update watcher registration; on success (r >= 0) capture the
  // registered watcher and hand back the supplied watch handle.
  void expect_register_update_watcher(
      librbd::MockTestImageCtx& mock_image_ctx,
      librbd::UpdateWatchCtx** update_watch_ctx,
      uint64_t watch_handle, int r) {
    EXPECT_CALL(*mock_image_ctx.state, register_update_watcher(_, _))
      .WillOnce(Invoke([update_watch_ctx, watch_handle, r]
                       (librbd::UpdateWatchCtx* ctx, uint64_t* handle) {
          if (r >= 0) {
            *update_watch_ctx = ctx;
            *handle = watch_handle;
          }
          return r;
        }));
  }

  // Expect unregistration of the watcher registered under watch_handle;
  // completes asynchronously with result r.
  void expect_unregister_update_watcher(
      librbd::MockTestImageCtx& mock_image_ctx,
      uint64_t watch_handle, int r) {
    EXPECT_CALL(*mock_image_ctx.state,
                unregister_update_watcher(watch_handle, _))
      .WillOnce(WithArg<1>(Invoke([this, r](Context* ctx) {
          m_threads->work_queue->queue(ctx, r);
        })));
  }

  // Expect an image-meta load; sets the resync_requested flag on the mock
  // before completing with r.
  void expect_load_image_meta(MockImageMeta& mock_image_meta,
                              bool resync_requested, int r) {
    EXPECT_CALL(mock_image_meta, load(_))
      .WillOnce(Invoke([this, &mock_image_meta, resync_requested, r]
                       (Context* ctx) {
          mock_image_meta.resync_requested = resync_requested;
          m_threads->work_queue->queue(ctx, r);
        }));
  }

  void expect_is_refresh_required(librbd::MockTestImageCtx& mock_image_ctx,
                                  bool is_required) {
    EXPECT_CALL(*mock_image_ctx.state, is_refresh_required())
      .WillOnce(Return(is_required));
  }

  // Expect a state refresh; replaces the image's snap_info map with the
  // supplied snapshots before completing with r.
  void expect_refresh(librbd::MockTestImageCtx& mock_image_ctx,
                      const std::map<uint64_t, librbd::SnapInfo>& snaps,
                      int r) {
    EXPECT_CALL(*mock_image_ctx.state, refresh(_))
      .WillOnce(Invoke([this, &mock_image_ctx, snaps, r](Context* ctx) {
          mock_image_ctx.snap_info = snaps;
          m_threads->work_queue->queue(ctx, r);
        }));
  }

  void expect_notify_update(librbd::MockTestImageCtx& mock_image_ctx) {
    EXPECT_CALL(mock_image_ctx, notify_update(_))
      .WillOnce(Invoke([this](Context* ctx) {
          m_threads->work_queue->queue(ctx, 0);
        }));
  }

  // Expect a non-primary snapshot prune: a snap-info lookup served from the
  // mock's snap_info map followed by the snapshot removal completing with r.
  void expect_prune_non_primary_snapshot(
      librbd::MockTestImageCtx& mock_image_ctx,
      uint64_t snap_id, int r) {
    EXPECT_CALL(mock_image_ctx, get_snap_info(snap_id))
      .WillOnce(Invoke([&mock_image_ctx](uint64_t snap_id)
                       -> librbd::SnapInfo* {
          auto it = mock_image_ctx.snap_info.find(snap_id);
          if (it == mock_image_ctx.snap_info.end()) {
            return nullptr;
          }
          return &it->second;
        }));
    EXPECT_CALL(*mock_image_ctx.operations, snap_remove(_, _, _))
      .WillOnce(WithArg<2>(Invoke([this, r](Context* ctx) {
          m_threads->work_queue->queue(ctx, r);
        })));
  }

  // Expect a SnapshotCopyRequest::send(); validates the requested snapshot
  // range, publishes the supplied snap_seqs mapping and completes with r.
  void expect_snapshot_copy(MockSnapshotCopyRequest& mock_snapshot_copy_request,
                            uint64_t src_snap_id_start,
                            uint64_t src_snap_id_end,
                            uint64_t dst_snap_id_start,
                            const librbd::SnapSeqs& snap_seqs, int r) {
    EXPECT_CALL(mock_snapshot_copy_request, send())
      .WillOnce(Invoke([this, &req=mock_snapshot_copy_request,
                        src_snap_id_start, src_snap_id_end, dst_snap_id_start,
                        snap_seqs, r]() {
          ASSERT_EQ(src_snap_id_start, req.src_snap_id_start);
          ASSERT_EQ(src_snap_id_end, req.src_snap_id_end);
          ASSERT_EQ(dst_snap_id_start, req.dst_snap_id_start);
          *req.snap_seqs = snap_seqs;
          m_threads->work_queue->queue(req.on_finish, r);
        }));
  }

  // Expect a GetImageStateRequest::send() for the given snapshot id.
  void expect_get_image_state(
      MockGetImageStateRequest& mock_get_image_state_request,
      uint64_t snap_id, int r) {
    EXPECT_CALL(mock_get_image_state_request, send())
      .WillOnce(Invoke([this, &req=mock_get_image_state_request, snap_id,
                        r]() {
          ASSERT_EQ(snap_id, req.snap_id);
          m_threads->work_queue->queue(req.on_finish, r);
        }));
  }

  // Expect creation of a non-primary mirror snapshot; validates the request
  // parameters, publishes the new local snap_id and completes with r.
  void expect_create_non_primary_request(
      MockCreateNonPrimaryRequest& mock_create_non_primary_request,
      bool demoted, const std::string& primary_mirror_uuid,
      uint64_t primary_snap_id, const librbd::SnapSeqs& snap_seqs,
      uint64_t snap_id, int r) {
    EXPECT_CALL(mock_create_non_primary_request, send())
      .WillOnce(Invoke([this, &req=mock_create_non_primary_request,
                        demoted, primary_mirror_uuid,
                        primary_snap_id, snap_seqs, snap_id, r]() {
          ASSERT_EQ(demoted, req.demoted);
          ASSERT_EQ(primary_mirror_uuid, req.primary_mirror_uuid);
          ASSERT_EQ(primary_snap_id, req.primary_snap_id);
          ASSERT_EQ(snap_seqs, req.snap_seqs);
          *req.snap_id = snap_id;
          m_threads->work_queue->queue(req.on_finish, r);
        }));
  }

  void expect_update_mirror_image_state(
      MockImageStateUpdateRequest& mock_image_state_update_request, int r) {
    EXPECT_CALL(mock_image_state_update_request, send())
      .WillOnce(Invoke([this, &req=mock_image_state_update_request, r]() {
          m_threads->work_queue->queue(req.on_finish, r);
        }));
  }

  // Expect the replayer to request a sync-throttle slot for image_id.
  void expect_notify_sync_request(MockInstanceWatcher& mock_instance_watcher,
                                  const std::string& image_id, int r) {
    EXPECT_CALL(mock_instance_watcher, notify_sync_request(image_id, _))
      .WillOnce(WithArg<1>(Invoke([this, r](Context* ctx) {
          m_threads->work_queue->queue(ctx, r);
        })));
  }

  void expect_notify_sync_complete(MockInstanceWatcher& mock_instance_watcher,
                                   const std::string& image_id) {
    EXPECT_CALL(mock_instance_watcher, notify_sync_complete(image_id));
  }

  void expect_cancel_sync_request(MockInstanceWatcher& mock_instance_watcher,
                                  const std::string& image_id) {
    EXPECT_CALL(mock_instance_watcher, cancel_sync_request(image_id));
  }

  // Expect an ImageCopyRequest::send(); validates the copy window (including
  // the object number used to resume interrupted syncs) and completes with r.
  void expect_image_copy(MockImageCopyRequest& mock_image_copy_request,
                         uint64_t src_snap_id_start, uint64_t src_snap_id_end,
                         uint64_t dst_snap_id_start,
                         const librbd::deep_copy::ObjectNumber& object_number,
                         const librbd::SnapSeqs& snap_seqs, int r) {
    EXPECT_CALL(mock_image_copy_request, send())
      .WillOnce(Invoke([this, &req=mock_image_copy_request, src_snap_id_start,
                        src_snap_id_end, dst_snap_id_start, object_number,
                        snap_seqs, r]() {
          ASSERT_EQ(src_snap_id_start, req.src_snap_id_start);
          ASSERT_EQ(src_snap_id_end, req.src_snap_id_end);
          ASSERT_EQ(dst_snap_id_start, req.dst_snap_id_start);
          ASSERT_EQ(object_number, req.object_number);
          ASSERT_EQ(snap_seqs, req.snap_seqs);
          m_threads->work_queue->queue(req.on_finish, r);
        }));
  }

  // Expect an UnlinkPeerRequest::send() for the given remote snapshot/peer.
  void expect_unlink_peer(MockUnlinkPeerRequest&
                            mock_unlink_peer_request, uint64_t snap_id,
                          const std::string& mirror_peer_uuid,
                          bool allow_remove, int r) {
    EXPECT_CALL(mock_unlink_peer_request, send())
      .WillOnce(Invoke([this, &req=mock_unlink_peer_request, snap_id,
                        mirror_peer_uuid, allow_remove, r]() {
          ASSERT_EQ(snap_id, req.snap_id);
          ASSERT_EQ(mirror_peer_uuid, req.mirror_peer_uuid);
          ASSERT_EQ(allow_remove, req.allow_remove);
          m_threads->work_queue->queue(req.on_finish, r);
        }));
  }

  void expect_apply_image_state(
      MockApplyImageStateRequest& mock_request, int r) {
    EXPECT_CALL(mock_request, send())
      .WillOnce(Invoke([this, &req=mock_request, r]() {
          m_threads->work_queue->queue(req.on_finish, r);
        }));
  }

  // Expect the cls call that records sync progress on the local image's
  // header object; the payload must match the encoded arguments exactly.
  void expect_mirror_image_snapshot_set_copy_progress(
      librbd::MockTestImageCtx& mock_test_image_ctx, uint64_t snap_id,
      bool completed, uint64_t last_copied_object, int r) {
    bufferlist bl;
    encode(snap_id, bl);
    encode(completed, bl);
    encode(last_copied_object, bl);

    EXPECT_CALL(get_mock_io_ctx(mock_test_image_ctx.md_ctx),
                exec(mock_test_image_ctx.header_oid, _, StrEq("rbd"),
                     StrEq("mirror_image_snapshot_set_copy_progress"),
                     ContentsEqual(bl), _, _, _))
      .WillOnce(Return(r));
  }

  // Expect a CloseImageRequest::send(); clears the caller's image pointer.
  void expect_send(MockCloseImageRequest &mock_close_image_request, int r) {
    EXPECT_CALL(mock_close_image_request, send())
      .WillOnce(Invoke([this, &mock_close_image_request, r]() {
          *mock_close_image_request.image_ctx = nullptr;
          m_threads->work_queue->queue(mock_close_image_request.on_finish, r);
        }));
  }

  // Count listener notifications so tests can wait for replayer progress.
  // NOTE(review): mock_threads parameter is unused by the body.
  void expect_notification(MockThreads& mock_threads,
                           MockReplayerListener& mock_replayer_listener) {
    EXPECT_CALL(mock_replayer_listener, handle_notification())
      .WillRepeatedly(Invoke([this]() {
          std::unique_lock locker{m_lock};
          ++m_notifications;
          m_cond.notify_all();
        }));
  }

  // Block until `count` notifications have been consumed; returns
  // -ETIMEDOUT if any single wait exceeds 10 seconds.
  int wait_for_notification(uint32_t count) {
    std::unique_lock locker{m_lock};
    for (uint32_t idx = 0; idx < count; ++idx) {
      while (m_notifications == 0) {
        if (m_cond.wait_for(locker, 10s) == std::cv_status::timeout) {
          return -ETIMEDOUT;
        }
      }
      --m_notifications;
    }
    return 0;
  }

  int
// Standard happy-path init: register both update watchers (local handle 123,
// remote handle 234), load image-meta (no resync), skip refreshes, run
// Replayer::init() and wait for the two resulting notifications.
init_entry_replayer(MockReplayer& mock_replayer,
                      MockThreads& mock_threads,
                      librbd::MockTestImageCtx& mock_local_image_ctx,
                      librbd::MockTestImageCtx& mock_remote_image_ctx,
                      MockReplayerListener& mock_replayer_listener,
                      MockImageMeta& mock_image_meta,
                      librbd::UpdateWatchCtx** update_watch_ctx) {
    expect_register_update_watcher(mock_local_image_ctx, update_watch_ctx,
                                   123, 0);
    expect_register_update_watcher(mock_remote_image_ctx, update_watch_ctx,
                                   234, 0);
    expect_load_image_meta(mock_image_meta, false, 0);
    expect_is_refresh_required(mock_local_image_ctx, false);
    expect_is_refresh_required(mock_remote_image_ctx, false);

    C_SaferCond init_ctx;
    mock_replayer.init(&init_ctx);
    int r = init_ctx.wait();
    if (r < 0) {
      return r;
    }

    return wait_for_notification(2);
  }

  // Mirror of init_entry_replayer: unregister both watchers (remote first)
  // and run Replayer::shut_down().
  int shut_down_entry_replayer(MockReplayer& mock_replayer,
                               MockThreads& mock_threads,
                               librbd::MockTestImageCtx& mock_local_image_ctx,
                               librbd::MockTestImageCtx& mock_remote_image_ctx) {
    expect_unregister_update_watcher(mock_remote_image_ctx, 234, 0);
    expect_unregister_update_watcher(mock_local_image_ctx, 123, 0);

    C_SaferCond shutdown_ctx;
    mock_replayer.shut_down(&shutdown_ctx);
    return shutdown_ctx.wait();
  }

  // Real image contexts created by SetUp(); wrapped by MockTestImageCtx in
  // each test.
  librbd::ImageCtx* m_local_image_ctx = nullptr;
  librbd::ImageCtx* m_remote_image_ctx = nullptr;

  PoolMetaCache m_pool_meta_cache{g_ceph_context};

  // Protects m_notifications (see expect_notification /
  // wait_for_notification).
  ceph::mutex m_lock = ceph::make_mutex(
    "TestMockImageReplayerSnapshotReplayer");
  ceph::condition_variable m_cond;
  uint32_t m_notifications = 0;
};

// Verify that the replayer can be initialized and shut down with no
// snapshots to replay.
TEST_F(TestMockImageReplayerSnapshotReplayer, InitShutDown) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);

  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  // all remaining expectations must be satisfied in declaration order
  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};

  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_image_ctx,
                                        mock_remote_image_ctx));
}

// Full replication pass: snap1 and snap4 are synced; the user snapshot
// (snap2) and the mirror snapshot without a matching peer uuid (snap3) are
// skipped; the synced-over non-primary snap1 is then pruned locally.
TEST_F(TestMockImageReplayerSnapshotReplayer, SyncSnapshot) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  // it should sync two snapshots and skip two (user and mirror w/o matching
  // peer uuid)
  mock_remote_image_ctx.snap_info = {
    {1U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}},
    {2U, librbd::SnapInfo{"snap2", cls::rbd::UserSnapshotNamespace{},
     0, {}, 0, 0, {}}},
    {3U, librbd::SnapInfo{"snap3", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {""}, "", CEPH_NOSNAP, true,
       0, {}},
     0, {}, 0, 0, {}}},
    {4U, librbd::SnapInfo{"snap4", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}}};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);

  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer
// (continuation of TEST_F SyncSnapshot)
mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};

  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;

  // init
  expect_register_update_watcher(mock_local_image_ctx, &update_watch_ctx,
                                 123, 0);
  expect_register_update_watcher(mock_remote_image_ctx, &update_watch_ctx,
                                 234, 0);

  // sync snap1: snapshot copy, create local non-primary snap 11, flip the
  // mirror image state, image copy, then record completed progress
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, false);
  expect_is_refresh_required(mock_remote_image_ctx, false);
  MockSnapshotCopyRequest mock_snapshot_copy_request;
  expect_snapshot_copy(mock_snapshot_copy_request, 0, 1, 0,
                       {{1, CEPH_NOSNAP}}, 0);
  MockGetImageStateRequest mock_get_image_state_request;
  expect_get_image_state(mock_get_image_state_request, 1, 0);
  MockCreateNonPrimaryRequest mock_create_non_primary_request;
  expect_create_non_primary_request(mock_create_non_primary_request, false,
                                    "remote mirror uuid", 1,
                                    {{1, CEPH_NOSNAP}}, 11, 0);
  MockImageStateUpdateRequest mock_image_state_update_request;
  expect_update_mirror_image_state(mock_image_state_update_request, 0);
  expect_notify_sync_request(mock_instance_watcher, mock_local_image_ctx.id,
                             0);
  MockImageCopyRequest mock_image_copy_request;
  expect_image_copy(mock_image_copy_request, 0, 1, 0, {},
                    {{1, CEPH_NOSNAP}}, 0);
  MockApplyImageStateRequest mock_apply_state_request;
  expect_apply_image_state(mock_apply_state_request, 0);
  expect_mirror_image_snapshot_set_copy_progress(
    mock_local_image_ctx, 11, true, 0, 0);
  expect_notify_update(mock_local_image_ctx);
  expect_notify_sync_complete(mock_instance_watcher, mock_local_image_ctx.id);

  // sync snap4: delta copy 1 -> 4 on top of local snap 11, create local
  // snap 14, then unlink the peer from the now-obsolete remote snap1
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, true);
  expect_refresh(
    mock_local_image_ctx, {
      {11U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {},
         "remote mirror uuid", 1, true, 0, {{1, CEPH_NOSNAP}}},
       0, {}, 0, 0, {}}},
    }, 0);
  expect_is_refresh_required(mock_remote_image_ctx, true);
  expect_refresh(
    mock_remote_image_ctx, {
      {1U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
         "", CEPH_NOSNAP, true, 0, {}},
       0, {}, 0, 0, {}}},
      {2U, librbd::SnapInfo{"snap2", cls::rbd::UserSnapshotNamespace{},
       0, {}, 0, 0, {}}},
      {3U, librbd::SnapInfo{"snap3", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {""}, "", CEPH_NOSNAP,
         true, 0, {}},
       0, {}, 0, 0, {}}},
      {4U, librbd::SnapInfo{"snap4", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
         "", CEPH_NOSNAP, true, 0, {}},
       0, {}, 0, 0, {}}},
      {5U, librbd::SnapInfo{"snap5", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
         "", CEPH_NOSNAP, true, 0, {}},
       0, {}, 0, 0, {}}}
    }, 0);
  expect_snapshot_copy(mock_snapshot_copy_request, 1, 4, 11,
                       {{1, 11}, {2, 12}, {4, CEPH_NOSNAP}}, 0);
  expect_get_image_state(mock_get_image_state_request, 4, 0);
  expect_create_non_primary_request(mock_create_non_primary_request, false,
                                    "remote mirror uuid", 4,
                                    {{1, 11}, {2, 12}, {4, CEPH_NOSNAP}},
                                    14, 0);
  expect_notify_sync_request(mock_instance_watcher, mock_local_image_ctx.id,
                             0);
  expect_image_copy(mock_image_copy_request, 1, 4, 11, {},
                    {{1, 11}, {2, 12}, {4, CEPH_NOSNAP}}, 0);
  expect_apply_image_state(mock_apply_state_request, 0);
  expect_mirror_image_snapshot_set_copy_progress(
    mock_local_image_ctx, 14, true, 0, 0);
  expect_notify_update(mock_local_image_ctx);
  MockUnlinkPeerRequest mock_unlink_peer_request;
  expect_unlink_peer(mock_unlink_peer_request, 1, "remote mirror peer uuid",
                     false, 0);
  expect_notify_sync_complete(mock_instance_watcher, mock_local_image_ctx.id);

  // prune non-primary snap1
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, true);
  expect_refresh(
    mock_local_image_ctx, {
      {11U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {},
         "remote mirror uuid", 1, true, 0, {}},
       0, {}, 0, 0, {}}},
      {12U, librbd::SnapInfo{"snap2", cls::rbd::UserSnapshotNamespace{},
       0, {}, 0, 0, {}}},
      {14U, librbd::SnapInfo{"snap4", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {},
         "remote mirror uuid", 4, true, 0, {}},
       0, {}, 0, 0, {}}},
    }, 0);
  expect_is_refresh_required(mock_remote_image_ctx, true);
  expect_refresh(
    mock_remote_image_ctx, {
      {1U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
         "", CEPH_NOSNAP, true, 0, {}},
       0, {}, 0, 0, {}}},
      {2U, librbd::SnapInfo{"snap2", cls::rbd::UserSnapshotNamespace{},
       0, {}, 0, 0, {}}},
      {3U, librbd::SnapInfo{"snap3", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {""}, "", CEPH_NOSNAP,
         true, 0, {}},
       0, {}, 0, 0, {}}},
      {4U, librbd::SnapInfo{"snap4", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
         "", CEPH_NOSNAP, true, 0, {}},
       0, {}, 0, 0, {}}}
    }, 0);
  expect_prune_non_primary_snapshot(mock_local_image_ctx, 11, 0);

  // idle
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, true);
  expect_refresh(
    mock_local_image_ctx, {
      {14U, librbd::SnapInfo{"snap4", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {},
         "remote mirror uuid", 4, true, 0, {}},
       0, {}, 0, 0, {}}},
    }, 0);
  expect_is_refresh_required(mock_remote_image_ctx, true);
  expect_refresh(
    mock_remote_image_ctx, {
      {4U, librbd::SnapInfo{"snap4", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
         "", CEPH_NOSNAP, true, 0, {}},
       0, {}, 0, 0, {}}}
    }, 0);

  // fire init
  C_SaferCond init_ctx;
  mock_replayer.init(&init_ctx);
  ASSERT_EQ(0, init_ctx.wait());

  // wait for sync to complete
  ASSERT_EQ(0, wait_for_notification(4));

  // shut down
  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_image_ctx,
                                        mock_remote_image_ctx));
}

// An interrupted *initial* sync (local non-primary snap exists but is not
// complete, last_copied_object_number == 123) must resume the image copy
// from that object rather than restart from scratch.
TEST_F(TestMockImageReplayerSnapshotReplayer, InterruptedSyncInitial) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);

  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};

  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  // inject an incomplete sync snapshot with last_copied_object_number > 0
  mock_remote_image_ctx.snap_info = {
    {1U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}}};
  mock_local_image_ctx.snap_info = {
    {11U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {},
       "remote mirror uuid", 1, false, 123, {{1, CEPH_NOSNAP}}},
     0, {}, 0, 0, {}}}};

  // re-sync snap1
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, false);
// (continuation of TEST_F InterruptedSyncInitial: resume the image copy
// from object 123 — note the ObjectNumber passed to expect_image_copy)
expect_is_refresh_required(mock_remote_image_ctx, false);
  MockGetImageStateRequest mock_get_image_state_request;
  expect_get_image_state(mock_get_image_state_request, 11, 0);
  expect_notify_sync_request(mock_instance_watcher, mock_local_image_ctx.id,
                             0);
  MockImageCopyRequest mock_image_copy_request;
  expect_image_copy(mock_image_copy_request, 0, 1, 0,
                    librbd::deep_copy::ObjectNumber{123U},
                    {{1, CEPH_NOSNAP}}, 0);
  MockApplyImageStateRequest mock_apply_state_request;
  expect_apply_image_state(mock_apply_state_request, 0);
  expect_mirror_image_snapshot_set_copy_progress(
    mock_local_image_ctx, 11, true, 123, 0);
  expect_notify_update(mock_local_image_ctx);
  expect_notify_sync_complete(mock_instance_watcher, mock_local_image_ctx.id);

  // idle
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, true);
  expect_refresh(
    mock_local_image_ctx, {
      {11U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {},
         "remote mirror uuid", 1, true, 0, {}},
       0, {}, 0, 0, {}}},
    }, 0);
  expect_is_refresh_required(mock_remote_image_ctx, false);

  // wake-up replayer
  update_watch_ctx->handle_notify();

  // wait for sync to complete
  ASSERT_EQ(0, wait_for_notification(2));

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_image_ctx,
                                        mock_remote_image_ctx));
}

// An interrupted *delta* sync (incomplete non-primary snap after a completed
// one) must resume the 1 -> 2 image copy from object 123, then unlink the
// peer from remote snap1 and prune the local non-primary snap1.
TEST_F(TestMockImageReplayerSnapshotReplayer, InterruptedSyncDelta) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);

  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads,
                             &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};

  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  // inject an incomplete sync snapshot with last_copied_object_number > 0
  // after a complete snapshot
  mock_remote_image_ctx.snap_info = {
    {1U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}},
    {2U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}}};
  mock_local_image_ctx.snap_info = {
    {11U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {},
       "remote mirror uuid", 1, true, 0, {{1, CEPH_NOSNAP}}},
     0, {}, 0, 0, {}}},
    {12U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {},
       "remote mirror uuid", 2, false, 123, {{2, CEPH_NOSNAP}}},
     0, {}, 0, 0, {}}}};

  // re-sync snap2
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, false);
  expect_is_refresh_required(mock_remote_image_ctx, false);
  MockGetImageStateRequest mock_get_image_state_request;
  expect_get_image_state(mock_get_image_state_request, 12, 0);
  expect_notify_sync_request(mock_instance_watcher, mock_local_image_ctx.id,
                             0);
  MockImageCopyRequest mock_image_copy_request;
  expect_image_copy(mock_image_copy_request, 1, 2, 11,
                    librbd::deep_copy::ObjectNumber{123U},
                    {{2, CEPH_NOSNAP}}, 0);
  MockApplyImageStateRequest mock_apply_state_request;
  expect_apply_image_state(mock_apply_state_request, 0);
  expect_mirror_image_snapshot_set_copy_progress(
    mock_local_image_ctx, 12, true, 123, 0);
  expect_notify_update(mock_local_image_ctx);
  MockUnlinkPeerRequest mock_unlink_peer_request;
  expect_unlink_peer(mock_unlink_peer_request, 1, "remote mirror peer uuid",
                     false, 0);
  expect_notify_sync_complete(mock_instance_watcher, mock_local_image_ctx.id);

  // prune non-primary snap1
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, true);
  expect_refresh(
    mock_local_image_ctx, {
      {11U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {},
         "remote mirror uuid", 1, true, 0, {}},
       0, {}, 0, 0, {}}},
      {12U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {},
         "remote mirror uuid", 2, true, 0, {}},
       0, {}, 0, 0, {}}},
    }, 0);
  expect_is_refresh_required(mock_remote_image_ctx, true);
  expect_refresh(
    mock_remote_image_ctx, {
      {2U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
         "", CEPH_NOSNAP, true, 0, {}},
       0, {}, 0, 0, {}}},
    }, 0);
  expect_prune_non_primary_snapshot(mock_local_image_ctx, 11, 0);

  // idle
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, true);
  expect_refresh(
    mock_local_image_ctx, {
      {12U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {},
         "remote mirror uuid", 2, true, 0, {}},
       0, {}, 0, 0, {}}},
    }, 0);
  expect_is_refresh_required(mock_remote_image_ctx, false);

  // wake-up replayer
  update_watch_ctx->handle_notify();

  // wait for sync to complete
  ASSERT_EQ(0, wait_for_notification(2));

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_image_ctx,
                                        mock_remote_image_ctx));
}

// Same interrupted-delta scenario, but the preceding local snapshot is a
// primary *demotion* snapshot (local image was demoted, remote promoted).
TEST_F(TestMockImageReplayerSnapshotReplayer, InterruptedSyncDeltaDemote) {
  // (body of TEST_F InterruptedSyncDeltaDemote)
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);

  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};

  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  // inject an incomplete sync snapshot with last_copied_object_number > 0
  // after a primary demotion snapshot
  mock_remote_image_ctx.snap_info = {
    {1U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY_DEMOTED,
       {"remote mirror peer uuid"}, "local mirror uuid", 11, true, 0,
       {{11, CEPH_NOSNAP}}},
     0, {}, 0, 0, {}}},
    {2U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}}};
  mock_local_image_ctx.snap_info = {
    {11U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY_DEMOTED,
       {"remote mirror peer uuid"}, "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}},
    {12U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {},
       "remote mirror uuid", 2, false, 123, {{2, CEPH_NOSNAP}}},
     0, {}, 0, 0, {}}}};

  // re-sync snap2
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, false);
  expect_is_refresh_required(mock_remote_image_ctx, false);
  MockGetImageStateRequest mock_get_image_state_request;
  expect_get_image_state(mock_get_image_state_request, 12, 0);
  expect_notify_sync_request(mock_instance_watcher, mock_local_image_ctx.id,
                             0);
  MockImageCopyRequest mock_image_copy_request;
  expect_image_copy(mock_image_copy_request, 1, 2, 11,
                    librbd::deep_copy::ObjectNumber{123U},
                    {{2, CEPH_NOSNAP}}, 0);
  MockApplyImageStateRequest mock_apply_state_request;
  expect_apply_image_state(mock_apply_state_request, 0);
  expect_mirror_image_snapshot_set_copy_progress(
    mock_local_image_ctx, 12, true, 123, 0);
  expect_notify_update(mock_local_image_ctx);
  MockUnlinkPeerRequest mock_unlink_peer_request;
  expect_unlink_peer(mock_unlink_peer_request, 1, "remote mirror peer uuid",
                     false, 0);
  expect_notify_sync_complete(mock_instance_watcher, mock_local_image_ctx.id);

  // idle (the demotion snapshot is retained — no prune expectation here)
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, true);
  expect_refresh(
    mock_local_image_ctx, {
      {11U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY_DEMOTED,
         {"remote mirror peer uuid"}, "", CEPH_NOSNAP, true, 0, {}},
       0, {}, 0, 0, {}}},
      {12U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {},
         "remote mirror uuid", 2, true, 0, {}},
       0, {}, 0, 0, {}}},
    }, 0);
  expect_is_refresh_required(mock_remote_image_ctx, true);
  expect_refresh(
    mock_remote_image_ctx, {
      {2U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
         "", CEPH_NOSNAP, true, 0, {}},
       0, {}, 0, 0, {}}},
    }, 0);

  // wake-up replayer
  update_watch_ctx->handle_notify();

  // wait for sync to complete
  ASSERT_EQ(0, wait_for_notification(2));

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_image_ctx,
                                        mock_remote_image_ctx));
}

// An interrupted sync whose progress never advanced
// (last_copied_object_number == 0) restarts the image copy from the
// beginning (empty ObjectNumber).
TEST_F(TestMockImageReplayerSnapshotReplayer, InterruptedPendingSyncInitial) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);

  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};

  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  // inject an incomplete sync snapshot with last_copied_object_number == 0
  mock_remote_image_ctx.snap_info = {
    {1U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}}};
  mock_local_image_ctx.snap_info = {
    {11U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {},
       "remote mirror uuid", 1, false, 0, {{1, CEPH_NOSNAP}}},
     0, {}, 0, 0, {}}}};

  // re-sync snap1
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, false);
  expect_is_refresh_required(mock_remote_image_ctx, false);
  MockGetImageStateRequest mock_get_image_state_request;
  expect_get_image_state(mock_get_image_state_request, 11, 0);
expect_notify_sync_request(mock_instance_watcher, mock_local_image_ctx.id, 0); MockImageCopyRequest mock_image_copy_request; expect_image_copy(mock_image_copy_request, 0, 1, 0, {}, {{1, CEPH_NOSNAP}}, 0); MockApplyImageStateRequest mock_apply_state_request; expect_apply_image_state(mock_apply_state_request, 0); expect_mirror_image_snapshot_set_copy_progress( mock_local_image_ctx, 11, true, 0, 0); expect_notify_update(mock_local_image_ctx); expect_notify_sync_complete(mock_instance_watcher, mock_local_image_ctx.id); // idle expect_load_image_meta(mock_image_meta, false, 0); expect_is_refresh_required(mock_local_image_ctx, true); expect_refresh( mock_local_image_ctx, { {11U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{ cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {}, "remote mirror uuid", 1, true, 0, {}}, 0, {}, 0, 0, {}}}, }, 0); expect_is_refresh_required(mock_remote_image_ctx, false); // wake-up replayer update_watch_ctx->handle_notify(); // wait for sync to complete ASSERT_EQ(0, wait_for_notification(2)); ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads, mock_local_image_ctx, mock_remote_image_ctx)); } TEST_F(TestMockImageReplayerSnapshotReplayer, InterruptedPendingSyncDelta) { librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx}; librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx}; MockThreads mock_threads(m_threads); expect_work_queue_repeatedly(mock_threads); MockReplayerListener mock_replayer_listener; expect_notification(mock_threads, mock_replayer_listener); InSequence seq; MockInstanceWatcher mock_instance_watcher; MockImageMeta mock_image_meta; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_image_ctx, mock_image_meta); MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher, "local mirror uuid", &m_pool_meta_cache, &mock_state_builder, &mock_replayer_listener}; m_pool_meta_cache.set_remote_pool_meta( m_remote_io_ctx.get_id(), {"remote mirror uuid", "remote mirror 
peer uuid"}); librbd::UpdateWatchCtx* update_watch_ctx = nullptr; ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads, mock_local_image_ctx, mock_remote_image_ctx, mock_replayer_listener, mock_image_meta, &update_watch_ctx)); // inject an incomplete sync snapshot with last_copied_object_number == 0 // after a complete snapshot mock_remote_image_ctx.snap_info = { {1U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{ cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"}, "", CEPH_NOSNAP, true, 0, {}}, 0, {}, 0, 0, {}}}, {2U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{ cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"}, "", CEPH_NOSNAP, true, 0, {}}, 0, {}, 0, 0, {}}}}; mock_local_image_ctx.snap_info = { {11U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{ cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {}, "remote mirror uuid", 1, true, 0, {{1, CEPH_NOSNAP}}}, 0, {}, 0, 0, {}}}, {12U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{ cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {}, "remote mirror uuid", 2, false, 0, {{2, CEPH_NOSNAP}}}, 0, {}, 0, 0, {}}}}; // prune non-primary snap2 expect_load_image_meta(mock_image_meta, false, 0); expect_is_refresh_required(mock_local_image_ctx, false); expect_is_refresh_required(mock_remote_image_ctx, false); expect_prune_non_primary_snapshot(mock_local_image_ctx, 12, 0); // sync snap2 expect_load_image_meta(mock_image_meta, false, 0); expect_is_refresh_required(mock_local_image_ctx, true); expect_refresh( mock_local_image_ctx, { {11U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{ cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {}, "remote mirror uuid", 1, true, 0, {{1, CEPH_NOSNAP}}}, 0, {}, 0, 0, {}}}, }, 0); expect_is_refresh_required(mock_remote_image_ctx, false); MockSnapshotCopyRequest mock_snapshot_copy_request; expect_snapshot_copy(mock_snapshot_copy_request, 1, 2, 11, {{2, CEPH_NOSNAP}}, 0); MockGetImageStateRequest 
mock_get_image_state_request; expect_get_image_state(mock_get_image_state_request, 2, 0); MockCreateNonPrimaryRequest mock_create_non_primary_request; expect_create_non_primary_request(mock_create_non_primary_request, false, "remote mirror uuid", 2, {{2, CEPH_NOSNAP}}, 13, 0); expect_notify_sync_request(mock_instance_watcher, mock_local_image_ctx.id, 0); MockImageCopyRequest mock_image_copy_request; expect_image_copy(mock_image_copy_request, 1, 2, 11, {}, {{2, CEPH_NOSNAP}}, 0); MockApplyImageStateRequest mock_apply_state_request; expect_apply_image_state(mock_apply_state_request, 0); expect_mirror_image_snapshot_set_copy_progress( mock_local_image_ctx, 13, true, 0, 0); expect_notify_update(mock_local_image_ctx); MockUnlinkPeerRequest mock_unlink_peer_request; expect_unlink_peer(mock_unlink_peer_request, 1, "remote mirror peer uuid", false, 0); expect_notify_sync_complete(mock_instance_watcher, mock_local_image_ctx.id); // prune non-primary snap1 expect_load_image_meta(mock_image_meta, false, 0); expect_is_refresh_required(mock_local_image_ctx, true); expect_refresh( mock_local_image_ctx, { {11U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{ cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {}, "remote mirror uuid", 1, true, 0, {}}, 0, {}, 0, 0, {}}}, {13U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{ cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {}, "remote mirror uuid", 2, true, 0, {}}, 0, {}, 0, 0, {}}}, }, 0); expect_is_refresh_required(mock_remote_image_ctx, true); expect_refresh( mock_remote_image_ctx, { {2U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{ cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"}, "", CEPH_NOSNAP, true, 0, {}}, 0, {}, 0, 0, {}}}, }, 0); expect_prune_non_primary_snapshot(mock_local_image_ctx, 11, 0); // idle expect_load_image_meta(mock_image_meta, false, 0); expect_is_refresh_required(mock_local_image_ctx, true); expect_refresh( mock_local_image_ctx, { {13U, 
librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{ cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {}, "remote mirror uuid", 2, true, 0, {}}, 0, {}, 0, 0, {}}}, }, 0); expect_is_refresh_required(mock_remote_image_ctx, false); // wake-up replayer update_watch_ctx->handle_notify(); // wait for sync to complete ASSERT_EQ(0, wait_for_notification(2)); ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads, mock_local_image_ctx, mock_remote_image_ctx)); } TEST_F(TestMockImageReplayerSnapshotReplayer, InterruptedPendingSyncDeltaDemote) { librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx}; librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx}; MockThreads mock_threads(m_threads); expect_work_queue_repeatedly(mock_threads); MockReplayerListener mock_replayer_listener; expect_notification(mock_threads, mock_replayer_listener); InSequence seq; MockInstanceWatcher mock_instance_watcher; MockImageMeta mock_image_meta; MockStateBuilder mock_state_builder(mock_local_image_ctx, mock_remote_image_ctx, mock_image_meta); MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher, "local mirror uuid", &m_pool_meta_cache, &mock_state_builder, &mock_replayer_listener}; m_pool_meta_cache.set_remote_pool_meta( m_remote_io_ctx.get_id(), {"remote mirror uuid", "remote mirror peer uuid"}); librbd::UpdateWatchCtx* update_watch_ctx = nullptr; ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads, mock_local_image_ctx, mock_remote_image_ctx, mock_replayer_listener, mock_image_meta, &update_watch_ctx)); // inject an incomplete sync snapshot with last_copied_object_number == 0 // after a primary demotion snapshot mock_remote_image_ctx.snap_info = { {1U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{ cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY_DEMOTED, {"remote mirror peer uuid"}, "local mirror uuid", 11, true, 0, {{11, CEPH_NOSNAP}}}, 0, {}, 0, 0, {}}}, {2U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{ 
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}}};
  mock_local_image_ctx.snap_info = {
    {11U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY_DEMOTED,
       {"remote mirror peer uuid"}, "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}},
    {12U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {}, "remote mirror uuid",
       2, false, 0, {{2, CEPH_NOSNAP}}},
     0, {}, 0, 0, {}}}};

  // prune non-primary snap2
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, false);
  expect_is_refresh_required(mock_remote_image_ctx, false);
  expect_prune_non_primary_snapshot(mock_local_image_ctx, 12, 0);

  // sync snap2
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, true);
  expect_refresh(
    mock_local_image_ctx, {
      {11U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY_DEMOTED,
         {"remote mirror peer uuid"}, "", CEPH_NOSNAP, true, 0, {}},
       0, {}, 0, 0, {}}},
    }, 0);
  expect_is_refresh_required(mock_remote_image_ctx, false);
  MockSnapshotCopyRequest mock_snapshot_copy_request;
  expect_snapshot_copy(mock_snapshot_copy_request, 1, 2, 11,
                       {{2, CEPH_NOSNAP}}, 0);
  MockGetImageStateRequest mock_get_image_state_request;
  expect_get_image_state(mock_get_image_state_request, 2, 0);
  MockCreateNonPrimaryRequest mock_create_non_primary_request;
  // re-created non-primary snapshot gets local snap id 13
  expect_create_non_primary_request(mock_create_non_primary_request,
                                    false, "remote mirror uuid", 2,
                                    {{2, CEPH_NOSNAP}}, 13, 0);
  expect_notify_sync_request(mock_instance_watcher, mock_local_image_ctx.id, 0);
  MockImageCopyRequest mock_image_copy_request;
  expect_image_copy(mock_image_copy_request, 1, 2, 11, {},
                    {{2, CEPH_NOSNAP}}, 0);
  MockApplyImageStateRequest mock_apply_state_request;
  expect_apply_image_state(mock_apply_state_request, 0);
  expect_mirror_image_snapshot_set_copy_progress(
    mock_local_image_ctx, 13, true, 0, 0);
  expect_notify_update(mock_local_image_ctx);
  MockUnlinkPeerRequest mock_unlink_peer_request;
  expect_unlink_peer(mock_unlink_peer_request, 1, "remote mirror peer uuid",
                     false, 0);
  expect_notify_sync_complete(mock_instance_watcher, mock_local_image_ctx.id);

  // idle
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, true);
  expect_refresh(
    mock_local_image_ctx, {
      {11U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY_DEMOTED,
         {"remote mirror peer uuid"}, "", CEPH_NOSNAP, true, 0, {}},
       0, {}, 0, 0, {}}},
      {13U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {}, "remote mirror uuid",
         2, true, 0, {}},
       0, {}, 0, 0, {}}},
    }, 0);
  expect_is_refresh_required(mock_remote_image_ctx, true);
  expect_refresh(
    mock_remote_image_ctx, {
      {2U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
         "", CEPH_NOSNAP, true, 0, {}},
       0, {}, 0, 0, {}}},
    }, 0);

  // wake-up replayer
  update_watch_ctx->handle_notify();

  // wait for sync to complete
  ASSERT_EQ(0, wait_for_notification(2));

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_image_ctx,
                                        mock_remote_image_ctx));
}

// A primary-demoted snapshot on the remote image is replicated as a
// non-primary-demoted snapshot; the local mirror image state is updated and
// replay then stops (is_replaying() becomes false).
TEST_F(TestMockImageReplayerSnapshotReplayer, RemoteImageDemoted) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);

  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  // inject a demotion snapshot
  mock_remote_image_ctx.snap_info = {
    {1U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY_DEMOTED,
       {"remote mirror peer uuid"}, "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}}};

  // sync snap1
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, false);
  expect_is_refresh_required(mock_remote_image_ctx, false);
  MockSnapshotCopyRequest mock_snapshot_copy_request;
  expect_snapshot_copy(mock_snapshot_copy_request, 0, 1, 0,
                       {{1, CEPH_NOSNAP}}, 0);
  MockGetImageStateRequest mock_get_image_state_request;
  expect_get_image_state(mock_get_image_state_request, 1, 0);
  MockCreateNonPrimaryRequest mock_create_non_primary_request;
  // demoted == true for the local copy of the demotion snapshot
  expect_create_non_primary_request(mock_create_non_primary_request,
                                    true, "remote mirror uuid", 1,
                                    {{1, CEPH_NOSNAP}}, 11, 0);
  MockImageStateUpdateRequest mock_image_state_update_request;
  expect_update_mirror_image_state(mock_image_state_update_request, 0);
  expect_notify_sync_request(mock_instance_watcher, mock_local_image_ctx.id, 0);
  MockImageCopyRequest mock_image_copy_request;
  expect_image_copy(mock_image_copy_request, 0, 1, 0, {},
                    {{1, CEPH_NOSNAP}}, 0);
  MockApplyImageStateRequest mock_apply_state_request;
  expect_apply_image_state(mock_apply_state_request, 0);
  expect_mirror_image_snapshot_set_copy_progress(
    mock_local_image_ctx, 11, true, 0, 0);
  expect_notify_update(mock_local_image_ctx);
  expect_notify_sync_complete(mock_instance_watcher,
                              mock_local_image_ctx.id);

  // idle
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, true);
  expect_refresh(
    mock_local_image_ctx, {
      {11U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {}, "remote mirror uuid",
         1, true, 0, {}},
       0, {}, 0, 0, {}}},
    }, 0);
  expect_is_refresh_required(mock_remote_image_ctx, false);

  // wake-up replayer
  update_watch_ctx->handle_notify();

  // wait for sync to complete and expect replay complete
  ASSERT_EQ(0, wait_for_notification(2));
  ASSERT_FALSE(mock_replayer.is_replaying());

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_image_ctx,
                                        mock_remote_image_ctx));
}

// A primary (promotion) snapshot on the local image stops replay: no sync
// work is scheduled and is_replaying() becomes false.
TEST_F(TestMockImageReplayerSnapshotReplayer, LocalImagePromoted) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);

  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  // inject a promotion snapshot
  mock_local_image_ctx.snap_info = {
    {1U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}}};

  // idle
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, false);
  expect_is_refresh_required(mock_remote_image_ctx, false);

  // wake-up replayer
  update_watch_ctx->handle_notify();

  // wait for sync to complete and expect replay complete
  ASSERT_EQ(0, wait_for_notification(1));
  ASSERT_FALSE(mock_replayer.is_replaying());

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_image_ctx,
                                        mock_remote_image_ctx));
}

// A resync request recorded in the image-meta stops replay immediately
// (no refresh or sync expectations are set after load_image_meta).
TEST_F(TestMockImageReplayerSnapshotReplayer, ResyncRequested) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);

  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  // idle
  // resync_requested == true short-circuits the scan
  expect_load_image_meta(mock_image_meta, true, 0);

  // wake-up replayer
  update_watch_ctx->handle_notify();

  // wait for sync to complete and expect replay complete
  ASSERT_EQ(0, wait_for_notification(1));
  ASSERT_FALSE(mock_replayer.is_replaying());

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_image_ctx,
                                        mock_remote_image_ctx));
}
// init() fails with the error returned when registering the local image
// update watcher.
TEST_F(TestMockImageReplayerSnapshotReplayer, RegisterLocalUpdateWatcherError) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayerListener mock_replayer_listener;
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  // init
  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  expect_register_update_watcher(mock_local_image_ctx, &update_watch_ctx, 123,
                                 -EINVAL);

  // fire init
  C_SaferCond init_ctx;
  mock_replayer.init(&init_ctx);
  ASSERT_EQ(-EINVAL, init_ctx.wait());
}

// init() fails when registering the remote image update watcher; the
// already-registered local watcher is unregistered during cleanup.
TEST_F(TestMockImageReplayerSnapshotReplayer,
       RegisterRemoteUpdateWatcherError) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayerListener mock_replayer_listener;
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  // init
  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  expect_register_update_watcher(mock_local_image_ctx, &update_watch_ctx, 123,
                                 0);
  expect_register_update_watcher(mock_remote_image_ctx, &update_watch_ctx, 234,
                                 -EINVAL);
  expect_unregister_update_watcher(mock_local_image_ctx, 123, 0);

  // fire init
  C_SaferCond init_ctx;
  mock_replayer.init(&init_ctx);
  ASSERT_EQ(-EINVAL, init_ctx.wait());
}

// shut_down() still succeeds (returns 0) even if unregistering the remote
// update watcher fails.
TEST_F(TestMockImageReplayerSnapshotReplayer,
       UnregisterRemoteUpdateWatcherError) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);

  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  // shut down
  expect_unregister_update_watcher(mock_remote_image_ctx, 234, -EINVAL);
  expect_unregister_update_watcher(mock_local_image_ctx, 123, 0);

  C_SaferCond shutdown_ctx;
  mock_replayer.shut_down(&shutdown_ctx);
  ASSERT_EQ(0, shutdown_ctx.wait());
}

// shut_down() still succeeds (returns 0) even if unregistering the local
// update watcher fails.
TEST_F(TestMockImageReplayerSnapshotReplayer,
       UnregisterLocalUpdateWatcherError) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);

  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads,
                      mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  // shut down
  expect_unregister_update_watcher(mock_remote_image_ctx, 234, 0);
  expect_unregister_update_watcher(mock_local_image_ctx, 123, -EINVAL);

  C_SaferCond shutdown_ctx;
  mock_replayer.shut_down(&shutdown_ctx);
  ASSERT_EQ(0, shutdown_ctx.wait());
}

// An error loading the image-meta stops replay and is surfaced via
// get_error_code().
TEST_F(TestMockImageReplayerSnapshotReplayer, LoadImageMetaError) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);

  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener,
                                   mock_image_meta, &update_watch_ctx));

  // sync
  expect_load_image_meta(mock_image_meta, false, -EINVAL);

  // wake-up replayer
  update_watch_ctx->handle_notify();

  // wait for sync to complete and expect replay complete
  ASSERT_EQ(0, wait_for_notification(1));
  ASSERT_FALSE(mock_replayer.is_replaying());
  ASSERT_EQ(-EINVAL, mock_replayer.get_error_code());

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_image_ctx,
                                        mock_remote_image_ctx));
}

// An error refreshing the local image stops replay and is surfaced via
// get_error_code().
TEST_F(TestMockImageReplayerSnapshotReplayer, RefreshLocalImageError) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);

  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  // sync
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, true);
  expect_refresh(mock_local_image_ctx, {}, -EINVAL);

  // wake-up replayer
  update_watch_ctx->handle_notify();

  // wait for sync to complete and expect replay complete
  ASSERT_EQ(0, wait_for_notification(1));
  ASSERT_FALSE(mock_replayer.is_replaying());
  ASSERT_EQ(-EINVAL, mock_replayer.get_error_code());

  ASSERT_EQ(0,
            shut_down_entry_replayer(mock_replayer, mock_threads,
                                     mock_local_image_ctx,
                                     mock_remote_image_ctx));
}

// An error refreshing the remote image stops replay and is surfaced via
// get_error_code().
TEST_F(TestMockImageReplayerSnapshotReplayer, RefreshRemoteImageError) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);

  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  // sync
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, false);
  expect_is_refresh_required(mock_remote_image_ctx, true);
  expect_refresh(mock_remote_image_ctx, {}, -EINVAL);

  // wake-up replayer
  update_watch_ctx->handle_notify();

  // wait for sync to complete and expect replay complete
  ASSERT_EQ(0, wait_for_notification(1));
  ASSERT_FALSE(mock_replayer.is_replaying());
  ASSERT_EQ(-EINVAL, mock_replayer.get_error_code());

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_image_ctx,
                                        mock_remote_image_ctx));
}

// An error from the snapshot-copy request stops replay and is surfaced via
// get_error_code().  (Continues past this chunk boundary.)
TEST_F(TestMockImageReplayerSnapshotReplayer, CopySnapshotsError) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads
  // (continuation of CopySnapshotsError, whose TEST_F header is above)
  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);

  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  // inject snapshot
  mock_remote_image_ctx.snap_info = {
    {1U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}}};

  // sync snap1
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, false);
  expect_is_refresh_required(mock_remote_image_ctx, false);
  MockSnapshotCopyRequest mock_snapshot_copy_request;
  // snapshot copy fails
  expect_snapshot_copy(mock_snapshot_copy_request, 0, 1, 0,
                       {{1, CEPH_NOSNAP}}, -EINVAL);

  // wake-up replayer
  update_watch_ctx->handle_notify();

  // wait for sync to complete and expect replay complete
  ASSERT_EQ(0, wait_for_notification(1));
  ASSERT_FALSE(mock_replayer.is_replaying());
  ASSERT_EQ(-EINVAL, mock_replayer.get_error_code());

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_image_ctx,
                                        mock_remote_image_ctx));
}

// An error retrieving the remote snapshot's image state stops replay and is
// surfaced via get_error_code().
TEST_F(TestMockImageReplayerSnapshotReplayer, GetImageStateError) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);

  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  // inject snapshot
  mock_remote_image_ctx.snap_info = {
    {1U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}}};

  // sync snap1
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, false);
  expect_is_refresh_required(mock_remote_image_ctx, false);
  MockSnapshotCopyRequest mock_snapshot_copy_request;
  expect_snapshot_copy(mock_snapshot_copy_request, 0, 1, 0,
                       {{1, CEPH_NOSNAP}}, 0);
  MockGetImageStateRequest mock_get_image_state_request;
  // image-state retrieval fails
  expect_get_image_state(mock_get_image_state_request, 1, -EINVAL);

  // wake-up replayer
  update_watch_ctx->handle_notify();

  // wait for sync to complete and expect replay complete
  ASSERT_EQ(0, wait_for_notification(1));
  ASSERT_FALSE(mock_replayer.is_replaying());
  ASSERT_EQ(-EINVAL, mock_replayer.get_error_code());

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_image_ctx,
                                        mock_remote_image_ctx));
}

// An error creating the local non-primary snapshot stops replay and is
// surfaced via get_error_code().
TEST_F(TestMockImageReplayerSnapshotReplayer, CreateNonPrimarySnapshotError) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);

  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  // inject snapshot
  mock_remote_image_ctx.snap_info = {
    {1U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}}};

  // sync snap1
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, false);
  expect_is_refresh_required(mock_remote_image_ctx, false);
  MockSnapshotCopyRequest mock_snapshot_copy_request;
  expect_snapshot_copy(mock_snapshot_copy_request, 0, 1, 0,
                       {{1, CEPH_NOSNAP}}, 0);
  MockGetImageStateRequest mock_get_image_state_request;
  expect_get_image_state(mock_get_image_state_request, 1, 0);
  MockCreateNonPrimaryRequest mock_create_non_primary_request;
  // non-primary snapshot creation fails
  expect_create_non_primary_request(mock_create_non_primary_request,
                                    false, "remote mirror uuid", 1,
                                    {{1, CEPH_NOSNAP}}, 11, -EINVAL);

  // wake-up replayer
  update_watch_ctx->handle_notify();

  // wait for sync to complete and expect replay complete
  ASSERT_EQ(0, wait_for_notification(1));
  ASSERT_FALSE(mock_replayer.is_replaying());
  ASSERT_EQ(-EINVAL, mock_replayer.get_error_code());

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_image_ctx,
                                        mock_remote_image_ctx));
}

// An error updating the local mirror-image state stops replay and surfaces
// -EIO via get_error_code().
TEST_F(TestMockImageReplayerSnapshotReplayer, UpdateMirrorImageStateError) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);

  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  // inject snapshot
  mock_remote_image_ctx.snap_info = {
    {1U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}}};

  // sync snap1
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, false);
  expect_is_refresh_required(mock_remote_image_ctx, false);
  MockSnapshotCopyRequest mock_snapshot_copy_request;
  expect_snapshot_copy(mock_snapshot_copy_request, 0, 1, 0,
                       {{1, CEPH_NOSNAP}}, 0);
  MockGetImageStateRequest mock_get_image_state_request;
  expect_get_image_state(mock_get_image_state_request, 1, 0);
  MockCreateNonPrimaryRequest mock_create_non_primary_request;
  expect_create_non_primary_request(mock_create_non_primary_request,
                                    false, "remote mirror uuid", 1,
                                    {{1, CEPH_NOSNAP}}, 11, 0);
  MockImageStateUpdateRequest mock_image_state_update_request;
  // mirror-image state update fails
  expect_update_mirror_image_state(mock_image_state_update_request, -EIO);

  // wake-up replayer
  update_watch_ctx->handle_notify();

  // wait for sync to complete and expect replay complete
  ASSERT_EQ(0, wait_for_notification(1));
  ASSERT_FALSE(mock_replayer.is_replaying());
  ASSERT_EQ(-EIO, mock_replayer.get_error_code());

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_image_ctx,
                                        mock_remote_image_ctx));
}

// Error requesting a sync slot from the instance watcher.
// NOTE(review): this test continues past the end of this chunk.
TEST_F(TestMockImageReplayerSnapshotReplayer, RequestSyncError) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);

  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  // inject snapshot
  mock_remote_image_ctx.snap_info = {
    {1U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}}};

  // sync snap1
expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, false);
  expect_is_refresh_required(mock_remote_image_ctx, false);
  MockSnapshotCopyRequest mock_snapshot_copy_request;
  expect_snapshot_copy(mock_snapshot_copy_request, 0, 1, 0,
                       {{1, CEPH_NOSNAP}}, 0);
  MockGetImageStateRequest mock_get_image_state_request;
  expect_get_image_state(mock_get_image_state_request, 1, 0);
  MockCreateNonPrimaryRequest mock_create_non_primary_request;
  expect_create_non_primary_request(mock_create_non_primary_request,
                                    false, "remote mirror uuid", 1,
                                    {{1, CEPH_NOSNAP}}, 11, 0);
  MockImageStateUpdateRequest mock_image_state_update_request;
  expect_update_mirror_image_state(mock_image_state_update_request, 0);
  // acquiring the sync lease fails with -ECANCELED
  expect_notify_sync_request(mock_instance_watcher, mock_local_image_ctx.id,
                             -ECANCELED);

  // wake-up replayer
  update_watch_ctx->handle_notify();

  // wait for sync to complete and expect replay complete
  ASSERT_EQ(0, wait_for_notification(1));
  ASSERT_FALSE(mock_replayer.is_replaying());
  ASSERT_EQ(-ECANCELED, mock_replayer.get_error_code());

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_image_ctx,
                                        mock_remote_image_ctx));
}

// A failure during the image data copy should release the sync lease,
// halt replay, and propagate the error code to the listener.
TEST_F(TestMockImageReplayerSnapshotReplayer, CopyImageError) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);
  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  // inject snapshot
  mock_remote_image_ctx.snap_info = {
    {1U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}}};

  // sync snap1
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, false);
  expect_is_refresh_required(mock_remote_image_ctx, false);
  MockSnapshotCopyRequest mock_snapshot_copy_request;
  expect_snapshot_copy(mock_snapshot_copy_request, 0, 1, 0,
                       {{1, CEPH_NOSNAP}}, 0);
  MockGetImageStateRequest mock_get_image_state_request;
  expect_get_image_state(mock_get_image_state_request, 1, 0);
  MockCreateNonPrimaryRequest mock_create_non_primary_request;
  expect_create_non_primary_request(mock_create_non_primary_request,
                                    false, "remote mirror uuid", 1,
                                    {{1, CEPH_NOSNAP}}, 11, 0);
  MockImageStateUpdateRequest mock_image_state_update_request;
  expect_update_mirror_image_state(mock_image_state_update_request, 0);
  expect_notify_sync_request(mock_instance_watcher, mock_local_image_ctx.id,
                             0);
  MockImageCopyRequest mock_image_copy_request;
  // copying the image data fails with -EINVAL; the sync lease is released
  expect_image_copy(mock_image_copy_request, 0, 1, 0, {},
                    {{1, CEPH_NOSNAP}}, -EINVAL);
  expect_notify_sync_complete(mock_instance_watcher, mock_local_image_ctx.id);

  // wake-up replayer
  update_watch_ctx->handle_notify();

  // wait for sync to complete and expect replay complete
  ASSERT_EQ(0, wait_for_notification(1));
  ASSERT_FALSE(mock_replayer.is_replaying());
  ASSERT_EQ(-EINVAL, mock_replayer.get_error_code());

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_image_ctx,
                                        mock_remote_image_ctx));
}

// A failure to mark the local non-primary snapshot's copy progress as
// complete should halt replay and propagate the error code.
TEST_F(TestMockImageReplayerSnapshotReplayer, UpdateNonPrimarySnapshotError) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);
  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  // inject snapshot
  mock_remote_image_ctx.snap_info = {
    {1U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}}};

  // sync snap1
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, false);
  expect_is_refresh_required(mock_remote_image_ctx, false);
  MockSnapshotCopyRequest mock_snapshot_copy_request;
  expect_snapshot_copy(mock_snapshot_copy_request, 0, 1, 0,
                       {{1, CEPH_NOSNAP}}, 0);
  MockGetImageStateRequest mock_get_image_state_request;
  expect_get_image_state(mock_get_image_state_request, 1, 0);
  MockCreateNonPrimaryRequest mock_create_non_primary_request;
  expect_create_non_primary_request(mock_create_non_primary_request,
                                    false, "remote mirror uuid", 1,
                                    {{1, CEPH_NOSNAP}}, 11, 0);
  MockImageStateUpdateRequest mock_image_state_update_request;
  expect_update_mirror_image_state(mock_image_state_update_request, 0);
  expect_notify_sync_request(mock_instance_watcher, mock_local_image_ctx.id,
                             0);
  MockImageCopyRequest mock_image_copy_request;
  expect_image_copy(mock_image_copy_request, 0, 1, 0, {},
                    {{1, CEPH_NOSNAP}}, 0);
  MockApplyImageStateRequest mock_apply_state_request;
  expect_apply_image_state(mock_apply_state_request, 0);
  // marking the copy progress complete fails with -EINVAL
  expect_mirror_image_snapshot_set_copy_progress(
    mock_local_image_ctx, 11, true, 0, -EINVAL);
  expect_notify_sync_complete(mock_instance_watcher, mock_local_image_ctx.id);

  // wake-up replayer
  update_watch_ctx->handle_notify();

  // wait for sync to complete and expect replay complete
  ASSERT_EQ(0, wait_for_notification(1));
  ASSERT_FALSE(mock_replayer.is_replaying());
  ASSERT_EQ(-EINVAL, mock_replayer.get_error_code());

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_image_ctx,
                                        mock_remote_image_ctx));
}

// A failure to unlink the peer from an already-synced remote snapshot
// should halt replay and propagate the error code.
TEST_F(TestMockImageReplayerSnapshotReplayer, UnlinkPeerError) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);
  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  // inject snapshot
  mock_remote_image_ctx.snap_info = {
    {1U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}},
    {2U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}}};
  mock_local_image_ctx.snap_info = {
    {11U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {}, "remote mirror uuid",
       1, true, 0, {}},
     0, {}, 0, 0, {}}}};

  // sync snap2
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, false);
  expect_is_refresh_required(mock_remote_image_ctx, false);
  MockSnapshotCopyRequest mock_snapshot_copy_request;
  expect_snapshot_copy(mock_snapshot_copy_request, 1, 2, 11,
                       {{2, CEPH_NOSNAP}}, 0);
  MockGetImageStateRequest mock_get_image_state_request;
  expect_get_image_state(mock_get_image_state_request, 2, 0);
  MockCreateNonPrimaryRequest mock_create_non_primary_request;
  expect_create_non_primary_request(mock_create_non_primary_request,
                                    false, "remote mirror uuid", 2,
                                    {{2, CEPH_NOSNAP}}, 12, 0);
  expect_notify_sync_request(mock_instance_watcher, mock_local_image_ctx.id,
                             0);
  MockImageCopyRequest mock_image_copy_request;
  expect_image_copy(mock_image_copy_request, 1, 2, 11, {},
                    {{2, CEPH_NOSNAP}}, 0);
  MockApplyImageStateRequest mock_apply_state_request;
  expect_apply_image_state(mock_apply_state_request, 0);
  expect_mirror_image_snapshot_set_copy_progress(
    mock_local_image_ctx, 12, true, 0, 0);
  expect_notify_update(mock_local_image_ctx);
  MockUnlinkPeerRequest mock_unlink_peer_request;
  // unlinking the peer from the older remote snapshot fails with -EINVAL
  expect_unlink_peer(mock_unlink_peer_request, 1, "remote mirror peer uuid",
                     false, -EINVAL);
  expect_notify_sync_complete(mock_instance_watcher, mock_local_image_ctx.id);

  // wake-up replayer
  update_watch_ctx->handle_notify();

  // wait for sync to complete and expect replay complete
  ASSERT_EQ(0, wait_for_notification(1));
ASSERT_FALSE(mock_replayer.is_replaying());
  ASSERT_EQ(-EINVAL, mock_replayer.get_error_code());

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_image_ctx,
                                        mock_remote_image_ctx));
}

// A locally-demoted primary snapshot with no matching remote demotion
// indicates both sides were promoted independently: expect -EEXIST and a
// "split-brain" description.
TEST_F(TestMockImageReplayerSnapshotReplayer, SplitBrain) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);
  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  // inject a primary demote to local image
  mock_remote_image_ctx.snap_info = {
    {1U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}}};
  mock_local_image_ctx.snap_info = {
    {1U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY_DEMOTED, {}, "",
       CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}}};

  // detect split-brain
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, false);
  expect_is_refresh_required(mock_remote_image_ctx, false);

  // wake-up replayer
  update_watch_ctx->handle_notify();

  // wait for sync to complete and expect replay complete
  ASSERT_EQ(0, wait_for_notification(1));
  ASSERT_FALSE(mock_replayer.is_replaying());
  ASSERT_EQ(-EEXIST, mock_replayer.get_error_code());
  ASSERT_EQ(std::string{"split-brain"},
            mock_replayer.get_error_description());

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_image_ctx,
                                        mock_remote_image_ctx));
}

// If the remote snapshot the local image was last synced from has been
// deleted, the replayer cannot find a common ancestor and must report
// split-brain.
TEST_F(TestMockImageReplayerSnapshotReplayer, RemoteSnapshotMissingSplitBrain) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);
  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  // inject a missing remote start snap (deleted)
  mock_local_image_ctx.snap_info = {
    {11U, librbd::SnapInfo{"snap3", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {}, "remote mirror uuid",
       1, true, 0, {{1, CEPH_NOSNAP}}},
     0, {}, 0, 0, {}}}};
  mock_remote_image_ctx.snap_info = {
    {2U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}},
    {3U, librbd::SnapInfo{"snap3", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}}};

  // split-brain due to missing snapshot 1
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, false);
  expect_is_refresh_required(mock_remote_image_ctx, false);

  // wake-up replayer
  update_watch_ctx->handle_notify();

  // wait for sync to complete and expect replay complete
  ASSERT_EQ(0, wait_for_notification(1));
  ASSERT_FALSE(mock_replayer.is_replaying());
  ASSERT_EQ(-EEXIST, mock_replayer.get_error_code());
  ASSERT_EQ(std::string{"split-brain"},
            mock_replayer.get_error_description());

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_image_ctx,
                                        mock_remote_image_ctx));
}

// After the local image was demoted and the remote re-promoted, the
// replayer should re-attach from the matching demotion snapshot, sync the
// new primary snapshot, and unlink the peer from the demotion snap.
TEST_F(TestMockImageReplayerSnapshotReplayer, RemoteFailover) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);
  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  // inject a primary demote to local image
  mock_remote_image_ctx.snap_info = {
    {1U, librbd::SnapInfo{"snap1", cls::rbd::UserSnapshotNamespace{},
     0, {}, 0, 0, {}}},
    {2U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY_DEMOTED,
       {"remote mirror peer uuid"}, "local mirror uuid", 12U, true, 0, {}},
     0, {}, 0, 0, {}}},
    {3U, librbd::SnapInfo{"snap3", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}}};
  mock_local_image_ctx.snap_ids = {
    {{cls::rbd::UserSnapshotNamespace{}, "snap1"}, 11},
    {{cls::rbd::MirrorSnapshotNamespace{}, "snap2"}, 12}};
  mock_local_image_ctx.snap_info = {
    {11U, librbd::SnapInfo{"snap1", cls::rbd::UserSnapshotNamespace{},
     0, {}, 0, 0, {}}},
    {12U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY_DEMOTED, {}, "",
       CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}}};

  // attach to promoted remote image
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, false);
  expect_is_refresh_required(mock_remote_image_ctx, false);
  MockSnapshotCopyRequest mock_snapshot_copy_request;
  expect_snapshot_copy(mock_snapshot_copy_request, 2, 3, 12,
                       {{2, 12}, {3, CEPH_NOSNAP}}, 0);
  MockGetImageStateRequest mock_get_image_state_request;
  expect_get_image_state(mock_get_image_state_request, 3, 0);
  MockCreateNonPrimaryRequest mock_create_non_primary_request;
  expect_create_non_primary_request(mock_create_non_primary_request,
                                    false, "remote mirror uuid", 3,
                                    {{1, 11}, {2, 12}, {3, CEPH_NOSNAP}},
                                    13, 0);
  expect_notify_sync_request(mock_instance_watcher, mock_local_image_ctx.id,
                             0);
  MockImageCopyRequest mock_image_copy_request;
  expect_image_copy(mock_image_copy_request, 2, 3, 12, {},
                    {{1, 11}, {2, 12}, {3, CEPH_NOSNAP}}, 0);
  MockApplyImageStateRequest mock_apply_state_request;
  expect_apply_image_state(mock_apply_state_request, 0);
  expect_mirror_image_snapshot_set_copy_progress(
    mock_local_image_ctx, 13, true, 0, 0);
  expect_notify_update(mock_local_image_ctx);
  MockUnlinkPeerRequest mock_unlink_peer_request;
  expect_unlink_peer(mock_unlink_peer_request, 2, "remote mirror peer uuid",
                     false, 0);
  expect_notify_sync_complete(mock_instance_watcher, mock_local_image_ctx.id);

  // idle
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, true);
  expect_refresh(
    mock_local_image_ctx, {
      {11U, librbd::SnapInfo{"snap1", cls::rbd::UserSnapshotNamespace{},
       0, {}, 0, 0, {}}},
      {12U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY_DEMOTED, {}, "",
         CEPH_NOSNAP, true, 0, {}},
       0, {}, 0, 0, {}}},
      {13U, librbd::SnapInfo{"snap3", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {},
         "remote mirror uuid", 3, true, 0,
         {{1, 11}, {2, 12}, {3, CEPH_NOSNAP}}},
       0, {}, 0, 0, {}}},
    }, 0);
  expect_is_refresh_required(mock_remote_image_ctx, true);
  expect_refresh(
    mock_remote_image_ctx, {
      {1U, librbd::SnapInfo{"snap1", cls::rbd::UserSnapshotNamespace{},
       0, {}, 0, 0, {}}},
      {2U, librbd::SnapInfo{"snap2", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY_DEMOTED,
         {"remote mirror peer uuid"}, "local mirror uuid", 12U, true, 0, {}},
       0, {}, 0, 0, {}}},
      {3U, librbd::SnapInfo{"snap3", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {}, "",
         CEPH_NOSNAP, true, 0, {}},
       0, {}, 0, 0, {}}}
    }, 0);

  // wake-up replayer
  update_watch_ctx->handle_notify();

  // wait for sync to complete and expect replay complete
  ASSERT_EQ(0, wait_for_notification(2));

  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_image_ctx,
                                        mock_remote_image_ctx));
}

// A remote snapshot that the local image no longer depends on should get
// its mirror peer unlinked during the idle scan.
TEST_F(TestMockImageReplayerSnapshotReplayer, UnlinkRemoteSnapshot) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  // it should attempt to unlink from remote snap1 since we don't need it
  // anymore
mock_local_image_ctx.snap_info = {
    {14U, librbd::SnapInfo{"snap4", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {}, "remote mirror uuid",
       4, true, 0, {}},
     0, {}, 0, 0, {}}}};
  mock_remote_image_ctx.snap_info = {
    {1U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}},
    {4U, librbd::SnapInfo{"snap4", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}}};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);
  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;

  // init
  expect_register_update_watcher(mock_local_image_ctx, &update_watch_ctx,
                                 123, 0);
  expect_register_update_watcher(mock_remote_image_ctx, &update_watch_ctx,
                                 234, 0);

  // unlink snap1
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, false);
  expect_is_refresh_required(mock_remote_image_ctx, false);
  MockUnlinkPeerRequest mock_unlink_peer_request;
  expect_unlink_peer(mock_unlink_peer_request, 1, "remote mirror peer uuid",
                     false, 0);

  // idle
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, false);
  expect_is_refresh_required(mock_remote_image_ctx, true);
  expect_refresh(
    mock_remote_image_ctx, {
      {2U, librbd::SnapInfo{"snap2", cls::rbd::UserSnapshotNamespace{},
       0, {}, 0, 0, {}}},
      {3U, librbd::SnapInfo{"snap3", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {""}, "",
         CEPH_NOSNAP, true, 0, {}},
       0, {}, 0, 0, {}}},
      {4U, librbd::SnapInfo{"snap4", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY,
         {"remote mirror peer uuid"}, "", CEPH_NOSNAP, true, 0, {}},
       0, {}, 0, 0, {}}}
    }, 0);

  // fire init
  C_SaferCond init_ctx;
  mock_replayer.init(&init_ctx);
  ASSERT_EQ(0, init_ctx.wait());

  // wait for sync to complete
  ASSERT_EQ(0, wait_for_notification(3));

  // shut down
  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_image_ctx,
                                        mock_remote_image_ctx));
}

// A remote primary snapshot whose primary_snap_id is already valid (0U
// here, not CEPH_NOSNAP) does not require a data copy: the replayer
// should create the local snapshot and skip the image-sync step entirely
// (no sync lease request, no image copy).
TEST_F(TestMockImageReplayerSnapshotReplayer, SkipImageSync) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  mock_remote_image_ctx.snap_info = {
    {1U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", 0U, true, 0, {}},
     0, {}, 0, 0, {}}}};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);
  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;

  // init
  expect_register_update_watcher(mock_local_image_ctx, &update_watch_ctx,
                                 123, 0);
  expect_register_update_watcher(mock_remote_image_ctx, &update_watch_ctx,
                                 234, 0);

  // sync snap1
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, false);
  expect_is_refresh_required(mock_remote_image_ctx, false);
  MockSnapshotCopyRequest mock_snapshot_copy_request;
  expect_snapshot_copy(mock_snapshot_copy_request, 0, 1, 0,
                       {{1, CEPH_NOSNAP}}, 0);
  MockGetImageStateRequest mock_get_image_state_request;
  expect_get_image_state(mock_get_image_state_request, 1, 0);
  MockCreateNonPrimaryRequest mock_create_non_primary_request;
  expect_create_non_primary_request(mock_create_non_primary_request,
                                    false, "remote mirror uuid", 1,
                                    {{1, CEPH_NOSNAP}}, 11, 0);
  MockImageStateUpdateRequest mock_image_state_update_request;
  expect_update_mirror_image_state(mock_image_state_update_request, 0);
  // no sync lease / image copy expectations: the data copy is skipped
  MockApplyImageStateRequest mock_apply_state_request;
  expect_apply_image_state(mock_apply_state_request, 0);
  expect_mirror_image_snapshot_set_copy_progress(
    mock_local_image_ctx, 11, true, 0, 0);
  expect_notify_update(mock_local_image_ctx);

  // idle
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, true);
  expect_refresh(
    mock_local_image_ctx, {
      {11U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
         cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY, {},
         "remote mirror uuid", 1, true, 0, {{1, CEPH_NOSNAP}}},
       0, {}, 0, 0, {}}},
    }, 0);
  expect_is_refresh_required(mock_remote_image_ctx, false);

  // fire init
  C_SaferCond init_ctx;
  mock_replayer.init(&init_ctx);
  ASSERT_EQ(0, init_ctx.wait());

  // wait for sync to complete
  ASSERT_EQ(0, wait_for_notification(3));

  // shut down
  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_image_ctx,
                                        mock_remote_image_ctx));
}

// Renaming the local image should cause the replayer to recompute and
// publish its image spec when the next idle pass observes the change.
TEST_F(TestMockImageReplayerSnapshotReplayer, ImageNameUpdated) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads
mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);
  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  // change the name of the image
  mock_local_image_ctx.name = "NEW NAME";

  // idle
  expect_load_image_meta(mock_image_meta, true, 0);

  // wake-up replayer
  update_watch_ctx->handle_notify();

  // wait for sync to complete and expect replay complete
  ASSERT_EQ(0, wait_for_notification(2));
  auto image_spec = image_replayer::util::compute_image_spec(m_local_io_ctx,
                                                             "NEW NAME");
  ASSERT_EQ(image_spec, mock_replayer.get_image_spec());
  ASSERT_FALSE(mock_replayer.is_replaying());

  // shut down
  ASSERT_EQ(0, shut_down_entry_replayer(mock_replayer, mock_threads,
                                        mock_local_image_ctx,
                                        mock_remote_image_ctx));
}

// A shut_down() issued while an apply-image-state request is in flight
// must be deferred until the in-progress sync finishes, then resume and
// complete successfully.
TEST_F(TestMockImageReplayerSnapshotReplayer, ApplyImageStatePendingShutdown) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);
  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  C_SaferCond shutdown_ctx;

  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  // inject snapshot
  mock_remote_image_ctx.snap_info = {
    {1U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}}};

  // sync snap1
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, false);
  expect_is_refresh_required(mock_remote_image_ctx, false);
  MockSnapshotCopyRequest mock_snapshot_copy_request;
  expect_snapshot_copy(mock_snapshot_copy_request, 0, 1, 0,
                       {{1, CEPH_NOSNAP}}, 0);
  MockGetImageStateRequest mock_get_image_state_request;
  expect_get_image_state(mock_get_image_state_request, 1, 0);
  MockCreateNonPrimaryRequest mock_create_non_primary_request;
  expect_create_non_primary_request(mock_create_non_primary_request,
                                    false, "remote mirror uuid", 1,
                                    {{1, CEPH_NOSNAP}}, 11, 0);
  MockImageStateUpdateRequest mock_image_state_update_request;
  expect_update_mirror_image_state(mock_image_state_update_request, 0);
  expect_notify_sync_request(mock_instance_watcher, mock_local_image_ctx.id,
                             0);
  MockImageCopyRequest mock_image_copy_request;
  expect_image_copy(mock_image_copy_request, 0, 1, 0, {},
                    {{1, CEPH_NOSNAP}}, 0);
  MockApplyImageStateRequest mock_apply_state_request;
  EXPECT_CALL(mock_apply_state_request, send())
    .WillOnce(Invoke([this, &req=mock_apply_state_request,
                      &replayer=mock_replayer, &ctx=shutdown_ctx]() {
        // inject a shutdown, to be pended due to STATE_REPLAYING
        replayer.shut_down(&ctx);
        m_threads->work_queue->queue(req.on_finish, 0);
      }));
  expect_cancel_sync_request(mock_instance_watcher, mock_local_image_ctx.id);
  expect_mirror_image_snapshot_set_copy_progress(
    mock_local_image_ctx, 11, true, 0, 0);
  expect_notify_update(mock_local_image_ctx);
  expect_notify_sync_complete(mock_instance_watcher, mock_local_image_ctx.id);

  // shutdown should be resumed
  expect_unregister_update_watcher(mock_remote_image_ctx, 234, 0);
  expect_unregister_update_watcher(mock_local_image_ctx, 123, 0);

  // wake-up replayer
  update_watch_ctx->handle_notify();

  ASSERT_EQ(0, wait_for_notification(1));
  ASSERT_FALSE(mock_replayer.is_replaying());
  ASSERT_EQ(0, mock_replayer.get_error_code());
  ASSERT_EQ(0, shutdown_ctx.wait());
}

// As above, but the in-flight apply-image-state request fails: the pended
// shut_down() must still resume and complete cleanly.
TEST_F(TestMockImageReplayerSnapshotReplayer,
       ApplyImageStateErrorPendingShutdown) {
  librbd::MockTestImageCtx mock_local_image_ctx{*m_local_image_ctx};
  librbd::MockTestImageCtx mock_remote_image_ctx{*m_remote_image_ctx};

  MockThreads mock_threads(m_threads);
  expect_work_queue_repeatedly(mock_threads);
  MockReplayerListener mock_replayer_listener;
  expect_notification(mock_threads, mock_replayer_listener);

  InSequence seq;

  MockInstanceWatcher mock_instance_watcher;
  MockImageMeta mock_image_meta;
  MockStateBuilder mock_state_builder(mock_local_image_ctx,
                                      mock_remote_image_ctx,
                                      mock_image_meta);
  MockReplayer mock_replayer{&mock_threads, &mock_instance_watcher,
                             "local mirror uuid", &m_pool_meta_cache,
                             &mock_state_builder, &mock_replayer_listener};
  C_SaferCond shutdown_ctx;

  m_pool_meta_cache.set_remote_pool_meta(
    m_remote_io_ctx.get_id(),
    {"remote mirror uuid", "remote mirror peer uuid"});

  librbd::UpdateWatchCtx* update_watch_ctx = nullptr;
  ASSERT_EQ(0, init_entry_replayer(mock_replayer, mock_threads,
                                   mock_local_image_ctx,
                                   mock_remote_image_ctx,
                                   mock_replayer_listener, mock_image_meta,
                                   &update_watch_ctx));

  // inject snapshot
  mock_remote_image_ctx.snap_info = {
    {1U, librbd::SnapInfo{"snap1", cls::rbd::MirrorSnapshotNamespace{
       cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {"remote mirror peer uuid"},
       "", CEPH_NOSNAP, true, 0, {}},
     0, {}, 0, 0, {}}}};

  // sync snap1
  expect_load_image_meta(mock_image_meta, false, 0);
  expect_is_refresh_required(mock_local_image_ctx, false);
  expect_is_refresh_required(mock_remote_image_ctx, false);
  MockSnapshotCopyRequest mock_snapshot_copy_request;
  expect_snapshot_copy(mock_snapshot_copy_request, 0, 1, 0,
                       {{1, CEPH_NOSNAP}}, 0);
  MockGetImageStateRequest mock_get_image_state_request;
  expect_get_image_state(mock_get_image_state_request, 1, 0);
  MockCreateNonPrimaryRequest mock_create_non_primary_request;
  expect_create_non_primary_request(mock_create_non_primary_request,
                                    false, "remote mirror uuid", 1,
                                    {{1, CEPH_NOSNAP}}, 11, 0);
  MockImageStateUpdateRequest mock_image_state_update_request;
  expect_update_mirror_image_state(mock_image_state_update_request, 0);
  expect_notify_sync_request(mock_instance_watcher, mock_local_image_ctx.id,
                             0);
  MockImageCopyRequest mock_image_copy_request;
  expect_image_copy(mock_image_copy_request, 0, 1, 0, {},
                    {{1, CEPH_NOSNAP}}, 0);
  MockApplyImageStateRequest mock_apply_state_request;
  EXPECT_CALL(mock_apply_state_request, send())
    .WillOnce(Invoke([this, &req=mock_apply_state_request,
                      &replayer=mock_replayer, &ctx=shutdown_ctx]() {
        // inject a shutdown, to be pended due to STATE_REPLAYING
        replayer.shut_down(&ctx);
        m_threads->work_queue->queue(req.on_finish, -EINVAL);
      }));
  expect_cancel_sync_request(mock_instance_watcher, mock_local_image_ctx.id);
  expect_notify_sync_complete(mock_instance_watcher, mock_local_image_ctx.id);

  // shutdown should be resumed
  expect_unregister_update_watcher(mock_remote_image_ctx, 234, 0);
  expect_unregister_update_watcher(mock_local_image_ctx, 123, 0);

  // wake-up replayer
  update_watch_ctx->handle_notify();

  ASSERT_EQ(0, shutdown_ctx.wait());
}

} // namespace snapshot
} // namespace image_replayer
} // namespace mirror
} // namespace rbd
140,167
41.117788
121
cc
null
ceph-main/src/test/rbd_mirror/image_sync/test_mock_SyncPointCreateRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/rbd_mirror/test_mock_fixture.h" #include "include/rbd/librbd.hpp" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/rbd_mirror/mock/image_sync/MockSyncPointHandler.h" #include "tools/rbd_mirror/image_sync/SyncPointCreateRequest.h" namespace librbd { namespace { struct MockTestImageCtx : public librbd::MockImageCtx { explicit MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace } // namespace librbd // template definitions #include "tools/rbd_mirror/image_sync/SyncPointCreateRequest.cc" namespace rbd { namespace mirror { namespace image_sync { using ::testing::_; using ::testing::DoAll; using ::testing::InSequence; using ::testing::Invoke; using ::testing::Return; using ::testing::WithArg; class TestMockImageSyncSyncPointCreateRequest : public TestMockFixture { public: typedef SyncPointCreateRequest<librbd::MockTestImageCtx> MockSyncPointCreateRequest; void SetUp() override { TestMockFixture::SetUp(); librbd::RBD rbd; ASSERT_EQ(0, create_image(rbd, m_remote_io_ctx, m_image_name, m_image_size)); ASSERT_EQ(0, open_image(m_remote_io_ctx, m_image_name, &m_remote_image_ctx)); } void expect_get_snap_seqs(MockSyncPointHandler& mock_sync_point_handler) { EXPECT_CALL(mock_sync_point_handler, get_snap_seqs()) .WillRepeatedly(Return(librbd::SnapSeqs{})); } void expect_get_sync_points(MockSyncPointHandler& mock_sync_point_handler) { EXPECT_CALL(mock_sync_point_handler, get_sync_points()) .WillRepeatedly(Invoke([this]() { return m_sync_points; })); } void expect_update_sync_points(MockSyncPointHandler& mock_sync_point_handler, int r) { EXPECT_CALL(mock_sync_point_handler, update_sync_points(_, _, false, _)) .WillOnce(DoAll(WithArg<1>(Invoke([this, r](const SyncPoints& sync_points) { if (r >= 0) { m_sync_points = sync_points; } })), 
WithArg<3>(CompleteContext(r)))); } void expect_image_refresh(librbd::MockTestImageCtx &mock_remote_image_ctx, int r) { EXPECT_CALL(*mock_remote_image_ctx.state, refresh(_)) .WillOnce(CompleteContext(r)); } void expect_snap_create(librbd::MockTestImageCtx &mock_remote_image_ctx, int r) { EXPECT_CALL(*mock_remote_image_ctx.operations, snap_create(_, _, _, _, _)) .WillOnce(WithArg<4>(CompleteContext(r))); } MockSyncPointCreateRequest *create_request(librbd::MockTestImageCtx &mock_remote_image_ctx, MockSyncPointHandler& mock_sync_point_handler, Context *ctx) { return new MockSyncPointCreateRequest(&mock_remote_image_ctx, "uuid", &mock_sync_point_handler, ctx); } librbd::ImageCtx *m_remote_image_ctx; SyncPoints m_sync_points; }; TEST_F(TestMockImageSyncSyncPointCreateRequest, Success) { librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockSyncPointHandler mock_sync_point_handler; expect_get_snap_seqs(mock_sync_point_handler); expect_get_sync_points(mock_sync_point_handler); InSequence seq; expect_update_sync_points(mock_sync_point_handler, 0); expect_image_refresh(mock_remote_image_ctx, 0); expect_snap_create(mock_remote_image_ctx, 0); expect_image_refresh(mock_remote_image_ctx, 0); C_SaferCond ctx; MockSyncPointCreateRequest *req = create_request(mock_remote_image_ctx, mock_sync_point_handler, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_EQ(1U, m_sync_points.size()); } TEST_F(TestMockImageSyncSyncPointCreateRequest, ResyncSuccess) { m_sync_points.emplace_front(cls::rbd::UserSnapshotNamespace(), "start snap", "", boost::none); auto sync_point = m_sync_points.front(); librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockSyncPointHandler mock_sync_point_handler; expect_get_snap_seqs(mock_sync_point_handler); expect_get_sync_points(mock_sync_point_handler); InSequence seq; expect_update_sync_points(mock_sync_point_handler, 0); expect_image_refresh(mock_remote_image_ctx, 0); expect_snap_create(mock_remote_image_ctx, 0); 
expect_image_refresh(mock_remote_image_ctx, 0); C_SaferCond ctx; MockSyncPointCreateRequest *req = create_request(mock_remote_image_ctx, mock_sync_point_handler, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_EQ(2U, m_sync_points.size()); ASSERT_EQ(sync_point, m_sync_points.front()); ASSERT_EQ("start snap", m_sync_points.back().from_snap_name); } TEST_F(TestMockImageSyncSyncPointCreateRequest, SnapshotExists) { librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockSyncPointHandler mock_sync_point_handler; expect_get_snap_seqs(mock_sync_point_handler); expect_get_sync_points(mock_sync_point_handler); InSequence seq; expect_update_sync_points(mock_sync_point_handler, 0); expect_image_refresh(mock_remote_image_ctx, 0); expect_snap_create(mock_remote_image_ctx, -EEXIST); expect_update_sync_points(mock_sync_point_handler, 0); expect_image_refresh(mock_remote_image_ctx, 0); expect_snap_create(mock_remote_image_ctx, 0); expect_image_refresh(mock_remote_image_ctx, 0); C_SaferCond ctx; MockSyncPointCreateRequest *req = create_request(mock_remote_image_ctx, mock_sync_point_handler, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_EQ(1U, m_sync_points.size()); } TEST_F(TestMockImageSyncSyncPointCreateRequest, ClientUpdateError) { librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockSyncPointHandler mock_sync_point_handler; expect_get_snap_seqs(mock_sync_point_handler); expect_get_sync_points(mock_sync_point_handler); InSequence seq; expect_update_sync_points(mock_sync_point_handler, -EINVAL); C_SaferCond ctx; MockSyncPointCreateRequest *req = create_request(mock_remote_image_ctx, mock_sync_point_handler, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); ASSERT_TRUE(m_sync_points.empty()); } } // namespace image_sync } // namespace mirror } // namespace rbd
6,891
34.163265
93
cc
null
ceph-main/src/test/rbd_mirror/image_sync/test_mock_SyncPointPruneRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/rbd_mirror/test_mock_fixture.h" #include "include/rbd/librbd.hpp" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/rbd_mirror/mock/image_sync/MockSyncPointHandler.h" #include "tools/rbd_mirror/image_sync/SyncPointPruneRequest.h" namespace librbd { namespace { struct MockTestImageCtx : public librbd::MockImageCtx { explicit MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace } // namespace librbd // template definitions #include "tools/rbd_mirror/image_sync/SyncPointPruneRequest.cc" template class rbd::mirror::image_sync::SyncPointPruneRequest<librbd::MockTestImageCtx>; namespace rbd { namespace mirror { namespace image_sync { using ::testing::_; using ::testing::DoAll; using ::testing::InSequence; using ::testing::Invoke; using ::testing::Return; using ::testing::StrEq; using ::testing::WithArg; class TestMockImageSyncSyncPointPruneRequest : public TestMockFixture { public: typedef SyncPointPruneRequest<librbd::MockTestImageCtx> MockSyncPointPruneRequest; void SetUp() override { TestMockFixture::SetUp(); librbd::RBD rbd; ASSERT_EQ(0, create_image(rbd, m_remote_io_ctx, m_image_name, m_image_size)); ASSERT_EQ(0, open_image(m_remote_io_ctx, m_image_name, &m_remote_image_ctx)); } void expect_get_snap_seqs(MockSyncPointHandler& mock_sync_point_handler) { EXPECT_CALL(mock_sync_point_handler, get_snap_seqs()) .WillRepeatedly(Return(librbd::SnapSeqs{})); } void expect_get_sync_points(MockSyncPointHandler& mock_sync_point_handler) { EXPECT_CALL(mock_sync_point_handler, get_sync_points()) .WillRepeatedly(Invoke([this]() { return m_sync_points; })); } void expect_update_sync_points(MockSyncPointHandler& mock_sync_point_handler, bool complete, int r) { EXPECT_CALL(mock_sync_point_handler, update_sync_points(_, _, complete, _)) 
.WillOnce(DoAll(WithArg<1>(Invoke([this, r](const SyncPoints& sync_points) { if (r >= 0) { m_sync_points = sync_points; } })), WithArg<3>(CompleteContext(r)))); } void expect_get_snap_id(librbd::MockTestImageCtx &mock_remote_image_ctx, const std::string &snap_name, uint64_t snap_id) { EXPECT_CALL(mock_remote_image_ctx, get_snap_id(_, StrEq(snap_name))) .WillOnce(Return(snap_id)); } void expect_image_refresh(librbd::MockTestImageCtx &mock_remote_image_ctx, int r) { EXPECT_CALL(*mock_remote_image_ctx.state, refresh(_)) .WillOnce(CompleteContext(r)); } void expect_snap_remove(librbd::MockTestImageCtx &mock_remote_image_ctx, const std::string &snap_name, int r) { EXPECT_CALL(*mock_remote_image_ctx.operations, snap_remove(_, StrEq(snap_name), _)) .WillOnce(WithArg<2>(CompleteContext(r))); } MockSyncPointPruneRequest *create_request(librbd::MockTestImageCtx &mock_remote_image_ctx, MockSyncPointHandler& mock_sync_point_handler, bool sync_complete, Context *ctx) { return new MockSyncPointPruneRequest(&mock_remote_image_ctx, sync_complete, &mock_sync_point_handler, ctx); } librbd::ImageCtx *m_remote_image_ctx; SyncPoints m_sync_points; }; TEST_F(TestMockImageSyncSyncPointPruneRequest, SyncInProgressSuccess) { m_sync_points.emplace_front(cls::rbd::UserSnapshotNamespace(), "snap1", "", boost::none); auto sync_points = m_sync_points; librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockSyncPointHandler mock_sync_point_handler; expect_get_snap_seqs(mock_sync_point_handler); expect_get_sync_points(mock_sync_point_handler); InSequence seq; expect_get_snap_id(mock_remote_image_ctx, "snap1", 123); expect_image_refresh(mock_remote_image_ctx, 0); expect_update_sync_points(mock_sync_point_handler, false, 0); C_SaferCond ctx; MockSyncPointPruneRequest *req = create_request(mock_remote_image_ctx, mock_sync_point_handler, false, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_EQ(sync_points, m_sync_points); } TEST_F(TestMockImageSyncSyncPointPruneRequest, 
RestartedSyncInProgressSuccess) { m_sync_points.emplace_front(cls::rbd::UserSnapshotNamespace(), "snap2", "snap1", boost::none); m_sync_points.emplace_front(cls::rbd::UserSnapshotNamespace(), "snap1", "", boost::none); auto sync_points = m_sync_points; librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockSyncPointHandler mock_sync_point_handler; expect_get_snap_seqs(mock_sync_point_handler); expect_get_sync_points(mock_sync_point_handler); InSequence seq; expect_get_snap_id(mock_remote_image_ctx, "snap1", 123); expect_snap_remove(mock_remote_image_ctx, "snap2", 0); expect_image_refresh(mock_remote_image_ctx, 0); expect_update_sync_points(mock_sync_point_handler, false, 0); C_SaferCond ctx; MockSyncPointPruneRequest *req = create_request(mock_remote_image_ctx, mock_sync_point_handler, false, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); sync_points.pop_back(); ASSERT_EQ(sync_points, m_sync_points); } TEST_F(TestMockImageSyncSyncPointPruneRequest, SyncInProgressMissingSnapSuccess) { m_sync_points.emplace_front(cls::rbd::UserSnapshotNamespace(), "snap2", "snap1", boost::none); m_sync_points.emplace_front(cls::rbd::UserSnapshotNamespace(), "snap1", "", boost::none); librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockSyncPointHandler mock_sync_point_handler; expect_get_snap_seqs(mock_sync_point_handler); expect_get_sync_points(mock_sync_point_handler); InSequence seq; expect_get_snap_id(mock_remote_image_ctx, "snap1", CEPH_NOSNAP); expect_snap_remove(mock_remote_image_ctx, "snap2", 0); expect_snap_remove(mock_remote_image_ctx, "snap1", 0); expect_image_refresh(mock_remote_image_ctx, 0); expect_update_sync_points(mock_sync_point_handler, false, 0); C_SaferCond ctx; MockSyncPointPruneRequest *req = create_request(mock_remote_image_ctx, mock_sync_point_handler, false, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_EQ(SyncPoints{}, m_sync_points); } TEST_F(TestMockImageSyncSyncPointPruneRequest, 
SyncInProgressUnexpectedFromSnapSuccess) { m_sync_points.emplace_front(cls::rbd::UserSnapshotNamespace(), "snap2", "snap1", boost::none); librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockSyncPointHandler mock_sync_point_handler; expect_get_snap_seqs(mock_sync_point_handler); expect_get_sync_points(mock_sync_point_handler); InSequence seq; expect_get_snap_id(mock_remote_image_ctx, "snap2", 124); expect_snap_remove(mock_remote_image_ctx, "snap2", 0); expect_snap_remove(mock_remote_image_ctx, "snap1", 0); expect_image_refresh(mock_remote_image_ctx, 0); expect_update_sync_points(mock_sync_point_handler, false, 0); C_SaferCond ctx; MockSyncPointPruneRequest *req = create_request(mock_remote_image_ctx, mock_sync_point_handler, false, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_EQ(SyncPoints(), m_sync_points); } TEST_F(TestMockImageSyncSyncPointPruneRequest, SyncCompleteSuccess) { m_sync_points.emplace_front(cls::rbd::UserSnapshotNamespace(), "snap1", "", boost::none); librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockSyncPointHandler mock_sync_point_handler; expect_get_snap_seqs(mock_sync_point_handler); expect_get_sync_points(mock_sync_point_handler); InSequence seq; expect_snap_remove(mock_remote_image_ctx, "snap1", 0); expect_image_refresh(mock_remote_image_ctx, 0); expect_update_sync_points(mock_sync_point_handler, true, 0); C_SaferCond ctx; MockSyncPointPruneRequest *req = create_request(mock_remote_image_ctx, mock_sync_point_handler, true, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_TRUE(m_sync_points.empty()); } TEST_F(TestMockImageSyncSyncPointPruneRequest, RestartedSyncCompleteSuccess) { m_sync_points.emplace_front(cls::rbd::UserSnapshotNamespace(), "snap2", "snap1", boost::none); m_sync_points.emplace_front(cls::rbd::UserSnapshotNamespace(), "snap1", "", boost::none); auto sync_points = m_sync_points; librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockSyncPointHandler 
mock_sync_point_handler; expect_get_snap_seqs(mock_sync_point_handler); expect_get_sync_points(mock_sync_point_handler); InSequence seq; expect_image_refresh(mock_remote_image_ctx, 0); expect_update_sync_points(mock_sync_point_handler, true, 0); C_SaferCond ctx; MockSyncPointPruneRequest *req = create_request(mock_remote_image_ctx, mock_sync_point_handler, true, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); sync_points.pop_front(); ASSERT_EQ(sync_points, m_sync_points); } TEST_F(TestMockImageSyncSyncPointPruneRequest, RestartedCatchUpSyncCompleteSuccess) { m_sync_points.emplace_front(cls::rbd::UserSnapshotNamespace(), "snap3", "snap2", boost::none); m_sync_points.emplace_front(cls::rbd::UserSnapshotNamespace(), "snap2", "snap1", boost::none); auto sync_points = m_sync_points; librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockSyncPointHandler mock_sync_point_handler; expect_get_snap_seqs(mock_sync_point_handler); expect_get_sync_points(mock_sync_point_handler); InSequence seq; expect_snap_remove(mock_remote_image_ctx, "snap1", 0); expect_image_refresh(mock_remote_image_ctx, 0); expect_update_sync_points(mock_sync_point_handler, true, 0); C_SaferCond ctx; MockSyncPointPruneRequest *req = create_request(mock_remote_image_ctx, mock_sync_point_handler, true, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); sync_points.pop_front(); ASSERT_EQ(sync_points, m_sync_points); } TEST_F(TestMockImageSyncSyncPointPruneRequest, SnapshotDNE) { m_sync_points.emplace_front(cls::rbd::UserSnapshotNamespace(), "snap1", "", boost::none); librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockSyncPointHandler mock_sync_point_handler; expect_get_snap_seqs(mock_sync_point_handler); expect_get_sync_points(mock_sync_point_handler); InSequence seq; expect_snap_remove(mock_remote_image_ctx, "snap1", -ENOENT); expect_image_refresh(mock_remote_image_ctx, 0); expect_update_sync_points(mock_sync_point_handler, true, 0); C_SaferCond ctx; 
MockSyncPointPruneRequest *req = create_request(mock_remote_image_ctx, mock_sync_point_handler, true, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_TRUE(m_sync_points.empty()); } TEST_F(TestMockImageSyncSyncPointPruneRequest, ClientUpdateError) { m_sync_points.emplace_front(cls::rbd::UserSnapshotNamespace(), "snap2", "snap1", boost::none); m_sync_points.emplace_front(cls::rbd::UserSnapshotNamespace(), "snap1", "", boost::none); auto sync_points = m_sync_points; librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx); MockSyncPointHandler mock_sync_point_handler; expect_get_snap_seqs(mock_sync_point_handler); expect_get_sync_points(mock_sync_point_handler); InSequence seq; expect_image_refresh(mock_remote_image_ctx, 0); expect_update_sync_points(mock_sync_point_handler, true, -EINVAL); C_SaferCond ctx; MockSyncPointPruneRequest *req = create_request(mock_remote_image_ctx, mock_sync_point_handler, true, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); ASSERT_EQ(sync_points, m_sync_points); } } // namespace image_sync } // namespace mirror } // namespace rbd
13,207
36.954023
92
cc
null
ceph-main/src/test/rbd_mirror/mock/MockBaseRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_MOCK_BASE_REQUEST_H #define CEPH_MOCK_BASE_REQUEST_H #include "tools/rbd_mirror/BaseRequest.h" #include <gmock/gmock.h> struct Context; namespace rbd { namespace mirror { struct MockBaseRequest : public BaseRequest { MockBaseRequest() : BaseRequest(nullptr) {} Context* on_finish = nullptr; MOCK_METHOD0(send, void()); }; } // namespace mirror } // namespace rbd #endif // CEPH_MOCK_BASE_REQUEST_H
524
18.444444
70
h
null
ceph-main/src/test/rbd_mirror/mock/MockContextWQ.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_MOCK_CONTEXT_WQ_H #define CEPH_MOCK_CONTEXT_WQ_H #include <gmock/gmock.h> struct Context; struct MockContextWQ { void queue(Context *ctx) { queue(ctx, 0); } MOCK_METHOD2(queue, void(Context *, int)); }; #endif // CEPH_MOCK_CONTEXT_WQ_H
362
18.105263
70
h
null
ceph-main/src/test/rbd_mirror/mock/MockSafeTimer.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_MOCK_SAFE_TIMER_H #define CEPH_MOCK_SAFE_TIMER_H #include <gmock/gmock.h> struct Context; struct MockSafeTimer { MOCK_METHOD2(add_event_after, Context*(double, Context*)); MOCK_METHOD1(cancel_event, bool(Context *)); }; #endif // CEPH_MOCK_SAFE_TIMER_H
373
21
70
h
null
ceph-main/src/test/rbd_mirror/mock/image_sync/MockSyncPointHandler.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_MOCK_IMAGE_SYNC_SYNC_POINT_HANDLER_H #define CEPH_MOCK_IMAGE_SYNC_SYNC_POINT_HANDLER_H #include "tools/rbd_mirror/image_sync/Types.h" #include <gmock/gmock.h> struct Context; namespace rbd { namespace mirror { namespace image_sync { struct MockSyncPointHandler : public SyncPointHandler{ MOCK_CONST_METHOD0(get_sync_points, SyncPoints()); MOCK_CONST_METHOD0(get_snap_seqs, librbd::SnapSeqs()); MOCK_METHOD4(update_sync_points, void(const librbd::SnapSeqs&, const SyncPoints&, bool, Context*)); }; } // namespace image_sync } // namespace mirror } // namespace rbd #endif // CEPH_MOCK_IMAGE_SYNC_SYNC_POINT_HANDLER_H
821
26.4
70
h
null
ceph-main/src/test/rbd_mirror/pool_watcher/test_mock_RefreshImagesRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/rbd_mirror/test_mock_fixture.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "test/librados_test_stub/MockTestMemRadosClient.h" #include "test/librbd/mock/MockImageCtx.h" #include "tools/rbd_mirror/pool_watcher/RefreshImagesRequest.h" #include "include/stringify.h" namespace librbd { namespace { struct MockTestImageCtx : public librbd::MockImageCtx { explicit MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { } }; } // anonymous namespace } // namespace librbd // template definitions #include "tools/rbd_mirror/pool_watcher/RefreshImagesRequest.cc" template class rbd::mirror::pool_watcher::RefreshImagesRequest<librbd::MockTestImageCtx>; namespace rbd { namespace mirror { namespace pool_watcher { using ::testing::_; using ::testing::DoAll; using ::testing::InSequence; using ::testing::Invoke; using ::testing::Return; using ::testing::StrEq; using ::testing::WithArg; class TestMockPoolWatcherRefreshImagesRequest : public TestMockFixture { public: typedef RefreshImagesRequest<librbd::MockTestImageCtx> MockRefreshImagesRequest; void expect_mirror_image_list(librados::IoCtx &io_ctx, const std::map<std::string, std::string> &ids, int r) { bufferlist bl; encode(ids, bl); EXPECT_CALL(get_mock_io_ctx(io_ctx), exec(RBD_MIRRORING, _, StrEq("rbd"), StrEq("mirror_image_list"), _, _, _, _)) .WillOnce(DoAll(WithArg<5>(Invoke([bl](bufferlist *out_bl) { *out_bl = bl; })), Return(r))); } }; TEST_F(TestMockPoolWatcherRefreshImagesRequest, Success) { InSequence seq; expect_mirror_image_list(m_remote_io_ctx, {{"local id", "global id"}}, 0); C_SaferCond ctx; ImageIds image_ids; MockRefreshImagesRequest *req = new MockRefreshImagesRequest( m_remote_io_ctx, &image_ids, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); ImageIds expected_image_ids = {{"global id", "local id"}}; ASSERT_EQ(expected_image_ids, image_ids); } 
TEST_F(TestMockPoolWatcherRefreshImagesRequest, LargeDirectory) { InSequence seq; std::map<std::string, std::string> mirror_list; ImageIds expected_image_ids; for (uint32_t idx = 1; idx <= 1024; ++idx) { mirror_list.insert(std::make_pair("local id " + stringify(idx), "global id " + stringify(idx))); expected_image_ids.insert({{"global id " + stringify(idx), "local id " + stringify(idx)}}); } expect_mirror_image_list(m_remote_io_ctx, mirror_list, 0); expect_mirror_image_list(m_remote_io_ctx, {{"local id", "global id"}}, 0); C_SaferCond ctx; ImageIds image_ids; MockRefreshImagesRequest *req = new MockRefreshImagesRequest( m_remote_io_ctx, &image_ids, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); expected_image_ids.insert({"global id", "local id"}); ASSERT_EQ(expected_image_ids, image_ids); } TEST_F(TestMockPoolWatcherRefreshImagesRequest, MirrorImageListError) { InSequence seq; expect_mirror_image_list(m_remote_io_ctx, {}, -EINVAL); C_SaferCond ctx; ImageIds image_ids; MockRefreshImagesRequest *req = new MockRefreshImagesRequest( m_remote_io_ctx, &image_ids, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } } // namespace pool_watcher } // namespace mirror } // namespace rbd
3,595
29.474576
89
cc