Search is not available for this dataset
repo stringlengths 2 152 ⌀ | file stringlengths 15 239 | code stringlengths 0 58.4M | file_length int64 0 58.4M | avg_line_length float64 0 1.81M | max_line_length int64 0 12.7M | extension_type stringclasses 364 values |
|---|---|---|---|---|---|---|
null | ceph-main/src/test/journal/test_JournalRecorder.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/JournalRecorder.h"
#include "journal/Entry.h"
#include "journal/JournalMetadata.h"
#include "test/journal/RadosTestFixture.h"
#include <limits>
#include <list>
#include <memory>
class TestJournalRecorder : public RadosTestFixture {
public:
  // unique_ptr whose custom deleter synchronously shuts the recorder down
  // before destroying it, so no test leaks an in-flight append
  using JournalRecorderPtr = std::unique_ptr<journal::JournalRecorder,
                                             std::function<void(journal::JournalRecorder*)>>;
  // Create a recorder for journal <oid>. Batch options are set to
  // (flush_interval=0, flush_bytes=uint32 max, flush_age=0) — TODO confirm
  // against set_append_batch_options() which thresholds are disabled by 0.
  JournalRecorderPtr create_recorder(
      const std::string &oid, const ceph::ref_t<journal::JournalMetadata>& metadata) {
    JournalRecorderPtr recorder{
      new journal::JournalRecorder(m_ioctx, oid + ".", metadata, 0),
      [](journal::JournalRecorder* recorder) {
        // block until shut_down completes before deleting the recorder
        C_SaferCond cond;
        recorder->shut_down(&cond);
        cond.wait();
        delete recorder;
      }
    };
    recorder->set_append_batch_options(0, std::numeric_limits<uint32_t>::max(), 0);
    return recorder;
  }
};
TEST_F(TestJournalRecorder, Append) {
  // set up a journal with one registered client
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid, 12, 2));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  JournalRecorderPtr recorder = create_recorder(oid, metadata);
  // a single append must flush successfully
  journal::Future future = recorder->append(123, create_payload("payload"));
  C_SaferCond flush_ctx;
  future.flush(&flush_ctx);
  ASSERT_EQ(0, flush_ctx.wait());
}
TEST_F(TestJournalRecorder, AppendKnownOverflow) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid, 12, 2));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ASSERT_EQ(0U, metadata->get_active_set());
  JournalRecorderPtr recorder = create_recorder(oid, metadata);
  // completely fill the current object (payload + fixed header == object size) ...
  recorder->append(123, create_payload(std::string(metadata->get_object_size() -
                                                   journal::Entry::get_fixed_size(), '1')));
  // ... so the next append is known up-front to overflow into the next set
  journal::Future future2 = recorder->append(123, create_payload(std::string(1, '2')));
  C_SaferCond cond;
  future2.flush(&cond);
  ASSERT_EQ(0, cond.wait());
  // the overflow must have advanced the active object set
  ASSERT_EQ(1U, metadata->get_active_set());
}
TEST_F(TestJournalRecorder, AppendDelayedOverflow) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid, 12, 2));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ASSERT_EQ(0U, metadata->get_active_set());
  // two independent recorders append to the same journal
  JournalRecorderPtr recorder1 = create_recorder(oid, metadata);
  JournalRecorderPtr recorder2 = create_recorder(oid, metadata);
  // recorder1 consumes part of the object first
  recorder1->append(234, create_payload(std::string(1, '1')));
  // recorder2 writes a full-object-sized entry; combined with recorder1's
  // entry the object is over capacity, so overflow is detected later
  // ("delayed") rather than at append time — TODO confirm exact trigger
  recorder2->append(123, create_payload(std::string(metadata->get_object_size() -
                                                    journal::Entry::get_fixed_size(), '2')));
  journal::Future future = recorder2->append(123, create_payload(std::string(1, '3')));
  C_SaferCond cond;
  future.flush(&cond);
  ASSERT_EQ(0, cond.wait());
  // the active set must have advanced past the overflowed object
  ASSERT_EQ(1U, metadata->get_active_set());
}
TEST_F(TestJournalRecorder, FutureFlush) {
  // journal + registered client setup
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid, 12, 2));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  JournalRecorderPtr recorder = create_recorder(oid, metadata);
  // queue two entries; flushing the later future must complete the earlier
  // one as well, since entries are written in order
  journal::Future first = recorder->append(123, create_payload("payload1"));
  journal::Future second = recorder->append(123, create_payload("payload2"));
  C_SaferCond flush_ctx;
  second.flush(&flush_ctx);
  ASSERT_EQ(0, flush_ctx.wait());
  ASSERT_TRUE(first.is_complete());
  ASSERT_TRUE(second.is_complete());
}
TEST_F(TestJournalRecorder, Flush) {
  // journal + registered client setup
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid, 12, 2));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  JournalRecorderPtr recorder = create_recorder(oid, metadata);
  // queue two entries and flush the recorder as a whole
  journal::Future first = recorder->append(123, create_payload("payload1"));
  journal::Future second = recorder->append(123, create_payload("payload2"));
  C_SaferCond flush_ctx;
  recorder->flush(&flush_ctx);
  ASSERT_EQ(0, flush_ctx.wait());
  // waiting on the later future must succeed and both must be complete
  C_SaferCond wait_ctx;
  second.wait(&wait_ctx);
  ASSERT_EQ(0, wait_ctx.wait());
  ASSERT_TRUE(first.is_complete());
  ASSERT_TRUE(second.is_complete());
}
TEST_F(TestJournalRecorder, OverflowCommitObjectNumber) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid, 12, 2));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ASSERT_EQ(0U, metadata->get_active_set());
  JournalRecorderPtr recorder = create_recorder(oid, metadata);
  // fill object 0 completely with an entry for tag 123
  recorder->append(123, create_payload(std::string(metadata->get_object_size() -
                                                   journal::Entry::get_fixed_size(), '1')));
  // this append for tag 124 overflows into the next object set
  journal::Future future2 = recorder->append(124, create_payload(std::string(1, '2')));
  C_SaferCond cond;
  future2.flush(&cond);
  ASSERT_EQ(0, cond.wait());
  ASSERT_EQ(1U, metadata->get_active_set());
  uint64_t object_num;
  uint64_t tag_tid;
  uint64_t entry_tid;
  // commit tid 1: the first entry landed in object 0
  metadata->get_commit_entry(1, &object_num, &tag_tid, &entry_tid);
  ASSERT_EQ(0U, object_num);
  ASSERT_EQ(123U, tag_tid);
  ASSERT_EQ(0U, entry_tid);
  // commit tid 2: after overflow the entry records object 2 — the first
  // object of set 1 with a splay width of 2
  metadata->get_commit_entry(2, &object_num, &tag_tid, &entry_tid);
  ASSERT_EQ(2U, object_num);
  ASSERT_EQ(124U, tag_tid);
  ASSERT_EQ(0U, entry_tid);
}
| 5,450 | 30.148571 | 93 | cc |
null | ceph-main/src/test/journal/test_JournalTrimmer.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/JournalTrimmer.h"
#include "journal/JournalMetadata.h"
#include "include/stringify.h"
#include "test/journal/RadosTestFixture.h"
#include <limits>
#include <list>
class TestJournalTrimmer : public RadosTestFixture {
public:
void TearDown() override {
for (MetadataList::iterator it = m_metadata_list.begin();
it != m_metadata_list.end(); ++it) {
(*it)->remove_listener(&m_listener);
}
m_metadata_list.clear();
for (std::list<journal::JournalTrimmer*>::iterator it = m_trimmers.begin();
it != m_trimmers.end(); ++it) {
C_SaferCond ctx;
(*it)->shut_down(&ctx);
ASSERT_EQ(0, ctx.wait());
delete *it;
}
RadosTestFixture::TearDown();
}
int append_payload(const ceph::ref_t<journal::JournalMetadata>& metadata,
const std::string &oid, uint64_t object_num,
const std::string &payload, uint64_t *commit_tid) {
int r = append(oid + "." + stringify(object_num), create_payload(payload));
uint64_t tid = metadata->allocate_commit_tid(object_num, 234, 123);
if (commit_tid != NULL) {
*commit_tid = tid;
}
return r;
}
auto create_metadata(const std::string &oid) {
auto metadata = RadosTestFixture::create_metadata(oid);
m_metadata_list.push_back(metadata);
metadata->add_listener(&m_listener);
return metadata;
}
journal::JournalTrimmer *create_trimmer(const std::string &oid,
const ceph::ref_t<journal::JournalMetadata>& metadata) {
journal::JournalTrimmer *trimmer(new journal::JournalTrimmer(
m_ioctx, oid + ".", metadata));
m_trimmers.push_back(trimmer);
return trimmer;
}
int assert_exists(const std::string &oid) {
librados::ObjectWriteOperation op;
op.assert_exists();
return m_ioctx.operate(oid, &op);
}
typedef std::list<ceph::ref_t<journal::JournalMetadata>> MetadataList;
MetadataList m_metadata_list;
std::list<journal::JournalTrimmer*> m_trimmers;
};
TEST_F(TestJournalTrimmer, Committed) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid, 12, 2));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ASSERT_TRUE(wait_for_update(metadata));
  // widen the journal to object sets [0, 10]
  ASSERT_EQ(0, metadata->set_active_set(10));
  ASSERT_TRUE(wait_for_update(metadata));
  uint64_t commit_tid1;
  uint64_t commit_tid2;
  uint64_t commit_tid3;
  uint64_t commit_tid4;
  uint64_t commit_tid5;
  uint64_t commit_tid6;
  // two payloads each in objects 0, 4 and 5
  ASSERT_EQ(0, append_payload(metadata, oid, 0, "payload", &commit_tid1));
  ASSERT_EQ(0, append_payload(metadata, oid, 4, "payload", &commit_tid2));
  ASSERT_EQ(0, append_payload(metadata, oid, 5, "payload", &commit_tid3));
  ASSERT_EQ(0, append_payload(metadata, oid, 0, "payload", &commit_tid4));
  ASSERT_EQ(0, append_payload(metadata, oid, 4, "payload", &commit_tid5));
  ASSERT_EQ(0, append_payload(metadata, oid, 5, "payload", &commit_tid6));
  journal::JournalTrimmer *trimmer = create_trimmer(oid, metadata);
  // commit out of order; the trimmer must still converge
  trimmer->committed(commit_tid4);
  trimmer->committed(commit_tid6);
  trimmer->committed(commit_tid2);
  trimmer->committed(commit_tid5);
  trimmer->committed(commit_tid3);
  trimmer->committed(commit_tid1);
  // wait until trimming has advanced the minimum set to 2
  while (metadata->get_minimum_set() != 2U) {
    ASSERT_TRUE(wait_for_update(metadata));
  }
  // objects below set 2 are removed; object 5 (in set 2) survives
  ASSERT_EQ(-ENOENT, assert_exists(oid + ".0"));
  ASSERT_EQ(-ENOENT, assert_exists(oid + ".2"));
  ASSERT_EQ(0, assert_exists(oid + ".5"));
}
TEST_F(TestJournalTrimmer, CommittedWithOtherClient) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid, 12, 2));
  ASSERT_EQ(0, client_register(oid));
  // a second client that never commits should pin the trim position
  ASSERT_EQ(0, client_register(oid, "client2", "slow client"));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ASSERT_TRUE(wait_for_update(metadata));
  ASSERT_EQ(0, metadata->set_active_set(10));
  ASSERT_TRUE(wait_for_update(metadata));
  uint64_t commit_tid1;
  uint64_t commit_tid2;
  uint64_t commit_tid3;
  uint64_t commit_tid4;
  ASSERT_EQ(0, append_payload(metadata, oid, 0, "payload", &commit_tid1));
  ASSERT_EQ(0, append_payload(metadata, oid, 2, "payload", &commit_tid2));
  ASSERT_EQ(0, append_payload(metadata, oid, 3, "payload", &commit_tid3));
  ASSERT_EQ(0, append_payload(metadata, oid, 5, "payload", &commit_tid4));
  journal::JournalTrimmer *trimmer = create_trimmer(oid, metadata);
  trimmer->committed(commit_tid1);
  trimmer->committed(commit_tid2);
  trimmer->committed(commit_tid3);
  trimmer->committed(commit_tid4);
  ASSERT_TRUE(wait_for_update(metadata));
  // nothing may be trimmed while the slow client lags behind
  ASSERT_EQ(0, assert_exists(oid + ".0"));
  ASSERT_EQ(0, assert_exists(oid + ".2"));
  ASSERT_EQ(0, assert_exists(oid + ".3"));
  ASSERT_EQ(0, assert_exists(oid + ".5"));
}
TEST_F(TestJournalTrimmer, RemoveObjects) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid, 12, 2));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ASSERT_TRUE(wait_for_update(metadata));
  ASSERT_EQ(0, metadata->set_active_set(10));
  ASSERT_TRUE(wait_for_update(metadata));
  // populate a handful of data objects spread across the sets
  for (const auto* suffix : {".0", ".2", ".3", ".5"}) {
    ASSERT_EQ(0, append(oid + suffix, create_payload("payload")));
  }
  journal::JournalTrimmer *trimmer = create_trimmer(oid, metadata);
  // non-forced removal succeeds since we are the only registered client
  C_SaferCond cond;
  trimmer->remove_objects(false, &cond);
  ASSERT_EQ(0, cond.wait());
  ASSERT_TRUE(wait_for_update(metadata));
  // every data object must now be gone
  for (const auto* suffix : {".0", ".2", ".3", ".5"}) {
    ASSERT_EQ(-ENOENT, assert_exists(oid + suffix));
  }
}
TEST_F(TestJournalTrimmer, RemoveObjectsWithOtherClient) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid, 12, 2));
  ASSERT_EQ(0, client_register(oid));
  ASSERT_EQ(0, client_register(oid, "client2", "other client"));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ASSERT_TRUE(wait_for_update(metadata));
  journal::JournalTrimmer *trimmer = create_trimmer(oid, metadata);
  // removal is refused while another client is registered ...
  C_SaferCond busy_ctx;
  trimmer->remove_objects(false, &busy_ctx);
  ASSERT_EQ(-EBUSY, busy_ctx.wait());
  // ... unless the force flag is supplied
  C_SaferCond force_ctx;
  trimmer->remove_objects(true, &force_ctx);
  ASSERT_EQ(0, force_ctx.wait());
}
| 6,515 | 31.909091 | 98 | cc |
null | ceph-main/src/test/journal/test_Journaler.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/stringify.h"
#include "journal/Journaler.h"
#include "journal/Settings.h"
#include "test/librados/test.h"
#include "test/journal/RadosTestFixture.h"
#include "gtest/gtest.h"
// reinclude our assert to clobber the system one
#include "include/ceph_assert.h"
class TestJournaler : public RadosTestFixture {
public:
  static const std::string CLIENT_ID;
  // Each test gets its own journal id so the tests are isolated.
  static std::string get_temp_journal_id() {
    return stringify(++_journal_id);
  }
  void SetUp() override {
    RadosTestFixture::SetUp();
    m_journal_id = get_temp_journal_id();
    m_journaler = new journal::Journaler(m_work_queue, m_timer, &m_timer_lock,
                                         m_ioctx, m_journal_id, CLIENT_ID, {},
                                         nullptr);
  }
  void TearDown() override {
    // tests that init the journaler are expected to shut it down first
    delete m_journaler;
    RadosTestFixture::TearDown();
  }
  // Create the journal backing objects; returns the async result.
  int create_journal(uint8_t order, uint8_t splay_width) {
    C_SaferCond cond;
    m_journaler->create(order, splay_width, -1, &cond);
    return cond.wait();
  }
  int init_journaler() {
    C_SaferCond cond;
    m_journaler->init(&cond);
    return cond.wait();
  }
  int shut_down_journaler() {
    C_SaferCond ctx;
    m_journaler->shut_down(&ctx);
    return ctx.wait();
  }
  // Register <client_id> with <desc> as its opaque data, using a temporary
  // journaler instance for that client.
  int register_client(const std::string &client_id, const std::string &desc) {
    journal::Journaler journaler(m_work_queue, m_timer, &m_timer_lock, m_ioctx,
                                 m_journal_id, client_id, {}, nullptr);
    bufferlist data;
    data.append(desc);
    C_SaferCond cond;
    journaler.register_client(data, &cond);
    return cond.wait();
  }
  // Replace the opaque data of an already-registered client.
  int update_client(const std::string &client_id, const std::string &desc) {
    journal::Journaler journaler(m_work_queue, m_timer, &m_timer_lock, m_ioctx,
                                 m_journal_id, client_id, {}, nullptr);
    bufferlist data;
    data.append(desc);
    C_SaferCond cond;
    journaler.update_client(data, &cond);
    return cond.wait();
  }
  int unregister_client(const std::string &client_id) {
    journal::Journaler journaler(m_work_queue, m_timer, &m_timer_lock, m_ioctx,
                                 m_journal_id, client_id, {}, nullptr);
    C_SaferCond cond;
    journaler.unregister_client(&cond);
    return cond.wait();
  }
  // monotonically increasing id shared by all tests in this binary
  static uint64_t _journal_id;
  std::string m_journal_id;
  journal::Journaler *m_journaler;
};
// static member definitions for the TestJournaler fixture
const std::string TestJournaler::CLIENT_ID = "client1";
uint64_t TestJournaler::_journal_id = 0;
// creating a fresh journal succeeds
TEST_F(TestJournaler, Create) {
  ASSERT_EQ(0, create_journal(12, 8));
}
// re-creating an existing journal fails with -EEXIST
TEST_F(TestJournaler, CreateDuplicate) {
  ASSERT_EQ(0, create_journal(12, 8));
  ASSERT_EQ(-EEXIST, create_journal(12, 8));
}
TEST_F(TestJournaler, CreateInvalidParams) {
  // object order outside the accepted range is rejected with -EDOM
  ASSERT_EQ(-EDOM, create_journal(1, 8));
  ASSERT_EQ(-EDOM, create_journal(123, 8));
  // a zero splay width is rejected with -EINVAL
  ASSERT_EQ(-EINVAL, create_journal(12, 0));
}
// a journaler can be initialized once a client is registered
TEST_F(TestJournaler, Init) {
  ASSERT_EQ(0, create_journal(12, 8));
  ASSERT_EQ(0, register_client(CLIENT_ID, "foo"));
  ASSERT_EQ(0, init_journaler());
  ASSERT_EQ(0, shut_down_journaler());
}
// initializing against a non-existent journal fails with -ENOENT
TEST_F(TestJournaler, InitDNE) {
  ASSERT_EQ(-ENOENT, init_journaler());
  ASSERT_EQ(0, shut_down_journaler());
}
// registering the same client id twice fails with -EEXIST
TEST_F(TestJournaler, RegisterClientDuplicate) {
  ASSERT_EQ(0, create_journal(12, 8));
  ASSERT_EQ(0, register_client(CLIENT_ID, "foo"));
  ASSERT_EQ(-EEXIST, register_client(CLIENT_ID, "foo2"));
}
// a registered client's opaque data can be updated
TEST_F(TestJournaler, UpdateClient) {
  ASSERT_EQ(0, create_journal(12, 8));
  ASSERT_EQ(0, register_client(CLIENT_ID, "foo"));
  ASSERT_EQ(0, update_client(CLIENT_ID, "foo2"));
}
// updating an unregistered client fails with -ENOENT
TEST_F(TestJournaler, UpdateClientDNE) {
  ASSERT_EQ(0, create_journal(12, 8));
  ASSERT_EQ(-ENOENT, update_client(CLIENT_ID, "foo"));
}
// unregistering removes the client and allows re-registration
TEST_F(TestJournaler, UnregisterClient) {
  ASSERT_EQ(0, create_journal(12, 8));
  ASSERT_EQ(0, register_client(CLIENT_ID, "foo"));
  ASSERT_EQ(0, unregister_client(CLIENT_ID));
  // Test it does not exist and can be registered again
  ASSERT_EQ(-ENOENT, update_client(CLIENT_ID, "foo"));
  ASSERT_EQ(0, register_client(CLIENT_ID, "foo"));
}
// unregistering an unknown client fails with -ENOENT
TEST_F(TestJournaler, UnregisterClientDNE) {
  ASSERT_EQ(0, create_journal(12, 8));
  ASSERT_EQ(-ENOENT, unregister_client(CLIENT_ID));
}
// tags can be allocated both with a new and an existing tag class
TEST_F(TestJournaler, AllocateTag) {
  ASSERT_EQ(0, create_journal(12, 8));
  cls::journal::Tag tag;
  bufferlist data;
  data.append(std::string(128, '1'));
  // allocate a new tag class
  C_SaferCond ctx1;
  m_journaler->allocate_tag(data, &tag, &ctx1);
  ASSERT_EQ(0, ctx1.wait());
  // first tag: tid 0, class 0, carrying the supplied data
  ASSERT_EQ(cls::journal::Tag(0, 0, data), tag);
  // re-use an existing tag class
  C_SaferCond ctx2;
  m_journaler->allocate_tag(tag.tag_class, bufferlist(), &tag, &ctx2);
  ASSERT_EQ(0, ctx2.wait());
  // second tag: tid 1, same class, empty data
  ASSERT_EQ(cls::journal::Tag(1, 0, bufferlist()), tag);
}
TEST_F(TestJournaler, GetTags) {
  ASSERT_EQ(0, create_journal(12, 8));
  ASSERT_EQ(0, register_client(CLIENT_ID, "foo"));
  std::list<cls::journal::Tag> expected_tags;
  for (size_t i = 0; i < 256; ++i) {
    C_SaferCond ctx;
    cls::journal::Tag tag;
    if (i < 2) {
      // the first two allocations create tag classes 0 and 1
      m_journaler->allocate_tag(bufferlist(), &tag, &ctx);
    } else {
      // subsequent allocations alternate between the two classes
      m_journaler->allocate_tag(i % 2, bufferlist(), &tag, &ctx);
    }
    ASSERT_EQ(0, ctx.wait());
    if (i % 2 == 0) {
      // even iterations land in tag class 0, which is queried below
      expected_tags.push_back(tag);
    }
  }
  std::list<cls::journal::Tag> tags;
  C_SaferCond ctx;
  // fetch all tags belonging to tag class 0 and compare
  m_journaler->get_tags(0, &tags, &ctx);
  ASSERT_EQ(0, ctx.wait());
  ASSERT_EQ(expected_tags, tags);
}
| 5,506 | 26.673367 | 79 | cc |
null | ceph-main/src/test/journal/test_ObjectPlayer.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/ObjectPlayer.h"
#include "journal/Entry.h"
#include "include/stringify.h"
#include "common/Timer.h"
#include "gtest/gtest.h"
#include "test/librados/test.h"
#include "test/journal/RadosTestFixture.h"
template <typename T>
class TestObjectPlayer : public RadosTestFixture, public T {
public:
  // Create an object player for the first (".0") data object of <oid>, using
  // the max_fetch_bytes value supplied by the type parameter T.
  auto create_object(const std::string &oid, uint8_t order) {
    auto object = ceph::make_ref<journal::ObjectPlayer>(
      m_ioctx, oid + ".", 0, *m_timer, m_timer_lock, order,
      T::max_fetch_bytes);
    return object;
  }
  // Fetch repeatedly until the player no longer requires a refetch (partial
  // fetches occur when max_fetch_bytes is smaller than the object contents).
  int fetch(const ceph::ref_t<journal::ObjectPlayer>& object_player) {
    while (true) {
      C_SaferCond ctx;
      object_player->set_refetch_state(
        journal::ObjectPlayer::REFETCH_STATE_NONE);
      object_player->fetch(&ctx);
      int r = ctx.wait();
      if (r < 0 || !object_player->refetch_required()) {
        return r;
      }
    }
    return 0;  // NOTE(review): unreachable — the loop only exits via return
  }
  // Poll (up to 50 x 0.1s watches) until the player reports <count> entries;
  // returns the first watch error, otherwise 0.
  int watch_and_wait_for_entries(const ceph::ref_t<journal::ObjectPlayer>& object_player,
                                 journal::ObjectPlayer::Entries *entries,
                                 size_t count) {
    for (size_t i = 0; i < 50; ++i) {
      object_player->get_entries(entries);
      if (entries->size() == count) {
        break;
      }
      C_SaferCond ctx;
      object_player->watch(&ctx, 0.1);
      int r = ctx.wait();
      if (r < 0) {
        return r;
      }
    }
    return 0;
  }
  // Name of the splay-offset-0 data object for <oid>.
  std::string get_object_name(const std::string &oid) {
    return oid + ".0";
  }
};
// Type parameter carrying the max_fetch_bytes setting for TestObjectPlayer.
template <uint32_t _max_fetch_bytes>
struct TestObjectPlayerParams {
  static inline const uint32_t max_fetch_bytes = _max_fetch_bytes;
};
// run every typed test with max_fetch_bytes of 0 (presumably the default /
// unbounded fetch size — confirm in ObjectPlayer) and a tiny 10-byte window
typedef ::testing::Types<TestObjectPlayerParams<0>,
                         TestObjectPlayerParams<10> > TestObjectPlayerTypes;
TYPED_TEST_SUITE(TestObjectPlayer, TestObjectPlayerTypes);
TYPED_TEST(TestObjectPlayer, Fetch) {
  std::string oid = this->get_temp_oid();
  // write two entries with distinct entry tids ...
  journal::Entry entry1(234, 123, this->create_payload(std::string(24, '1')));
  journal::Entry entry2(234, 124, this->create_payload(std::string(24, '1')));
  bufferlist bl;
  encode(entry1, bl);
  encode(entry2, bl);
  ASSERT_EQ(0, this->append(this->get_object_name(oid), bl));
  // ... and verify both are decoded, in order
  auto object = this->create_object(oid, 14);
  ASSERT_LE(0, this->fetch(object));
  journal::ObjectPlayer::Entries entries;
  object->get_entries(&entries);
  ASSERT_EQ(2U, entries.size());
  journal::ObjectPlayer::Entries expected_entries = {entry1, entry2};
  ASSERT_EQ(expected_entries, entries);
}
TYPED_TEST(TestObjectPlayer, FetchLarge) {
  std::string oid = this->get_temp_oid();
  // a near-object-sized entry followed by an empty one, forcing the player
  // to fetch across its read window
  journal::Entry entry1(234, 123,
                        this->create_payload(std::string(8192 - 32, '1')));
  journal::Entry entry2(234, 124, this->create_payload(""));
  bufferlist bl;
  encode(entry1, bl);
  encode(entry2, bl);
  ASSERT_EQ(0, this->append(this->get_object_name(oid), bl));
  auto object = this->create_object(oid, 12);
  ASSERT_LE(0, this->fetch(object));
  journal::ObjectPlayer::Entries entries;
  object->get_entries(&entries);
  ASSERT_EQ(2U, entries.size());
  journal::ObjectPlayer::Entries expected_entries = {entry1, entry2};
  ASSERT_EQ(expected_entries, entries);
}
TYPED_TEST(TestObjectPlayer, FetchDeDup) {
  std::string oid = this->get_temp_oid();
  // two entries sharing the same (tag tid, entry tid) but different payloads
  journal::Entry entry1(234, 123, this->create_payload(std::string(24, '1')));
  journal::Entry entry2(234, 123, this->create_payload(std::string(24, '2')));
  bufferlist bl;
  encode(entry1, bl);
  encode(entry2, bl);
  ASSERT_EQ(0, this->append(this->get_object_name(oid), bl));
  auto object = this->create_object(oid, 14);
  ASSERT_LE(0, this->fetch(object));
  journal::ObjectPlayer::Entries entries;
  object->get_entries(&entries);
  // duplicates are collapsed and the later entry wins
  ASSERT_EQ(1U, entries.size());
  journal::ObjectPlayer::Entries expected_entries = {entry2};
  ASSERT_EQ(expected_entries, entries);
}
TYPED_TEST(TestObjectPlayer, FetchEmpty) {
  std::string oid = this->get_temp_oid();
  // write a zero-length data object
  bufferlist empty_bl;
  ASSERT_EQ(0, this->append(this->get_object_name(oid), empty_bl));
  // fetching it must succeed and produce no entries
  auto player = this->create_object(oid, 14);
  ASSERT_EQ(0, this->fetch(player));
  ASSERT_TRUE(player->empty());
}
TYPED_TEST(TestObjectPlayer, FetchCorrupt) {
  std::string oid = this->get_temp_oid();
  journal::Entry entry1(234, 123, this->create_payload(std::string(24, '1')));
  journal::Entry entry2(234, 124, this->create_payload(std::string(24, '2')));
  bufferlist bl;
  encode(entry1, bl);
  // inject garbage between the two valid entries
  encode(this->create_payload("corruption" + std::string(1024, 'X')), bl);
  encode(entry2, bl);
  ASSERT_EQ(0, this->append(this->get_object_name(oid), bl));
  auto object = this->create_object(oid, 14);
  // the corruption is reported ...
  ASSERT_EQ(-EBADMSG, this->fetch(object));
  journal::ObjectPlayer::Entries entries;
  object->get_entries(&entries);
  // ... but entries decoded before it remain available
  ASSERT_EQ(1U, entries.size());
  journal::ObjectPlayer::Entries expected_entries = {entry1};
  ASSERT_EQ(expected_entries, entries);
}
TYPED_TEST(TestObjectPlayer, FetchAppend) {
  std::string oid = this->get_temp_oid();
  journal::Entry entry1(234, 123, this->create_payload(std::string(24, '1')));
  journal::Entry entry2(234, 124, this->create_payload(std::string(24, '2')));
  // first fetch sees only the initially-written entry
  bufferlist bl;
  encode(entry1, bl);
  ASSERT_EQ(0, this->append(this->get_object_name(oid), bl));
  auto object = this->create_object(oid, 14);
  ASSERT_LE(0, this->fetch(object));
  journal::ObjectPlayer::Entries entries;
  object->get_entries(&entries);
  ASSERT_EQ(1U, entries.size());
  journal::ObjectPlayer::Entries expected_entries = {entry1};
  ASSERT_EQ(expected_entries, entries);
  // append a second entry and re-fetch: both must now be visible
  bl.clear();
  encode(entry2, bl);
  ASSERT_EQ(0, this->append(this->get_object_name(oid), bl));
  ASSERT_LE(0, this->fetch(object));
  object->get_entries(&entries);
  ASSERT_EQ(2U, entries.size());
  expected_entries = {entry1, entry2};
  ASSERT_EQ(expected_entries, entries);
}
TYPED_TEST(TestObjectPlayer, PopEntry) {
  std::string oid = this->get_temp_oid();
  journal::Entry entry1(234, 123, this->create_payload(std::string(24, '1')));
  journal::Entry entry2(234, 124, this->create_payload(std::string(24, '1')));
  bufferlist bl;
  encode(entry1, bl);
  encode(entry2, bl);
  ASSERT_EQ(0, this->append(this->get_object_name(oid), bl));
  auto object = this->create_object(oid, 14);
  ASSERT_LE(0, this->fetch(object));
  journal::ObjectPlayer::Entries entries;
  object->get_entries(&entries);
  ASSERT_EQ(2U, entries.size());
  // front()/pop_front() must drain the entries in append order
  journal::Entry entry;
  object->front(&entry);
  object->pop_front();
  ASSERT_EQ(entry1, entry);
  object->front(&entry);
  object->pop_front();
  ASSERT_EQ(entry2, entry);
  ASSERT_TRUE(object->empty());
}
TYPED_TEST(TestObjectPlayer, Watch) {
  std::string oid = this->get_temp_oid();
  auto object = this->create_object(oid, 14);
  // a pending watch fires once data is appended to the object
  C_SaferCond cond1;
  object->watch(&cond1, 0.1);
  journal::Entry entry1(234, 123, this->create_payload(std::string(24, '1')));
  journal::Entry entry2(234, 124, this->create_payload(std::string(24, '1')));
  bufferlist bl;
  encode(entry1, bl);
  ASSERT_EQ(0, this->append(this->get_object_name(oid), bl));
  ASSERT_LE(0, cond1.wait());
  journal::ObjectPlayer::Entries entries;
  ASSERT_EQ(0, this->watch_and_wait_for_entries(object, &entries, 1U));
  ASSERT_EQ(1U, entries.size());
  journal::ObjectPlayer::Entries expected_entries;
  expected_entries = {entry1};
  ASSERT_EQ(expected_entries, entries);
  // a second watch catches the second append
  C_SaferCond cond2;
  object->watch(&cond2, 0.1);
  bl.clear();
  encode(entry2, bl);
  ASSERT_EQ(0, this->append(this->get_object_name(oid), bl));
  ASSERT_LE(0, cond2.wait());
  ASSERT_EQ(0, this->watch_and_wait_for_entries(object, &entries, 2U));
  ASSERT_EQ(2U, entries.size());
  expected_entries = {entry1, entry2};
  ASSERT_EQ(expected_entries, entries);
}
TYPED_TEST(TestObjectPlayer, Unwatch) {
  std::string oid = this->get_temp_oid();
  auto player = this->create_object(oid, 14);
  // start a long-interval watch, then cancel it shortly afterwards
  C_SaferCond watch_ctx;
  player->watch(&watch_ctx, 600);
  usleep(200000);
  player->unwatch();
  // the pending watch callback completes with -ECANCELED
  ASSERT_EQ(-ECANCELED, watch_ctx.wait());
}
| 8,088 | 27.684397 | 89 | cc |
null | ceph-main/src/test/journal/test_ObjectRecorder.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/ObjectRecorder.h"
#include "common/Cond.h"
#include "common/ceph_mutex.h"
#include "common/Timer.h"
#include "gtest/gtest.h"
#include "test/librados/test.h"
#include "test/journal/RadosTestFixture.h"
#include <limits>
using namespace std::chrono_literals;
using std::shared_ptr;
class TestObjectRecorder : public RadosTestFixture {
public:
  TestObjectRecorder() = default;
  // ObjectRecorder event handler that records close/overflow notifications
  // and wakes up waiters.
  struct Handler : public journal::ObjectRecorder::Handler {
    ceph::mutex lock = ceph::make_mutex("lock");
    ceph::mutex* object_lock = nullptr;  // lock guarding the active recorder
    ceph::condition_variable cond;
    bool is_closed = false;
    uint32_t overflows = 0;
    Handler() = default;
    void closed(journal::ObjectRecorder *object_recorder) override {
      std::lock_guard locker{lock};
      is_closed = true;
      cond.notify_all();
    }
    void overflow(journal::ObjectRecorder *object_recorder) override {
      std::lock_guard locker{lock};
      journal::AppendBuffers append_buffers;
      // drain the recorder's pending buffers under the recorder's own lock
      object_lock->lock();
      object_recorder->claim_append_buffers(&append_buffers);
      object_lock->unlock();
      ++overflows;
      cond.notify_all();
    }
  };
  // flush the pending buffers in dtor
  class ObjectRecorderFlusher {
  public:
    // Default flush thresholds effectively disable automatic flushing.
    ObjectRecorderFlusher(librados::IoCtx& ioctx,
                          ContextWQ* work_queue)
      : m_ioctx{ioctx},
        m_work_queue{work_queue}
    {}
    // max_in_flight < 0 is mapped to "unlimited" (uint64 max).
    ObjectRecorderFlusher(librados::IoCtx& ioctx,
                          ContextWQ* work_queue,
                          uint32_t flush_interval,
                          uint16_t flush_bytes,
                          double flush_age,
                          int max_in_flight)
      : m_ioctx{ioctx},
        m_work_queue{work_queue},
        m_flush_interval{flush_interval},
        m_flush_bytes{flush_bytes},
        m_flush_age{flush_age},
        m_max_in_flight_appends{max_in_flight < 0 ?
                                std::numeric_limits<uint64_t>::max() :
                                static_cast<uint64_t>(max_in_flight)}
    {}
    // Flush and close every recorder created through this flusher.
    ~ObjectRecorderFlusher() {
      for (auto& [object_recorder, m] : m_object_recorders) {
        C_SaferCond cond;
        object_recorder->flush(&cond);
        cond.wait();
        std::scoped_lock l{*m};
        if (!object_recorder->is_closed()) {
          object_recorder->close();
        }
      }
    }
    // Create a recorder guarded by <lock>; the recorder is tracked so the
    // destructor can flush/close it.
    auto create_object(std::string_view oid, uint8_t order, ceph::mutex* lock) {
      auto object = ceph::make_ref<journal::ObjectRecorder>(
        m_ioctx, oid, 0, lock, m_work_queue, &m_handler,
        order, m_max_in_flight_appends);
      {
        std::lock_guard locker{*lock};
        object->set_append_batch_options(m_flush_interval,
                                         m_flush_bytes,
                                         m_flush_age);
      }
      m_object_recorders.emplace_back(object, lock);
      m_handler.object_lock = lock;
      return object;
    }
    // Wait up to 10s for a closed notification.
    bool wait_for_closed() {
      std::unique_lock locker{m_handler.lock};
      return m_handler.cond.wait_for(locker, 10s,
                                     [this] { return m_handler.is_closed; });
    }
    // Wait up to 10s for at least one overflow; resets the counter on success.
    bool wait_for_overflow() {
      std::unique_lock locker{m_handler.lock};
      if (m_handler.cond.wait_for(locker, 10s,
                                  [this] { return m_handler.overflows > 0; })) {
        m_handler.overflows = 0;
        return true;
      } else {
        return false;
      }
    }
  private:
    librados::IoCtx& m_ioctx;
    ContextWQ *m_work_queue;
    uint32_t m_flush_interval = std::numeric_limits<uint32_t>::max();
    uint64_t m_flush_bytes = std::numeric_limits<uint64_t>::max();
    double m_flush_age = 600;
    uint64_t m_max_in_flight_appends = 0;
    using ObjectRecorders =
      std::list<std::pair<ceph::ref_t<journal::ObjectRecorder>, ceph::mutex*>>;
    ObjectRecorders m_object_recorders;
    Handler m_handler;
  };
  // Build an (initialized future, payload) append buffer for the tests.
  journal::AppendBuffer create_append_buffer(uint64_t tag_tid,
                                             uint64_t entry_tid,
                                             const std::string &payload) {
    auto future = ceph::make_ref<journal::FutureImpl>(tag_tid, entry_tid, 456);
    future->init(ceph::ref_t<journal::FutureImpl>());
    bufferlist bl;
    bl.append(payload);
    return std::make_pair(future, bl);
  }
};
TEST_F(TestObjectRecorder, Append) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ceph::mutex lock = ceph::make_mutex("object_recorder_lock");
  // all flush thresholds zeroed: appends are flushed immediately
  ObjectRecorderFlusher flusher(m_ioctx, m_work_queue, 0, 0, 0, 0);
  auto object = flusher.create_object(oid, 24, &lock);
  journal::AppendBuffer append_buffer1 = create_append_buffer(234, 123,
                                                              "payload");
  journal::AppendBuffers append_buffers;
  append_buffers = {append_buffer1};
  {
    // scoped lock instead of manual lock()/unlock(): if the assertion fails
    // and returns early, the mutex is still released, so the flusher's
    // destructor (which locks the same mutex) cannot deadlock
    std::scoped_lock locker{lock};
    ASSERT_FALSE(object->append(std::move(append_buffers)));
  }
  ASSERT_EQ(0U, object->get_pending_appends());
  journal::AppendBuffer append_buffer2 = create_append_buffer(234, 124,
                                                              "payload");
  append_buffers = {append_buffer2};
  {
    std::scoped_lock locker{lock};
    ASSERT_FALSE(object->append(std::move(append_buffers)));
  }
  ASSERT_EQ(0U, object->get_pending_appends());
  // flushing the second future must succeed with nothing left pending
  C_SaferCond cond;
  append_buffer2.first->flush(&cond);
  ASSERT_EQ(0, cond.wait());
  ASSERT_EQ(0U, object->get_pending_appends());
}
TEST_F(TestObjectRecorder, AppendFlushByCount) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ceph::mutex lock = ceph::make_mutex("object_recorder_lock");
  // only the count-based trigger is armed: flush every 2 appends
  ObjectRecorderFlusher flusher(m_ioctx, m_work_queue, 2, 0, 0, -1);
  auto object = flusher.create_object(oid, 24, &lock);
  journal::AppendBuffer append_buffer1 = create_append_buffer(234, 123,
                                                              "payload");
  journal::AppendBuffers append_buffers;
  append_buffers = {append_buffer1};
  {
    // scoped lock: released even on assertion failure, so the flusher's
    // destructor cannot deadlock on a still-held mutex
    std::scoped_lock locker{lock};
    ASSERT_FALSE(object->append(std::move(append_buffers)));
  }
  // first append stays pending
  ASSERT_EQ(1U, object->get_pending_appends());
  journal::AppendBuffer append_buffer2 = create_append_buffer(234, 124,
                                                              "payload");
  append_buffers = {append_buffer2};
  {
    std::scoped_lock locker{lock};
    ASSERT_FALSE(object->append(std::move(append_buffers)));
  }
  // second append reaches the flush interval and drains the queue
  ASSERT_EQ(0U, object->get_pending_appends());
  C_SaferCond cond;
  append_buffer2.first->wait(&cond);
  ASSERT_EQ(0, cond.wait());
}
TEST_F(TestObjectRecorder, AppendFlushByBytes) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ceph::mutex lock = ceph::make_mutex("object_recorder_lock");
  // only the byte-based trigger is armed: flush once >= 10 bytes are queued
  ObjectRecorderFlusher flusher(m_ioctx, m_work_queue, 0, 10, 0, -1);
  auto object = flusher.create_object(oid, 24, &lock);
  journal::AppendBuffer append_buffer1 = create_append_buffer(234, 123,
                                                              "payload");
  journal::AppendBuffers append_buffers;
  append_buffers = {append_buffer1};
  {
    // scoped lock: released even on assertion failure, so the flusher's
    // destructor cannot deadlock on a still-held mutex
    std::scoped_lock locker{lock};
    ASSERT_FALSE(object->append(std::move(append_buffers)));
  }
  // 7-byte payload stays below the threshold
  ASSERT_EQ(1U, object->get_pending_appends());
  journal::AppendBuffer append_buffer2 = create_append_buffer(234, 124,
                                                              "payload");
  append_buffers = {append_buffer2};
  {
    std::scoped_lock locker{lock};
    ASSERT_FALSE(object->append(std::move(append_buffers)));
  }
  // second payload crosses the byte threshold and triggers a flush
  ASSERT_EQ(0U, object->get_pending_appends());
  C_SaferCond cond;
  append_buffer2.first->wait(&cond);
  ASSERT_EQ(0, cond.wait());
}
TEST_F(TestObjectRecorder, AppendFlushByAge) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ceph::mutex lock = ceph::make_mutex("object_recorder_lock");
  // only the age-based trigger (0.5 ms) is armed
  ObjectRecorderFlusher flusher(m_ioctx, m_work_queue, 0, 0, 0.0005, -1);
  auto object = flusher.create_object(oid, 24, &lock);
  journal::AppendBuffer append_buffer1 = create_append_buffer(234, 123,
                                                              "payload");
  journal::AppendBuffers append_buffers;
  append_buffers = {append_buffer1};
  {
    // scoped lock: released even on assertion failure, so the flusher's
    // destructor cannot deadlock on a still-held mutex
    std::scoped_lock locker{lock};
    ASSERT_FALSE(object->append(std::move(append_buffers)));
  }
  // keep appending fresh entries until the age timer kicks off a flush of
  // the first buffer
  uint32_t offset = 0;
  journal::AppendBuffer append_buffer2;
  while (!append_buffer1.first->is_flush_in_progress() &&
         !append_buffer1.first->is_complete()) {
    usleep(1000);
    append_buffer2 = create_append_buffer(234, 124 + offset, "payload");
    ++offset;
    append_buffers = {append_buffer2};
    {
      std::scoped_lock locker{lock};
      ASSERT_FALSE(object->append(std::move(append_buffers)));
    }
  }
  C_SaferCond cond;
  append_buffer2.first->wait(&cond);
  ASSERT_EQ(0, cond.wait());
  ASSERT_EQ(0U, object->get_pending_appends());
}
TEST_F(TestObjectRecorder, AppendFilledObject) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ceph::mutex lock = ceph::make_mutex("object_recorder_lock");
  ObjectRecorderFlusher flusher(m_ioctx, m_work_queue, 0, 0, 0.0, -1);
  // small object (order 12 == 4 KiB) so two 2 KiB payloads fill it
  auto object = flusher.create_object(oid, 12, &lock);
  std::string payload(2048, '1');
  journal::AppendBuffer append_buffer1 = create_append_buffer(234, 123,
                                                              payload);
  journal::AppendBuffers append_buffers;
  append_buffers = {append_buffer1};
  {
    // scoped lock: released even on assertion failure, so the flusher's
    // destructor cannot deadlock on a still-held mutex
    std::scoped_lock locker{lock};
    ASSERT_FALSE(object->append(std::move(append_buffers)));
  }
  journal::AppendBuffer append_buffer2 = create_append_buffer(234, 124,
                                                              payload);
  append_buffers = {append_buffer2};
  {
    std::scoped_lock locker{lock};
    // the second append fills the object: append() reports the object full
    ASSERT_TRUE(object->append(std::move(append_buffers)));
  }
  C_SaferCond cond;
  append_buffer2.first->wait(&cond);
  ASSERT_EQ(0, cond.wait());
  ASSERT_EQ(0U, object->get_pending_appends());
}
// Verifies that an explicit ObjectRecorder::flush() drains a pending append
// (batch threshold of 10 would otherwise hold it back).
TEST_F(TestObjectRecorder, Flush) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ceph::mutex lock = ceph::make_mutex("object_recorder_lock");
  ObjectRecorderFlusher flusher(m_ioctx, m_work_queue, 0, 10, 0, -1);
  auto object = flusher.create_object(oid, 24, &lock);
  journal::AppendBuffer append_buffer1 = create_append_buffer(234, 123,
                                                              "payload");
  journal::AppendBuffers append_buffers;
  append_buffers = {append_buffer1};
  lock.lock();
  ASSERT_FALSE(object->append(std::move(append_buffers)));
  lock.unlock();
  ASSERT_EQ(1U, object->get_pending_appends());
  C_SaferCond cond1;
  object->flush(&cond1);
  ASSERT_EQ(0, cond1.wait());
  // the future attached to the buffer must also have completed
  C_SaferCond cond2;
  append_buffer1.first->wait(&cond2);
  ASSERT_EQ(0, cond2.wait());
  ASSERT_EQ(0U, object->get_pending_appends());
}
// Verifies that flushing a specific attached future pushes its append to disk
// even though the batch threshold (10) has not been reached.
TEST_F(TestObjectRecorder, FlushFuture) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ceph::mutex lock = ceph::make_mutex("object_recorder_lock");
  ObjectRecorderFlusher flusher(m_ioctx, m_work_queue, 0, 10, 0, -1);
  auto object = flusher.create_object(oid, 24, &lock);
  journal::AppendBuffer append_buffer = create_append_buffer(234, 123,
                                                             "payload");
  journal::AppendBuffers append_buffers;
  append_buffers = {append_buffer};
  lock.lock();
  ASSERT_FALSE(object->append(std::move(append_buffers)));
  lock.unlock();
  ASSERT_EQ(1U, object->get_pending_appends());
  C_SaferCond cond;
  append_buffer.first->wait(&cond);
  object->flush(append_buffer.first);
  // flush may complete synchronously or still be in flight here
  ASSERT_TRUE(append_buffer.first->is_flush_in_progress() ||
              append_buffer.first->is_complete());
  ASSERT_EQ(0, cond.wait());
}
// Verifies that flushing a future before it is attached to an ObjectRecorder
// is a no-op, and that the pending flush fires once the append attaches it.
TEST_F(TestObjectRecorder, FlushDetachedFuture) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ceph::mutex lock = ceph::make_mutex("object_recorder_lock");
  ObjectRecorderFlusher flusher(m_ioctx, m_work_queue);
  auto object = flusher.create_object(oid, 24, &lock);
  journal::AppendBuffer append_buffer = create_append_buffer(234, 123,
                                                             "payload");
  journal::AppendBuffers append_buffers;
  append_buffers = {append_buffer};
  // flush before attach: nothing should start yet
  object->flush(append_buffer.first);
  ASSERT_FALSE(append_buffer.first->is_flush_in_progress());
  lock.lock();
  ASSERT_FALSE(object->append(std::move(append_buffers)));
  lock.unlock();
  // should automatically flush once its attached to the object
  C_SaferCond cond;
  append_buffer.first->wait(&cond);
  ASSERT_EQ(0, cond.wait());
}
// Verifies that close() with an in-flight append returns false (not yet
// closed), leaves the caller's lock held, and completes asynchronously via
// the flusher's closed handler.
TEST_F(TestObjectRecorder, Close) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ceph::mutex lock = ceph::make_mutex("object_recorder_lock");
  ObjectRecorderFlusher flusher(m_ioctx, m_work_queue, 2, 0, 0, -1);
  auto object = flusher.create_object(oid, 24, &lock);
  journal::AppendBuffer append_buffer1 = create_append_buffer(234, 123,
                                                              "payload");
  journal::AppendBuffers append_buffers;
  append_buffers = {append_buffer1};
  lock.lock();
  ASSERT_FALSE(object->append(std::move(append_buffers)));
  lock.unlock();
  ASSERT_EQ(1U, object->get_pending_appends());
  lock.lock();
  // close() must not release the lock it was called under
  ASSERT_FALSE(object->close());
  ASSERT_TRUE(ceph_mutex_is_locked(lock));
  lock.unlock();
  ASSERT_TRUE(flusher.wait_for_closed());
  ASSERT_EQ(0U, object->get_pending_appends());
}
// Verifies overflow handling: filling one object (two 2KiB payloads into an
// order-12 object) succeeds, and a subsequent append to a second recorder for
// the same (already full) object triggers the overflow callback on flush.
TEST_F(TestObjectRecorder, Overflow) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ceph::mutex lock1 = ceph::make_mutex("object_recorder_lock_1");
  ceph::mutex lock2 = ceph::make_mutex("object_recorder_lock_2");
  ObjectRecorderFlusher flusher(m_ioctx, m_work_queue);
  auto object1 = flusher.create_object(oid, 12, &lock1);
  std::string payload(1 << 11, '1');
  journal::AppendBuffer append_buffer1 = create_append_buffer(234, 123,
                                                              payload);
  journal::AppendBuffer append_buffer2 = create_append_buffer(234, 124,
                                                              payload);
  journal::AppendBuffers append_buffers;
  append_buffers = {append_buffer1, append_buffer2};
  lock1.lock();
  // true: the object is full after these appends
  ASSERT_TRUE(object1->append(std::move(append_buffers)));
  lock1.unlock();
  C_SaferCond cond;
  append_buffer2.first->wait(&cond);
  ASSERT_EQ(0, cond.wait());
  ASSERT_EQ(0U, object1->get_pending_appends());
  // second recorder against the same full object: flush must overflow
  auto object2 = flusher.create_object(oid, 12, &lock2);
  journal::AppendBuffer append_buffer3 = create_append_buffer(456, 123,
                                                              payload);
  append_buffers = {append_buffer3};
  lock2.lock();
  ASSERT_FALSE(object2->append(std::move(append_buffers)));
  lock2.unlock();
  append_buffer3.first->flush(NULL);
  ASSERT_TRUE(flusher.wait_for_overflow());
}
| 15,613 | 32.578495 | 80 | cc |
null | ceph-main/src/test/journal/test_main.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "gtest/gtest.h"
#include "common/ceph_argparse.h"
#include "common/ceph_crypto.h"
#include "common/config_proxy.h"
#include "global/global_context.h"
#include "global/global_init.h"
#include <vector>
int main(int argc, char **argv)
{
::testing::InitGoogleTest(&argc, argv);
auto args = argv_to_vec(argc, argv);
auto cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_OSD,
CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_NO_MON_CONFIG);
g_conf().set_val("lockdep", "true");
common_init_finish(g_ceph_context);
int r = RUN_ALL_TESTS();
return r;
}
| 668 | 23.777778 | 70 | cc |
null | ceph-main/src/test/journal/mock/MockJournaler.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "MockJournaler.h"
namespace journal {

// Singleton slots: each mock registers itself in its constructor so the
// lightweight proxy classes declared in MockJournaler.h can forward calls
// to the gmock object owned by the active test.
MockFuture *MockFuture::s_instance = nullptr;
MockReplayEntry *MockReplayEntry::s_instance = nullptr;
MockJournaler *MockJournaler::s_instance = nullptr;

// MockJournalerProxy carries no printable state; emit nothing.
std::ostream &operator<<(std::ostream &os, const MockJournalerProxy &) {
  return os;
}

} // namespace journal
| 415 | 23.470588 | 72 | cc |
null | ceph-main/src/test/journal/mock/MockJournaler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef TEST_RBD_MIRROR_MOCK_JOURNALER_H
#define TEST_RBD_MIRROR_MOCK_JOURNALER_H
#include <gmock/gmock.h>
#include "include/int_types.h"
#include "include/rados/librados.hpp"
#include "cls/journal/cls_journal_types.h"
#include "journal/Journaler.h"
#include <iosfwd>
#include <string>
class Context;
namespace journal {
struct ReplayHandler;
struct Settings;
// gmock stand-in for journal::Future; the most recently constructed instance
// is the process-wide target that MockFutureProxy forwards to.
struct MockFuture {
  static MockFuture *s_instance;
  static MockFuture &get_instance() {
    ceph_assert(s_instance != nullptr);
    return *s_instance;
  }

  MockFuture() {
    s_instance = this;  // register as the active singleton
  }

  MOCK_CONST_METHOD0(is_valid, bool());
  MOCK_METHOD1(flush, void(Context *));
  MOCK_METHOD1(wait, void(Context *));
};
// Copyable value type used in place of journal::Future; every call is routed
// to the MockFuture singleton.
struct MockFutureProxy {
  bool is_valid() const {
    return MockFuture::get_instance().is_valid();
  }

  void flush(Context *on_safe) {
    MockFuture::get_instance().flush(on_safe);
  }

  void wait(Context *on_safe) {
    MockFuture::get_instance().wait(on_safe);
  }
};
// gmock stand-in for journal::ReplayEntry; singleton pattern mirrors
// MockFuture above.
struct MockReplayEntry {
  static MockReplayEntry *s_instance;
  static MockReplayEntry &get_instance() {
    ceph_assert(s_instance != nullptr);
    return *s_instance;
  }

  MockReplayEntry() {
    s_instance = this;  // register as the active singleton
  }

  MOCK_CONST_METHOD0(get_commit_tid, uint64_t());
  MOCK_CONST_METHOD0(get_data, bufferlist());
};
// Copyable value type used in place of journal::ReplayEntry; forwards to the
// MockReplayEntry singleton.
struct MockReplayEntryProxy {
  uint64_t get_commit_tid() const {
    return MockReplayEntry::get_instance().get_commit_tid();
  }
  bufferlist get_data() const {
    return MockReplayEntry::get_instance().get_data();
  }
};
// gmock stand-in for journal::Journaler; mirrors its public API surface.
// Tests construct one instance (registering it as the singleton) and set
// expectations; MockJournalerProxy forwards production-code calls here.
struct MockJournaler {
  static MockJournaler *s_instance;
  static MockJournaler &get_instance() {
    ceph_assert(s_instance != nullptr);
    return *s_instance;
  }

  MockJournaler() {
    s_instance = this;  // register as the active singleton
  }

  MOCK_METHOD0(construct, void());

  MOCK_METHOD1(init, void(Context *));
  MOCK_METHOD0(shut_down, void());
  MOCK_METHOD1(shut_down, void(Context *));
  MOCK_CONST_METHOD0(is_initialized, bool());

  MOCK_METHOD3(get_metadata, void(uint8_t *order, uint8_t *splay_width,
                                  int64_t *pool_id));
  MOCK_METHOD4(get_mutable_metadata, void(uint64_t*, uint64_t*,
                                          std::set<cls::journal::Client> *,
                                          Context*));

  MOCK_METHOD2(register_client, void(const bufferlist &, Context *));
  MOCK_METHOD1(unregister_client, void(Context *));
  MOCK_METHOD3(get_client, void(const std::string &, cls::journal::Client *,
                                Context *));
  MOCK_METHOD2(get_cached_client, int(const std::string&, cls::journal::Client*));
  MOCK_METHOD2(update_client, void(const bufferlist &, Context *));

  MOCK_METHOD4(allocate_tag, void(uint64_t, const bufferlist &,
                                  cls::journal::Tag*, Context *));
  MOCK_METHOD3(get_tag, void(uint64_t, cls::journal::Tag *, Context *));
  MOCK_METHOD3(get_tags, void(uint64_t, journal::Journaler::Tags*, Context*));
  MOCK_METHOD4(get_tags, void(uint64_t, uint64_t, journal::Journaler::Tags*,
                              Context*));

  MOCK_METHOD1(start_replay, void(::journal::ReplayHandler *replay_handler));
  MOCK_METHOD2(start_live_replay, void(ReplayHandler *, double));
  MOCK_METHOD1(try_pop_front, bool(MockReplayEntryProxy *));
  MOCK_METHOD2(try_pop_front, bool(MockReplayEntryProxy *, uint64_t *));
  MOCK_METHOD0(stop_replay, void());
  MOCK_METHOD1(stop_replay, void(Context *on_finish));

  MOCK_METHOD1(start_append, void(uint64_t));
  MOCK_METHOD3(set_append_batch_options, void(int, uint64_t, double));
  MOCK_CONST_METHOD0(get_max_append_size, uint64_t());
  MOCK_METHOD2(append, MockFutureProxy(uint64_t tag_id,
                                       const bufferlist &bl));
  MOCK_METHOD1(flush, void(Context *on_safe));
  MOCK_METHOD1(stop_append, void(Context *on_safe));

  MOCK_METHOD1(committed, void(const MockReplayEntryProxy &));
  MOCK_METHOD1(committed, void(const MockFutureProxy &future));
  MOCK_METHOD1(flush_commit_position, void(Context*));

  MOCK_METHOD1(add_listener, void(JournalMetadataListener *));
  MOCK_METHOD1(remove_listener, void(JournalMetadataListener *));
};
// Drop-in replacement for journal::Journaler in templated production code.
// Constructors only record the construction on the mock; all other calls are
// forwarded to the MockJournaler singleton.  A handful of methods are hard
// stubs that report -EINVAL because no current test exercises them.
struct MockJournalerProxy {
  MockJournalerProxy() {
    MockJournaler::get_instance().construct();
  }

  template <typename IoCtxT>
  MockJournalerProxy(IoCtxT &header_ioctx, const std::string &,
                     const std::string &, const Settings&,
                     journal::CacheManagerHandler *) {
    MockJournaler::get_instance().construct();
  }

  template <typename WorkQueue, typename Timer>
  MockJournalerProxy(WorkQueue *work_queue, Timer *timer, ceph::mutex *timer_lock,
                     librados::IoCtx &header_ioctx,
                     const std::string &journal_id,
                     const std::string &client_id, const Settings&,
                     journal::CacheManagerHandler *) {
    MockJournaler::get_instance().construct();
  }

  // Unmocked stubs: always fail with -EINVAL.
  void exists(Context *on_finish) const {
    on_finish->complete(-EINVAL);
  }
  void create(uint8_t order, uint8_t splay_width, int64_t pool_id, Context *on_finish) {
    on_finish->complete(-EINVAL);
  }
  void remove(bool force, Context *on_finish) {
    on_finish->complete(-EINVAL);
  }
  int register_client(const bufferlist &data) {
    return -EINVAL;
  }

  void allocate_tag(uint64_t tag_class, const bufferlist &tag_data,
                    cls::journal::Tag* tag, Context *on_finish) {
    MockJournaler::get_instance().allocate_tag(tag_class, tag_data, tag,
                                               on_finish);
  }

  void init(Context *on_finish) {
    MockJournaler::get_instance().init(on_finish);
  }
  void shut_down() {
    MockJournaler::get_instance().shut_down();
  }
  void shut_down(Context *on_finish) {
    MockJournaler::get_instance().shut_down(on_finish);
  }

  bool is_initialized() const {
    return MockJournaler::get_instance().is_initialized();
  }

  void get_metadata(uint8_t *order, uint8_t *splay_width, int64_t *pool_id) {
    MockJournaler::get_instance().get_metadata(order, splay_width, pool_id);
  }

  void get_mutable_metadata(uint64_t *min, uint64_t *active,
                            std::set<cls::journal::Client> *clients,
                            Context *on_finish) {
    MockJournaler::get_instance().get_mutable_metadata(min, active, clients,
                                                       on_finish);
  }

  void register_client(const bufferlist &data, Context *on_finish) {
    MockJournaler::get_instance().register_client(data, on_finish);
  }
  void unregister_client(Context *on_finish) {
    MockJournaler::get_instance().unregister_client(on_finish);
  }
  void get_client(const std::string &client_id, cls::journal::Client *client,
                  Context *on_finish) {
    MockJournaler::get_instance().get_client(client_id, client, on_finish);
  }
  int get_cached_client(const std::string& client_id,
                        cls::journal::Client* client) {
    return MockJournaler::get_instance().get_cached_client(client_id, client);
  }
  void update_client(const bufferlist &client_data, Context *on_finish) {
    MockJournaler::get_instance().update_client(client_data, on_finish);
  }

  void get_tag(uint64_t tag_tid, cls::journal::Tag *tag, Context *on_finish) {
    MockJournaler::get_instance().get_tag(tag_tid, tag, on_finish);
  }
  void get_tags(uint64_t tag_class, journal::Journaler::Tags *tags,
                Context *on_finish) {
    MockJournaler::get_instance().get_tags(tag_class, tags, on_finish);
  }
  void get_tags(uint64_t start_after_tag_tid, uint64_t tag_class,
                journal::Journaler::Tags *tags, Context *on_finish) {
    MockJournaler::get_instance().get_tags(start_after_tag_tid, tag_class, tags,
                                           on_finish);
  }

  void start_replay(::journal::ReplayHandler *replay_handler) {
    MockJournaler::get_instance().start_replay(replay_handler);
  }
  void start_live_replay(ReplayHandler *handler, double interval) {
    MockJournaler::get_instance().start_live_replay(handler, interval);
  }
  bool try_pop_front(MockReplayEntryProxy *replay_entry) {
    return MockJournaler::get_instance().try_pop_front(replay_entry);
  }
  bool try_pop_front(MockReplayEntryProxy *entry, uint64_t *tag_tid) {
    return MockJournaler::get_instance().try_pop_front(entry, tag_tid);
  }
  void stop_replay() {
    MockJournaler::get_instance().stop_replay();
  }
  void stop_replay(Context *on_finish) {
    MockJournaler::get_instance().stop_replay(on_finish);
  }

  void start_append(uint64_t max_in_flight_appends) {
    MockJournaler::get_instance().start_append(max_in_flight_appends);
  }
  void set_append_batch_options(int flush_interval, uint64_t flush_bytes,
                                double flush_age) {
    MockJournaler::get_instance().set_append_batch_options(
      flush_interval, flush_bytes, flush_age);
  }
  uint64_t get_max_append_size() const {
    return MockJournaler::get_instance().get_max_append_size();
  }
  MockFutureProxy append(uint64_t tag_id, const bufferlist &bl) {
    return MockJournaler::get_instance().append(tag_id, bl);
  }
  void flush(Context *on_safe) {
    MockJournaler::get_instance().flush(on_safe);
  }
  void stop_append(Context *on_safe) {
    MockJournaler::get_instance().stop_append(on_safe);
  }

  void committed(const MockReplayEntryProxy &entry) {
    MockJournaler::get_instance().committed(entry);
  }
  void committed(const MockFutureProxy &future) {
    MockJournaler::get_instance().committed(future);
  }
  void flush_commit_position(Context *on_finish) {
    MockJournaler::get_instance().flush_commit_position(on_finish);
  }

  void add_listener(JournalMetadataListener *listener) {
    MockJournaler::get_instance().add_listener(listener);
  }
  void remove_listener(JournalMetadataListener *listener) {
    MockJournaler::get_instance().remove_listener(listener);
  }
};
std::ostream &operator<<(std::ostream &os, const MockJournalerProxy &);
} // namespace journal
#endif // TEST_RBD_MIRROR_MOCK_JOURNALER_H
| 10,179 | 31.420382 | 88 | h |
null | ceph-main/src/test/lazy-omap-stats/lazy_omap_stats_test.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <algorithm>
#include <boost/algorithm/string/trim.hpp>
#include <boost/tokenizer.hpp>
#include <boost/uuid/uuid.hpp> // uuid class
#include <boost/uuid/uuid_generators.hpp> // generators
#include <boost/uuid/uuid_io.hpp> // streaming operators etc.
#include <chrono>
#include <iostream>
#include <thread>
#include <vector>
#include "common/ceph_json.h"
#include "global/global_init.h"
#include "include/compat.h"
#include "lazy_omap_stats_test.h"
using namespace std;
// Connect to the cluster, create the replicated test pool, open an ioctx on
// it and cache the pool id.  Exits the process (with the positive errno) on
// any failure.
//
// Bug fix: conf_parse_env() and conf_read_file() previously discarded their
// return values, so the error checks that followed them re-tested the stale
// result of conf_parse_argv() and real failures went unnoticed.
void LazyOmapStatsTest::init(const int argc, const char** argv)
{
  int ret = rados.init("admin");
  if (ret < 0) {
    ret = -ret;
    cerr << "Failed to initialise rados! Error: " << ret << " " << strerror(ret)
         << endl;
    exit(ret);
  }

  ret = rados.conf_parse_argv(argc, argv);
  if (ret < 0) {
    ret = -ret;
    cerr << "Failed to parse command line config options! Error: " << ret << " "
         << strerror(ret) << endl;
    exit(ret);
  }

  ret = rados.conf_parse_env(NULL);   // was: return value dropped
  if (ret < 0) {
    ret = -ret;
    cerr << "Failed to parse environment! Error: " << ret << " "
         << strerror(ret) << endl;
    exit(ret);
  }

  ret = rados.conf_read_file(NULL);   // was: return value dropped
  if (ret < 0) {
    ret = -ret;
    cerr << "Failed to read config file! Error: " << ret << " " << strerror(ret)
         << endl;
    exit(ret);
  }

  ret = rados.connect();
  if (ret < 0) {
    ret = -ret;
    cerr << "Failed to connect to running cluster! Error: " << ret << " "
         << strerror(ret) << endl;
    exit(ret);
  }

  // Create the test pool via a mon "osd pool create" command
  string command = R"(
    {
      "prefix": "osd pool create",
      "pool": ")" + conf.pool_name +
                   R"(",
      "pool_type": "replicated",
      "size": )" + to_string(conf.replica_count) +
                   R"(
    })";
  librados::bufferlist inbl;
  string output;
  ret = rados.mon_command(command, inbl, nullptr, &output);
  if (output.length()) cout << output << endl;
  if (ret < 0) {
    ret = -ret;
    cerr << "Failed to create pool! Error: " << ret << " " << strerror(ret)
         << endl;
    exit(ret);
  }

  ret = rados.ioctx_create(conf.pool_name.c_str(), io_ctx);
  if (ret < 0) {
    ret = -ret;
    cerr << "Failed to create ioctx! Error: " << ret << " " << strerror(ret)
         << endl;
    exit(ret);
  }

  get_pool_id(conf.pool_name);
}
// Delete the test pool and tear down the cluster connection.  Errors from
// pool deletion are deliberately ignored (best-effort cleanup).
void LazyOmapStatsTest::shutdown()
{
  rados.pool_delete(conf.pool_name.c_str());
  rados.shutdown();
}
// Create an empty RADOS object and attach the pre-built omap payload
// (conf.keys entries) to it.  Exits on failure.
void LazyOmapStatsTest::write_omap(const string& object_name)
{
  librados::bufferlist bl;  // empty data payload; only the omap matters
  int ret = io_ctx.write_full(object_name, bl);
  if (ret < 0) {
    ret = -ret;
    cerr << "Failed to create object! Error: " << ret << " " << strerror(ret)
         << endl;
    exit(ret);
  }
  ret = io_ctx.omap_set(object_name, payload);
  if (ret < 0) {
    ret = -ret;
    cerr << "Failed to write omap payload! Error: " << ret << " "
         << strerror(ret) << endl;
    exit(ret);
  }
  cout << "Wrote " << conf.keys << " omap keys of " << conf.payload_size
       << " bytes to "
       << "the " << object_name << " object" << endl;
}
// Produce a fresh random (version 4) UUID string, used both for object names
// and omap keys so every run is unique.
const string LazyOmapStatsTest::get_name() const
{
  boost::uuids::random_generator generate;
  return boost::uuids::to_string(generate());
}
// Write the omap payload to `how_many` freshly named objects.
void LazyOmapStatsTest::write_many(uint how_many)
{
  for (uint remaining = how_many; remaining != 0; --remaining) {
    write_omap(get_name());
  }
}
// Build the omap payload reused by every write: conf.keys entries, each a
// randomly named key mapped to the same lorem-ipsum bufferlist.  Also derives
// the expected cluster-wide totals used by the later check_* methods.
//
// Idiom fix: the original declared `uint i = 0;` and then looped
// `for (i = 1; i < conf.keys + 1; ++i)` — same iteration count, but
// needlessly confusing; replaced with a conventional zero-based loop.
void LazyOmapStatsTest::create_payload()
{
  librados::bufferlist Lorem;
  Lorem.append(
      "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do "
      "eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut "
      "enim ad minim veniam, quis nostrud exercitation ullamco laboris "
      "nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in "
      "reprehenderit in voluptate velit esse cillum dolore eu fugiat "
      "nulla pariatur. Excepteur sint occaecat cupidatat non proident, "
      "sunt in culpa qui officia deserunt mollit anim id est laborum.");
  conf.payload_size = Lorem.length();
  // Expected totals across all conf.how_many objects
  conf.total_bytes = conf.keys * conf.payload_size * conf.how_many;
  conf.total_keys = conf.keys * conf.how_many;
  for (uint i = 0; i < conf.keys; ++i) {
    payload[get_name()] = Lorem;
  }
  cout << "Created payload with " << conf.keys << " keys of "
       << conf.payload_size
       << " bytes each. Total size in bytes = " << conf.keys * conf.payload_size
       << endl;
}
// Kick off a cluster-wide deep-scrub and block until every PG's
// last_deep_scrub_stamp has advanced past its pre-scrub value.  Exits with
// ENOENT if no PG belonging to the test pool is visible yet.
void LazyOmapStatsTest::scrub()
{
  cout << "Scrubbing" << endl;
  cout << "Before scrub stamps:" << endl;
  // PG ids are "<pool_id>.<seq>"; match on the "<pool_id>." prefix
  string target_pool(conf.pool_id);
  target_pool.append(".");
  bool target_pool_found = false;
  map<string, string> before_scrub = get_scrub_stamps();
  for (auto [pg, stamp] : before_scrub) {
    cout << "pg = " << pg << " stamp = " << stamp << endl;
    if (pg.rfind(target_pool, 0) == 0) {
      target_pool_found = true;
    }
  }
  if (!target_pool_found) {
    cout << "Error: Target pool " << conf.pool_name << ":" << conf.pool_id
         << " not found!" << endl;
    exit(2);  // ENOENT
  }
  cout << endl;

  // Short sleep to make sure the new pool is visible
  sleep(5);

  string command = R"({"prefix": "osd deep-scrub", "who": "all"})";
  auto output = get_output(command);
  cout << output << endl;

  cout << "Waiting for deep-scrub to complete..." << endl;
  while (sleep(1) == 0) {
    cout << "Current scrub stamps:" << endl;
    bool complete = true;
    map<string, string> current_stamps = get_scrub_stamps();
    for (auto [pg, stamp] : current_stamps) {
      cout << "pg = " << pg << " stamp = " << stamp << endl;
      if (stamp == before_scrub[pg]) {
        // See if stamp for each pg has changed
        // If not, we haven't completed the deep-scrub
        complete = false;
      }
    }
    cout << endl;
    if (complete) {
      break;
    }
  }
  cout << "Scrubbing complete" << endl;
}
// Count (and echo) every match of `reg` in `output`; the first capture group
// of each match is printed to stdout.
const int LazyOmapStatsTest::find_matches(string& output, regex& reg) const
{
  uint matched = 0;
  const sregex_iterator end;
  for (sregex_iterator match(output.begin(), output.end(), reg); match != end;
       ++match) {
    cout << (*match)[1].str() << endl;
    ++matched;
  }
  return matched;
}
// Run a JSON command against the mon or mgr (default: "pg dump" on the mgr)
// and return the raw outbl payload.  The human-readable status string is
// echoed unless `silent`.  Exits on command failure.
const string LazyOmapStatsTest::get_output(const string command,
                                           const bool silent,
                                           const CommandTarget target)
{
  librados::bufferlist inbl, outbl;
  string output;
  int ret = 0;
  if (target == CommandTarget::TARGET_MON) {
    ret = rados.mon_command(command, inbl, &outbl, &output);
  } else {
    ret = rados.mgr_command(command, inbl, &outbl, &output);
  }
  if (output.length() && !silent) {
    cout << output << endl;
  }
  if (ret < 0) {
    ret = -ret;
    cerr << "Failed to get " << command << "! Error: " << ret << " "
         << strerror(ret) << endl;
    exit(ret);
  }
  return string(outbl.c_str(), outbl.length());
}
// Look up and cache (in conf.pool_id) the numeric id of the test pool by
// parsing the mon's "osd pool ls detail" JSON.  Exits with ENOENT when the
// pool is not found.
//
// Cleanup: removed the unused `librados::bufferlist inbl, outbl;` locals and
// renamed the loop variable, which shadowed the `pool` parameter.
void LazyOmapStatsTest::get_pool_id(const string& pool)
{
  cout << R"(Querying pool id)" << endl;
  string command = R"({"prefix": "osd pool ls", "detail": "detail", "format": "json"})";
  auto output = get_output(command, false, CommandTarget::TARGET_MON);
  JSONParser parser;
  parser.parse(output.c_str(), output.size());
  for (const auto& pool_json : parser.get_array_elements()) {
    JSONParser parser2;
    parser2.parse(pool_json.c_str(), static_cast<int>(pool_json.size()));
    auto* obj = parser2.find_obj("pool_name");
    if (obj->get_data().compare(conf.pool_name) == 0) {
      obj = parser2.find_obj("pool_id");
      conf.pool_id = obj->get_data();
    }
  }
  if (conf.pool_id.empty()) {
    cout << "Failed to find pool ID for pool " << conf.pool_name << "!" << endl;
    exit(2);  // ENOENT
  } else {
    cout << "Found pool ID: " << conf.pool_id << endl;
  }
}
// Return a map of pgid -> last_deep_scrub_stamp for every PG in the cluster,
// parsed from the mgr's "pg dump" JSON output.
map<string, string> LazyOmapStatsTest::get_scrub_stamps() {
  map<string, string> stamps;
  string command = R"({"prefix": "pg dump", "format": "json"})";
  auto output = get_output(command);
  JSONParser parser;
  parser.parse(output.c_str(), output.size());
  auto* obj = parser.find_obj("pg_map")->find_obj("pg_stats");
  for (auto pg = obj->find_first(); !pg.end(); ++pg) {
    stamps.insert({(*pg)->find_obj("pgid")->get_data(),
                   (*pg)->find_obj("last_deep_scrub_stamp")->get_data()});
  }
  return stamps;
}
// After writing a single object, verify that the plain-text "pg dump" output
// contains exactly the expected per-PG omap key and byte counts.  With a
// 3-way replicated pool we expect 3 matching lines per count (6 total).
void LazyOmapStatsTest::check_one()
{
  string full_output = get_output();
  cout << full_output << endl;
  regex reg(
      "\n"
      R"((PG_STAT[\s\S]*)"
      "\n)OSD_STAT");  // Strip OSD_STAT table so we don't find matches there
  smatch match;
  regex_search(full_output, match, reg);
  auto truncated_output = match[1].str();
  cout << truncated_output << endl;
  reg = regex(
      "\n"
      R"(([0-9,s].*\s)" +
      to_string(conf.keys) +
      R"(\s.*))"
      "\n");
  cout << "Checking number of keys " << conf.keys << endl;
  cout << "Found the following lines" << endl;
  cout << "*************************" << endl;
  uint result = find_matches(truncated_output, reg);
  cout << "**********************" << endl;
  cout << "Found " << result << " matching line(s)" << endl;
  uint total = result;
  reg = regex(
      "\n"
      R"(([0-9,s].*\s)" +
      to_string(conf.payload_size * conf.keys) +
      R"(\s.*))"
      "\n");
  cout << "Checking number of bytes "
       << conf.payload_size * conf.keys << endl;
  cout << "Found the following lines" << endl;
  cout << "*************************" << endl;
  result = find_matches(truncated_output, reg);
  cout << "**********************" << endl;
  cout << "Found " << result << " matching line(s)" << endl;
  total += result;
  // 3 replicas x (one keys line + one bytes line) = 6 expected matches
  if (total != 6) {
    cout << "Error: Found " << total << " matches, expected 6! Exiting..."
         << endl;
    exit(22);  // EINVAL
  }
  cout << "check_one successful. Found " << total << " matches as expected"
       << endl;
}
// Locate the header line captured by `needle` inside `haystack`, split it on
// whitespace and return the zero-based column position of `label`.  Exits
// with ENOENT if the label is absent.
const int LazyOmapStatsTest::find_index(string& haystack, regex& needle,
                                        string label) const
{
  smatch match;
  regex_search(haystack, match, needle);
  auto line = match[1].str();
  boost::algorithm::trim(line);
  boost::char_separator<char> sep{" "};
  boost::tokenizer<boost::char_separator<char>> tok(line, sep);
  vector<string> tokens(tok.begin(), tok.end());
  auto it = find(tokens.begin(), tokens.end(), label);
  if (it != tokens.end()) {
    return distance(tokens.begin(), it);
  }
  cerr << "find_index failed to find index for " << label << endl;
  exit(2);  // ENOENT
  return -1;  // Unreachable
}
// Sum the numeric values found at column `omap_bytes_index` across every row
// of a whitespace-separated `table`; the first row is skipped when `header`
// is set.  NOTE: the total is accumulated as uint64_t but narrowed to uint on
// return, matching the declared interface.
const uint LazyOmapStatsTest::tally_column(const uint omap_bytes_index,
                                           const string& table,
                                           bool header) const
{
  istringstream buffer(table);
  string line;
  uint64_t total = 0;
  while (std::getline(buffer, line)) {
    if (header) {
      header = false;  // skip the first (heading) row only
      continue;
    }
    boost::char_separator<char> sep{" "};
    boost::tokenizer<boost::char_separator<char>> tok(line, sep);
    vector<string> tokens(tok.begin(), tok.end());
    total += stoi(tokens.at(omap_bytes_index));
  }
  return total;
}
// Tally the given column of `table` and compare it against the expected
// cluster-wide total ("bytes" -> conf.total_bytes, anything else ->
// conf.total_keys).  Exits with EINVAL on mismatch.
void LazyOmapStatsTest::check_column(const int index, const string& table,
                                     const string& type, bool header) const
{
  uint expected;
  string errormsg;
  if (type.compare("bytes") == 0) {
    expected = conf.total_bytes;
    errormsg = "Error. Got unexpected byte count!";
  } else {
    expected = conf.total_keys;
    errormsg = "Error. Got unexpected key count!";
  }
  uint sum = tally_column(index, table, header);
  cout << "Got: " << sum << " Expected: " << expected << endl;
  if (sum != expected) {
    cout << errormsg << endl;
    exit(22);  // EINVAL
  }
}
// Resolve the column positions of the OMAP_BYTES* and OMAP_KEYS* headings in
// the header line matched by `reg` within `output`.
index_t LazyOmapStatsTest::get_indexes(regex& reg, string& output) const
{
  // Braced-init evaluates left to right, preserving the original lookup order
  return index_t{
      static_cast<unsigned>(find_index(output, reg, "OMAP_BYTES*")),
      static_cast<unsigned>(find_index(output, reg, "OMAP_KEYS*"))};
}
// Validate the omap byte/key totals in the PG_STAT table of the full
// "pg dump" output.
void LazyOmapStatsTest::check_pg_dump()
{
  cout << R"(Checking "pg dump" output)" << endl;
  string dump_output = get_output();
  cout << dump_output << endl;
  // locate the column indexes from the PG_STAT header line
  regex reg(
      "\n"
      R"((PG_STAT\s.*))"
      "\n");
  index_t indexes = get_indexes(reg, dump_output);
  // capture the whole PG_STAT table (up to the blank line before the totals)
  reg =
      "\n"
      R"((PG_STAT[\s\S]*))"
      "\n +\n[0-9]";
  smatch match;
  regex_search(dump_output, match, reg);
  auto table = match[1].str();
  cout << "Checking bytes" << endl;
  check_column(indexes.byte_index, table, string("bytes"));
  cout << "Checking keys" << endl;
  check_column(indexes.key_index, table, string("keys"));
  cout << endl;
}
// Validate the omap byte/key totals on the "sum" row of the
// "pg dump summary" output.
void LazyOmapStatsTest::check_pg_dump_summary()
{
  cout << R"(Checking "pg dump summary" output)" << endl;
  string command = R"({"prefix": "pg dump", "dumpcontents": ["summary"]})";
  string dump_output = get_output(command);
  cout << dump_output << endl;
  // column indexes come from the PG_STAT header line
  regex reg(
      "\n"
      R"((PG_STAT\s.*))"
      "\n");
  index_t indexes = get_indexes(reg, dump_output);
  // only the single "sum" row is tallied, hence header=false below
  reg =
      "\n"
      R"((sum\s.*))"
      "\n";
  smatch match;
  regex_search(dump_output, match, reg);
  auto table = match[1].str();
  cout << "Checking bytes" << endl;
  check_column(indexes.byte_index, table, string("bytes"), false);
  cout << "Checking keys" << endl;
  check_column(indexes.key_index, table, string("keys"), false);
  cout << endl;
}
// Validate the omap byte/key totals in the per-PG table of the
// "pg dump pgs" output.
void LazyOmapStatsTest::check_pg_dump_pgs()
{
  cout << R"(Checking "pg dump pgs" output)" << endl;
  string command = R"({"prefix": "pg dump", "dumpcontents": ["pgs"]})";
  string dump_output = get_output(command);
  cout << dump_output << endl;
  regex reg(R"(^(PG_STAT\s.*))"
            "\n");
  index_t indexes = get_indexes(reg, dump_output);
  // capture the PG_STAT table up to the first blank line
  reg = R"(^(PG_STAT[\s\S]*))"
        "\n\n";
  smatch match;
  regex_search(dump_output, match, reg);
  auto table = match[1].str();
  cout << "Checking bytes" << endl;
  check_column(indexes.byte_index, table, string("bytes"));
  cout << "Checking keys" << endl;
  check_column(indexes.key_index, table, string("keys"));
  cout << endl;
}
// Validate the omap byte/key totals on the test pool's row of the
// "pg dump pools" output.
void LazyOmapStatsTest::check_pg_dump_pools()
{
  cout << R"(Checking "pg dump pools" output)" << endl;
  string command = R"({"prefix": "pg dump", "dumpcontents": ["pools"]})";
  string dump_output = get_output(command);
  cout << dump_output << endl;
  regex reg(R"(^(POOLID\s.*))"
            "\n");
  index_t indexes = get_indexes(reg, dump_output);
  // isolate the single row belonging to our pool id
  reg =
      "\n"
      R"(()" +
      conf.pool_id +
      R"(\s.*))"
      "\n";
  smatch match;
  regex_search(dump_output, match, reg);
  auto line = match[1].str();
  cout << "Checking bytes" << endl;
  check_column(indexes.byte_index, line, string("bytes"), false);
  cout << "Checking keys" << endl;
  check_column(indexes.key_index, line, string("keys"), false);
  cout << endl;
}
// Validate the omap byte/key totals in the table produced by "pg ls".
void LazyOmapStatsTest::check_pg_ls()
{
  cout << R"(Checking "pg ls" output)" << endl;
  string command = R"({"prefix": "pg ls"})";
  string dump_output = get_output(command);
  cout << dump_output << endl;
  regex reg(R"(^(PG\s.*))"
            "\n");
  index_t indexes = get_indexes(reg, dump_output);
  // capture the PG table up to the first blank line
  reg = R"(^(PG[\s\S]*))"
        "\n\n";
  smatch match;
  regex_search(dump_output, match, reg);
  auto table = match[1].str();
  cout << "Checking bytes" << endl;
  check_column(indexes.byte_index, table, string("bytes"));
  cout << "Checking keys" << endl;
  check_column(indexes.key_index, table, string("keys"));
  cout << endl;
}
// Poll "pg dump" (quietly) every 250ms until every PG row reports the exact
// state "active+clean".  The STATE column index is resolved once on the
// first iteration.
void LazyOmapStatsTest::wait_for_active_clean()
{
  cout << "Waiting for active+clean" << endl;
  int index = -1;
  regex reg(
      "\n"
      R"((PG_STAT[\s\S]*))"
      "\n +\n[0-9]");
  string command = R"({"prefix": "pg dump"})";
  int num_not_clean;
  do {
    string dump_output = get_output(command, true);
    if (index == -1) {
      // resolve the STATE column position from the header line, once
      regex ireg(
          "\n"
          R"((PG_STAT\s.*))"
          "\n");
      index = find_index(dump_output, ireg, "STATE");
    }
    smatch match;
    regex_search(dump_output, match, reg);
    istringstream buffer(match[1].str());
    string line;
    num_not_clean = 0;
    while (std::getline(buffer, line)) {
      if (line.compare(0, 1, "P") == 0) continue;  // skip the header row
      boost::char_separator<char> sep{" "};
      boost::tokenizer<boost::char_separator<char>> tok(line, sep);
      vector<string> tokens(tok.begin(), tok.end());
      // compare() is 0 only for an exact "active+clean" match
      num_not_clean += tokens.at(index).compare("active+clean");
    }
    cout << "." << flush;
    this_thread::sleep_for(chrono::milliseconds(250));
  } while (num_not_clean);
  cout << endl;
}
// Test entry point: set up the cluster state, write omap data, deep-scrub so
// the lazily-maintained omap statistics are populated, then verify those
// statistics across every reporting command.  Any failure exits the process
// before this returns 0.
const int LazyOmapStatsTest::run(const int argc, const char** argv)
{
  init(argc, argv);
  create_payload();
  wait_for_active_clean();
  write_omap(get_name());
  scrub();
  check_one();

  write_many(conf.how_many - 1);  // Since we already wrote one
  scrub();
  check_pg_dump();
  check_pg_dump_summary();
  check_pg_dump_pgs();
  check_pg_dump_pools();
  check_pg_ls();
  cout << "All tests passed. Success!" << endl;

  shutdown();

  return 0;
}
| 17,314 | 26.837621 | 88 | cc |
null | ceph-main/src/test/lazy-omap-stats/lazy_omap_stats_test.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LAZY_OMAP_STATS_TEST_H
#define CEPH_LAZY_OMAP_STATS_TEST_H
#include <map>
#include <regex>
#include <string>
#include "include/compat.h"
#include "include/rados/librados.hpp"
// Column positions discovered in a "pg dump" style table: which column
// holds the omap byte count and which holds the omap key count.
struct index_t {
  unsigned byte_index = 0;
  unsigned key_index = 0;
};
// Drives the lazy omap stats tests: writes omap payloads into a test pool,
// forces scrubs, then cross-checks the accounted omap totals reported by
// the various mon/mgr commands (pg dump, pg ls, ...).
class LazyOmapStatsTest
{
  librados::IoCtx io_ctx;
  librados::Rados rados;
  // Payload written as omap values, keyed by omap key name.
  std::map<std::string, librados::bufferlist> payload;

  // Test tunables plus running totals accumulated while writing.
  struct lazy_omap_test_t {
    unsigned payload_size = 0;
    unsigned replica_count = 3;
    unsigned keys = 2000;
    unsigned how_many = 50;
    std::string pool_name = "lazy_omap_test_pool";
    std::string pool_id;
    unsigned total_bytes = 0;
    unsigned total_keys = 0;
  } conf;

  // Which daemon a command should be sent to.
  typedef enum {
    TARGET_MON,
    TARGET_MGR
  } CommandTarget;

  // Non-copyable: owns live librados handles.
  LazyOmapStatsTest(LazyOmapStatsTest&) = delete;
  void operator=(LazyOmapStatsTest) = delete;
  void init(const int argc, const char** argv);
  void shutdown();
  void write_omap(const std::string& object_name);
  const std::string get_name() const;
  void create_payload();
  void write_many(const unsigned how_many);
  void scrub();
  // Returns how many regex matches of 'reg' occur in 'output'.
  const int find_matches(std::string& output, std::regex& reg) const;
  void check_one();
  // Returns the column index of 'label' within the header matched by 'needle'.
  const int find_index(std::string& haystack, std::regex& needle,
                       std::string label) const;
  const unsigned tally_column(const unsigned omap_bytes_index,
                              const std::string& table, bool header) const;
  void check_column(const int index, const std::string& table,
                    const std::string& type, bool header = true) const;
  index_t get_indexes(std::regex& reg, std::string& output) const;
  // One check_* per report format being validated.
  void check_pg_dump();
  void check_pg_dump_summary();
  void check_pg_dump_pgs();
  void check_pg_dump_pools();
  void check_pg_ls();
  // Runs 'command' against the chosen target and returns its output.
  const std::string get_output(
      const std::string command = R"({"prefix": "pg dump"})",
      const bool silent = false,
      const CommandTarget target = CommandTarget::TARGET_MGR);
  void get_pool_id(const std::string& pool);
  std::map<std::string, std::string> get_scrub_stamps();
  void wait_for_active_clean();

 public:
  LazyOmapStatsTest() = default;
  const int run(const int argc, const char** argv);
};
#endif // CEPH_LAZY_OMAP_STATS_TEST_H
| 2,632 | 28.58427 | 71 | h |
null | ceph-main/src/test/lazy-omap-stats/main.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "lazy_omap_stats_test.h"
int main(const int argc, const char** argv)
{
LazyOmapStatsTest app;
return app.run(argc, argv);
}
| 537 | 23.454545 | 70 | cc |
null | ceph-main/src/test/libcephfs/access.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "gtest/gtest.h"
#include "common/ceph_argparse.h"
#include "include/buffer.h"
#include "include/stringify.h"
#include "include/cephfs/libcephfs.h"
#include "include/fs_types.h"
#include "include/rados/librados.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <sys/uio.h>
#include <iostream>
#include <vector>
#include "json_spirit/json_spirit.h"
#include "include/fs_types.h"
#ifdef __linux__
#include <limits.h>
#include <sys/xattr.h>
#endif
using namespace std;
rados_t cluster;
string key;
// Send a JSON mon command, print its output, and extract the generated
// auth key from the JSON reply into *key.
//
// Returns the rados_mon_command() status, or -CEPHFS_EINVAL when the
// command produced no output buffer to parse.  Fix: the error buffer
// ('outs') is now always printed and freed — the old code returned
// -CEPHFS_EINVAL before reaching the free(outs) path and leaked it.
int do_mon_command(string s, string *key)
{
  char *outs, *outbuf;
  size_t outs_len, outbuf_len;
  const char *ss = s.c_str();
  int r = rados_mon_command(cluster, (const char **)&ss, 1,
			    0, 0,
			    &outbuf, &outbuf_len,
			    &outs, &outs_len);
  if (outbuf_len) {
    // Renamed from 's' to avoid shadowing the command parameter.
    string out(outbuf, outbuf_len);
    std::cout << "out: " << out << std::endl;

    // parse out the key
    json_spirit::mValue v, k;
    json_spirit::read_or_throw(out, v);
    k = v.get_array()[0].get_obj().find("key")->second;
    *key = k.get_str();
    std::cout << "key: " << *key << std::endl;
    free(outbuf);
  } else {
    // Nothing to parse; report failure but fall through so 'outs' below
    // is still handled instead of leaking.
    r = -CEPHFS_EINVAL;
  }
  if (outs_len) {
    string err(outs, outs_len);
    std::cout << "outs: " << err << std::endl;
    free(outs);
  }
  return r;
}
// Build a per-run test directory name; the random suffix keeps concurrent
// runs from colliding.
string get_unique_dir()
{
  return "/ceph_test_libcephfs_access." + stringify(rand());
}
// Smoke test: an entity created with blanket mon/osd/mds caps can mount
// the filesystem with its freshly minted key.
TEST(AccessTest, Foo) {
  string dir = get_unique_dir();
  string user = "libcephfs_foo_test." + stringify(rand());

  // admin mount to set up test
  struct ceph_mount_info *admin;
  ASSERT_EQ(0, ceph_create(&admin, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(admin, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(admin, NULL));
  ASSERT_EQ(0, ceph_mount(admin, "/"));
  ASSERT_EQ(0, ceph_mkdir(admin, dir.c_str(), 0755));

  // create access key
  string key;
  ASSERT_EQ(0, do_mon_command(
      "{\"prefix\": \"auth get-or-create\", \"entity\": \"client." + user + "\", "
      "\"caps\": [\"mon\", \"allow *\", \"osd\", \"allow rw\", "
      "\"mds\", \"allow rw\""
      "], \"format\": \"json\"}", &key));

  // mount as the new client using that key
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, user.c_str()));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_set(cmount, "key", key.c_str()));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  ceph_shutdown(cmount);

  // clean up
  ASSERT_EQ(0, ceph_rmdir(admin, dir.c_str()));
  ceph_shutdown(admin);
}
// Path-restricted MDS caps: the client gets "allow r" everywhere plus
// "allow rw path=<good>".  Writes must succeed under 'good' and fail under
// 'bad'; reads work in both.  Also exercises already-open file handles
// after unlink and after the file is renamed out of the writable subtree.
TEST(AccessTest, Path) {
  string good = get_unique_dir();
  string bad = get_unique_dir();
  string user = "libcephfs_path_test." + stringify(rand());
  struct ceph_mount_info *admin;
  ASSERT_EQ(0, ceph_create(&admin, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(admin, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(admin, NULL));
  ASSERT_EQ(0, ceph_mount(admin, "/"));
  // Pre-create matching trees under both roots.
  ASSERT_EQ(0, ceph_mkdir(admin, good.c_str(), 0755));
  ASSERT_EQ(0, ceph_mkdir(admin, string(good + "/p").c_str(), 0755));
  ASSERT_EQ(0, ceph_mkdir(admin, bad.c_str(), 0755));
  ASSERT_EQ(0, ceph_mkdir(admin, string(bad + "/p").c_str(), 0755));
  int fd = ceph_open(admin, string(good + "/q").c_str(), O_CREAT|O_WRONLY, 0755);
  ceph_close(admin, fd);
  fd = ceph_open(admin, string(bad + "/q").c_str(), O_CREAT|O_WRONLY, 0755);
  ceph_close(admin, fd);
  fd = ceph_open(admin, string(bad + "/z").c_str(), O_CREAT|O_WRONLY, 0755);
  ceph_write(admin, fd, "TEST FAILED", 11, 0);
  ceph_close(admin, fd);

  string key;
  ASSERT_EQ(0, do_mon_command(
      "{\"prefix\": \"auth get-or-create\", \"entity\": \"client." + user + "\", "
      "\"caps\": [\"mon\", \"allow r\", \"osd\", \"allow rwx\", "
      "\"mds\", \"allow r, allow rw path=" + good + "\""
      "], \"format\": \"json\"}", &key));

  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, user.c_str()));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_set(cmount, "key", key.c_str()));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));

  // allowed
  ASSERT_GE(ceph_mkdir(cmount, string(good + "/x").c_str(), 0755), 0);
  ASSERT_GE(ceph_rmdir(cmount, string(good + "/p").c_str()), 0);
  ASSERT_GE(ceph_unlink(cmount, string(good + "/q").c_str()), 0);
  fd = ceph_open(cmount, string(good + "/y").c_str(), O_CREAT|O_WRONLY, 0755);
  ASSERT_GE(fd, 0);
  ceph_write(cmount, fd, "bar", 3, 0);
  ceph_close(cmount, fd);
  ASSERT_GE(ceph_unlink(cmount, string(good + "/y").c_str()), 0);
  ASSERT_GE(ceph_rmdir(cmount, string(good + "/x").c_str()), 0);
  // read-only access under 'bad' is still allowed
  fd = ceph_open(cmount, string(bad + "/z").c_str(), O_RDONLY, 0644);
  ASSERT_GE(fd, 0);
  ceph_close(cmount, fd);

  // not allowed
  ASSERT_LT(ceph_mkdir(cmount, string(bad + "/x").c_str(), 0755), 0);
  ASSERT_LT(ceph_rmdir(cmount, string(bad + "/p").c_str()), 0);
  ASSERT_LT(ceph_unlink(cmount, string(bad + "/q").c_str()), 0);
  fd = ceph_open(cmount, string(bad + "/y").c_str(), O_CREAT|O_WRONLY, 0755);
  ASSERT_LT(fd, 0);

  // unlink open file: writes through the open fd still work
  fd = ceph_open(cmount, string(good + "/unlinkme").c_str(), O_CREAT|O_WRONLY, 0755);
  ceph_unlink(cmount, string(good + "/unlinkme").c_str());
  ASSERT_GE(ceph_write(cmount, fd, "foo", 3, 0), 0);
  ASSERT_GE(ceph_fchmod(cmount, fd, 0777), 0);
  ASSERT_GE(ceph_ftruncate(cmount, fd, 0), 0);
  ASSERT_GE(ceph_fsetxattr(cmount, fd, "user.any", "bar", 3, 0), 0);
  ceph_close(cmount, fd);

  // rename open file out of the writable subtree: writes still work, but
  // metadata changes now fail with EACCES
  fd = ceph_open(cmount, string(good + "/renameme").c_str(), O_CREAT|O_WRONLY, 0755);
  ASSERT_EQ(ceph_rename(admin, string(good + "/renameme").c_str(),
			string(bad + "/asdf").c_str()), 0);
  ASSERT_GE(ceph_write(cmount, fd, "foo", 3, 0), 0);
  ASSERT_GE(ceph_fchmod(cmount, fd, 0777), -CEPHFS_EACCES);
  ASSERT_GE(ceph_ftruncate(cmount, fd, 0), -CEPHFS_EACCES);
  ASSERT_GE(ceph_fsetxattr(cmount, fd, "user.any", "bar", 3, 0), -CEPHFS_EACCES);
  ceph_close(cmount, fd);

  ceph_shutdown(cmount);
  // clean up as admin
  ASSERT_EQ(0, ceph_unlink(admin, string(bad + "/q").c_str()));
  ASSERT_EQ(0, ceph_unlink(admin, string(bad + "/z").c_str()));
  ASSERT_EQ(0, ceph_rmdir(admin, string(bad + "/p").c_str()));
  ASSERT_EQ(0, ceph_unlink(admin, string(bad + "/asdf").c_str()));
  ASSERT_EQ(0, ceph_rmdir(admin, good.c_str()));
  ASSERT_EQ(0, ceph_rmdir(admin, bad.c_str()));
  ceph_shutdown(admin);
}
// An entity with "mds allow r" can read existing files but cannot create
// files or directories anywhere.
TEST(AccessTest, ReadOnly) {
  string dir = get_unique_dir();
  string dir2 = get_unique_dir();
  string user = "libcephfs_readonly_test." + stringify(rand());
  struct ceph_mount_info *admin;
  ASSERT_EQ(0, ceph_create(&admin, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(admin, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(admin, NULL));
  ASSERT_EQ(0, ceph_mount(admin, "/"));
  ASSERT_EQ(0, ceph_mkdir(admin, dir.c_str(), 0755));
  int fd = ceph_open(admin, string(dir + "/out").c_str(), O_CREAT|O_WRONLY, 0755);
  ceph_write(admin, fd, "foo", 3, 0);
  ceph_close(admin,fd);

  string key;
  ASSERT_EQ(0, do_mon_command(
      "{\"prefix\": \"auth get-or-create\", \"entity\": \"client." + user + "\", "
      "\"caps\": [\"mon\", \"allow r\", \"osd\", \"allow rw\", "
      "\"mds\", \"allow r\""
      "], \"format\": \"json\"}", &key));

  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, user.c_str()));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_set(cmount, "key", key.c_str()));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));

  // allowed
  fd = ceph_open(cmount, string(dir + "/out").c_str(), O_RDONLY, 0644);
  ASSERT_GE(fd, 0);
  ceph_close(cmount,fd);

  // not allowed
  fd = ceph_open(cmount, string(dir + "/bar").c_str(), O_CREAT|O_WRONLY, 0755);
  ASSERT_LT(fd, 0);
  ASSERT_LT(ceph_mkdir(cmount, dir2.c_str(), 0755), 0);

  ceph_shutdown(cmount);
  ASSERT_EQ(0, ceph_unlink(admin, string(dir + "/out").c_str()));
  ASSERT_EQ(0, ceph_rmdir(admin, dir.c_str()));
  ceph_shutdown(admin);
}
// UID/GID-restricted MDS caps ("allow rw uid=123 gids=456,789"): the
// mounting client is pinned to uid 123 / gid 456 and classic POSIX
// permission checks (user/group/other bits, plus chown/chgrp rules) are
// verified against directories whose ownership the admin flips around.
TEST(AccessTest, User) {
  string dir = get_unique_dir();
  string user = "libcephfs_user_test." + stringify(rand());

  // admin mount to set up test
  struct ceph_mount_info *admin;
  ASSERT_EQ(0, ceph_create(&admin, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(admin, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(admin, NULL));
  ASSERT_EQ(0, ceph_conf_set(admin, "client_permissions", "0"));
  ASSERT_EQ(0, ceph_mount(admin, "/"));
  ASSERT_EQ(0, ceph_mkdir(admin, dir.c_str(), 0755));

  // create access key
  string key;
  ASSERT_EQ(0, do_mon_command(
      "{\"prefix\": \"auth get-or-create\", \"entity\": \"client." + user + "\", "
      "\"caps\": [\"mon\", \"allow *\", \"osd\", \"allow rw\", "
      "\"mds\", \"allow rw uid=123 gids=456,789\""
      "], \"format\": \"json\"}", &key));

  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, user.c_str()));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_set(cmount, "key", key.c_str()));
  // mounting with default (root) perms is rejected by the uid-pinned cap
  ASSERT_EQ(-CEPHFS_EACCES, ceph_mount(cmount, "/"));
  ASSERT_EQ(0, ceph_init(cmount));
  UserPerm *perms = ceph_userperm_new(123, 456, 0, NULL);
  ASSERT_NE(nullptr, perms);
  ASSERT_EQ(0, ceph_mount_perms_set(cmount, perms));
  ceph_userperm_destroy(perms);
  ASSERT_EQ(0, ceph_conf_set(cmount, "client_permissions", "0"));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));

  // user bits
  ASSERT_EQ(0, ceph_chmod(admin, dir.c_str(), 0700));
  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 123, 456));
  ASSERT_EQ(0, ceph_mkdir(cmount, string(dir + "/u1").c_str(), 0755));
  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 1, 456));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_mkdir(cmount, string(dir + "/no").c_str(), 0755));

  // group bits
  ASSERT_EQ(0, ceph_chmod(admin, dir.c_str(), 0770));
  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 1, 456));
  ASSERT_EQ(0, ceph_mkdir(cmount, string(dir + "/u2").c_str(), 0755));
  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 1, 2));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_mkdir(cmount, string(dir + "/no").c_str(), 0755));

  // user overrides group
  ASSERT_EQ(0, ceph_chmod(admin, dir.c_str(), 0470));
  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 123, 456));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_mkdir(cmount, string(dir + "/no").c_str(), 0755));

  // other
  ASSERT_EQ(0, ceph_chmod(admin, dir.c_str(), 0777));
  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 1, 1));
  ASSERT_EQ(0, ceph_mkdir(cmount, string(dir + "/u3").c_str(), 0755));
  ASSERT_EQ(0, ceph_chmod(admin, dir.c_str(), 0770));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_mkdir(cmount, string(dir + "/no").c_str(), 0755));

  // user and group overrides other
  ASSERT_EQ(0, ceph_chmod(admin, dir.c_str(), 07));
  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 1, 456));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_mkdir(cmount, string(dir + "/no").c_str(), 0755));
  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 123, 1));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_mkdir(cmount, string(dir + "/no").c_str(), 0755));
  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 123, 456));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_mkdir(cmount, string(dir + "/no").c_str(), 0755));

  // chown and chgrp
  ASSERT_EQ(0, ceph_chmod(admin, dir.c_str(), 0700));
  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 123, 456));
  // FIXME: Re-enable these 789 tests once we can set multiple GIDs via libcephfs/config
  // ASSERT_EQ(0, ceph_chown(cmount, dir.c_str(), 123, 789));
  ASSERT_EQ(0, ceph_chown(cmount, dir.c_str(), 123, 456));
  // ASSERT_EQ(0, ceph_chown(cmount, dir.c_str(), -1, 789));
  ASSERT_EQ(0, ceph_chown(cmount, dir.c_str(), -1, 456));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_chown(cmount, dir.c_str(), 123, 1));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_chown(cmount, dir.c_str(), 1, 456));

  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 1, 1));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_chown(cmount, dir.c_str(), 123, 456));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_chown(cmount, dir.c_str(), 123, -1));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_chown(cmount, dir.c_str(), -1, 456));

  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 1, 456));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_chown(cmount, dir.c_str(), 123, 456));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_chown(cmount, dir.c_str(), 123, -1));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_chown(cmount, dir.c_str(), -1, 456));

  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 123, 1));
  ASSERT_EQ(0, ceph_chown(cmount, dir.c_str(), -1, 456));
  // ASSERT_EQ(0, ceph_chown(cmount, dir.c_str(), 123, 789));
  ceph_shutdown(cmount);

  // clean up
  ASSERT_EQ(0, ceph_rmdir(admin, string(dir + "/u1").c_str()));
  ASSERT_EQ(0, ceph_rmdir(admin, string(dir + "/u2").c_str()));
  ASSERT_EQ(0, ceph_rmdir(admin, string(dir + "/u3").c_str()));
  ASSERT_EQ(0, ceph_rmdir(admin, dir.c_str()));
  ceph_shutdown(admin);
}
// Make the filesystem root world-writable (mode 0777) via a short-lived
// admin mount with client-side permission checks disabled.  Returns 0 on
// success or a negative error from create/mount/chmod.
static int update_root_mode()
{
  struct ceph_mount_info *admin;
  int r = ceph_create(&admin, NULL);
  if (r < 0)
    return r;
  ceph_conf_read_file(admin, NULL);
  ceph_conf_parse_env(admin, NULL);
  ceph_conf_set(admin, "client_permissions", "false");
  r = ceph_mount(admin, "/");
  if (r >= 0)
    r = ceph_chmod(admin, "/", 0777);
  ceph_shutdown(admin);
  return r;
}
// Test entry point: relax the root mode, seed rand(), connect the shared
// admin RADOS handle used by do_mon_command(), run the suite, tear down.
int main(int argc, char **argv)
{
  // Root must be world-writable so per-test identities can create entries.
  int r = update_root_mode();
  if (r < 0)
    exit(1);

  ::testing::InitGoogleTest(&argc, argv);

  srand(getpid());

  // Bring up the file-scope 'cluster' handle the tests share.
  r = rados_create(&cluster, NULL);
  if (r < 0)
    exit(1);

  r = rados_conf_read_file(cluster, NULL);
  if (r < 0)
    exit(1);
  rados_conf_parse_env(cluster, NULL);
  r = rados_connect(cluster);
  if (r < 0)
    exit(1);

  r = RUN_ALL_TESTS();

  rados_shutdown(cluster);

  return r;
}
| 14,016 | 34.0425 | 88 | cc |
null | ceph-main/src/test/libcephfs/acl.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/types.h"
#include "gtest/gtest.h"
#include "include/cephfs/libcephfs.h"
#include "include/fs_types.h"
#include "include/ceph_fs.h"
#include "client/posix_acl.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#ifdef __linux__
#include <sys/xattr.h>
#endif
// Size in bytes of an ACL xattr holding 'count' entries: one header plus a
// fixed-size entry per ACE.
static size_t acl_ea_size(int count)
{
  return sizeof(acl_ea_header) + sizeof(acl_ea_entry) * count;
}
// Number of ACL entries encoded in a buffer of 'size' bytes, or -1 when
// the buffer is smaller than the header or not a whole number of entries.
static int acl_ea_count(size_t size)
{
  if (size < sizeof(acl_ea_header))
    return -1;
  const size_t body = size - sizeof(acl_ea_header);
  if (body % sizeof(acl_ea_entry))
    return -1;
  return body / sizeof(acl_ea_entry);
}
// Verify that an encoded ACL agrees with a file mode: owner bits must match
// ACL_USER_OBJ, other bits ACL_OTHER, and group bits the mask entry (or the
// ACL_GROUP_OBJ entry when no mask is present).  Returns 0 when consistent,
// -CEPHFS_EINVAL on a mismatch, -CEPHFS_EIO on a malformed ACL.
static int check_acl_and_mode(const void *buf, size_t size, mode_t mode)
{
  const acl_ea_entry *group_entry = NULL, *mask_entry = NULL;
  const acl_ea_header *header = reinterpret_cast<const acl_ea_header*>(buf);
  const acl_ea_entry *entry = header->a_entries;
  int count = (size - sizeof(*header)) / sizeof(*entry);
  for (int i = 0; i < count; ++i) {
    __u16 tag = entry->e_tag;
    __u16 perm = entry->e_perm;
    switch(tag) {
    case ACL_USER_OBJ:
      // owner bits of the mode must equal the owner ACE
      if (perm != ((mode >> 6) & 7))
	return -CEPHFS_EINVAL;
      break;
    case ACL_USER:
    case ACL_GROUP:
      // named user/group entries are not reflected in the mode
      break;
    case ACL_GROUP_OBJ:
      group_entry = entry;  // checked after the loop
      break;
    case ACL_OTHER:
      if (perm != (mode & 7))
	return -CEPHFS_EINVAL;
      break;
    case ACL_MASK:
      mask_entry = entry;  // takes precedence over ACL_GROUP_OBJ below
      break;
    default:
      return -CEPHFS_EIO;
    }
    ++entry;
  }

  // With a mask entry the mode's group bits mirror the mask; otherwise
  // they mirror the group-owner entry (which must then exist).
  if (mask_entry) {
    __u16 perm = mask_entry->e_perm;
    if (perm != ((mode >> 3) & 7))
      return -CEPHFS_EINVAL;
  } else {
    if (!group_entry)
      return -CEPHFS_EIO;
    __u16 perm = group_entry->e_perm;
    if (perm != ((mode >> 3) & 7))
      return -CEPHFS_EINVAL;
  }
  return 0;
}
static int generate_test_acl(void *buf, size_t size, mode_t mode)
{
if (acl_ea_count(size) != 5)
return -1;
acl_ea_header *header = reinterpret_cast<acl_ea_header*>(buf);
header->a_version = (__u32)ACL_EA_VERSION;
acl_ea_entry *entry = header->a_entries;
entry->e_tag = ACL_USER_OBJ;
entry->e_perm = (mode >> 6) & 7;
++entry;
entry->e_tag = ACL_USER;
entry->e_perm = 7;
entry->e_id = getuid();
++entry;
entry->e_tag = ACL_GROUP_OBJ;
entry->e_perm = (mode >> 3) & 7;
++entry;
entry->e_tag = ACL_MASK;
entry->e_perm = 7;
++entry;
entry->e_tag = ACL_OTHER;
entry->e_perm = mode & 7;
return 0;
}
static int generate_empty_acl(void *buf, size_t size, mode_t mode)
{
if (acl_ea_count(size) != 3)
return -1;
acl_ea_header *header = reinterpret_cast<acl_ea_header*>(buf);
header->a_version = (__u32)ACL_EA_VERSION;
acl_ea_entry *entry = header->a_entries;
entry->e_tag = ACL_USER_OBJ;
entry->e_perm = (mode >> 6) & 7;
++entry;
entry->e_tag = ACL_GROUP_OBJ;
entry->e_perm = (mode >> 3) & 7;
++entry;
entry->e_tag = ACL_OTHER;
entry->e_perm = mode & 7;
return 0;
}
// Setting an access ACL updates the file mode, permits a previously denied
// open, and an ACL equivalent to the plain mode is dropped entirely.
TEST(ACL, SetACL) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  ASSERT_EQ(0, ceph_conf_set(cmount, "client_acl_type", "posix_acl"));
  ASSERT_EQ(0, ceph_conf_set(cmount, "client_permissions", "0"));

  char test_file[256];
  sprintf(test_file, "file1_setacl_%d", getpid());

  int fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR, 0600);
  ASSERT_GT(fd, 0);
  // change ownership to nobody -- we assume nobody exists and id is always 65534
  ASSERT_EQ(ceph_fchown(cmount, fd, 65534, 65534), 0);

  ASSERT_EQ(0, ceph_conf_set(cmount, "client_permissions", "1"));
  // "nobody" will be ignored on Windows
#ifndef _WIN32
  // with permission checks on, the nobody-owned 0600 file is unreadable
  ASSERT_EQ(ceph_open(cmount, test_file, O_RDWR, 0), -CEPHFS_EACCES);
#endif
  ASSERT_EQ(0, ceph_conf_set(cmount, "client_permissions", "0"));

  size_t acl_buf_size = acl_ea_size(5);
  void *acl_buf = malloc(acl_buf_size);
  ASSERT_EQ(generate_test_acl(acl_buf, acl_buf_size, 0750), 0);

  // can't set default acl for non-directory
  ASSERT_EQ(ceph_fsetxattr(cmount, fd, ACL_EA_DEFAULT, acl_buf, acl_buf_size, 0), -CEPHFS_EACCES);
  ASSERT_EQ(ceph_fsetxattr(cmount, fd, ACL_EA_ACCESS, acl_buf, acl_buf_size, 0), 0);

  // the named-user rwx entry now allows reopening the file
  int tmpfd = ceph_open(cmount, test_file, O_RDWR, 0);
  ASSERT_GT(tmpfd, 0);
  ceph_close(cmount, tmpfd);

  struct ceph_statx stx;
  ASSERT_EQ(ceph_fstatx(cmount, fd, &stx, CEPH_STATX_MODE, 0), 0);
  // mode was modified according to ACL
  ASSERT_EQ(stx.stx_mode & 0777u, 0770u);
  ASSERT_EQ(check_acl_and_mode(acl_buf, acl_buf_size, stx.stx_mode), 0);

  acl_buf_size = acl_ea_size(3);
  // setting ACL that is equivalent to file mode
  ASSERT_EQ(generate_empty_acl(acl_buf, acl_buf_size, 0600), 0);
  ASSERT_EQ(ceph_fsetxattr(cmount, fd, ACL_EA_ACCESS, acl_buf, acl_buf_size, 0), 0);
  // ACL was deleted
  ASSERT_EQ(ceph_fgetxattr(cmount, fd, ACL_EA_ACCESS, NULL, 0), -CEPHFS_ENODATA);

  ASSERT_EQ(ceph_fstatx(cmount, fd, &stx, CEPH_STATX_MODE, 0), 0);
  // mode was modified according to ACL
  ASSERT_EQ(stx.stx_mode & 0777u, 0600u);

  free(acl_buf);
  ceph_close(cmount, fd);
  ceph_shutdown(cmount);
}
// chmod on a file carrying an ACL keeps the two in sync in both
// directions: setting the ACL updates the mode, and a later chmod
// rewrites the ACL to match.
TEST(ACL, Chmod) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  ASSERT_EQ(0, ceph_conf_set(cmount, "client_acl_type", "posix_acl"));

  char test_file[256];
  sprintf(test_file, "file1_acl_chmod_%d", getpid());

  int fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR, 0600);
  ASSERT_GT(fd, 0);

  int acl_buf_size = acl_ea_size(5);
  void *acl_buf = malloc(acl_buf_size);
  ASSERT_EQ(generate_test_acl(acl_buf, acl_buf_size, 0775), 0);
  ASSERT_EQ(ceph_fsetxattr(cmount, fd, ACL_EA_ACCESS, acl_buf, acl_buf_size, 0), 0);

  struct ceph_statx stx;
  ASSERT_EQ(ceph_fstatx(cmount, fd, &stx, CEPH_STATX_MODE, 0), 0);
  // mode was updated according to ACL
  ASSERT_EQ(stx.stx_mode & 0777u, 0775u);

  // change mode
  ASSERT_EQ(ceph_fchmod(cmount, fd, 0640), 0);

  ASSERT_EQ(ceph_fstatx(cmount, fd, &stx, CEPH_STATX_MODE, 0), 0);
  ASSERT_EQ(stx.stx_mode & 0777u, 0640u);

  // ACL was updated according to mode
  ASSERT_EQ(ceph_fgetxattr(cmount, fd, ACL_EA_ACCESS, acl_buf, acl_buf_size), acl_buf_size);
  ASSERT_EQ(check_acl_and_mode(acl_buf, acl_buf_size, stx.stx_mode), 0);

  free(acl_buf);
  ceph_close(cmount, fd);
  ceph_shutdown(cmount);
}
// A default ACL on a directory is inherited: a child directory gets both
// the default and a derived access ACL; a child file gets only the access
// ACL.  Resulting modes are clamped by the inherited ACL's other-perm.
TEST(ACL, DefaultACL) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  ASSERT_EQ(0, ceph_conf_set(cmount, "client_acl_type", "posix_acl"));

  int acl_buf_size = acl_ea_size(5);
  void *acl1_buf = malloc(acl_buf_size);
  void *acl2_buf = malloc(acl_buf_size);

  ASSERT_EQ(generate_test_acl(acl1_buf, acl_buf_size, 0750), 0);

  char test_dir1[256];
  sprintf(test_dir1, "dir1_acl_default_%d", getpid());
  ASSERT_EQ(ceph_mkdir(cmount, test_dir1, 0750), 0);

  // set default acl
  ASSERT_EQ(ceph_setxattr(cmount, test_dir1, ACL_EA_DEFAULT, acl1_buf, acl_buf_size, 0), 0);

  char test_dir2[262];
  sprintf(test_dir2, "%s/dir2", test_dir1);
  ASSERT_EQ(ceph_mkdir(cmount, test_dir2, 0755), 0);

  // inherit default acl
  ASSERT_EQ(ceph_getxattr(cmount, test_dir2, ACL_EA_DEFAULT, acl2_buf, acl_buf_size), acl_buf_size);
  ASSERT_EQ(memcmp(acl1_buf, acl2_buf, acl_buf_size), 0);

  // mode and ACL are updated
  ASSERT_EQ(ceph_getxattr(cmount, test_dir2, ACL_EA_ACCESS, acl2_buf, acl_buf_size), acl_buf_size);
  {
    struct ceph_statx stx;
    ASSERT_EQ(ceph_statx(cmount, test_dir2, &stx, CEPH_STATX_MODE, 0), 0);
    // other bits of mode &= acl other perm
    ASSERT_EQ(stx.stx_mode & 0777u, 0750u);
    ASSERT_EQ(check_acl_and_mode(acl2_buf, acl_buf_size, stx.stx_mode), 0);
  }

  char test_file1[262];
  sprintf(test_file1, "%s/file1", test_dir1);
  int fd = ceph_open(cmount, test_file1, O_CREAT|O_RDWR, 0666);
  ASSERT_GT(fd, 0);

  // no default acl
  ASSERT_EQ(ceph_fgetxattr(cmount, fd, ACL_EA_DEFAULT, NULL, 0), -CEPHFS_ENODATA);

  // mode and ACL are updated
  ASSERT_EQ(ceph_fgetxattr(cmount, fd, ACL_EA_ACCESS, acl2_buf, acl_buf_size), acl_buf_size);
  {
    struct ceph_statx stx;
    ASSERT_EQ(ceph_statx(cmount, test_file1, &stx, CEPH_STATX_MODE, 0), 0);
    // other bits of mode &= acl other perm
    ASSERT_EQ(stx.stx_mode & 0777u, 0660u);
    ASSERT_EQ(check_acl_and_mode(acl2_buf, acl_buf_size, stx.stx_mode), 0);
  }

  free(acl1_buf);
  free(acl2_buf);

  ASSERT_EQ(ceph_unlink(cmount, test_file1), 0);
  ASSERT_EQ(ceph_rmdir(cmount, test_dir2), 0);
  ASSERT_EQ(ceph_rmdir(cmount, test_dir1), 0);
  ceph_close(cmount, fd);
  ceph_shutdown(cmount);
}
// With client_acl_type unset, every ACL get/set returns -EOPNOTSUPP.
TEST(ACL, Disabled) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  ASSERT_EQ(0, ceph_conf_set(cmount, "client_acl_type", ""));

  size_t acl_buf_size = acl_ea_size(3);
  void *acl_buf = malloc(acl_buf_size);
  ASSERT_EQ(generate_empty_acl(acl_buf, acl_buf_size, 0755), 0);

  char test_dir[256];
  sprintf(test_dir, "dir1_acl_disabled_%d", getpid());
  ASSERT_EQ(ceph_mkdir(cmount, test_dir, 0750), 0);

  ASSERT_EQ(ceph_setxattr(cmount, test_dir, ACL_EA_DEFAULT, acl_buf, acl_buf_size, 0), -CEPHFS_EOPNOTSUPP);
  ASSERT_EQ(ceph_setxattr(cmount, test_dir, ACL_EA_ACCESS, acl_buf, acl_buf_size, 0), -CEPHFS_EOPNOTSUPP);
  ASSERT_EQ(ceph_getxattr(cmount, test_dir, ACL_EA_DEFAULT, acl_buf, acl_buf_size), -CEPHFS_EOPNOTSUPP);
  ASSERT_EQ(ceph_getxattr(cmount, test_dir, ACL_EA_ACCESS, acl_buf, acl_buf_size), -CEPHFS_EOPNOTSUPP);

  free(acl_buf);
  ceph_shutdown(cmount);
}
// The virtual .snap directory exposes the same default and access ACLs as
// the directory it belongs to.
TEST(ACL, SnapdirACL) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  ASSERT_EQ(0, ceph_conf_set(cmount, "client_acl_type", "posix_acl"));

  int acl_buf_size = acl_ea_size(5);
  void *acl1_buf = malloc(acl_buf_size);
  void *acl2_buf = malloc(acl_buf_size);
  void *acl3_buf = malloc(acl_buf_size);

  ASSERT_EQ(generate_test_acl(acl1_buf, acl_buf_size, 0750), 0);

  char test_dir1[256];
  sprintf(test_dir1, "dir1_acl_default_%d", getpid());
  ASSERT_EQ(ceph_mkdir(cmount, test_dir1, 0750), 0);

  // set default acl
  ASSERT_EQ(ceph_setxattr(cmount, test_dir1, ACL_EA_DEFAULT, acl1_buf, acl_buf_size, 0), 0);

  char test_dir2[262];
  sprintf(test_dir2, "%s/dir2", test_dir1);
  ASSERT_EQ(ceph_mkdir(cmount, test_dir2, 0755), 0);

  // inherit default acl
  ASSERT_EQ(ceph_getxattr(cmount, test_dir2, ACL_EA_DEFAULT, acl2_buf, acl_buf_size), acl_buf_size);
  ASSERT_EQ(memcmp(acl1_buf, acl2_buf, acl_buf_size), 0);

  char test_dir2_snapdir[512];
  sprintf(test_dir2_snapdir, "%s/dir2/.snap", test_dir1);

  // .snap reports the same default acl as its directory
  ASSERT_EQ(ceph_getxattr(cmount, test_dir2_snapdir, ACL_EA_DEFAULT, acl3_buf, acl_buf_size), acl_buf_size);
  ASSERT_EQ(memcmp(acl2_buf, acl3_buf, acl_buf_size), 0);

  // ... and the same access acl
  memset(acl2_buf, 0, acl_buf_size);
  memset(acl3_buf, 0, acl_buf_size);

  ASSERT_EQ(ceph_getxattr(cmount, test_dir2, ACL_EA_ACCESS, acl2_buf, acl_buf_size), acl_buf_size);
  ASSERT_EQ(ceph_getxattr(cmount, test_dir2_snapdir, ACL_EA_ACCESS, acl3_buf, acl_buf_size), acl_buf_size);
  ASSERT_EQ(memcmp(acl2_buf, acl3_buf, acl_buf_size), 0);

  free(acl1_buf);
  free(acl2_buf);
  free(acl3_buf);

  ASSERT_EQ(ceph_rmdir(cmount, test_dir2), 0);
  ASSERT_EQ(ceph_rmdir(cmount, test_dir1), 0);
  ceph_shutdown(cmount);
}
| 11,920 | 31.394022 | 108 | cc |
null | ceph-main/src/test/libcephfs/caps.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/int_types.h"
#include "gtest/gtest.h"
#include "include/compat.h"
#include "include/ceph_fs.h"
#include "include/cephfs/libcephfs.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#ifdef __linux__
#include <sys/xattr.h>
#endif
#include <signal.h>
// Exercise cap state around freshly created, zero-length files: creating
// grants Fx/Fw/Fb, the caps survive close and a stat, and a later
// read-only open/read (with an injected client tick delay) still holds
// the expected read-side caps.
TEST(Caps, ReadZero) {

  int mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));

  int i = 0;
  for(; i < 30; ++i) {

    // create an empty file; the creator gets exclusive/write/buffer caps
    char c_path[1024];
    sprintf(c_path, "/caps_rzfile_%d_%d", mypid, i);
    int fd = ceph_open(cmount, c_path, O_CREAT|O_TRUNC|O_WRONLY, 0644);
    ASSERT_LT(0, fd);

    int expect = CEPH_CAP_FILE_EXCL | CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER;
    int caps = ceph_debug_get_fd_caps(cmount, fd);

    ASSERT_EQ(expect, caps & expect);
    ASSERT_EQ(0, ceph_close(cmount, fd));

    // caps are retained after close
    caps = ceph_debug_get_file_caps(cmount, c_path);
    ASSERT_EQ(expect, caps & expect);

    // write a second (non-empty) file
    char cw_path[1024];
    sprintf(cw_path, "/caps_wzfile_%d_%d", mypid, i);
    int wfd = ceph_open(cmount, cw_path, O_CREAT|O_TRUNC|O_WRONLY, 0644);
    ASSERT_LT(0, wfd);

    // NOTE(review): wbuf is intentionally(?) uninitialized — only the size
    // of the write matters to this test.
    char wbuf[4096];
    ASSERT_EQ(4096, ceph_write(cmount, wfd, wbuf, 4096, 0));
    ASSERT_EQ(0, ceph_close(cmount, wfd));

    // stat of the empty file does not cost us the caps
    struct ceph_statx stx;
    ASSERT_EQ(0, ceph_statx(cmount, c_path, &stx, CEPH_STATX_MTIME, 0));

    caps = ceph_debug_get_file_caps(cmount, c_path);
    ASSERT_EQ(expect, caps & expect);
  }

  // delay the client tick to stress cap retention on the read side
  ASSERT_EQ(0, ceph_conf_set(cmount, "client_debug_inject_tick_delay", "20"));

  for(i = 0; i < 30; ++i) {

    char c_path[1024];
    sprintf(c_path, "/caps_rzfile_%d_%d", mypid, i);

    int fd = ceph_open(cmount, c_path, O_RDONLY, 0);
    ASSERT_LT(0, fd);
    char buf[256];

    // NOTE(review): CEPH_STAT_CAP_SIZE is a statx mask bit, not a
    // CEPH_CAP_FILE_* flag — confirm it is the intended constant here.
    int expect = CEPH_CAP_FILE_RD | CEPH_STAT_CAP_SIZE | CEPH_CAP_FILE_CACHE;
    int caps = ceph_debug_get_fd_caps(cmount, fd);
    ASSERT_EQ(expect, caps & expect);
    // reading the empty file returns 0 bytes and keeps the caps
    ASSERT_EQ(0, ceph_read(cmount, fd, buf, 256, 0));

    caps = ceph_debug_get_fd_caps(cmount, fd);
    ASSERT_EQ(expect, caps & expect);
    ASSERT_EQ(0, ceph_close(cmount, fd));

  }

  ceph_shutdown(cmount);
}
| 2,644 | 26.552083 | 78 | cc |
null | ceph-main/src/test/libcephfs/ceph_pthread_self.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_LIBCEPHFS_PTHREAD_SELF
#define CEPH_TEST_LIBCEPHFS_PTHREAD_SELF
#include <pthread.h>
#include <type_traits>
/*
* There is a difference between libc shipped with FreeBSD and
* glibc shipped with GNU/Linux for the return type of pthread_self().
*
* Introduced a conversion function in include/compat.h
* (uint64_t)ceph_pthread_self()
*
* libc returns an opague pthread_t that is not default convertable
* to a uint64_t, which is what gtest expects.
* And tests using gtest will not compile because of this difference.
*
*/
/* Convert the opaque pthread_t to a uint64_t for gtest comparisons.
 * The helper is a template so the branch not taken is never instantiated:
 * in a non-template function both arms of `if constexpr` are still
 * semantically checked, and the original single
 * `static_cast<uint64_t>(me)` is ill-formed exactly on the pointer-typed
 * pthread_t platforms (e.g. FreeBSD libc) the static_assert admits. */
template <typename PthreadId>
static uint64_t ceph_pthread_id_to_u64(PthreadId id) {
  if constexpr (std::is_pointer_v<PthreadId>) {
    // pointer pthread_t: take the pointer's bit value
    return reinterpret_cast<uint64_t>(id);
  } else {
    // arithmetic pthread_t (glibc): plain integral conversion
    return static_cast<uint64_t>(id);
  }
}

static uint64_t ceph_pthread_self() {
  auto me = pthread_self();
  static_assert(std::is_convertible_v<decltype(me), uint64_t> ||
                std::is_pointer_v<decltype(me)>,
                "we need to use pthread_self() for the owner parameter");
  return ceph_pthread_id_to_u64(me);
}
#endif
| 958 | 28.96875 | 73 | h |
null | ceph-main/src/test/libcephfs/deleg.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Tests for Ceph delegation handling
*
* (c) 2017, Jeff Layton <jlayton@redhat.com>
*/
#include "gtest/gtest.h"
#include "include/compat.h"
#include "include/cephfs/libcephfs.h"
#include "include/fs_types.h"
#include "include/stat.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <sys/uio.h>
#ifdef __linux__
#include <limits.h>
#include <sys/xattr.h>
#endif
#include <map>
#include <vector>
#include <thread>
#include <atomic>
#include "include/ceph_assert.h"
/* in ms -- 1 minute */
#define MAX_WAIT (60 * 1000)
// Poll 'recalled' every millisecond until it flips true, failing the test
// after MAX_WAIT attempts (~1 minute).
static void wait_for_atomic_bool(std::atomic_bool &recalled)
{
  for (int tries = 0; !recalled.load(); ++tries) {
    ASSERT_LT(tries, MAX_WAIT);
    usleep(1000);
  }
}
// Acquire/release a delegation, retrying while the MDS reports -EAGAIN.
// Each attempt is followed by a 10ms pause; gives up after ~10s.
static int ceph_ll_delegation_wait(struct ceph_mount_info *cmount, Fh *fh,
				   unsigned cmd, ceph_deleg_cb_t cb, void *priv)
{
  int ret, attempts = 0;
  for (;;) {
    ret = ceph_ll_delegation(cmount, fh, cmd, cb, priv);
    usleep(10000);
    if (ret != -CEPHFS_EAGAIN || attempts++ >= 1000)
      break;
  }
  return ret;
}
// Set the delegation timeout to just under the session's cap-return
// timeout, so a recalled delegation can't outlive the session.
static int set_default_deleg_timeout(struct ceph_mount_info *cmount)
{
  return ceph_set_deleg_timeout(cmount,
				ceph_get_cap_return_timeout(cmount) - 1);
}
/* Delegation recall callback: record the recall by setting the
 * std::atomic_bool that 'priv' points to. */
static void dummy_deleg_cb(Fh *fh, void *priv)
{
  auto flag = static_cast<std::atomic_bool *>(priv);
  flag->store(true);
}
/*
 * Thread body: open 'filename' with 'flags', retrying while the call
 * returns -CEPHFS_EAGAIN (i.e. while a conflicting delegation is being
 * recalled), then set *opened and close the file.
 *
 * If 'cmount' is NULL a private mount is created for the duration (and shut
 * down on exit) so the conflicting open comes from a separate client;
 * otherwise the caller's mount is reused.
 */
static void open_breaker_func(struct ceph_mount_info *cmount, const char *filename, int flags, std::atomic_bool *opened)
{
  bool do_shutdown = false;

  if (!cmount) {
    ASSERT_EQ(ceph_create(&cmount, NULL), 0);
    ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
    ASSERT_EQ(ceph_conf_parse_env(cmount, NULL), 0);
    ASSERT_EQ(ceph_mount(cmount, "/"), 0);
    ASSERT_EQ(set_default_deleg_timeout(cmount), 0);
    do_shutdown = true;
  }

  Inode *root, *file;
  ASSERT_EQ(ceph_ll_lookup_root(cmount, &root), 0);

  Fh *fh;
  struct ceph_statx stx;
  UserPerm *perms = ceph_mount_perms(cmount);

  ASSERT_EQ(ceph_ll_lookup(cmount, root, filename, &file, &stx, CEPH_STATX_ALL_STATS, 0, perms), 0);
  int ret, i = 0;
  for (;;) {
    // NOTE(review): the getattr on each iteration appears intended to poke
    // cap state while we wait for the recall -- confirm before removing.
    ASSERT_EQ(ceph_ll_getattr(cmount, file, &stx, CEPH_STATX_ALL_STATS, 0, perms), 0);
    ret = ceph_ll_open(cmount, file, flags, &fh, perms);
    if (ret != -CEPHFS_EAGAIN)
      break;
    ASSERT_LT(i++, MAX_WAIT);  // bound the retry loop (1ms naps)
    usleep(1000);
  }
  ASSERT_EQ(ret, 0);
  opened->store(true);
  ASSERT_EQ(ceph_ll_close(cmount, fh), 0);

  if (do_shutdown)
    ceph_shutdown(cmount);
}
/* Which namespace-changing operation namespace_breaker_func() should drive. */
enum {
  DelegTestLink,    // hardlink oldname to newname
  DelegTestRename,  // rename oldname to newname
  DelegTestUnlink   // unlink oldname (newname unused)
};
/*
 * Thread body: perform a namespace-changing operation (see the DelegTest*
 * enum) against 'oldname', retrying while it returns -CEPHFS_EAGAIN, i.e.
 * while a delegation recall against that file is outstanding.
 *
 * If 'cmount' is NULL a private mount is created (and shut down on exit) so
 * the operation comes from a separate client.
 */
static void namespace_breaker_func(struct ceph_mount_info *cmount, int cmd, const char *oldname, const char *newname)
{
  bool do_shutdown = false;

  if (!cmount) {
    ASSERT_EQ(ceph_create(&cmount, NULL), 0);
    ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
    ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
    ASSERT_EQ(ceph_mount(cmount, "/"), 0);
    ASSERT_EQ(set_default_deleg_timeout(cmount), 0);
    do_shutdown = true;
  }

  Inode *root, *file = nullptr;
  ASSERT_EQ(ceph_ll_lookup_root(cmount, &root), 0);

  struct ceph_statx stx;
  UserPerm *perms = ceph_mount_perms(cmount);
  int ret, i = 0;
  for (;;) {
    switch (cmd) {
    case DelegTestRename:
      ret = ceph_ll_rename(cmount, root, oldname, root, newname, perms);
      break;
    case DelegTestLink:
      if (!file) {
	// Look up the link target once, on first iteration only
	ASSERT_EQ(ceph_ll_lookup(cmount, root, oldname, &file, &stx, 0, 0, perms), 0);
      }
      ret = ceph_ll_link(cmount, file, root, newname, perms);
      break;
    case DelegTestUnlink:
      ret = ceph_ll_unlink(cmount, root, oldname, perms);
      break;
    default:
      // Bad command
      ceph_abort();
    }
    if (ret != -CEPHFS_EAGAIN)
      break;
    ASSERT_LT(i++, MAX_WAIT);  // bound the retry loop (1ms naps)
    usleep(1000);
  }
  ASSERT_EQ(ret, 0);

  if (do_shutdown)
    ceph_shutdown(cmount);
}
/*
 * Core delegation behavior checks.
 *
 * 'cmount' is the client that takes delegations; 'tcmount' is the client
 * used by the breaker threads (NULL => each breaker creates a private
 * mount, so the conflict is cross-client).
 *
 * Scenarios, in order: r/w open breaks a r/w delegation; r/o open breaks a
 * r/w delegation; r/o open coexists with a r/o delegation; r/w open breaks
 * a r/o delegation; link, rename and unlink each break a r/w delegation.
 * In every "break" case the breaker must stay blocked (opened == false)
 * until this client explicitly returns the delegation.
 */
static void simple_deleg_test(struct ceph_mount_info *cmount, struct ceph_mount_info *tcmount)
{
  Inode *root, *file;
  ASSERT_EQ(ceph_ll_lookup_root(cmount, &root), 0);

  char filename[32];
  Fh *fh;
  struct ceph_statx stx;
  UserPerm *perms = ceph_mount_perms(cmount);
  std::atomic_bool recalled(false);
  std::atomic_bool opened(false);

  // ensure r/w open breaks a r/w delegation
  sprintf(filename, "deleg.rwrw.%x", getpid());
  ASSERT_EQ(ceph_ll_create(cmount, root, filename, 0666,
			   O_RDWR|O_CREAT|O_EXCL, &file, &fh, &stx, 0, 0, perms), 0);
  ASSERT_EQ(ceph_ll_delegation_wait(cmount, fh, CEPH_DELEGATION_WR, dummy_deleg_cb, &recalled), 0);
  std::thread breaker1(open_breaker_func, tcmount, filename, O_RDWR, &opened);
  wait_for_atomic_bool(recalled);
  ASSERT_EQ(opened.load(), false);  // breaker must still be blocked
  ASSERT_EQ(ceph_ll_delegation(cmount, fh, CEPH_DELEGATION_NONE, dummy_deleg_cb, &recalled), 0);
  breaker1.join();
  ASSERT_EQ(ceph_ll_close(cmount, fh), 0);
  ASSERT_EQ(ceph_ll_unlink(cmount, root, filename, perms), 0);

  // ensure r/o open breaks a r/w delegation
  recalled.store(false);
  opened.store(false);
  sprintf(filename, "deleg.rorw.%x", getpid());
  ASSERT_EQ(ceph_ll_create(cmount, root, filename, 0666,
			   O_RDWR|O_CREAT|O_EXCL, &file, &fh, &stx, 0, 0, perms), 0);
  ASSERT_EQ(ceph_ll_delegation_wait(cmount, fh, CEPH_DELEGATION_WR, dummy_deleg_cb, &recalled), 0);
  std::thread breaker2(open_breaker_func, tcmount, filename, O_RDONLY, &opened);
  wait_for_atomic_bool(recalled);
  ASSERT_EQ(opened.load(), false);
  ASSERT_EQ(ceph_ll_delegation(cmount, fh, CEPH_DELEGATION_NONE, dummy_deleg_cb, &recalled), 0);
  breaker2.join();
  ASSERT_EQ(ceph_ll_close(cmount, fh), 0);
  ASSERT_EQ(ceph_ll_unlink(cmount, root, filename, perms), 0);

  // ensure r/o open does not break a r/o delegation
  sprintf(filename, "deleg.rwro.%x", getpid());
  ASSERT_EQ(ceph_ll_create(cmount, root, filename, 0666,
			   O_RDONLY|O_CREAT|O_EXCL, &file, &fh, &stx, 0, 0, perms), 0);
  recalled.store(false);
  ASSERT_EQ(ceph_ll_delegation_wait(cmount, fh, CEPH_DELEGATION_RD, dummy_deleg_cb, &recalled), 0);
  std::thread breaker3(open_breaker_func, tcmount, filename, O_RDONLY, &opened);
  breaker3.join();
  ASSERT_EQ(recalled.load(), false);  // no recall for a compatible open

  // ensure that r/w open breaks r/o delegation
  opened.store(false);
  std::thread breaker4(open_breaker_func, tcmount, filename, O_WRONLY, &opened);
  wait_for_atomic_bool(recalled);
  // NOTE(review): this extra nap presumably gives the breaker a chance to
  // (incorrectly) complete its open before we check it is still blocked.
  usleep(1000);
  ASSERT_EQ(opened.load(), false);
  ASSERT_EQ(ceph_ll_delegation(cmount, fh, CEPH_DELEGATION_NONE, dummy_deleg_cb, &recalled), 0);
  breaker4.join();
  ASSERT_EQ(ceph_ll_close(cmount, fh), 0);
  ASSERT_EQ(ceph_ll_unlink(cmount, root, filename, perms), 0);

  // ensure hardlinking breaks a r/w delegation
  recalled.store(false);
  char newname[32];
  sprintf(filename, "deleg.old.%x", getpid());
  sprintf(newname, "deleg.new.%x", getpid());
  ASSERT_EQ(ceph_ll_create(cmount, root, filename, 0666,
			   O_RDWR|O_CREAT|O_EXCL, &file, &fh, &stx, 0, 0, perms), 0);
  ASSERT_EQ(ceph_ll_delegation_wait(cmount, fh, CEPH_DELEGATION_WR, dummy_deleg_cb, &recalled), 0);
  std::thread breaker5(namespace_breaker_func, tcmount, DelegTestLink, filename, newname);
  wait_for_atomic_bool(recalled);
  ASSERT_EQ(ceph_ll_delegation(cmount, fh, CEPH_DELEGATION_NONE, dummy_deleg_cb, &recalled), 0);
  breaker5.join();
  ASSERT_EQ(ceph_ll_close(cmount, fh), 0);
  ASSERT_EQ(ceph_ll_unlink(cmount, root, filename, perms), 0);
  ASSERT_EQ(ceph_ll_unlink(cmount, root, newname, perms), 0);

  // ensure renaming breaks a r/w delegation
  recalled.store(false);
  ASSERT_EQ(ceph_ll_create(cmount, root, filename, 0666,
			   O_RDWR|O_CREAT|O_EXCL, &file, &fh, &stx, 0, 0, perms), 0);
  ASSERT_EQ(ceph_ll_delegation_wait(cmount, fh, CEPH_DELEGATION_WR, dummy_deleg_cb, &recalled), 0);
  std::thread breaker6(namespace_breaker_func, tcmount, DelegTestRename, filename, newname);
  wait_for_atomic_bool(recalled);
  ASSERT_EQ(ceph_ll_delegation(cmount, fh, CEPH_DELEGATION_NONE, dummy_deleg_cb, &recalled), 0);
  breaker6.join();
  ASSERT_EQ(ceph_ll_close(cmount, fh), 0);
  ASSERT_EQ(ceph_ll_unlink(cmount, root, newname, perms), 0);  // file now has newname

  // ensure unlinking breaks a r/w delegation
  recalled.store(false);
  ASSERT_EQ(ceph_ll_create(cmount, root, filename, 0666,
			   O_RDWR|O_CREAT|O_EXCL, &file, &fh, &stx, 0, 0, perms), 0);
  ASSERT_EQ(ceph_ll_delegation_wait(cmount, fh, CEPH_DELEGATION_WR, dummy_deleg_cb, &recalled), 0);
  std::thread breaker7(namespace_breaker_func, tcmount, DelegTestUnlink, filename, nullptr);
  wait_for_atomic_bool(recalled);
  ASSERT_EQ(ceph_ll_delegation(cmount, fh, CEPH_DELEGATION_NONE, dummy_deleg_cb, &recalled), 0);
  breaker7.join();
  ASSERT_EQ(ceph_ll_close(cmount, fh), 0);
  // no unlink here: the breaker already removed the file
}
/* Run the delegation scenarios with the breakers on separate client mounts
 * (tcmount == nullptr => each breaker creates its own mount). */
TEST(LibCephFS, DelegMultiClient) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  ASSERT_EQ(set_default_deleg_timeout(cmount), 0);

  simple_deleg_test(cmount, nullptr);

  ceph_shutdown(cmount);
}
/* Run the delegation scenarios with the breakers sharing the delegation
 * holder's own mount (single-client conflict). */
TEST(LibCephFS, DelegSingleClient) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  ASSERT_EQ(set_default_deleg_timeout(cmount), 0);

  simple_deleg_test(cmount, cmount);

  ceph_shutdown(cmount);
}
/*
 * Take a read delegation, deliberately never return it when recalled, and
 * verify that the client gets cut off (-CEPHFS_ENOTCONN) after the
 * (shortened) delegation timeout expires.
 */
TEST(LibCephFS, DelegTimeout) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  // tweak timeout to run quickly, since we don't plan to return it anyway
  ASSERT_EQ(ceph_set_deleg_timeout(cmount, 2), 0);

  Inode *root, *file;
  ASSERT_EQ(ceph_ll_lookup_root(cmount, &root), 0);

  char filename[32];
  sprintf(filename, "delegtimeo%x", getpid());

  Fh *fh;
  struct ceph_statx stx;
  UserPerm *perms = ceph_mount_perms(cmount);
  ASSERT_EQ(ceph_ll_create(cmount, root, filename, 0666,
			   O_RDWR|O_CREAT|O_EXCL, &file, &fh, &stx, 0, 0, perms), 0);

  /* Reopen read-only */
  ASSERT_EQ(ceph_ll_close(cmount, fh), 0);
  ASSERT_EQ(ceph_ll_open(cmount, file, O_RDONLY, &fh, perms), 0);

  std::atomic_bool recalled(false);
  ASSERT_EQ(ceph_ll_delegation_wait(cmount, fh, CEPH_DELEGATION_RD, dummy_deleg_cb, &recalled), 0);
  std::atomic_bool opened(false);
  std::thread breaker1(open_breaker_func, nullptr, filename, O_RDWR, &opened);
  breaker1.join();
  ASSERT_EQ(recalled.load(), true);
  // The recall was never answered, so the session is gone by now --
  // presumably this client is blocklisted (confirm).  The mount is dead, so
  // release without unmounting.
  ASSERT_EQ(ceph_ll_getattr(cmount, root, &stx, 0, 0, perms), -CEPHFS_ENOTCONN);
  ceph_release(cmount);
}
/*
 * While another client's conflicting open is forcing a delegation recall,
 * getattr on the delegated inode must keep working (i.e. must not deadlock
 * or fail) until the delegation is returned.
 */
TEST(LibCephFS, RecalledGetattr) {
  struct ceph_mount_info *cmount1;
  ASSERT_EQ(ceph_create(&cmount1, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount1, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount1, NULL));
  ASSERT_EQ(ceph_mount(cmount1, "/"), 0);
  ASSERT_EQ(set_default_deleg_timeout(cmount1), 0);

  Inode *root, *file;
  ASSERT_EQ(ceph_ll_lookup_root(cmount1, &root), 0);

  char filename[32];
  sprintf(filename, "recalledgetattr%x", getpid());

  Fh *fh;
  struct ceph_statx stx;
  UserPerm *perms = ceph_mount_perms(cmount1);
  ASSERT_EQ(ceph_ll_create(cmount1, root, filename, 0666,
			   O_RDWR|O_CREAT|O_EXCL, &file, &fh, &stx, 0, 0, perms), 0);
  ASSERT_EQ(ceph_ll_write(cmount1, fh, 0, sizeof(filename), filename),
	    static_cast<int>(sizeof(filename)));
  ASSERT_EQ(ceph_ll_close(cmount1, fh), 0);

  /* New mount for read delegation */
  struct ceph_mount_info *cmount2;
  ASSERT_EQ(ceph_create(&cmount2, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount2, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount2, NULL));
  ASSERT_EQ(ceph_mount(cmount2, "/"), 0);
  ASSERT_EQ(set_default_deleg_timeout(cmount2), 0);

  ASSERT_EQ(ceph_ll_lookup_root(cmount2, &root), 0);
  perms = ceph_mount_perms(cmount2);
  ASSERT_EQ(ceph_ll_lookup(cmount2, root, filename, &file, &stx, 0, 0, perms), 0);

  ASSERT_EQ(ceph_ll_open(cmount2, file, O_WRONLY, &fh, perms), 0);
  ASSERT_EQ(ceph_ll_write(cmount2, fh, 0, sizeof(filename), filename),
	    static_cast<int>(sizeof(filename)));
  ASSERT_EQ(ceph_ll_close(cmount2, fh), 0);

  ASSERT_EQ(ceph_ll_open(cmount2, file, O_RDONLY, &fh, perms), 0);

  /* Break delegation */
  std::atomic_bool recalled(false);
  ASSERT_EQ(ceph_ll_delegation_wait(cmount2, fh, CEPH_DELEGATION_RD, dummy_deleg_cb, &recalled), 0);
  ASSERT_EQ(ceph_ll_read(cmount2, fh, 0, sizeof(filename), filename),
	    static_cast<int>(sizeof(filename)));
  ASSERT_EQ(ceph_ll_getattr(cmount2, file, &stx, CEPH_STATX_ALL_STATS, 0, perms), 0);
  std::atomic_bool opened(false);
  std::thread breaker1(open_breaker_func, cmount1, filename, O_WRONLY, &opened);
  // Keep issuing getattrs until the recall fires; each one must succeed
  int i = 0;
  do {
    ASSERT_EQ(ceph_ll_getattr(cmount2, file, &stx, CEPH_STATX_ALL_STATS, 0, perms), 0);
    ASSERT_LT(i++, MAX_WAIT);
    usleep(1000);
  } while (!recalled.load());
  ASSERT_EQ(opened.load(), false);  // breaker still blocked on our delegation
  ASSERT_EQ(ceph_ll_getattr(cmount2, file, &stx, CEPH_STATX_ALL_STATS, 0, perms), 0);
  ASSERT_EQ(ceph_ll_delegation(cmount2, fh, CEPH_DELEGATION_NONE, dummy_deleg_cb, nullptr), 0);
  breaker1.join();
  ASSERT_EQ(ceph_ll_close(cmount2, fh), 0);
  ceph_unmount(cmount2);
  ceph_release(cmount2);
  ceph_unmount(cmount1);
  ceph_release(cmount1);
}
| 13,454 | 32.470149 | 120 | cc |
null | ceph-main/src/test/libcephfs/flock.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <pthread.h>
#include "gtest/gtest.h"
#ifndef GTEST_IS_THREADSAFE
#error "!GTEST_IS_THREADSAFE"
#endif
#include "include/compat.h"
#include "include/cephfs/libcephfs.h"
#include "include/fs_types.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <stdlib.h>
#include <semaphore.h>
#include <time.h>
#ifndef _WIN32
#include <sys/mman.h>
#endif
#ifdef __linux__
#include <limits.h>
#include <sys/xattr.h>
#elif __FreeBSD__
#include <sys/types.h>
#include <sys/wait.h>
#endif
#include "include/ceph_assert.h"
#include "ceph_pthread_self.h"
// Startup common: create and mount ceph fs
// Startup common: create and mount ceph fs
// (expects a local `struct ceph_mount_info *cmount` in scope)
#define STARTUP_CEPH() do { \
    ASSERT_EQ(0, ceph_create(&cmount, NULL)); \
    ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL)); \
    ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL)); \
    ASSERT_EQ(0, ceph_mount(cmount, NULL)); \
  } while(0)

// Cleanup common: unmount and release ceph fs
#define CLEANUP_CEPH() do { \
    ASSERT_EQ(0, ceph_unmount(cmount)); \
    ASSERT_EQ(0, ceph_release(cmount)); \
  } while(0)

// rwx for user, group and other -- mode for the test files
static const mode_t fileMode = S_IRWXU | S_IRWXG | S_IRWXO;

// Default wait time for normal and "slow" operations
// (5" should be enough in case of network congestion)
static const long waitMs = 10;
static const long waitSlowMs = 5000;
// Get the absolute struct timespec reference from now + 'ms' milliseconds
// Get the absolute struct timespec reference from now + 'ms' milliseconds
// (for sem_timedwait).  Returns &ts for convenient inline use.
static const struct timespec* abstime(struct timespec &ts, long ms) {
  if (clock_gettime(CLOCK_REALTIME, &ts) == -1) {
    ceph_abort();
  }
  /*
   * Do the carry arithmetic in 64 bits: 'ms * 1000000' overflows a 32-bit
   * long for ms >= ~2148, and waitSlowMs (5000) exceeds that on ILP32
   * targets.
   */
  const long long nsec = ts.tv_nsec + 1000000LL * ms;
  ts.tv_sec += nsec / 1000000000;
  ts.tv_nsec = nsec % 1000000000;
  return &ts;
}
/* Basic locking */
/*
 * Single-mount flock semantics, using distinct owner tokens (42..45) to
 * simulate independent lock holders: exclusive locks conflict, shared
 * locks stack, and POSIX upgrade/downgrade between SH and EX works.
 */
TEST(LibCephFS, BasicLocking) {
  struct ceph_mount_info *cmount = NULL;
  STARTUP_CEPH();

  char c_file[1024];
  sprintf(c_file, "/flock_test_%d", getpid());
  const int fd = ceph_open(cmount, c_file, O_RDWR | O_CREAT, fileMode);
  ASSERT_GE(fd, 0);

  // Lock exclusively twice
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, 42));
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 43));
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 44));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 42));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 43));
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 44));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 43));

  // Lock shared three times
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, 42));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, 43));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, 44));
  // And then attempt to lock exclusively
  // (must fail until every shared holder has unlocked)
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 45));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 42));
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 45));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 44));
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 45));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 43));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 45));
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, 42));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 45));

  // Lock shared with upgrade to exclusive (POSIX)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, 42));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, 42));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 42));

  // Lock exclusive with downgrade to shared (POSIX)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, 42));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, 42));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 42));

  ASSERT_EQ(0, ceph_close(cmount, fd));
  ASSERT_EQ(0, ceph_unlink(cmount, c_file));
  CLEANUP_CEPH();
}
/* Locking in different threads */
// Used by ConcurrentLocking test
// Shared state between the main test body and its locker workers (threads
// or forked processes).  The two semaphore pairs implement the numbered
// PING/WAIT rendezvous protocol defined below.
struct str_ConcurrentLocking {
  const char *file;                // path of the file being flock()ed
  struct ceph_mount_info *cmount;  // !NULL if shared
  sem_t sem[2];                    // worker -> main wakeups
  sem_t semReply[2];               // main -> worker wakeups

  // 'pshared' is forwarded to sem_init(); non-zero when the semaphores live
  // in shared memory for the inter-process tests.
  void sem_init(int pshared) {
    ASSERT_EQ(0, ::sem_init(&sem[0], pshared, 0));
    ASSERT_EQ(0, ::sem_init(&sem[1], pshared, 0));
    ASSERT_EQ(0, ::sem_init(&semReply[0], pshared, 0));
    ASSERT_EQ(0, ::sem_init(&semReply[1], pshared, 0));
  }

  void sem_destroy() {
    ASSERT_EQ(0, ::sem_destroy(&sem[0]));
    ASSERT_EQ(0, ::sem_destroy(&sem[1]));
    ASSERT_EQ(0, ::sem_destroy(&semReply[0]));
    ASSERT_EQ(0, ::sem_destroy(&semReply[1]));
  }
};
// The macros below expect locals `s` (str_ConcurrentLocking) and `ts`
// (struct timespec) in scope.  (N)/(RN) refer to the numbered sync points
// annotated in the worker/main bodies; alternating on n%2 lets consecutive
// steps use different semaphores.

// Wakeup main (for (N) steps)
#define PING_MAIN(n) ASSERT_EQ(0, sem_post(&s.sem[n%2]))
// Wait for main to wake us up (for (RN) steps)
#define WAIT_MAIN(n) \
  ASSERT_EQ(0, sem_timedwait(&s.semReply[n%2], abstime(ts, waitSlowMs)))

// Wakeup worker (for (RN) steps)
#define PING_WORKER(n) ASSERT_EQ(0, sem_post(&s.semReply[n%2]))
// Wait for worker to wake us up (for (N) steps)
#define WAIT_WORKER(n) \
  ASSERT_EQ(0, sem_timedwait(&s.sem[n%2], abstime(ts, waitSlowMs)))
// Worker shall not wake us up (for (N) steps)
// (short timeout: asserts the worker is still blocked)
#define NOT_WAIT_WORKER(n) \
  ASSERT_EQ(-1, sem_timedwait(&s.sem[n%2], abstime(ts, waitMs)))

// Do twice an operation
#define TWICE(EXPR) do { \
    EXPR; \
    EXPR; \
  } while(0)
/* Locking in different threads */
// Used by ConcurrentLocking test
/*
 * Worker thread for ConcurrentLocking/ThreesomeLocking: walks through a
 * fixed sequence of flock operations against s.file on the shared mount,
 * reporting progress to main at sync points (1)..(7) and pausing at
 * (R1)..(R3) where main drives the next step.  The owner token is this
 * thread's id, so main (different token) contends with us.
 */
static void thread_ConcurrentLocking(str_ConcurrentLocking& s) {
  struct ceph_mount_info *const cmount = s.cmount;
  struct timespec ts;

  const int fd = ceph_open(cmount, s.file, O_RDWR | O_CREAT, fileMode);
  ASSERT_GE(fd, 0);

  // Main currently holds EX, so a non-blocking attempt must fail
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK,
	    ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, ceph_pthread_self()));
  PING_MAIN(1); // (1)
  // Blocking EX: succeeds once main unlocks
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, ceph_pthread_self()));
  PING_MAIN(2); // (2)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));
  PING_MAIN(3); // (3)

  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, ceph_pthread_self()));
  PING_MAIN(4); // (4)

  WAIT_MAIN(1); // (R1)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));
  PING_MAIN(5); // (5)

  WAIT_MAIN(2); // (R2)
  // Blocks until main releases its exclusive lock
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, ceph_pthread_self()));
  PING_MAIN(6); // (6)

  WAIT_MAIN(3); // (R3)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));
  PING_MAIN(7); // (7)
}
// Used by ConcurrentLocking test
static void* thread_ConcurrentLocking_(void *arg) {
str_ConcurrentLocking *const s =
reinterpret_cast<str_ConcurrentLocking*>(arg);
thread_ConcurrentLocking(*s);
return NULL;
}
/*
 * Main/worker flock choreography on one mount with two threads (distinct
 * owner tokens).  Verifies blocking and non-blocking EX/SH interactions at
 * each numbered sync point of thread_ConcurrentLocking().
 */
TEST(LibCephFS, ConcurrentLocking) {
  const pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  STARTUP_CEPH();

  char c_file[1024];
  sprintf(c_file, "/flock_test_%d", mypid);
  const int fd = ceph_open(cmount, c_file, O_RDWR | O_CREAT, fileMode);
  ASSERT_GE(fd, 0);

  // Lock
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, ceph_pthread_self()));

  // Start locker thread
  pthread_t thread;
  struct timespec ts;
  str_ConcurrentLocking s = { c_file, cmount };
  s.sem_init(0);
  ASSERT_EQ(0, pthread_create(&thread, NULL, thread_ConcurrentLocking_, &s));
  // Synchronization point with thread (failure: thread is dead)
  WAIT_WORKER(1); // (1)

  // Shall not have lock immediately
  NOT_WAIT_WORKER(2); // (2)

  // Unlock
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));

  // Shall have lock
  // Synchronization point with thread (failure: thread is dead)
  WAIT_WORKER(2); // (2)

  // Synchronization point with thread (failure: thread is dead)
  WAIT_WORKER(3); // (3)

  // Wait for thread to share lock
  WAIT_WORKER(4); // (4)
  // EX conflicts with the worker's SH, but our own SH stacks with it
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK,
	    ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, ceph_pthread_self()));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, ceph_pthread_self()));

  // Wake up thread to unlock shared lock
  PING_WORKER(1); // (R1)
  WAIT_WORKER(5); // (5)

  // Now we can lock exclusively
  // Upgrade to exclusive lock (as per POSIX)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, ceph_pthread_self()));

  // Wake up thread to lock shared lock
  PING_WORKER(2); // (R2)

  // Shall not have lock immediately
  NOT_WAIT_WORKER(6); // (6)

  // Release lock ; thread will get it
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));
  WAIT_WORKER(6); // (6)

  // We no longer have the lock
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK,
	    ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, ceph_pthread_self()));
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK,
	    ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, ceph_pthread_self()));

  // Wake up thread to unlock exclusive lock
  PING_WORKER(3); // (R3)
  WAIT_WORKER(7); // (7)

  // We can lock it again
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, ceph_pthread_self()));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));

  // Cleanup
  void *retval = (void*) (uintptr_t) -1;
  ASSERT_EQ(0, pthread_join(thread, &retval));
  ASSERT_EQ(NULL, retval);
  s.sem_destroy();
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ASSERT_EQ(0, ceph_unlink(cmount, c_file));
  CLEANUP_CEPH();
}
/*
 * Same choreography as ConcurrentLocking but with TWO worker threads
 * running thread_ConcurrentLocking() concurrently; every worker-side sync
 * point is therefore awaited/pinged TWICE.
 */
TEST(LibCephFS, ThreesomeLocking) {
  const pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  STARTUP_CEPH();

  char c_file[1024];
  sprintf(c_file, "/flock_test_%d", mypid);
  const int fd = ceph_open(cmount, c_file, O_RDWR | O_CREAT, fileMode);
  ASSERT_GE(fd, 0);

  // Lock
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, ceph_pthread_self()));

  // Start locker thread
  pthread_t thread[2];
  struct timespec ts;
  str_ConcurrentLocking s = { c_file, cmount };
  s.sem_init(0);
  ASSERT_EQ(0, pthread_create(&thread[0], NULL, thread_ConcurrentLocking_, &s));
  ASSERT_EQ(0, pthread_create(&thread[1], NULL, thread_ConcurrentLocking_, &s));
  // Synchronization point with thread (failure: thread is dead)
  TWICE(WAIT_WORKER(1)); // (1)

  // Shall not have lock immediately
  NOT_WAIT_WORKER(2); // (2)

  // Unlock
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));

  // Shall have lock
  TWICE(// Synchronization point with thread (failure: thread is dead)
	WAIT_WORKER(2); // (2)
	// Synchronization point with thread (failure: thread is dead)
	WAIT_WORKER(3)); // (3)

  // Wait for thread to share lock
  TWICE(WAIT_WORKER(4)); // (4)
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK,
	    ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, ceph_pthread_self()));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, ceph_pthread_self()));

  // Wake up thread to unlock shared lock
  TWICE(PING_WORKER(1); // (R1)
	WAIT_WORKER(5)); // (5)

  // Now we can lock exclusively
  // Upgrade to exclusive lock (as per POSIX)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, ceph_pthread_self()));

  TWICE( // Wake up thread to lock shared lock
	PING_WORKER(2); // (R2)
	// Shall not have lock immediately
	NOT_WAIT_WORKER(6)); // (6)

  // Release lock ; thread will get it
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));
  TWICE(WAIT_WORKER(6); // (6)
	// We no longer have the lock
	ASSERT_EQ(-CEPHFS_EWOULDBLOCK,
		  ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, ceph_pthread_self()));
	ASSERT_EQ(-CEPHFS_EWOULDBLOCK,
		  ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, ceph_pthread_self()));

	// Wake up thread to unlock exclusive lock
	PING_WORKER(3); // (R3)
	WAIT_WORKER(7); // (7)
	);

  // We can lock it again
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, ceph_pthread_self()));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));

  // Cleanup
  void *retval = (void*) (uintptr_t) -1;
  ASSERT_EQ(0, pthread_join(thread[0], &retval));
  ASSERT_EQ(NULL, retval);
  ASSERT_EQ(0, pthread_join(thread[1], &retval));
  ASSERT_EQ(NULL, retval);
  s.sem_destroy();
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ASSERT_EQ(0, ceph_unlink(cmount, c_file));
  CLEANUP_CEPH();
}
/* Locking in different processes */

// Shadow the file-scope waitMs (10ms) with a larger per-function value,
// since cross-process rendezvous is slower; the (void) silences the
// unused-variable warning when only some macros are expanded.
#define PROCESS_SLOW_MS() \
  static const long waitMs = 100; \
  (void) waitMs
// Used by ConcurrentLocking test
/*
 * Child-process counterpart of thread_ConcurrentLocking(): creates its own
 * mount, then walks the same flock sequence against s.file using its pid
 * as the owner token, rendezvousing with the parent through the
 * shared-memory semaphores in 's'.  Exits the process on completion.
 */
static void process_ConcurrentLocking(str_ConcurrentLocking& s) {
  const pid_t mypid = getpid();
  PROCESS_SLOW_MS();

  struct ceph_mount_info *cmount = NULL;
  struct timespec ts;

  STARTUP_CEPH();
  s.cmount = cmount;

  const int fd = ceph_open(cmount, s.file, O_RDWR | O_CREAT, fileMode);
  ASSERT_GE(fd, 0);
  WAIT_MAIN(1); // (R1)

  // Parent holds EX: non-blocking attempt must fail
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK,
	    ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
  PING_MAIN(1); // (1)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, mypid));
  PING_MAIN(2); // (2)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
  PING_MAIN(3); // (3)

  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, mypid));
  PING_MAIN(4); // (4)

  WAIT_MAIN(2); // (R2)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
  PING_MAIN(5); // (5)

  WAIT_MAIN(3); // (R3)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, mypid));
  PING_MAIN(6); // (6)

  WAIT_MAIN(4); // (R4)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
  PING_MAIN(7); // (7)

  CLEANUP_CEPH();
  s.sem_destroy();
  exit(EXIT_SUCCESS);
}
#ifndef _WIN32
// Disabled because of fork() issues (http://tracker.ceph.com/issues/16556)
// Disabled because of fork() issues (http://tracker.ceph.com/issues/16556)
/*
 * Same choreography as ConcurrentLocking, but the contender is a forked
 * child (process_ConcurrentLocking) and the rendezvous semaphores live in
 * an anonymous shared mmap.
 */
TEST(LibCephFS, DISABLED_InterProcessLocking) {
  PROCESS_SLOW_MS();
  // Process synchronization
  char c_file[1024];
  const pid_t mypid = getpid();
  sprintf(c_file, "/flock_test_%d", mypid);

  // Note: the semaphores MUST be on a shared memory segment
  str_ConcurrentLocking *const shs =
    reinterpret_cast<str_ConcurrentLocking*>
    (mmap(0, sizeof(*shs), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
	  -1, 0));
  str_ConcurrentLocking &s = *shs;
  s.file = c_file;
  s.sem_init(1);

  // Start locker process
  const pid_t pid = fork();
  ASSERT_GE(pid, 0);
  if (pid == 0) {
    process_ConcurrentLocking(s);
    exit(EXIT_FAILURE);  // not reached: the child exits inside the helper
  }

  struct timespec ts;
  struct ceph_mount_info *cmount;
  STARTUP_CEPH();

  const int fd = ceph_open(cmount, c_file, O_RDWR | O_CREAT, fileMode);
  ASSERT_GE(fd, 0);

  // Lock
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, mypid));

  // Synchronization point with process (failure: process is dead)
  PING_WORKER(1); // (R1)
  WAIT_WORKER(1); // (1)

  // Shall not have lock immediately
  NOT_WAIT_WORKER(2); // (2)

  // Unlock
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));

  // Shall have lock
  // Synchronization point with process (failure: process is dead)
  WAIT_WORKER(2); // (2)

  // Synchronization point with process (failure: process is dead)
  WAIT_WORKER(3); // (3)

  // Wait for process to share lock
  WAIT_WORKER(4); // (4)
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, mypid));

  // Wake up process to unlock shared lock
  PING_WORKER(2); // (R2)
  WAIT_WORKER(5); // (5)

  // Now we can lock exclusively
  // Upgrade to exclusive lock (as per POSIX)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, mypid));

  // Wake up process to lock shared lock
  PING_WORKER(3); // (R3)

  // Shall not have lock immediately
  NOT_WAIT_WORKER(6); // (6)

  // Release lock ; process will get it
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
  WAIT_WORKER(6); // (6)

  // We no longer have the lock
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, mypid));

  // Wake up process to unlock exclusive lock
  PING_WORKER(4); // (R4)
  WAIT_WORKER(7); // (7)

  // We can lock it again
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));

  // Wait pid
  int status;
  ASSERT_EQ(pid, waitpid(pid, &status, 0));
  ASSERT_EQ(EXIT_SUCCESS, status);

  // Cleanup
  s.sem_destroy();
  ASSERT_EQ(0, munmap(shs, sizeof(*shs)));
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ASSERT_EQ(0, ceph_unlink(cmount, c_file));
  CLEANUP_CEPH();
}
#endif
#ifndef _WIN32
// Disabled because of fork() issues (http://tracker.ceph.com/issues/16556)
// Disabled because of fork() issues (http://tracker.ceph.com/issues/16556)
/*
 * Same as DISABLED_InterProcessLocking but with TWO forked contenders;
 * every child-side sync point is awaited/pinged TWICE.
 */
TEST(LibCephFS, DISABLED_ThreesomeInterProcessLocking) {
  PROCESS_SLOW_MS();
  // Process synchronization
  char c_file[1024];
  const pid_t mypid = getpid();
  sprintf(c_file, "/flock_test_%d", mypid);

  // Note: the semaphores MUST be on a shared memory segment
  str_ConcurrentLocking *const shs =
    reinterpret_cast<str_ConcurrentLocking*>
    (mmap(0, sizeof(*shs), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
	  -1, 0));
  str_ConcurrentLocking &s = *shs;
  s.file = c_file;
  s.sem_init(1);

  // Start locker processes
  pid_t pid[2];
  pid[0] = fork();
  ASSERT_GE(pid[0], 0);
  if (pid[0] == 0) {
    process_ConcurrentLocking(s);
    exit(EXIT_FAILURE);  // not reached: the child exits inside the helper
  }
  pid[1] = fork();
  ASSERT_GE(pid[1], 0);
  if (pid[1] == 0) {
    process_ConcurrentLocking(s);
    exit(EXIT_FAILURE);  // not reached: the child exits inside the helper
  }

  struct timespec ts;
  struct ceph_mount_info *cmount;
  STARTUP_CEPH();

  const int fd = ceph_open(cmount, c_file, O_RDWR | O_CREAT, fileMode);
  ASSERT_GE(fd, 0);

  // Lock
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, mypid));

  // Synchronization point with process (failure: process is dead)
  TWICE(PING_WORKER(1)); // (R1)
  TWICE(WAIT_WORKER(1)); // (1)

  // Shall not have lock immediately
  NOT_WAIT_WORKER(2); // (2)

  // Unlock
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));

  // Shall have lock
  TWICE(// Synchronization point with process (failure: process is dead)
	WAIT_WORKER(2); // (2)
	// Synchronization point with process (failure: process is dead)
	WAIT_WORKER(3)); // (3)

  // Wait for process to share lock
  TWICE(WAIT_WORKER(4)); // (4)
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK,
	    ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, mypid));

  // Wake up process to unlock shared lock
  TWICE(PING_WORKER(2); // (R2)
	WAIT_WORKER(5)); // (5)

  // Now we can lock exclusively
  // Upgrade to exclusive lock (as per POSIX)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, mypid));

  TWICE( // Wake up process to lock shared lock
	PING_WORKER(3); // (R3)
	// Shall not have lock immediately
	NOT_WAIT_WORKER(6)); // (6)

  // Release lock ; process will get it
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
  TWICE(WAIT_WORKER(6); // (6)
	// We no longer have the lock
	ASSERT_EQ(-CEPHFS_EWOULDBLOCK,
		  ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
	ASSERT_EQ(-CEPHFS_EWOULDBLOCK,
		  ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, mypid));

	// Wake up process to unlock exclusive lock
	PING_WORKER(4); // (R4)
	WAIT_WORKER(7); // (7)
	);

  // We can lock it again
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));

  // Wait pids
  int status;
  ASSERT_EQ(pid[0], waitpid(pid[0], &status, 0));
  ASSERT_EQ(EXIT_SUCCESS, status);
  ASSERT_EQ(pid[1], waitpid(pid[1], &status, 0));
  ASSERT_EQ(EXIT_SUCCESS, status);

  // Cleanup
  s.sem_destroy();
  ASSERT_EQ(0, munmap(shs, sizeof(*shs)));
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ASSERT_EQ(0, ceph_unlink(cmount, c_file));
  CLEANUP_CEPH();
}
#endif
| 19,512 | 28.79084 | 83 | cc |
null | ceph-main/src/test/libcephfs/lazyio.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat Ltd
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "gtest/gtest.h"
#include "include/compat.h"
#include "include/cephfs/libcephfs.h"
#include "include/rados/librados.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#if defined(__linux__)
#include <sys/xattr.h>
#endif
rados_t cluster;
/*
 * LazyIO relaxes CephFS cache coherency: a writer must explicitly
 * ceph_lazyio_propagate() its buffered writes, and other clients must
 * ceph_lazyio_synchronize() to invalidate their cache before reading.
 * One writer (client a), one reader (client b).
 */
TEST(LibCephFS, LazyIOOneWriterMulipleReaders) {
  struct ceph_mount_info *ca, *cb;

  ASSERT_EQ(ceph_create(&ca, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(ca, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(ca, NULL));
  ASSERT_EQ(ceph_mount(ca, NULL), 0);

  ASSERT_EQ(ceph_create(&cb, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cb, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cb, NULL));
  ASSERT_EQ(ceph_mount(cb, NULL), 0);

  char name[20];
  snprintf(name, sizeof(name), "foo.%d", getpid());

  int fda = ceph_open(ca, name, O_CREAT|O_RDWR, 0644);
  ASSERT_LE(0, fda);

  int fdb = ceph_open(cb, name, O_RDONLY, 0644);
  ASSERT_LE(0, fdb);

  // Enable lazy I/O on both file handles
  ASSERT_EQ(0, ceph_lazyio(ca, fda, 1));
  ASSERT_EQ(0, ceph_lazyio(cb, fdb, 1));

  char out_buf[] = "fooooooooo";

  /* Client a issues a write and propagates/flushes the buffer */
  ASSERT_EQ((int)sizeof(out_buf), ceph_write(ca, fda, out_buf, sizeof(out_buf), 0));
  ASSERT_EQ(0, ceph_lazyio_propagate(ca, fda, 0, 0));

  /* Client a issues a write and propagates/flushes the buffer */
  ASSERT_EQ((int)sizeof(out_buf), ceph_write(ca, fda, out_buf, sizeof(out_buf), 10));
  ASSERT_EQ(0, ceph_lazyio_propagate(ca, fda, 0, 0));

  char in_buf[40];
  /* Calling ceph_lazyio_synchronize here will invalidate client b's cache and hence enable client a to fetch the propagated write of client a in the subsequent read */
  ASSERT_EQ(0, ceph_lazyio_synchronize(cb, fdb, 0, 0));
  // writes overlap at offset 10 by one NUL, hence 2*strlen+1 bytes total
  ASSERT_EQ(ceph_read(cb, fdb, in_buf, sizeof(in_buf), 0), 2*strlen(out_buf)+1);
  ASSERT_STREQ(in_buf, "fooooooooofooooooooo");

  /* Client a does not need to call ceph_lazyio_synchronize here because it is the latest writer and fda holds the updated inode*/
  ASSERT_EQ(ceph_read(ca, fda, in_buf, sizeof(in_buf), 0), 2*strlen(out_buf)+1);
  ASSERT_STREQ(in_buf, "fooooooooofooooooooo");

  ceph_close(ca, fda);
  ceph_close(cb, fdb);

  ceph_shutdown(ca);
  ceph_shutdown(cb);
}
/*
 * Two LazyIO writers interleaving writes to the same file: each write is
 * explicitly propagated, and a client only needs ceph_lazyio_synchronize()
 * before reading when it is NOT the most recent writer.
 */
TEST(LibCephFS, LazyIOMultipleWritersMulipleReaders) {
  struct ceph_mount_info *ca, *cb;
  ASSERT_EQ(ceph_create(&ca, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(ca, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(ca, NULL));
  ASSERT_EQ(ceph_mount(ca, NULL), 0);
  ASSERT_EQ(ceph_create(&cb, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cb, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cb, NULL));
  ASSERT_EQ(ceph_mount(cb, NULL), 0);

  char name[20];
  snprintf(name, sizeof(name), "foo2.%d", getpid());

  int fda = ceph_open(ca, name, O_CREAT|O_RDWR, 0644);
  ASSERT_LE(0, fda);
  int fdb = ceph_open(cb, name, O_RDWR, 0644);
  ASSERT_LE(0, fdb);

  ASSERT_EQ(0, ceph_lazyio(ca, fda, 1));
  ASSERT_EQ(0, ceph_lazyio(cb, fdb, 1));

  char out_buf[] = "fooooooooo";
  /* Client a issues a write and propagates/flushes the buffer */
  ASSERT_EQ((int)sizeof(out_buf), ceph_write(ca, fda, out_buf, sizeof(out_buf), 0));
  ASSERT_EQ(0, ceph_lazyio_propagate(ca, fda, 0, 0));

  /* Client b issues a write and propagates/flushes the buffer*/
  ASSERT_EQ((int)sizeof(out_buf), ceph_write(cb, fdb, out_buf, sizeof(out_buf), 10));
  ASSERT_EQ(0, ceph_lazyio_propagate(cb, fdb, 0, 0));

  char in_buf[40];
  /* Calling ceph_lazyio_synchronize here will invalidate client a's cache and hence enable client a to fetch the propagated writes of client b in the subsequent read */
  ASSERT_EQ(0, ceph_lazyio_synchronize(ca, fda, 0, 0));
  ASSERT_EQ(ceph_read(ca, fda, in_buf, sizeof(in_buf), 0), 2*strlen(out_buf)+1);
  ASSERT_STREQ(in_buf, "fooooooooofooooooooo");

  /* Client b does not need to call ceph_lazyio_synchronize here because it is the latest writer and the writes before it have already been propagated*/
  ASSERT_EQ(ceph_read(cb, fdb, in_buf, sizeof(in_buf), 0), 2*strlen(out_buf)+1);
  ASSERT_STREQ(in_buf, "fooooooooofooooooooo");

  /* Client a issues a write */
  char wait_out_buf[] = "foobarbars";
  ASSERT_EQ((int)sizeof(wait_out_buf), ceph_write(ca, fda, wait_out_buf, sizeof(wait_out_buf), 20));
  ASSERT_EQ(0, ceph_lazyio_propagate(ca, fda, 0, 0));

  /* Client a does not need to call ceph_lazyio_synchronize here because it is the latest writer and the writes before it have already been propagated*/
  ASSERT_EQ(ceph_read(ca, fda, in_buf, sizeof(in_buf), 0), (2*(strlen(out_buf)))+strlen(wait_out_buf)+1);
  ASSERT_STREQ(in_buf, "fooooooooofooooooooofoobarbars");

  /* Calling ceph_lazyio_synchronize here will invalidate client b's cache and hence enable client b to fetch the propagated write of client a in the subsequent read */
  ASSERT_EQ(0, ceph_lazyio_synchronize(cb, fdb, 0, 0));
  ASSERT_EQ(ceph_read(cb, fdb, in_buf, sizeof(in_buf), 0), (2*(strlen(out_buf)))+strlen(wait_out_buf)+1);
  ASSERT_STREQ(in_buf, "fooooooooofooooooooofoobarbars");

  ceph_close(ca, fda);
  ceph_close(cb, fdb);
  ceph_shutdown(ca);
  ceph_shutdown(cb);
}
/*
 * Without ceph_lazyio_synchronize(), a LazyIO client that has cached data
 * sees only its own writes; after synchronize it must see every propagated
 * write from all clients.
 */
TEST(LibCephFS, LazyIOMultipleWritersOneReader) {
  struct ceph_mount_info *ca, *cb;
  ASSERT_EQ(ceph_create(&ca, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(ca, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(ca, NULL));
  ASSERT_EQ(ceph_mount(ca, NULL), 0);
  ASSERT_EQ(ceph_create(&cb, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cb, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cb, NULL));
  ASSERT_EQ(ceph_mount(cb, NULL), 0);

  char name[20];
  snprintf(name, sizeof(name), "foo3.%d", getpid());

  int fda = ceph_open(ca, name, O_CREAT|O_RDWR, 0644);
  ASSERT_LE(0, fda);
  int fdb = ceph_open(cb, name, O_RDWR, 0644);
  ASSERT_LE(0, fdb);

  ASSERT_EQ(0, ceph_lazyio(ca, fda, 1));
  ASSERT_EQ(0, ceph_lazyio(cb, fdb, 1));

  char out_buf[] = "fooooooooo";
  /* Client a issues a write and propagates/flushes the buffer */
  ASSERT_EQ((int)sizeof(out_buf), ceph_write(ca, fda, out_buf, sizeof(out_buf), 0));
  ASSERT_EQ(0, ceph_lazyio_propagate(ca, fda, 0, 0));

  /* Client b issues a write and propagates/flushes the buffer*/
  ASSERT_EQ((int)sizeof(out_buf), ceph_write(cb, fdb, out_buf, sizeof(out_buf), 10));
  ASSERT_EQ(0, ceph_lazyio_propagate(cb, fdb, 0, 0));

  char in_buf[40];
  /* Client a reads the file and verifies that it only reads it's propagated writes and not Client b's*/
  ASSERT_EQ(ceph_read(ca, fda, in_buf, sizeof(in_buf), 0), strlen(out_buf)+1);
  ASSERT_STREQ(in_buf, "fooooooooo");

  /* Client a reads the file again, this time with a lazyio_synchronize to check if the cache gets invalidated and data is refetched i.e all the propagated writes are being read*/
  ASSERT_EQ(0, ceph_lazyio_synchronize(ca, fda, 0, 0));
  ASSERT_EQ(ceph_read(ca, fda, in_buf, sizeof(in_buf), 0), 2*strlen(out_buf)+1);
  ASSERT_STREQ(in_buf, "fooooooooofooooooooo");

  ceph_close(ca, fda);
  ceph_close(cb, fdb);
  ceph_shutdown(ca);
  ceph_shutdown(cb);
}
/*
 * ceph_lazyio_synchronize() must also FLUSH the caller's dirty (never
 * explicitly propagated) buffers before invalidating its cache, so all
 * writes become visible to every client afterwards.
 */
TEST(LibCephFS, LazyIOSynchronizeFlush) {
  /* Test to make sure lazyio_synchronize flushes dirty buffers */
  struct ceph_mount_info *ca, *cb;
  ASSERT_EQ(ceph_create(&ca, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(ca, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(ca, NULL));
  ASSERT_EQ(ceph_mount(ca, NULL), 0);
  ASSERT_EQ(ceph_create(&cb, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cb, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cb, NULL));
  ASSERT_EQ(ceph_mount(cb, NULL), 0);

  char name[20];
  snprintf(name, sizeof(name), "foo4.%d", getpid());

  int fda = ceph_open(ca, name, O_CREAT|O_RDWR, 0644);
  ASSERT_LE(0, fda);
  int fdb = ceph_open(cb, name, O_RDWR, 0644);
  ASSERT_LE(0, fdb);

  ASSERT_EQ(0, ceph_lazyio(ca, fda, 1));
  ASSERT_EQ(0, ceph_lazyio(cb, fdb, 1));

  char out_buf[] = "fooooooooo";

  /* Client a issues a write and propagates it*/
  ASSERT_EQ((int)sizeof(out_buf), ceph_write(ca, fda, out_buf, sizeof(out_buf), 0));
  ASSERT_EQ(0, ceph_lazyio_propagate(ca, fda, 0, 0));

  /* Client b issues writes and without lazyio_propagate*/
  ASSERT_EQ((int)sizeof(out_buf), ceph_write(cb, fdb, out_buf, sizeof(out_buf), 10));
  ASSERT_EQ((int)sizeof(out_buf), ceph_write(cb, fdb, out_buf, sizeof(out_buf), 20));

  char in_buf[40];
  /* Calling ceph_lazyio_synchronize here will first flush the possibly pending buffered write of client b and invalidate client b's cache and hence enable client b to fetch all the propagated writes */
  ASSERT_EQ(0, ceph_lazyio_synchronize(cb, fdb, 0, 0));
  ASSERT_EQ(ceph_read(cb, fdb, in_buf, sizeof(in_buf), 0), 3*strlen(out_buf)+1);
  ASSERT_STREQ(in_buf, "fooooooooofooooooooofooooooooo");

  /* Required to call ceph_lazyio_synchronize here since client b is the latest writer and client a is out of sync with updated file*/
  ASSERT_EQ(0, ceph_lazyio_synchronize(ca, fda, 0, 0));
  ASSERT_EQ(ceph_read(ca, fda, in_buf, sizeof(in_buf), 0), 3*strlen(out_buf)+1);
  ASSERT_STREQ(in_buf, "fooooooooofooooooooofooooooooo");

  ceph_close(ca, fda);
  ceph_close(cb, fdb);
  ceph_shutdown(ca);
  ceph_shutdown(cb);
}
/*
 * Mix non-LazyIO and LazyIO traffic on the same file: plain writes from
 * both clients first, then LazyIO is enabled mid-stream and the propagate/
 * synchronize protocol must still expose a consistent 40-byte file.
 */
TEST(LibCephFS, WithoutandWithLazyIO) {
  struct ceph_mount_info *ca, *cb;
  ASSERT_EQ(ceph_create(&ca, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(ca, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(ca, NULL));
  ASSERT_EQ(ceph_mount(ca, NULL), 0);
  ASSERT_EQ(ceph_create(&cb, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cb, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cb, NULL));
  ASSERT_EQ(ceph_mount(cb, NULL), 0);

  char name[20];
  snprintf(name, sizeof(name), "foo5.%d", getpid());

  int fda = ceph_open(ca, name, O_CREAT|O_RDWR, 0644);
  ASSERT_LE(0, fda);
  int fdb = ceph_open(cb, name, O_RDWR, 0644);
  ASSERT_LE(0, fdb);

  char out_buf_w[] = "1234567890";
  /* Doing some non lazyio writes and read*/
  ASSERT_EQ((int)sizeof(out_buf_w), ceph_write(ca, fda, out_buf_w, sizeof(out_buf_w), 0));
  ASSERT_EQ((int)sizeof(out_buf_w), ceph_write(cb, fdb, out_buf_w, sizeof(out_buf_w), 10));
  char in_buf_w[30];
  ASSERT_EQ(ceph_read(ca, fda, in_buf_w, sizeof(in_buf_w), 0), 2*strlen(out_buf_w)+1);

  /* Enable lazyio*/
  ASSERT_EQ(0, ceph_lazyio(ca, fda, 1));
  ASSERT_EQ(0, ceph_lazyio(cb, fdb, 1));

  char out_buf[] = "fooooooooo";
  /* Client a issues a write and propagates/flushes the buffer*/
  ASSERT_EQ((int)sizeof(out_buf), ceph_write(ca, fda, out_buf, sizeof(out_buf), 20));
  ASSERT_EQ(0, ceph_lazyio_propagate(ca, fda, 0, 0));

  /* Client b issues a write and propagates/flushes the buffer*/
  ASSERT_EQ((int)sizeof(out_buf), ceph_write(cb, fdb, out_buf, sizeof(out_buf), 30));
  ASSERT_EQ(0, ceph_lazyio_propagate(cb, fdb, 0, 0));

  char in_buf[50];
  /* Calling ceph_lazyio_synchronize here will invalidate client a's cache and hence enable client a to fetch the propagated writes of client b in the subsequent read */
  ASSERT_EQ(0, ceph_lazyio_synchronize(ca, fda, 0, 0));
  ASSERT_EQ(ceph_read(ca, fda, in_buf, sizeof(in_buf), 0), (2*(strlen(out_buf)))+(2*(strlen(out_buf_w)))+1);
  ASSERT_STREQ(in_buf, "12345678901234567890fooooooooofooooooooo");

  /* Client b does not need to call ceph_lazyio_synchronize here because it is the latest writer and the writes before it have already been propagated*/
  ASSERT_EQ(ceph_read(cb, fdb, in_buf, sizeof(in_buf), 0), (2*(strlen(out_buf)))+(2*(strlen(out_buf_w)))+1);
  ASSERT_STREQ(in_buf, "12345678901234567890fooooooooofooooooooo");

  ceph_close(ca, fda);
  ceph_close(cb, fdb);
  ceph_shutdown(ca);
  ceph_shutdown(cb);
}
// Open an administrative mount (with client permission checks disabled)
// and chmod the filesystem root to 0777 so the unprivileged test clients
// can create files there. Returns 0 on success, a negative error otherwise.
static int update_root_mode()
{
  struct ceph_mount_info *admin;
  int r = ceph_create(&admin, NULL);
  if (r < 0)
    return r;
  ceph_conf_read_file(admin, NULL);
  ceph_conf_parse_env(admin, NULL);
  ceph_conf_set(admin, "client_permissions", "false");
  r = ceph_mount(admin, "/");
  if (r >= 0)
    r = ceph_chmod(admin, "/", 0777);
  ceph_shutdown(admin);
  return r;
}
// Test entry point: relax root-dir permissions, set up gtest, and open a
// process-wide RADOS handle used by the test cases before running them.
int main(int argc, char **argv)
{
  if (update_root_mode() < 0)
    exit(1);

  ::testing::InitGoogleTest(&argc, argv);

  srand(getpid());

  if (rados_create(&cluster, NULL) < 0)
    exit(1);
  if (rados_conf_read_file(cluster, NULL) < 0)
    exit(1);
  rados_conf_parse_env(cluster, NULL);
  if (rados_connect(cluster) < 0)
    exit(1);

  int rc = RUN_ALL_TESTS();

  rados_shutdown(cluster);
  return rc;
}
| 12,730 | 34.661064 | 202 | cc |
null | ceph-main/src/test/libcephfs/main.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
* Copyright (C) 2016 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "gtest/gtest.h"
#include "include/compat.h"
#include "include/cephfs/libcephfs.h"
// Open an administrative mount (with client permission checks disabled)
// and chmod the filesystem root to 01777 (world-writable + sticky) so the
// unprivileged test clients can create files there.
static int update_root_mode()
{
  struct ceph_mount_info *admin;
  int r = ceph_create(&admin, NULL);
  if (r < 0)
    return r;
  ceph_conf_read_file(admin, NULL);
  ceph_conf_parse_env(admin, NULL);
  ceph_conf_set(admin, "client_permissions", "false");
  r = ceph_mount(admin, "/");
  if (r >= 0)
    r = ceph_chmod(admin, "/", 01777);
  ceph_shutdown(admin);
  return r;
}
// Test entry point: relax root-dir permissions, then run the gtest suite.
int main(int argc, char **argv)
{
  if (update_root_mode() < 0)
    exit(1);
  ::testing::InitGoogleTest(&argc, argv);
  srand(getpid());
  return RUN_ALL_TESTS();
}
| 1,108 | 20.745098 | 70 | cc |
null | ceph-main/src/test/libcephfs/monconfig.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "gtest/gtest.h"
#include "include/compat.h"
#include "include/cephfs/libcephfs.h"
#include "include/fs_types.h"
#include "common/ceph_context.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
// Fixture for tests that manipulate monitor-address configuration sources.
// SetUp() creates and configures a client but deliberately does NOT mount
// it, so each test can adjust mon config before calling ceph_mount().
class MonConfig : public ::testing::Test
{
  protected:
    // Client under test; created in SetUp(), mounted by individual cases.
    struct ceph_mount_info *ca;

    void SetUp() override {
      ASSERT_EQ(0, ceph_create(&ca, NULL));
      ASSERT_EQ(0, ceph_conf_read_file(ca, NULL));
      ASSERT_EQ(0, ceph_conf_parse_env(ca, NULL));
    }

    void TearDown() override {
      ceph_shutdown(ca);
    }

    // Helper to remove/unset all possible mon information from ConfigProxy
    void clear_mon_config(CephContext *cct) {
      auto& conf = cct->_conf;
      // Clear safe_to_start_threads, allowing updates to config values
      conf._clear_safe_to_start_threads();
      ASSERT_EQ(0, conf.set_val("monmap", "", nullptr));
      ASSERT_EQ(0, conf.set_val("mon_host", "", nullptr));
      ASSERT_EQ(0, conf.set_val("mon_dns_srv_name", "", nullptr));
      // Restore the flag cleared above
      conf.set_safe_to_start_threads();
    }

    // Helper to test basic operation on a mount: create (and close) a
    // uniquely named file under the mount root.
    void use_mount(struct ceph_mount_info *mnt, std::string name_prefix) {
      char name[20];
      snprintf(name, sizeof(name), "%s.%d", name_prefix.c_str(), getpid());
      int fd = ceph_open(mnt, name, O_CREAT|O_RDWR, 0644);
      ASSERT_LE(0, fd);
      ceph_close(mnt, fd);
    }
};
// Mounting must fail with ENOENT when every mon config source (monmap,
// mon_host, mon_dns_srv_name) has been cleared from the ConfigProxy.
TEST_F(MonConfig, MonAddrsMissing) {
  // Test mount failure when there is no known mon config source
  CephContext *context = ceph_get_mount_context(ca);
  ASSERT_NE(nullptr, context);
  clear_mon_config(context);
  ASSERT_EQ(-CEPHFS_ENOENT, ceph_mount(ca, NULL));
}
TEST_F(MonConfig, MonAddrsInConfigProxy) {
  // Test a successful mount with default mon config source in ConfigProxy,
  // then exercise the mount by creating a file under the root.
  ASSERT_EQ(0, ceph_mount(ca, NULL));
  use_mount(ca, "foo");
}
TEST_F(MonConfig, MonAddrsInCct) {
  struct ceph_mount_info *cb;
  CephContext *cct;

  // Perform mount to bootstrap mon addrs in CephContext
  ASSERT_EQ(0, ceph_mount(ca, NULL));

  // Reuse bootstrapped CephContext, clearing ConfigProxy mon addr sources
  cct = ceph_get_mount_context(ca);
  ASSERT_NE(nullptr, cct);
  clear_mon_config(cct);
  ASSERT_EQ(0, ceph_create_with_context(&cb, cct));

  // Test a successful mount with only mon values in CephContext
  ASSERT_EQ(0, ceph_mount(cb, NULL));

  // Both the original and the context-sharing mount must be usable
  use_mount(ca, "bar");
  use_mount(cb, "bar");

  ceph_shutdown(cb);
}
| 2,854 | 26.990196 | 75 | cc |
null | ceph-main/src/test/libcephfs/multiclient.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "gtest/gtest.h"
#include "include/compat.h"
#include "include/cephfs/libcephfs.h"
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <thread>
#ifdef __linux__
#include <sys/xattr.h>
#endif
/*
 * Two independent clients ping-pong small writes into the same file and
 * each verifies it can immediately read back what the other just wrote.
 */
TEST(LibCephFS, MulticlientSimple) {
  struct ceph_mount_info *mount_a, *mount_b;
  ASSERT_EQ(ceph_create(&mount_a, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(mount_a, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(mount_a, NULL));
  ASSERT_EQ(ceph_mount(mount_a, NULL), 0);

  ASSERT_EQ(ceph_create(&mount_b, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(mount_b, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(mount_b, NULL));
  ASSERT_EQ(ceph_mount(mount_b, NULL), 0);

  char name[20];
  snprintf(name, sizeof(name), "foo.%d", getpid());

  int fd_a = ceph_open(mount_a, name, O_CREAT|O_RDWR, 0644);
  ASSERT_LE(0, fd_a);
  int fd_b = ceph_open(mount_b, name, O_CREAT|O_RDWR, 0644);
  ASSERT_LE(0, fd_b);

  char buf_a[4] = "foo";
  char buf_b[4];
  for (int i = 0; i < 10; i++) {
    // a writes "foo" at offset i*6, b must read it back verbatim
    strcpy(buf_a, "foo");
    ASSERT_EQ((int)sizeof(buf_a), ceph_write(mount_a, fd_a, buf_a, sizeof(buf_a), i*6));
    ASSERT_EQ((int)sizeof(buf_a), ceph_read(mount_b, fd_b, buf_b, sizeof(buf_a), i*6));
    ASSERT_EQ(0, memcmp(buf_a, buf_b, sizeof(buf_a)));
    // b writes "bar" at offset i*6+3, a must read it back verbatim
    strcpy(buf_b, "bar");
    ASSERT_EQ((int)sizeof(buf_b), ceph_write(mount_b, fd_b, buf_b, sizeof(buf_b), i*6+3));
    ASSERT_EQ((int)sizeof(buf_b), ceph_read(mount_a, fd_a, buf_a, sizeof(buf_b), i*6+3));
    ASSERT_EQ(0, memcmp(buf_a, buf_b, sizeof(buf_a)));
  }

  ceph_close(mount_a, fd_a);
  ceph_close(mount_b, fd_b);
  ceph_shutdown(mount_a);
  ceph_shutdown(mount_b);
}
/*
 * Client a writes 3 bytes then truncates the file out to 1MB; client b
 * reads across the new EOF and must get a short read (2 bytes) of zeros
 * from the hole.
 */
TEST(LibCephFS, MulticlientHoleEOF) {
  struct ceph_mount_info *ca, *cb;
  ASSERT_EQ(ceph_create(&ca, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(ca, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(ca, NULL));
  ASSERT_EQ(ceph_mount(ca, NULL), 0);

  ASSERT_EQ(ceph_create(&cb, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cb, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cb, NULL));
  ASSERT_EQ(ceph_mount(cb, NULL), 0);

  char name[20];
  snprintf(name, sizeof(name), "foo.%d", getpid());

  int fda = ceph_open(ca, name, O_CREAT|O_RDWR, 0644);
  ASSERT_LE(0, fda);
  int fdb = ceph_open(cb, name, O_CREAT|O_RDWR, 0644);
  ASSERT_LE(0, fdb);

  // extend the file to 1MB via truncate, leaving a hole after byte 3
  ASSERT_EQ(3, ceph_write(ca, fda, "foo", 3, 0));
  ASSERT_EQ(0, ceph_ftruncate(ca, fda, 1000000));

  char buf[4];
  // read straddling EOF: only the last 2 bytes exist, and they are zeros
  ASSERT_EQ(2, ceph_read(cb, fdb, buf, sizeof(buf), 1000000-2));
  ASSERT_EQ(0, buf[0]);
  ASSERT_EQ(0, buf[1]);

  ceph_close(ca, fda);
  ceph_close(cb, fdb);
  ceph_shutdown(ca);
  ceph_shutdown(cb);
}
/*
 * Worker thread: repeatedly truncates the shared test file to 4096 bytes
 * and then writes ~4MB+64KB, forcing the client to report a new size via
 * check_cap() while setattr_func() makes the MDS revoke caps (regression
 * exercise for tracker#57244).
 *
 * NOTE(review): *stop is a plain bool shared with the spawning thread
 * without synchronization; consider std::atomic<bool> — TODO confirm.
 */
static void write_func(bool *stop)
{
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);

  char name[20];
  snprintf(name, sizeof(name), "foo.%d", getpid());

  int fd = ceph_open(cmount, name, O_CREAT|O_RDWR, 0644);
  ASSERT_LE(0, fd);

  // Stack buffer instead of malloc(): the previous heap allocation was
  // never freed and leaked on every exit path (including the early
  // returns hidden inside the ASSERT_* macros).
  constexpr int buf_size = 4096;
  char buf[buf_size];
  memset(buf, 1, buf_size);

  while (!(*stop)) {
    int i;

    // truncate the file size to 4096 will set the max_size to 4MB.
    ASSERT_EQ(0, ceph_ftruncate(cmount, fd, 4096));

    // write 4MB + extra 64KB data will make client to trigger to
    // call check_cap() to report new size. And if MDS is revoking
    // the Fsxrw caps and we are still holding the Fw caps and will
    // trigger tracker#57244.
    for (i = 0; i < 1040; i++) {
      ASSERT_EQ(ceph_write(cmount, fd, buf, buf_size, 0), buf_size);
    }
  }

  ceph_shutdown(cmount);
}
static void setattr_func(bool *stop)
{
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
char name[20];
snprintf(name, sizeof(name), "foo.%d", getpid());
int fd = ceph_open(cmount, name, O_CREAT|O_RDWR, 0644);
ASSERT_LE(0, fd);
while (!(*stop)) {
// setattr will make the MDS to acquire xlock for the filelock and
// force to revoke caps from clients
struct ceph_statx stx = {.stx_size = 0};
ASSERT_EQ(ceph_fsetattrx(cmount, fd, &stx, CEPH_SETATTR_SIZE), 0);
}
ceph_shutdown(cmount);
}
/*
 * Regression test for tracker#57244: run a writer thread and a setattr
 * thread against the same file for `wait` seconds, so the MDS keeps
 * revoking caps while large writes are in flight.
 *
 * NOTE(review): `stop` is a plain bool shared with both threads without
 * synchronization — it relies on the store eventually becoming visible;
 * an std::atomic<bool> would be the safe form. TODO confirm intent.
 */
TEST(LibCephFS, MulticlientRevokeCaps) {
  std::thread thread1, thread2;
  bool stop = false;
  int wait = 60; // in second

  thread1 = std::thread(write_func, &stop);
  thread2 = std::thread(setattr_func, &stop);

  printf(" Will run test for %d seconds!\n", wait);
  sleep(wait);
  stop = true;

  thread1.join();
  thread2.join();
}
| 5,089 | 27.121547 | 81 | cc |
null | ceph-main/src/test/libcephfs/newops.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2021 Red Hat Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "gtest/gtest-spi.h"
#include "gmock/gmock-matchers.h"
#include "gmock/gmock-more-matchers.h"
#include "include/compat.h"
#include "include/cephfs/libcephfs.h"
#include "include/fs_types.h"
#include "mds/mdstypes.h"
#include "include/stat.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#ifdef __linux__
#include <limits.h>
#include <sys/xattr.h>
#endif
#include <fmt/format.h>
#include <map>
#include <vector>
#include <thread>
#include <regex>
#include <string>
using ::testing::AnyOf;
using ::testing::Gt;
using ::testing::Eq;
using namespace std;
/*
* Test this with different ceph versions
*/
/*
 * Exercise the newer getvxattr MDS op through "ceph.dir.pin.random" while
 * remaining compatible with different ceph versions: both the new-op
 * success results and the legacy error codes are accepted.
 */
TEST(LibCephFS, NewOPs)
{
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));

  const char *test_path = "test_newops_dir";

  ASSERT_EQ(0, ceph_mkdirs(cmount, test_path, 0777));

  {
    char value[1024] = "";
    int r = ceph_getxattr(cmount, test_path, "ceph.dir.pin.random", (void*)value, sizeof(value));
    // Clients will return -CEPHFS_ENODATA if new getvxattr op not support yet.
    EXPECT_THAT(r, AnyOf(Gt(0), Eq(-CEPHFS_ENODATA)));
  }

  {
    // set a valid export-pin probability (1/128) and read it back
    double val = (double)1.0/(double)128.0;
    std::stringstream ss;
    ss << val;
    int r = ceph_setxattr(cmount, test_path, "ceph.dir.pin.random", (void*)ss.str().c_str(), strlen(ss.str().c_str()), XATTR_CREATE);
    // Old cephs will return -CEPHFS_EINVAL if not support "ceph.dir.pin.random" yet.
    EXPECT_THAT(r, AnyOf(Eq(0), Eq(-CEPHFS_EINVAL)));

    char value[1024] = "";
    r = ceph_getxattr(cmount, test_path, "ceph.dir.pin.random", (void*)value, sizeof(value));
    // Clients will return -CEPHFS_ENODATA if new getvxattr op not support yet.
    EXPECT_THAT(r, AnyOf(Gt(0), Eq(-CEPHFS_ENODATA)));
  }

  ASSERT_EQ(0, ceph_rmdir(cmount, test_path));

  ceph_shutdown(cmount);
}
| 2,418 | 26.488636 | 133 | cc |
null | ceph-main/src/test/libcephfs/quota.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
* Copyright (C) 2022 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/compat.h"
#include "gtest/gtest.h"
#include "include/cephfs/libcephfs.h"
#include "mds/mdstypes.h"
#include "include/stat.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/resource.h>
#ifdef __linux__
#include <limits.h>
#include <sys/xattr.h>
#endif
/*
 * Quota xattrs must be visible through a snapshot: create dirs with and
 * without ceph.quota.max_bytes, snapshot the tree, and verify getxattr
 * under /.snap returns the quota values (and ENODATA where none was set),
 * while listxattr never surfaces the virtual quota xattr.
 */
TEST(LibCephFS, SnapQuota) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  char test_snap_dir_quota_xattr[256];
  char test_snap_subdir_quota_xattr[256];
  char test_snap_subdir_noquota_xattr[256];
  char xattrk[128];
  char xattrv[128];
  char c_temp[PATH_MAX];
  char gxattrv[128];
  int xbuflen = sizeof(gxattrv);
  pid_t mypid = getpid();

  // create dir and set quota
  sprintf(test_snap_dir_quota_xattr, "test_snap_dir_quota_xattr_%d", mypid);
  ASSERT_EQ(0, ceph_mkdir(cmount, test_snap_dir_quota_xattr, 0777));

  sprintf(xattrk, "ceph.quota.max_bytes");
  sprintf(xattrv, "65536");
  ASSERT_EQ(0, ceph_setxattr(cmount, test_snap_dir_quota_xattr, xattrk, (void *)xattrv, 5, XATTR_CREATE));

  // create subdir and set quota
  sprintf(test_snap_subdir_quota_xattr, "test_snap_dir_quota_xattr_%d/subdir_quota", mypid);
  ASSERT_EQ(0, ceph_mkdirs(cmount, test_snap_subdir_quota_xattr, 0777));

  sprintf(xattrk, "ceph.quota.max_bytes");
  sprintf(xattrv, "32768");
  ASSERT_EQ(0, ceph_setxattr(cmount, test_snap_subdir_quota_xattr, xattrk, (void *)xattrv, 5, XATTR_CREATE));

  // create subdir with no quota
  sprintf(test_snap_subdir_noquota_xattr, "test_snap_dir_quota_xattr_%d/subdir_noquota", mypid);
  ASSERT_EQ(0, ceph_mkdirs(cmount, test_snap_subdir_noquota_xattr, 0777));

  // snapshot dir
  sprintf(c_temp, "/.snap/test_snap_dir_quota_xattr_snap_%d", mypid);
  ASSERT_EQ(0, ceph_mkdirs(cmount, c_temp, 0777));

  // check dir quota under snap
  sprintf(c_temp, "/.snap/test_snap_dir_quota_xattr_snap_%d/test_snap_dir_quota_xattr_%d", mypid, mypid);
  int alen = ceph_getxattr(cmount, c_temp, "ceph.quota.max_bytes", (void *)gxattrv, xbuflen);
  ASSERT_LT(0, alen);
  ASSERT_LT(alen, xbuflen);
  gxattrv[alen] = '\0';
  ASSERT_STREQ(gxattrv, "65536");

  // check subdir quota under snap
  sprintf(c_temp, "/.snap/test_snap_dir_quota_xattr_snap_%d/test_snap_dir_quota_xattr_%d/subdir_quota", mypid, mypid);
  alen = ceph_getxattr(cmount, c_temp, "ceph.quota.max_bytes", (void *)gxattrv, xbuflen);
  ASSERT_LT(0, alen);
  ASSERT_LT(alen, xbuflen);
  gxattrv[alen] = '\0';
  ASSERT_STREQ(gxattrv, "32768");

  // ensure subdir noquota xattr under snap
  sprintf(c_temp, "/.snap/test_snap_dir_quota_xattr_snap_%d/test_snap_dir_quota_xattr_%d/subdir_noquota", mypid, mypid);
  EXPECT_EQ(-CEPHFS_ENODATA, ceph_getxattr(cmount, c_temp, "ceph.quota.max_bytes", (void *)gxattrv, xbuflen));

  // listxattr() shouldn't return ceph.quota.max_bytes vxattr
  sprintf(c_temp, "/.snap/test_snap_dir_quota_xattr_snap_%d/test_snap_dir_quota_xattr_%d", mypid, mypid);
  char xattrlist[512];
  int len = ceph_listxattr(cmount, c_temp, xattrlist, sizeof(xattrlist));
  ASSERT_GE(sizeof(xattrlist), (size_t)len);
  // walk the NUL-separated name list counting quota entries (expect none)
  char *p = xattrlist;
  int found = 0;
  while (len > 0) {
    if (strcmp(p, "ceph.quota.max_bytes") == 0)
      found++;
    len -= strlen(p) + 1;
    p += strlen(p) + 1;
  }
  ASSERT_EQ(found, 0);

  ceph_shutdown(cmount);
}
void statfs_quota_size_check(struct ceph_mount_info *cmount, const char *path,
int blocks, int bsize)
{
struct statvfs stvfs;
ASSERT_EQ(0, ceph_statfs(cmount, path, &stvfs));
ASSERT_EQ(blocks, stvfs.f_blocks);
ASSERT_EQ(bsize, stvfs.f_bsize);
ASSERT_EQ(bsize, stvfs.f_frsize);
}
/*
 * A child dir carrying only a max_files quota must inherit the byte-size
 * quota realm of its parent: a mount rooted at either directory reports
 * the parent's 8MB size (2 blocks x 4MB) via statfs.
 */
TEST(LibCephFS, QuotaRealm) {
  struct ceph_mount_info *cmount, *pmount1, *pmount2;
  char test_quota_realm_pdir[128];
  char test_quota_realm_cdir[256];
  char xattrk[32];
  char xattrv[16];

  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  pid_t mypid = getpid();

  // create parent directory and set quota size
  sprintf(test_quota_realm_pdir, "/test_quota_realm_pdir_%d", mypid);
  ASSERT_EQ(0, ceph_mkdir(cmount, test_quota_realm_pdir, 0777));
  sprintf(xattrk, "ceph.quota.max_bytes");
  sprintf(xattrv, "8388608"); // 8MB
  ASSERT_EQ(0, ceph_setxattr(cmount, test_quota_realm_pdir, xattrk, (void *)xattrv, 7, XATTR_CREATE));

  // create child directory and set quota file
  sprintf(test_quota_realm_cdir, "%s/test_quota_realm_cdir", test_quota_realm_pdir);
  ASSERT_EQ(0, ceph_mkdir(cmount, test_quota_realm_cdir, 0777));
  sprintf(xattrk, "ceph.quota.max_files");
  sprintf(xattrv, "1024"); // 1K files
  ASSERT_EQ(0, ceph_setxattr(cmount, test_quota_realm_cdir, xattrk, (void *)xattrv, 4, XATTR_CREATE));

  // mount rooted at the parent dir: sees the 8MB byte quota
  ASSERT_EQ(ceph_create(&pmount1, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(pmount1, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(pmount1, NULL));
  ASSERT_EQ(ceph_mount(pmount1, test_quota_realm_pdir), 0);
  statfs_quota_size_check(pmount1, "/", 2, 4194304); // 8MB

  // mount rooted at the child dir: inherits the parent's byte quota
  ASSERT_EQ(ceph_create(&pmount2, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(pmount2, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(pmount2, NULL));
  ASSERT_EQ(ceph_mount(pmount2, test_quota_realm_cdir), 0);
  statfs_quota_size_check(pmount2, "/", 2, 4194304); // 8MB

  ceph_shutdown(pmount1);
  ceph_shutdown(pmount2);
  ceph_shutdown(cmount);
}
| 6,023 | 34.857143 | 120 | cc |
null | ceph-main/src/test/libcephfs/readdir_r_cb.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "gtest/gtest.h"
#include "include/compat.h"
#include "include/cephfs/libcephfs.h"
#include "include/fs_types.h"
#include <errno.h>
#include <fcntl.h>
/*
 * Exercise ceph_getdnames(): empty dir (./..), one entry, undersized
 * buffer (ERANGE), and a buffer small enough to force a split listing.
 */
TEST(LibCephFS, ReaddirRCB) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));

  char c_dir[256];
  sprintf(c_dir, "/readdir_r_cb_tests_%d", getpid());
  struct ceph_dir_result *dirp;
  ASSERT_EQ(0, ceph_mkdirs(cmount, c_dir, 0777));
  ASSERT_LE(0, ceph_opendir(cmount, c_dir, &dirp));

  // dir is empty, check that it only contains . and ..
  // Stack buffer: the previous `new char[buflen]` was never delete[]'d
  // and leaked on every path.
  constexpr int buflen = 100;
  char buf[buflen];
  // . is 2, .. is 3 (for null terminators)
  ASSERT_EQ(5, ceph_getdnames(cmount, dirp, buf, buflen));

  char c_file[256];
  sprintf(c_file, "/readdir_r_cb_tests_%d/foo", getpid());
  int fd = ceph_open(cmount, c_file, O_CREAT, 0777);
  ASSERT_LT(0, fd);
  ASSERT_EQ(0, ceph_close(cmount, fd));

  // check correctness with one entry
  ASSERT_LE(0, ceph_closedir(cmount, dirp));
  ASSERT_LE(0, ceph_opendir(cmount, c_dir, &dirp));
  ASSERT_EQ(9, ceph_getdnames(cmount, dirp, buf, buflen)); // ., .., foo

  // check correctness if buffer is too small
  ASSERT_LE(0, ceph_closedir(cmount, dirp));
  // BUGFIX: was ASSERT_GE(0, ceph_opendir(...)), which also passes for
  // negative error returns and so could never catch an opendir failure.
  ASSERT_LE(0, ceph_opendir(cmount, c_dir, &dirp));
  ASSERT_EQ(-CEPHFS_ERANGE, ceph_getdnames(cmount, dirp, buf, 1));

  // check correctness if it needs to split listing
  ASSERT_LE(0, ceph_closedir(cmount, dirp));
  ASSERT_LE(0, ceph_opendir(cmount, c_dir, &dirp));
  ASSERT_EQ(5, ceph_getdnames(cmount, dirp, buf, 6));
  ASSERT_EQ(4, ceph_getdnames(cmount, dirp, buf, 6));

  // free cmount after finishing testing
  ASSERT_LE(0, ceph_closedir(cmount, dirp));
  ASSERT_EQ(0, ceph_unmount(cmount));
  ASSERT_EQ(0, ceph_release(cmount));
}
| 2,223 | 32.69697 | 72 | cc |
null | ceph-main/src/test/libcephfs/reclaim.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Tests for Ceph delegation handling
*
* (c) 2017, Jeff Layton <jlayton@redhat.com>
*/
#include "gtest/gtest.h"
#include "include/compat.h"
#include "include/cephfs/libcephfs.h"
#include "include/fs_types.h"
#include "include/stat.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <sys/uio.h>
#include <libgen.h>
#include <stdlib.h>
#ifdef __linux__
#include <sys/xattr.h>
#include <limits.h>
#endif
#ifdef __FreeBSD__
#include <sys/wait.h>
#endif
#include <map>
#include <vector>
#include <thread>
#include <atomic>
#define CEPHFS_RECLAIM_TIMEOUT 60
/*
 * Body of the child process spawned by the ReclaimReset test: mounts with
 * a caller-supplied uuid, creates a file named after the uuid, and then
 * exits WITHOUT unmounting — deliberately leaving a stale session behind
 * for the parent process to reclaim. Returns 0 on success, 1 on any
 * failure (the parent checks the exit status).
 */
static int dying_client(int argc, char **argv)
{
  struct ceph_mount_info *cmount;

  /* Caller must pass in the uuid */
  if (argc < 2)
    return 1;

  if (ceph_create(&cmount, nullptr) != 0)
    return 1;
  if (ceph_conf_read_file(cmount, nullptr) != 0)
    return 1;
  if (ceph_conf_parse_env(cmount, nullptr) != 0)
    return 1;
  if (ceph_init(cmount) != 0)
    return 1;
  ceph_set_session_timeout(cmount, CEPHFS_RECLAIM_TIMEOUT);
  /* First run: no previous session with this uuid should exist yet */
  if (ceph_start_reclaim(cmount, argv[1], CEPH_RECLAIM_RESET) != -CEPHFS_ENOENT)
    return 1;
  ceph_set_uuid(cmount, argv[1]);
  if (ceph_mount(cmount, "/") != 0)
    return 1;

  Inode *root, *file;
  if (ceph_ll_lookup_root(cmount, &root) != 0)
    return 1;

  Fh *fh;
  struct ceph_statx stx;
  UserPerm *perms = ceph_mount_perms(cmount);

  /* Leave this file (and the session) behind for the parent to find */
  if (ceph_ll_create(cmount, root, argv[1], 0666, O_RDWR|O_CREAT|O_EXCL,
		     &file, &fh, &stx, 0, 0, perms) != 0)
    return 1;
  /* No ceph_unmount/shutdown on purpose: simulate a client dying */
  return 0;
}
/*
 * Fork+exec ourselves as a "dying client" (see dying_client) that leaves
 * a session behind under a known uuid, then reclaim that session with
 * CEPH_RECLAIM_RESET and verify the file the child created is accessible.
 */
TEST(LibCephFS, ReclaimReset) {
  pid_t pid;
  char uuid[256];
  const char *exe = "/proc/self/exe";

  sprintf(uuid, "simplereclaim:%x", getpid());

  pid = fork();
  ASSERT_GE(pid, 0);
  if (pid == 0) {
    /* child: re-exec this binary with the uuid as argv[1] */
    errno = 0;
    execl(exe, exe, uuid, nullptr);
    /* It won't be zero of course, which is sort of the point... */
    ASSERT_EQ(errno, 0);
  }

  /* parent - wait for child to exit */
  int ret;
  pid_t wp = wait(&ret);
  ASSERT_GE(wp, 0);
  ASSERT_EQ(WIFEXITED(ret), true);
  ASSERT_EQ(WEXITSTATUS(ret), 0);

  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, nullptr), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, nullptr), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, nullptr));
  ASSERT_EQ(ceph_init(cmount), 0);
  ceph_set_session_timeout(cmount, CEPHFS_RECLAIM_TIMEOUT);
  /* Reclaim the dead child's session; must succeed this time */
  ASSERT_EQ(ceph_start_reclaim(cmount, uuid, CEPH_RECLAIM_RESET), 0);
  ceph_set_uuid(cmount, uuid);
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);

  Inode *root, *file;
  ASSERT_EQ(ceph_ll_lookup_root(cmount, &root), 0);
  UserPerm *perms = ceph_mount_perms(cmount);
  struct ceph_statx stx;
  /* The file the child created under the uuid must exist and be openable */
  ASSERT_EQ(ceph_ll_lookup(cmount, root, uuid, &file, &stx, 0, 0, perms), 0);
  Fh *fh;
  ASSERT_EQ(ceph_ll_open(cmount, file, O_WRONLY, &fh, perms), 0);

  ceph_unmount(cmount);
  ceph_release(cmount);
}
// Make the cephfs root world-writable (mode 01777) with an admin mount
// that bypasses client-side permission checks, so the tests below can
// create files as arbitrary users.  Returns 0 or a negative error code.
static int update_root_mode()
{
  struct ceph_mount_info *admin;
  int rc = ceph_create(&admin, nullptr);
  if (rc < 0)
    return rc;
  ceph_conf_read_file(admin, nullptr);
  ceph_conf_parse_env(admin, nullptr);
  ceph_conf_set(admin, "client_permissions", "false");
  rc = ceph_mount(admin, "/");
  if (rc >= 0)
    rc = ceph_chmod(admin, "/", 01777);
  ceph_shutdown(admin);
  return rc;
}
// Test driver.  When the binary is re-executed with an extra argument
// (TEST(LibCephFS, ReclaimReset) execs /proc/self/exe with a uuid), the
// process acts as the short-lived dying_client() instead of running gtest.
int main(int argc, char **argv)
{
  // Relax the root directory mode first so tests can create entries there.
  int r = update_root_mode();
  if (r < 0)
    exit(1);
  ::testing::InitGoogleTest(&argc, argv);
  // Any argument left after gtest flag parsing means we are the child.
  if (argc > 1)
    return dying_client(argc, argv);
  srand(getpid());
  return RUN_ALL_TESTS();
}
| 3,627 | 21.121951 | 80 | cc |
null | ceph-main/src/test/libcephfs/recordlock.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
* 2016 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <pthread.h>
#include "gtest/gtest.h"
#ifndef GTEST_IS_THREADSAFE
#error "!GTEST_IS_THREADSAFE"
#endif
#include "include/compat.h"
#include "include/cephfs/libcephfs.h"
#include "include/fs_types.h"
#include <errno.h>
#include <sys/fcntl.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <stdlib.h>
#include <semaphore.h>
#include <time.h>
#ifndef _WIN32
#include <sys/mman.h>
#endif
#ifdef __linux__
#include <limits.h>
#include <sys/xattr.h>
#elif __FreeBSD__
#include <sys/types.h>
#include <sys/wait.h>
#endif
#include "include/ceph_assert.h"
#include "ceph_pthread_self.h"
// Startup common: create and mount ceph fs
#define STARTUP_CEPH() do { \
ASSERT_EQ(0, ceph_create(&cmount, NULL)); \
ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL)); \
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL)); \
ASSERT_EQ(0, ceph_mount(cmount, NULL)); \
} while(0)
// Cleanup common: unmount and release ceph fs
#define CLEANUP_CEPH() do { \
ASSERT_EQ(0, ceph_unmount(cmount)); \
ASSERT_EQ(0, ceph_release(cmount)); \
} while(0)
static const mode_t fileMode = S_IRWXU | S_IRWXG | S_IRWXO;
// Default wait time for normal and "slow" operations
// (5" should be enough in case of network congestion)
static const long waitMs = 10;
static const long waitSlowMs = 5000;
// Get the absolute struct timespec reference from now + 'ms' milliseconds
// Fill 'ts' with the absolute CLOCK_REALTIME time 'ms' milliseconds from
// now (normalized so tv_nsec stays in [0, 1e9)) and return its address,
// ready to pass to sem_timedwait().  Aborts if the clock cannot be read.
static const struct timespec* abstime(struct timespec &ts, long ms) {
  if (clock_gettime(CLOCK_REALTIME, &ts) == -1) {
    ceph_abort();
  }
  const long nsec = ts.tv_nsec + ms * 1000000;
  ts.tv_sec += nsec / 1000000000;
  ts.tv_nsec = nsec % 1000000000;
  return &ts;
}
/* Basic locking */
// Populate a POSIX record-lock descriptor for the current process.
// 'type' is F_RDLCK/F_WRLCK/F_UNLCK; the locked range is [start, start+len).
static void fill_flock(struct flock &lk, short type, off_t start, off_t len)
{
  lk.l_type = type;
  lk.l_whence = SEEK_SET;
  lk.l_start = start;
  lk.l_len = len;
  lk.l_pid = getpid();
}

// Exercises ceph_ll_setlk()/ceph_ll_getlk() on a single file within one
// client: conflict detection between two lock owners (42 and 43), range
// extension, partial unlock (splitting a lock), and downgrade/upgrade.
TEST(LibCephFS, BasicRecordLocking) {
  struct ceph_mount_info *cmount = NULL;
  STARTUP_CEPH();

  char c_file[1024];
  sprintf(c_file, "recordlock_test_%d", getpid());
  Fh *fh = NULL;
  Inode *root = NULL, *inode = NULL;
  struct ceph_statx stx;
  int rc;
  struct flock lock1, lock2;
  UserPerm *perms = ceph_mount_perms(cmount);

  // Get the root inode
  rc = ceph_ll_lookup_root(cmount, &root);
  ASSERT_EQ(rc, 0);

  // Get the inode and Fh corresponding to c_file
  rc = ceph_ll_create(cmount, root, c_file, fileMode, O_RDWR | O_CREAT,
		      &inode, &fh, &stx, 0, 0, perms);
  ASSERT_EQ(rc, 0);

  // Owner 42 takes a write lock on [0, 1024)
  fill_flock(lock1, F_WRLCK, 0, 1024);
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, 42, false));

  // A second owner cannot write-lock the same range...
  fill_flock(lock2, F_WRLCK, 0, 1024);
  ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock2, 43, false));

  // ...nor read-lock a sub-range of it
  fill_flock(lock2, F_RDLCK, 100, 100);
  ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock2, 43, false));

  // getlk reports the whole conflicting write lock
  ASSERT_EQ(0, ceph_ll_getlk(cmount, fh, &lock2, 43));
  ASSERT_EQ(lock2.l_type, F_WRLCK);
  ASSERT_EQ(lock2.l_start, 0);
  ASSERT_EQ(lock2.l_len, 1024);
  ASSERT_EQ(lock2.l_pid, getpid());

  // Extend the range of the write lock with an adjacent lock [1024, 2048)
  fill_flock(lock1, F_WRLCK, 1024, 1024);
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, 42, false));

  // getlk shows the two adjacent locks merged into [0, 2048)
  fill_flock(lock2, F_RDLCK, 100, 100);
  ASSERT_EQ(0, ceph_ll_getlk(cmount, fh, &lock2, 43));
  ASSERT_EQ(lock2.l_type, F_WRLCK);
  ASSERT_EQ(lock2.l_start, 0);
  ASSERT_EQ(lock2.l_len, 2048);
  ASSERT_EQ(lock2.l_pid, getpid());

  // Release the middle of the range: [512, 1536)
  fill_flock(lock1, F_UNLCK, 512, 1024);
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, 42, false));

  // 1st remaining part is [0, 512)
  fill_flock(lock2, F_RDLCK, 100, 100);
  ASSERT_EQ(0, ceph_ll_getlk(cmount, fh, &lock2, 43));
  ASSERT_EQ(lock2.l_type, F_WRLCK);
  ASSERT_EQ(lock2.l_start, 0);
  ASSERT_EQ(lock2.l_len, 512);
  ASSERT_EQ(lock2.l_pid, getpid());

  // 2nd remaining part is [1536, 2048)
  fill_flock(lock2, F_RDLCK, 2000, 100);
  ASSERT_EQ(0, ceph_ll_getlk(cmount, fh, &lock2, 43));
  ASSERT_EQ(lock2.l_type, F_WRLCK);
  ASSERT_EQ(lock2.l_start, 1536);
  ASSERT_EQ(lock2.l_len, 512);
  ASSERT_EQ(lock2.l_pid, getpid());

  // The released middle section reports as unlocked
  fill_flock(lock2, F_RDLCK, 512, 1024);
  ASSERT_EQ(0, ceph_ll_getlk(cmount, fh, &lock2, 43));
  ASSERT_EQ(lock2.l_type, F_UNLCK);
  ASSERT_EQ(lock2.l_start, 512);
  ASSERT_EQ(lock2.l_len, 1024);
  ASSERT_EQ(lock2.l_pid, getpid());

  // Downgrade [0, 512) to a read lock
  fill_flock(lock1, F_RDLCK, 0, 512);
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, 42, false));

  // getlk confirms the downgrade
  fill_flock(lock2, F_WRLCK, 100, 100);
  ASSERT_EQ(0, ceph_ll_getlk(cmount, fh, &lock2, 43));
  ASSERT_EQ(lock2.l_type, F_RDLCK);
  ASSERT_EQ(lock2.l_start, 0);
  ASSERT_EQ(lock2.l_len, 512);
  ASSERT_EQ(lock2.l_pid, getpid());

  // Upgrade [0, 512) back to a write lock
  fill_flock(lock1, F_WRLCK, 0, 512);
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, 42, false));

  // getlk confirms the upgrade
  fill_flock(lock2, F_WRLCK, 100, 100);
  ASSERT_EQ(0, ceph_ll_getlk(cmount, fh, &lock2, 43));
  ASSERT_EQ(lock2.l_type, F_WRLCK);
  ASSERT_EQ(lock2.l_start, 0);
  ASSERT_EQ(lock2.l_len, 512);
  ASSERT_EQ(lock2.l_pid, getpid());

  ASSERT_EQ(0, ceph_ll_close(cmount, fh));
  ASSERT_EQ(0, ceph_ll_unlink(cmount, root, c_file, perms));
  CLEANUP_CEPH();
}
/* Locking in different threads */
// Used by ConcurrentLocking test
// Shared state for the concurrent/inter-process record-locking tests.
// The two semaphore pairs implement a ping/pong rendezvous between the
// main test body and its worker thread(s)/process(es); consecutive step
// numbers alternate between index [0] and [1] (see PING_*/WAIT_* macros)
// so adjacent steps cannot be confused.
struct str_ConcurrentRecordLocking {
  const char *file;                // relative path of the lock-test file
  struct ceph_mount_info *cmount; // !NULL if shared
  sem_t sem[2];                    // worker -> main notifications
  sem_t semReply[2];               // main -> worker notifications
  // 'pshared' is nonzero when the struct lives in shared memory and must
  // synchronize across fork()ed processes.
  void sem_init(int pshared) {
    ASSERT_EQ(0, ::sem_init(&sem[0], pshared, 0));
    ASSERT_EQ(0, ::sem_init(&sem[1], pshared, 0));
    ASSERT_EQ(0, ::sem_init(&semReply[0], pshared, 0));
    ASSERT_EQ(0, ::sem_init(&semReply[1], pshared, 0));
  }
  void sem_destroy() {
    ASSERT_EQ(0, ::sem_destroy(&sem[0]));
    ASSERT_EQ(0, ::sem_destroy(&sem[1]));
    ASSERT_EQ(0, ::sem_destroy(&semReply[0]));
    ASSERT_EQ(0, ::sem_destroy(&semReply[1]));
  }
};
// Wakeup main (for (N) steps)
#define PING_MAIN(n) ASSERT_EQ(0, sem_post(&s.sem[n%2]))
// Wait for main to wake us up (for (RN) steps)
#define WAIT_MAIN(n) \
ASSERT_EQ(0, sem_timedwait(&s.semReply[n%2], abstime(ts, waitSlowMs)))
// Wakeup worker (for (RN) steps)
#define PING_WORKER(n) ASSERT_EQ(0, sem_post(&s.semReply[n%2]))
// Wait for worker to wake us up (for (N) steps)
#define WAIT_WORKER(n) \
ASSERT_EQ(0, sem_timedwait(&s.sem[n%2], abstime(ts, waitSlowMs)))
// Worker shall not wake us up (for (N) steps)
#define NOT_WAIT_WORKER(n) \
ASSERT_EQ(-1, sem_timedwait(&s.sem[n%2], abstime(ts, waitMs)))
// Do twice an operation
#define TWICE(EXPR) do { \
EXPR; \
EXPR; \
} while(0)
/* Locking in different threads */
// Used by ConcurrentLocking test
// Worker-thread body for the ConcurrentRecordLocking tests: opens the same
// file as the main thread and performs a scripted sequence of lock calls,
// signalling the main thread after each numbered step (N) and blocking on
// it at the reply points (RN).  The thread id is the lock owner, so this
// thread's locks are distinct from the main thread's.
static void thread_ConcurrentRecordLocking(str_ConcurrentRecordLocking& s) {
  struct ceph_mount_info *const cmount = s.cmount;
  Fh *fh = NULL;
  Inode *root = NULL, *inode = NULL;
  struct ceph_statx stx;
  struct flock lock1;
  int rc;
  struct timespec ts;
  // Get the root inode
  rc = ceph_ll_lookup_root(cmount, &root);
  ASSERT_EQ(rc, 0);
  // Get the inode and Fh corresponding to c_file
  rc = ceph_ll_create(cmount, root, s.file, fileMode, O_RDWR | O_CREAT,
		      &inode, &fh, &stx, 0, 0, ceph_mount_perms(cmount));
  ASSERT_EQ(rc, 0);
  // Main thread holds the write lock: non-blocking attempt must fail.
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
  PING_MAIN(1); // (1)
  // Blocking write lock: succeeds once the main thread releases its lock.
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), true));
  PING_MAIN(2); // (2)
  // Release the exclusive lock again.
  lock1.l_type = F_UNLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
  PING_MAIN(3); // (3)
  // Take a shared (read) lock, blocking until granted.
  lock1.l_type = F_RDLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), true));
  PING_MAIN(4); // (4)
  WAIT_MAIN(1); // (R1)
  // Main asked us to drop the shared lock.
  lock1.l_type = F_UNLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
  PING_MAIN(5); // (5)
  WAIT_MAIN(2); // (R2)
  // Main asked us to take the write lock; block until main unlocks.
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), true));
  PING_MAIN(6); // (6)
  WAIT_MAIN(3); // (R3)
  // Main asked us to release the write lock so it can relock.
  lock1.l_type = F_UNLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
  PING_MAIN(7); // (7)
  ASSERT_EQ(0, ceph_ll_close(cmount, fh));
}
// Used by ConcurrentRecordLocking test
static void* thread_ConcurrentRecordLocking_(void *arg) {
str_ConcurrentRecordLocking *const s =
reinterpret_cast<str_ConcurrentRecordLocking*>(arg);
thread_ConcurrentRecordLocking(*s);
return NULL;
}
TEST(LibCephFS, ConcurrentRecordLocking) {
const pid_t mypid = getpid();
struct ceph_mount_info *cmount;
STARTUP_CEPH();
char c_file[1024];
sprintf(c_file, "recordlock_test_%d", mypid);
Fh *fh = NULL;
Inode *root = NULL, *inode = NULL;
struct ceph_statx stx;
struct flock lock1;
int rc;
UserPerm *perms = ceph_mount_perms(cmount);
// Get the root inode
rc = ceph_ll_lookup_root(cmount, &root);
ASSERT_EQ(rc, 0);
// Get the inode and Fh corresponding to c_file
rc = ceph_ll_create(cmount, root, c_file, fileMode, O_RDWR | O_CREAT,
&inode, &fh, &stx, 0, 0, perms);
ASSERT_EQ(rc, 0);
// Lock
lock1.l_type = F_WRLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), true));
// Start locker thread
pthread_t thread;
struct timespec ts;
str_ConcurrentRecordLocking s = { c_file, cmount };
s.sem_init(0);
ASSERT_EQ(0, pthread_create(&thread, NULL, thread_ConcurrentRecordLocking_, &s));
// Synchronization point with thread (failure: thread is dead)
WAIT_WORKER(1); // (1)
// Shall not have lock immediately
NOT_WAIT_WORKER(2); // (2)
// Unlock
lock1.l_type = F_UNLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
// Shall have lock
// Synchronization point with thread (failure: thread is dead)
WAIT_WORKER(2); // (2)
// Synchronization point with thread (failure: thread is dead)
WAIT_WORKER(3); // (3)
// Wait for thread to share lock
WAIT_WORKER(4); // (4)
lock1.l_type = F_WRLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
lock1.l_type = F_RDLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
// Wake up thread to unlock shared lock
PING_WORKER(1); // (R1)
WAIT_WORKER(5); // (5)
// Now we can lock exclusively
// Upgrade to exclusive lock (as per POSIX)
lock1.l_type = F_WRLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), true));
// Wake up thread to lock shared lock
PING_WORKER(2); // (R2)
// Shall not have lock immediately
NOT_WAIT_WORKER(6); // (6)
// Release lock ; thread will get it
lock1.l_type = F_UNLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
WAIT_WORKER(6); // (6)
// We no longer have the lock
lock1.l_type = F_WRLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
lock1.l_type = F_RDLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
// Wake up thread to unlock exclusive lock
PING_WORKER(3); // (R3)
WAIT_WORKER(7); // (7)
// We can lock it again
lock1.l_type = F_WRLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
lock1.l_type = F_UNLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
// Cleanup
void *retval = (void*) (uintptr_t) -1;
ASSERT_EQ(0, pthread_join(thread, &retval));
ASSERT_EQ(NULL, retval);
s.sem_destroy();
ASSERT_EQ(0, ceph_ll_close(cmount, fh));
ASSERT_EQ(0, ceph_ll_unlink(cmount, root, c_file, perms));
CLEANUP_CEPH();
}
TEST(LibCephFS, ThreesomeRecordLocking) {
const pid_t mypid = getpid();
struct ceph_mount_info *cmount;
STARTUP_CEPH();
char c_file[1024];
sprintf(c_file, "recordlock_test_%d", mypid);
Fh *fh = NULL;
Inode *root = NULL, *inode = NULL;
struct ceph_statx stx;
struct flock lock1;
int rc;
UserPerm *perms = ceph_mount_perms(cmount);
// Get the root inode
rc = ceph_ll_lookup_root(cmount, &root);
ASSERT_EQ(rc, 0);
// Get the inode and Fh corresponding to c_file
rc = ceph_ll_create(cmount, root, c_file, fileMode, O_RDWR | O_CREAT,
&inode, &fh, &stx, 0, 0, perms);
ASSERT_EQ(rc, 0);
// Lock
lock1.l_type = F_WRLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), true));
// Start locker thread
pthread_t thread[2];
struct timespec ts;
str_ConcurrentRecordLocking s = { c_file, cmount };
s.sem_init(0);
ASSERT_EQ(0, pthread_create(&thread[0], NULL, thread_ConcurrentRecordLocking_, &s));
ASSERT_EQ(0, pthread_create(&thread[1], NULL, thread_ConcurrentRecordLocking_, &s));
// Synchronization point with thread (failure: thread is dead)
TWICE(WAIT_WORKER(1)); // (1)
// Shall not have lock immediately
NOT_WAIT_WORKER(2); // (2)
// Unlock
lock1.l_type = F_UNLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
// Shall have lock
TWICE(// Synchronization point with thread (failure: thread is dead)
WAIT_WORKER(2); // (2)
// Synchronization point with thread (failure: thread is dead)
WAIT_WORKER(3)); // (3)
// Wait for thread to share lock
TWICE(WAIT_WORKER(4)); // (4)
lock1.l_type = F_WRLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
lock1.l_type = F_RDLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
// Wake up thread to unlock shared lock
TWICE(PING_WORKER(1); // (R1)
WAIT_WORKER(5)); // (5)
// Now we can lock exclusively
// Upgrade to exclusive lock (as per POSIX)
lock1.l_type = F_WRLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), true));
TWICE( // Wake up thread to lock shared lock
PING_WORKER(2); // (R2)
// Shall not have lock immediately
NOT_WAIT_WORKER(6)); // (6)
// Release lock ; thread will get it
lock1.l_type = F_UNLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
TWICE(WAIT_WORKER(6); // (6)
// We no longer have the lock
lock1.l_type = F_WRLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
lock1.l_type = F_RDLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
// Wake up thread to unlock exclusive lock
PING_WORKER(3); // (R3)
WAIT_WORKER(7); // (7)
);
// We can lock it again
lock1.l_type = F_WRLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
lock1.l_type = F_UNLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
// Cleanup
void *retval = (void*) (uintptr_t) -1;
ASSERT_EQ(0, pthread_join(thread[0], &retval));
ASSERT_EQ(NULL, retval);
ASSERT_EQ(0, pthread_join(thread[1], &retval));
ASSERT_EQ(NULL, retval);
s.sem_destroy();
ASSERT_EQ(0, ceph_ll_close(cmount, fh));
ASSERT_EQ(0, ceph_ll_unlink(cmount, root, c_file, perms));
CLEANUP_CEPH();
}
/* Locking in different processes */
#define PROCESS_SLOW_MS() \
static const long waitMs = 100; \
(void) waitMs
// Used by ConcurrentLocking test
// Child-process body for the (disabled) inter-process locking tests:
// mounts its own client, then walks the same scripted lock sequence as
// thread_ConcurrentRecordLocking, using the child's pid as the lock owner
// and rendezvousing with the parent via the process-shared semaphores.
static void process_ConcurrentRecordLocking(str_ConcurrentRecordLocking& s) {
  const pid_t mypid = getpid();
  PROCESS_SLOW_MS();
  struct ceph_mount_info *cmount = NULL;
  struct timespec ts;
  Fh *fh = NULL;
  Inode *root = NULL, *inode = NULL;
  struct ceph_statx stx;
  int rc;
  struct flock lock1;
  STARTUP_CEPH();
  s.cmount = cmount;
  // Get the root inode
  rc = ceph_ll_lookup_root(cmount, &root);
  ASSERT_EQ(rc, 0);
  // Get the inode and Fh corresponding to c_file
  rc = ceph_ll_create(cmount, root, s.file, fileMode, O_RDWR | O_CREAT,
		      &inode, &fh, &stx, 0, 0, ceph_mount_perms(cmount));
  ASSERT_EQ(rc, 0);
  WAIT_MAIN(1); // (R1)
  // Parent holds the write lock: non-blocking attempt must fail.
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
  PING_MAIN(1); // (1)
  // Blocking write lock: succeeds once the parent unlocks.
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, true));
  PING_MAIN(2); // (2)
  // Release the exclusive lock again.
  lock1.l_type = F_UNLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
  PING_MAIN(3); // (3)
  // Take a shared (read) lock, blocking until granted.
  lock1.l_type = F_RDLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, true));
  PING_MAIN(4); // (4)
  WAIT_MAIN(2); // (R2)
  // Parent asked us to drop the shared lock.
  lock1.l_type = F_UNLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
  PING_MAIN(5); // (5)
  WAIT_MAIN(3); // (R3)
  // Parent asked us to take the write lock; block until it unlocks.
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, true));
  PING_MAIN(6); // (6)
  WAIT_MAIN(4); // (R4)
  // Parent asked us to release the write lock so it can relock.
  lock1.l_type = F_UNLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
  PING_MAIN(7); // (7)
  ASSERT_EQ(0, ceph_ll_close(cmount, fh));
  CLEANUP_CEPH();
  // NOTE(review): the parent also calls s.sem_destroy() on these same
  // process-shared semaphores after waitpid(); destroying a semaphore
  // twice is undefined per POSIX -- confirm the child-side destroy is
  // intentional.
  s.sem_destroy();
  exit(EXIT_SUCCESS);
}
// Disabled because of fork() issues (http://tracker.ceph.com/issues/16556)
#ifndef _WIN32
TEST(LibCephFS, DISABLED_InterProcessRecordLocking) {
PROCESS_SLOW_MS();
// Process synchronization
char c_file[1024];
const pid_t mypid = getpid();
sprintf(c_file, "recordlock_test_%d", mypid);
Fh *fh = NULL;
Inode *root = NULL, *inode = NULL;
struct ceph_statx stx;
struct flock lock1;
int rc;
// Note: the semaphores MUST be on a shared memory segment
str_ConcurrentRecordLocking *const shs =
reinterpret_cast<str_ConcurrentRecordLocking*>
(mmap(0, sizeof(*shs), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
-1, 0));
str_ConcurrentRecordLocking &s = *shs;
s.file = c_file;
s.sem_init(1);
// Start locker process
const pid_t pid = fork();
ASSERT_GE(pid, 0);
if (pid == 0) {
process_ConcurrentRecordLocking(s);
exit(EXIT_FAILURE);
}
struct timespec ts;
struct ceph_mount_info *cmount;
STARTUP_CEPH();
UserPerm *perms = ceph_mount_perms(cmount);
// Get the root inode
rc = ceph_ll_lookup_root(cmount, &root);
ASSERT_EQ(rc, 0);
// Get the inode and Fh corresponding to c_file
rc = ceph_ll_create(cmount, root, c_file, fileMode, O_RDWR | O_CREAT,
&inode, &fh, &stx, 0, 0, perms);
ASSERT_EQ(rc, 0);
// Lock
lock1.l_type = F_WRLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, true));
// Synchronization point with process (failure: process is dead)
PING_WORKER(1); // (R1)
WAIT_WORKER(1); // (1)
// Shall not have lock immediately
NOT_WAIT_WORKER(2); // (2)
// Unlock
lock1.l_type = F_UNLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
// Shall have lock
// Synchronization point with process (failure: process is dead)
WAIT_WORKER(2); // (2)
// Synchronization point with process (failure: process is dead)
WAIT_WORKER(3); // (3)
// Wait for process to share lock
WAIT_WORKER(4); // (4)
lock1.l_type = F_WRLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
lock1.l_type = F_RDLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
// Wake up process to unlock shared lock
PING_WORKER(2); // (R2)
WAIT_WORKER(5); // (5)
// Now we can lock exclusively
// Upgrade to exclusive lock (as per POSIX)
lock1.l_type = F_WRLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, true));
// Wake up process to lock shared lock
PING_WORKER(3); // (R3)
// Shall not have lock immediately
NOT_WAIT_WORKER(6); // (6)
// Release lock ; process will get it
lock1.l_type = F_UNLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
WAIT_WORKER(6); // (6)
// We no longer have the lock
lock1.l_type = F_WRLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
lock1.l_type = F_RDLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
// Wake up process to unlock exclusive lock
PING_WORKER(4); // (R4)
WAIT_WORKER(7); // (7)
// We can lock it again
lock1.l_type = F_WRLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
lock1.l_type = F_UNLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
// Wait pid
int status;
ASSERT_EQ(pid, waitpid(pid, &status, 0));
ASSERT_EQ(EXIT_SUCCESS, status);
// Cleanup
s.sem_destroy();
ASSERT_EQ(0, munmap(shs, sizeof(*shs)));
ASSERT_EQ(0, ceph_ll_close(cmount, fh));
ASSERT_EQ(0, ceph_ll_unlink(cmount, root, c_file, perms));
CLEANUP_CEPH();
}
#endif
#ifndef _WIN32
// Disabled because of fork() issues (http://tracker.ceph.com/issues/16556)
// Two forked locker processes run process_ConcurrentRecordLocking() in
// lock-step with this parent; see the numbered (N)/(RN) rendezvous points.
// All parent lock operations use 'mypid' as the lock owner, matching
// DISABLED_InterProcessRecordLocking above.
TEST(LibCephFS, DISABLED_ThreesomeInterProcessRecordLocking) {
  PROCESS_SLOW_MS();
  // Process synchronization
  char c_file[1024];
  const pid_t mypid = getpid();
  sprintf(c_file, "recordlock_test_%d", mypid);
  Fh *fh = NULL;
  Inode *root = NULL, *inode = NULL;
  struct ceph_statx stx;
  struct flock lock1;
  int rc;
  // Note: the semaphores MUST be on a shared memory segment
  str_ConcurrentRecordLocking *const shs =
    reinterpret_cast<str_ConcurrentRecordLocking*>
    (mmap(0, sizeof(*shs), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
	  -1, 0));
  str_ConcurrentRecordLocking &s = *shs;
  s.file = c_file;
  s.sem_init(1);
  // Start locker processes
  pid_t pid[2];
  pid[0] = fork();
  ASSERT_GE(pid[0], 0);
  if (pid[0] == 0) {
    process_ConcurrentRecordLocking(s);
    exit(EXIT_FAILURE);
  }
  pid[1] = fork();
  ASSERT_GE(pid[1], 0);
  if (pid[1] == 0) {
    process_ConcurrentRecordLocking(s);
    exit(EXIT_FAILURE);
  }
  struct timespec ts;
  struct ceph_mount_info *cmount;
  STARTUP_CEPH();
  // Get the root inode
  rc = ceph_ll_lookup_root(cmount, &root);
  ASSERT_EQ(rc, 0);
  // Get the inode and Fh corresponding to c_file
  UserPerm *perms = ceph_mount_perms(cmount);
  rc = ceph_ll_create(cmount, root, c_file, fileMode, O_RDWR | O_CREAT,
		      &inode, &fh, &stx, 0, 0, perms);
  ASSERT_EQ(rc, 0);
  // Lock
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, true));
  // Synchronization point with process (failure: process is dead)
  TWICE(PING_WORKER(1)); // (R1)
  TWICE(WAIT_WORKER(1)); // (1)
  // Shall not have lock immediately
  NOT_WAIT_WORKER(2); // (2)
  // Unlock
  lock1.l_type = F_UNLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
  // Shall have lock
  TWICE(// Synchronization point with process (failure: process is dead)
	WAIT_WORKER(2); // (2)
	// Synchronization point with process (failure: process is dead)
	WAIT_WORKER(3)); // (3)
  // Wait for process to share lock
  TWICE(WAIT_WORKER(4)); // (4)
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
  lock1.l_type = F_RDLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
  // Wake up process to unlock shared lock
  TWICE(PING_WORKER(2); // (R2)
	WAIT_WORKER(5)); // (5)
  // Now we can lock exclusively
  // Upgrade to exclusive lock (as per POSIX)
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, true));
  TWICE( // Wake up process to lock shared lock
	PING_WORKER(3); // (R3)
	// Shall not have lock immediately
	NOT_WAIT_WORKER(6)); // (6)
  // Release lock ; process will get it
  lock1.l_type = F_UNLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
  TWICE(WAIT_WORKER(6); // (6)
	// We no longer have the lock.  Use the process-wide owner 'mypid',
	// consistent with the rest of this inter-process test (previously
	// ceph_pthread_self(), a thread id, was passed here by mistake).
	lock1.l_type = F_WRLCK;
	lock1.l_whence = SEEK_SET;
	lock1.l_start = 0;
	lock1.l_len = 1024;
	lock1.l_pid = getpid();
	ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
	lock1.l_type = F_RDLCK;
	lock1.l_whence = SEEK_SET;
	lock1.l_start = 0;
	lock1.l_len = 1024;
	lock1.l_pid = getpid();
	ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
	// Wake up process to unlock exclusive lock
	PING_WORKER(4); // (R4)
	WAIT_WORKER(7); // (7)
	);
  // We can lock it again
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
  lock1.l_type = F_UNLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
  // Wait pids
  int status;
  ASSERT_EQ(pid[0], waitpid(pid[0], &status, 0));
  ASSERT_EQ(EXIT_SUCCESS, status);
  ASSERT_EQ(pid[1], waitpid(pid[1], &status, 0));
  ASSERT_EQ(EXIT_SUCCESS, status);
  // Cleanup
  s.sem_destroy();
  ASSERT_EQ(0, munmap(shs, sizeof(*shs)));
  ASSERT_EQ(0, ceph_ll_close(cmount, fh));
  ASSERT_EQ(0, ceph_ll_unlink(cmount, root, c_file, perms));
  CLEANUP_CEPH();
}
#endif
| 31,209 | 27.218807 | 91 | cc |
null | ceph-main/src/test/libcephfs/snapdiff.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "gtest/gtest.h"
#include "include/cephfs/libcephfs.h"
#include "include/stat.h"
#include "include/ceph_assert.h"
#include "include/object.h"
#include "include/stringify.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <string>
#include <vector>
#include <algorithm>
#include <limits.h>
#include <dirent.h>
using namespace std;
// Test fixture wrapping a live CephFS mount.
//
// On construction it mounts the filesystem and creates a per-process
// working directory ("/<root_dir_name>_<pid>"); on destruction it purges
// that directory and unmounts.  It exposes small helpers for file,
// directory and snapshot manipulation plus listing/SnapDiff verification
// used by the tests below.
class TestMount {
  ceph_mount_info* cmount = nullptr;
  char dir_path[64];  // absolute path of the per-process working dir
public:
  TestMount( const char* root_dir_name = "dir0") {
    ceph_create(&cmount, NULL);
    ceph_conf_read_file(cmount, NULL);
    ceph_conf_parse_env(cmount, NULL);
    ceph_assert(0 == ceph_mount(cmount, NULL));
    // suffix with pid so concurrent test runs don't collide
    snprintf(dir_path, sizeof(dir_path), "/%s_%d", root_dir_name, getpid());
    ceph_assert(0 == ceph_mkdir(cmount, dir_path, 0777));
  }
  ~TestMount()
  {
    if (cmount) {
      ceph_assert(0 == purge_dir(""));
    }
    ceph_rmdir(cmount, dir_path);
    ceph_shutdown(cmount);
  }

  int conf_get(const char *option, char *buf, size_t len) {
    return ceph_conf_get(cmount, option, buf, len);
  }

  // Build an absolute path for a path relative to the working dir.
  string make_file_path(const char* relpath) {
    char path[PATH_MAX];
    snprintf(path, sizeof(path), "%s/%s", dir_path, relpath);
    return path;
  }

  // Decorate a snapshot name with the pid; an empty input name yields
  // an empty result (used to simulate an empty snap name in tests).
  string make_snap_name(const char* name) {
    char snap_name[64];
    if (name && *name) {
      snprintf(snap_name, sizeof(snap_name), "%s_%d", name, getpid());
    } else {
      // just simulate empty snapname
      snap_name[0] = 0;
    }
    return snap_name;
  }

  // Build a ".snap/<name>[/<subdir>]" path for the given snapshot.
  string make_snap_path(const char* sname, const char* subdir = nullptr) {
    char snap_path[PATH_MAX];
    string snap_name = subdir ?
      concat_path(make_snap_name(sname), subdir) :
      make_snap_name(sname);
    snprintf(snap_path, sizeof(snap_path), ".snap/%s", snap_name.c_str());
    return snap_path;
  }

  int mksnap(const char* name) {
    string snap_name = make_snap_name(name);
    return ceph_mksnap(cmount, dir_path, snap_name.c_str(),
                       0755, nullptr, 0);
  }

  int rmsnap(const char* name) {
    string snap_name = make_snap_name(name);
    return ceph_rmsnap(cmount, dir_path, snap_name.c_str());
  }

  // Look up the snapshot id for the given (undecorated) snapshot name.
  // Returns 0 and fills *res on success, negative errno otherwise.
  int get_snapid(const char* name, uint64_t* res)
  {
    ceph_assert(res);
    snap_info snap_info;

    char snap_path[PATH_MAX];
    string snap_name = make_snap_name(name);
    snprintf(snap_path, sizeof(snap_path),
             "%s/.snap/%s", dir_path, snap_name.c_str());
    int r = ceph_get_snap_info(cmount, snap_path, &snap_info);
    if (r >= 0) {
      *res = snap_info.id;
      r = 0;
    }
    return r;
  }

  // Create/overwrite a file with the given content and fsync it.
  // Returns the number of bytes written or a negative errno.
  int write_full(const char* relpath, const string& data)
  {
    auto file_path = make_file_path(relpath);
    int fd = ceph_open(cmount, file_path.c_str(), O_WRONLY | O_CREAT, 0666);
    if (fd < 0) {
      // propagate the real (negative) errno rather than masking it
      // as -EACCES
      return fd;
    }
    int r = ceph_write(cmount, fd, data.c_str(), data.size(), 0);
    if (r >= 0) {
      // truncate in case the previous content was longer
      ceph_truncate(cmount, file_path.c_str(), data.size());
      ceph_fsync(cmount, fd, 0);
    }
    ceph_close(cmount, fd);
    return r;
  }

  // Join two path components, inserting a '/' only when needed.
  string concat_path(string_view path, string_view name) {
    string s(path);
    if (s.empty() || s.back() != '/') {
      s += '/';
    }
    s += name;
    return s;
  }

  int unlink(const char* relpath)
  {
    auto file_path = make_file_path(relpath);
    return ceph_unlink(cmount, file_path.c_str());
  }

  // Verify that a directory can be opened (and close it right away).
  int test_open(const char* relpath)
  {
    auto subdir_path = make_file_path(relpath);
    struct ceph_dir_result* ls_dir;
    int r = ceph_opendir(cmount, subdir_path.c_str(), &ls_dir);
    if (r != 0) {
      return r;
    }
    ceph_assert(0 == ceph_closedir(cmount, ls_dir));
    return r;
  }

  // Iterate over a directory, invoking fn for every entry except
  // "." and "..".  fn returning false aborts the walk with -EINTR.
  int for_each_readdir(const char* relpath,
    std::function<bool(const dirent*, const struct ceph_statx*)> fn)
  {
    auto subdir_path = make_file_path(relpath);
    struct ceph_dir_result* ls_dir;
    int r = ceph_opendir(cmount, subdir_path.c_str(), &ls_dir);
    if (r != 0) {
      return r;
    }

    while (1) {
      struct dirent result;
      struct ceph_statx stx;

      r = ceph_readdirplus_r(
        cmount, ls_dir, &result, &stx, CEPH_STATX_BASIC_STATS,
        0,
        NULL);
      if (!r)
        break;  // end of directory
      if (r < 0) {
        std::cerr << "ceph_readdirplus_r failed, error: "
                  << r << std::endl;
        return r;
      }

      if (strcmp(result.d_name, ".") == 0 ||
          strcmp(result.d_name, "..") == 0) {
        continue;
      }
      if (!fn(&result, &stx)) {
        r = -EINTR;
        break;
      }
    }
    ceph_assert(0 == ceph_closedir(cmount, ls_dir));
    return r;
  }

  // List a directory and make sure it contains exactly the names in
  // expected0 (in any order).  Returns 0 on match, -ENOTEMPTY if some
  // expected entries are missing, -EINTR on an unexpected entry.
  int readdir_and_compare(const char* relpath,
                          const vector<string>& expected0)
  {
    vector<string> expected(expected0);
    auto end = expected.end();
    int r = for_each_readdir(relpath,
      [&](const dirent* dire, const struct ceph_statx* stx) {
        std::string name(dire->d_name);
        auto it = std::find(expected.begin(), end, name);
        if (it == end) {
          std::cerr << "readdir_and_compare error: unexpected name:"
                    << name << std::endl;
          return false;
        }
        expected.erase(it);
        return true;
    });
    if (r == 0 && !expected.empty()) {
      std::cerr << __func__ << " error: left entries:" << std::endl;
      for (auto& e : expected) {
        std::cerr << e << std::endl;
      }
      std::cerr << __func__ << " ************" << std::endl;
      r = -ENOTEMPTY;
    }
    return r;
  }

  // Iterate over the SnapDiff listing (snap1 vs. snap2) for relpath,
  // invoking fn(entry, snapid) for every entry except "." and "..".
  // fn returning false aborts the walk with -EINTR.
  int for_each_readdir_snapdiff(const char* relpath,
    const char* snap1,
    const char* snap2,
    std::function<bool(const dirent*, uint64_t)> fn)
  {
    auto s1 = make_snap_name(snap1);
    auto s2 = make_snap_name(snap2);
    ceph_snapdiff_info info;
    ceph_snapdiff_entry_t res_entry;
    int r = ceph_open_snapdiff(cmount,
                               dir_path,
                               relpath,
                               s1.c_str(),
                               s2.c_str(),
                               &info);
    if (r != 0) {
      std::cerr << " Failed to open snapdiff, ret:" << r << std::endl;
      return r;
    }
    while (0 < (r = ceph_readdir_snapdiff(&info,
                                          &res_entry))) {
      if (strcmp(res_entry.dir_entry.d_name, ".") == 0 ||
          strcmp(res_entry.dir_entry.d_name, "..") == 0) {
        continue;
      }
      if (!fn(&res_entry.dir_entry, res_entry.snapid)) {
        r = -EINTR;
        break;
      }
    }
    ceph_assert(0 == ceph_close_snapdiff(&info));
    if (r != 0) {
      std::cerr << " Failed to readdir snapdiff, ret:" << r
                << " " << relpath << ", " << snap1 << " vs. " << snap2
                << std::endl;
    }
    return r;
  }

  // Retrieve the SnapDiff listing and make sure it contains exactly
  // the (name, snapid) pairs in expected0 (in any order).
  int readdir_snapdiff_and_compare(const char* relpath,
    const char* snap1,
    const char* snap2,
    const vector<pair<string, uint64_t>>& expected0)
  {
    vector<pair<string, uint64_t>> expected(expected0);
    auto end = expected.end();
    int r = for_each_readdir_snapdiff(relpath, snap1, snap2,
      [&](const dirent* dire, uint64_t snapid) {
        pair<string, uint64_t> p = std::make_pair(dire->d_name, snapid);
        auto it = std::find(expected.begin(), end, p);
        if (it == end) {
          std::cerr << "readdir_snapdiff_and_compare error: unexpected name:"
                    << dire->d_name << "/" << snapid << std::endl;
          return false;
        }
        expected.erase(it);
        return true;
    });
    if (r == 0 && !expected.empty()) {
      std::cerr << __func__ << " error: left entries:" << std::endl;
      for (auto& e : expected) {
        std::cerr << e.first << "/" << e.second << std::endl;
      }
      std::cerr << __func__ << " ************" << std::endl;
      r = -ENOTEMPTY;
    }
    return r;
  }

  int mkdir(const char* relpath)
  {
    auto path = make_file_path(relpath);
    return ceph_mkdir(cmount, path.c_str(), 0777);
  }

  int rmdir(const char* relpath)
  {
    auto path = make_file_path(relpath);
    return ceph_rmdir(cmount, path.c_str());
  }

  // Recursively remove everything below relpath0; when inclusive is
  // true (and relpath0 is non-empty) remove relpath0 itself as well.
  int purge_dir(const char* relpath0, bool inclusive = true)
  {
    int r =
      for_each_readdir(relpath0,
        [&](const dirent* dire, const struct ceph_statx* stx) {
          string relpath = concat_path(relpath0, dire->d_name);

          if (S_ISDIR(stx->stx_mode)) {
            purge_dir(relpath.c_str());
            rmdir(relpath.c_str());
          } else {
            unlink(relpath.c_str());
          }
          return true;
        });
    if (r != 0) {
      return r;
    }
    if (*relpath0 != 0) {
      r = rmdir(relpath0);
    }
    return r;
  }

  void remove_all() {
    purge_dir("/", false);
  }

  ceph_mount_info* get_cmount() {
    return cmount;
  }

  // Defined below, after the class.
  void verify_snap_diff(vector<pair<string, uint64_t>>& expected,
                        const char* relpath,
                        const char* snap1,
                        const char* snap2);
  void print_snap_diff(const char* relpath,
                       const char* snap1,
                       const char* snap2);

  void prepareSnapDiffLib1Cases();
  void prepareSnapDiffLib2Cases();
  void prepareSnapDiffLib3Cases();
  void prepareHugeSnapDiff(const std::string& name_prefix_start,
                           const std::string& name_prefix_bulk,
                           const std::string& name_prefix_end,
                           size_t file_count,
                           bool bulk_diff);
};
// Helper function to verify readdir_snapdiff returns expected results
void TestMount::verify_snap_diff(vector<pair<string, uint64_t>>& expected,
                                 const char* relpath,
                                 const char* snap1,
                                 const char* snap2)
{
  std::cout << "---------" << snap1 << " vs. " << snap2
            << " diff listing verification for /" << (relpath ? relpath : "")
            << std::endl;
  ASSERT_EQ(0,
    readdir_snapdiff_and_compare(relpath, snap1, snap2, expected));
} // note: stray ';' after the function body removed
// Helper function to print readdir_snapdiff results
void TestMount::print_snap_diff(const char* relpath,
                                const char* snap1,
                                const char* snap2)
{
  std::cout << "---------" << snap1 << " vs. " << snap2
            << " diff listing for /" << (relpath ? relpath : "")
            << std::endl;
  ASSERT_EQ(0, for_each_readdir_snapdiff(relpath, snap1, snap2,
    [&](const dirent* dire, uint64_t snapid) {
      std::cout << dire->d_name << " snap " << snapid << std::endl;
      return true;
    }));
} // note: stray ';' after the function body removed
/* The following method creates some files/folders/snapshots layout,
described in the sheet below.
We're to test SnapDiff readdir API against that structure.
* where:
- xN denotes file 'x' version N.
- X denotes folder name
- * denotes no/removed file/folder
# snap1 snap2
# fileA1 | fileA2 |
# * | fileB2 |
# fileC1 | * |
# fileD1 | fileD1 |
# dirA | dirA |
# dirA/fileA1 | dirA/fileA2 |
# * | dirB |
# * | dirB/fileb2 |
# dirC | * |
# dirC/filec1 | * |
# dirD | dirD |
# dirD/filed1 | dirD/filed1 |
*/
// Build the two-snapshot layout described in the table above.
// NOTE: the statements are strictly ordered — each snapshot captures
// the state produced by everything before it.
void TestMount::prepareSnapDiffLib1Cases()
{
  //************ snap1 *************
  ASSERT_LE(0, write_full("fileA", "hello world"));
  ASSERT_LE(0, write_full("fileC", "hello world to be removed"));
  ASSERT_LE(0, write_full("fileD", "hello world unmodified"));
  ASSERT_EQ(0, mkdir("dirA"));
  ASSERT_LE(0, write_full("dirA/fileA", "file 'A/a' v1"));
  ASSERT_EQ(0, mkdir("dirC"));
  ASSERT_LE(0, write_full("dirC/filec", "file 'C/c' v1"));
  ASSERT_EQ(0, mkdir("dirD"));
  ASSERT_LE(0, write_full("dirD/filed", "file 'D/d' v1"));
  ASSERT_EQ(0, mksnap("snap1"));
  //************ snap2 *************
  ASSERT_LE(0, write_full("fileA", "hello world again in A"));
  ASSERT_LE(0, write_full("fileB", "hello world in B"));
  ASSERT_EQ(0, unlink("fileC"));
  ASSERT_LE(0, write_full("dirA/fileA", "file 'A/a' v2"));
  // dirC and its content disappear entirely between the snapshots
  ASSERT_EQ(0, purge_dir("dirC"));
  ASSERT_EQ(0, mkdir("dirB"));
  ASSERT_LE(0, write_full("dirB/fileb", "file 'B/b' v2"));
  ASSERT_EQ(0, mksnap("snap2"));
}
/*
* Basic functionality testing for the SnapDiff readdir API
*/
TEST(LibCephFS, SnapDiffLib)
{
  TestMount test_mount;

  // Create simple directory tree with a couple of snapshots
  // to test against
  test_mount.prepareSnapDiffLib1Cases();

  uint64_t snapid1;
  uint64_t snapid2;
  // learn snapshot ids and do basic verification
  ASSERT_EQ(0, test_mount.get_snapid("snap1", &snapid1));
  ASSERT_EQ(0, test_mount.get_snapid("snap2", &snapid2));
  ASSERT_GT(snapid1, 0);
  ASSERT_GT(snapid2, 0);
  // snapshot ids are monotonically increasing
  ASSERT_GT(snapid2, snapid1);
  std::cout << snapid1 << " vs. " << snapid2 << std::endl;

  //
  // Make sure root listing for snapshot snap1 is as expected
  //
  {
    std::cout << "---------snap1 listing verification---------" << std::endl;
    string snap_path = test_mount.make_snap_path("snap1");
    vector<string> expected;
    expected.push_back("fileA");
    expected.push_back("fileC");
    expected.push_back("fileD");
    expected.push_back("dirA");
    expected.push_back("dirC");
    expected.push_back("dirD");
    ASSERT_EQ(0,
      test_mount.readdir_and_compare(snap_path.c_str(), expected));
  }

  //
  // Make sure root listing for snapshot snap2 is as expected
  //
  {
    std::cout << "---------snap2 listing verification---------" << std::endl;
    string snap_path = test_mount.make_snap_path("snap2");
    vector<string> expected;
    expected.push_back("fileA");
    expected.push_back("fileB");
    expected.push_back("fileD");
    expected.push_back("dirA");
    expected.push_back("dirB");
    expected.push_back("dirD");
    ASSERT_EQ(0,
      test_mount.readdir_and_compare(snap_path.c_str(), expected));
  }

  //
  // Print snap1 vs. snap2 delta for the root
  //
  test_mount.print_snap_diff("", "snap1", "snap2");

  //
  // Make sure snap1 vs. snap2 delta for the root is as expected
  //
  {
    vector<pair<string, uint64_t>> expected;
    expected.emplace_back("fileA", snapid2);
    expected.emplace_back("fileB", snapid2);
    expected.emplace_back("fileC", snapid1);
    expected.emplace_back("dirA", snapid2);
    expected.emplace_back("dirB", snapid2);
    expected.emplace_back("dirC", snapid1);
    expected.emplace_back("dirD", snapid2);
    test_mount.verify_snap_diff(expected, "", "snap1", "snap2");
  }

  //
  // Make sure snap1 vs. snap2 delta for /dirA is as expected
  //
  {
    vector<pair<string, uint64_t>> expected;
    expected.emplace_back("fileA", snapid2);
    test_mount.verify_snap_diff(expected, "dirA", "snap1", "snap2");
  }

  //
  // Make sure snap1 vs. snap2 delta for /dirB is as expected
  //
  {
    vector<pair<string, uint64_t>> expected;
    expected.emplace_back("fileb", snapid2);
    test_mount.verify_snap_diff(expected, "dirB", "snap1", "snap2");
  }

  //
  // Make sure snap1 vs. snap2 delta for /dirC is as expected
  //
  {
    vector<pair<string, uint64_t>> expected;
    expected.emplace_back("filec", snapid1);
    // NOTE(review): snapshot arguments are deliberately passed in
    // reverse order here — presumably to exercise reverse-order
    // queries against a removed directory; confirm intent.
    test_mount.verify_snap_diff(expected, "dirC", "snap2", "snap1");
  }

  //
  // Make sure snap1 vs. snap2 delta for /dirD is as expected
  //
  {
    // dirD is unchanged between the snapshots, hence an empty delta
    vector<pair<string, uint64_t>> expected;
    test_mount.verify_snap_diff(expected, "dirD", "snap1", "snap2");
  }

  // Make sure SnapDiff returns an error when provided with the same
  // snapshot name for both parties A and B.
  {
    string snap_path = test_mount.make_snap_path("snap2");
    string snap_other_path = snap_path;
    std::cout << "---------invalid snapdiff params, the same snaps---------" << std::endl;
    ASSERT_EQ(-EINVAL, test_mount.for_each_readdir_snapdiff(
      "",
      "snap2",
      "snap2",
      [&](const dirent* dire, uint64_t snapid) {
        return true;
      }));
  }

  // Make sure SnapDiff returns an error when provided with an empty
  // snapshot name for one of the parties
  {
    std::cout << "---------invalid snapdiff params, no snap_other ---------" << std::endl;
    string snap_path = test_mount.make_snap_path("snap2");
    string snap_other_path;
    ASSERT_EQ(-EINVAL, test_mount.for_each_readdir_snapdiff(
      "",
      "snap2",
      "",
      [&](const dirent* dire, uint64_t snapid) {
        return true;
      }));
  }

  std::cout << "------------- closing -------------" << std::endl;
  ASSERT_EQ(0, test_mount.purge_dir(""));
  ASSERT_EQ(0, test_mount.rmsnap("snap1"));
  ASSERT_EQ(0, test_mount.rmsnap("snap2"));
}
/* The following method creates some files/folders/snapshots layout,
described in the sheet below.
We're to test SnapDiff readdir API against that structure.
* where:
- xN denotes file 'x' version N.
- X denotes folder name
- * denotes no/removed file/folder
# snap1 snap2 snap3 head
# fileA1 | fileA2 | fileA2
# * | fileB2 | fileB2
# fileC1 | * | fileC3
# fileD1 | fileD1 | fileD3
# * | * | fileE3
# fileF1 | * | *
# fileG1 | fileG2 | *
# dirA | dirA | *
# dirA/fileA1 | dirA/fileA2 | *
# * | dirB | *
# * | dirB/fileb2 | *
# dirC | * | *
# dirC/filec1 | * | *
# dirD | dirD | dirD
# dirD/filed1 | dirD/filed1 | dirD/filed1
*/
// Build the three-snapshot layout described in the table above.
// NOTE: the statements are strictly ordered — each snapshot captures
// the state produced by everything before it.
void TestMount::prepareSnapDiffLib2Cases()
{
  //************ snap1 *************
  ASSERT_LE(0, write_full("fileA", "hello world"));
  ASSERT_LE(0, write_full("fileC", "hello world to be removed temporarily"));
  ASSERT_LE(0, write_full("fileD", "hello world unmodified"));
  ASSERT_LE(0, write_full("fileF", "hello world to be removed completely"));
  ASSERT_LE(0, write_full("fileG", "hello world to be overwritten at snap2"));
  ASSERT_EQ(0, mkdir("dirA"));
  ASSERT_LE(0, write_full("dirA/fileA", "file 'A/a' v1"));
  ASSERT_EQ(0, mkdir("dirC"));
  ASSERT_LE(0, write_full("dirC/filec", "file 'C/c' v1"));
  ASSERT_EQ(0, mkdir("dirD"));
  ASSERT_LE(0, write_full("dirD/filed", "file 'D/d' v1"));
  ASSERT_EQ(0, mksnap("snap1"));
  //************ snap2 *************
  ASSERT_LE(0, write_full("fileA", "hello world again in A"));
  ASSERT_LE(0, write_full("fileB", "hello world in B"));
  ASSERT_LE(0, write_full("fileG", "hello world to be removed at snap3"));
  // fileC removed here but re-created before snap3
  ASSERT_EQ(0, unlink("fileC"));
  ASSERT_EQ(0, unlink("fileF"));
  ASSERT_LE(0, write_full("dirA/fileA", "file 'A/a' v2"));
  ASSERT_EQ(0, mkdir("dirB"));
  ASSERT_LE(0, write_full("dirB/fileb", "file 'B/b' v2"));
  ASSERT_EQ(0, purge_dir("dirC"));
  ASSERT_EQ(0, mksnap("snap2"));
  //************ snap3 *************
  ASSERT_LE(0, write_full("fileC", "hello world in C recovered"));
  ASSERT_LE(0, write_full("fileD", "hello world in D now modified"));
  ASSERT_LE(0, write_full("fileE", "file 'E' created at snap3"));
  ASSERT_EQ(0, unlink("fileG"));
  ASSERT_EQ(0, purge_dir("dirA"));
  ASSERT_EQ(0, purge_dir("dirB"));
  ASSERT_EQ(0, mksnap("snap3"));
}
/* The following method creates a folder with tons of file
updated between two snapshots
We're to test SnapDiff readdir API against that structure.
* where:
- xN denotes file 'x' version N.
- X denotes folder name
- * denotes no/removed file/folder
# snap1 snap2
* aaaaA1 | aaaaA1 |
* aaaaB1 | * |
* * | aaaaC2 |
* aaaaD1 | aaaaD2 |
# file<NNN>1 | file<NNN>2|
* fileZ1 | fileA1 |
* zzzzA1 | zzzzA1 |
* zzzzB1 | * |
* * | zzzzC2 |
* zzzzD1 | zzzzD2 |
*/
// Build the bulk-update layout described in the table above.
//
// Creates a few "marker" files around a bulk of file_count files named
// <name_prefix_bulk><i>, takes 'snap1', applies add/remove/update
// changes to the markers (and, when bulk_diff is set, rewrites every
// bulk file too), then takes 'snap2'.
void TestMount::prepareHugeSnapDiff(const std::string& name_prefix_start,
                                    const std::string& name_prefix_bulk,
                                    const std::string& name_prefix_end,
                                    size_t file_count,
                                    bool bulk_diff)
{
  //************ snap1 *************
  std::string startA = name_prefix_start + "A";
  std::string startB = name_prefix_start + "B";
  std::string startC = name_prefix_start + "C";  // created at snap2 only
  std::string startD = name_prefix_start + "D";
  std::string endA = name_prefix_end + "A";
  std::string endB = name_prefix_end + "B";
  std::string endC = name_prefix_end + "C";      // created at snap2 only
  std::string endD = name_prefix_end + "D";
  ASSERT_LE(0, write_full(startA.c_str(), "hello world"));
  ASSERT_LE(0, write_full(startB.c_str(), "hello world"));
  ASSERT_LE(0, write_full(startD.c_str(), "hello world"));
  for(size_t i = 0; i < file_count; i++) {
    auto s = name_prefix_bulk + stringify(i);
    ASSERT_LE(0, write_full(s.c_str(), "hello world"));
  }
  ASSERT_LE(0, write_full(endA.c_str(), "hello world"));
  ASSERT_LE(0, write_full(endB.c_str(), "hello world"));
  ASSERT_LE(0, write_full(endD.c_str(), "hello world"));
  ASSERT_EQ(0, mksnap("snap1"));
  //************ snap2 *************
  // unlink() returns 0 on success; use ASSERT_EQ for consistency with
  // the other preparation helpers
  ASSERT_EQ(0, unlink(startB.c_str()));
  ASSERT_LE(0, write_full(startC.c_str(), "hello world2"));
  ASSERT_LE(0, write_full(startD.c_str(), "hello world2"));
  if (bulk_diff) {
    for(size_t i = 0; i < file_count; i++) {
      // name_prefix_bulk is already a std::string — no extra copy needed
      auto s = name_prefix_bulk + stringify(i);
      ASSERT_LE(0, write_full(s.c_str(), "hello world2"));
    }
  }
  ASSERT_EQ(0, unlink(endB.c_str()));
  ASSERT_LE(0, write_full(endC.c_str(), "hello world2"));
  ASSERT_LE(0, write_full(endD.c_str(), "hello world2"));
  ASSERT_EQ(0, mksnap("snap2"));
}
/*
* More versatile SnapDiff readdir API verification,
* includes 3 different snapshots and interleaving/repetitive calls to make sure
* the results aren't spoiled due to caching.
*/
TEST(LibCephFS, SnapDiffLib2)
{
  TestMount test_mount;

  test_mount.prepareSnapDiffLib2Cases();

  // Create simple directory tree with a couple of snapshots to test against
  uint64_t snapid1;
  uint64_t snapid2;
  uint64_t snapid3;
  ASSERT_EQ(0, test_mount.get_snapid("snap1", &snapid1));
  ASSERT_EQ(0, test_mount.get_snapid("snap2", &snapid2));
  ASSERT_EQ(0, test_mount.get_snapid("snap3", &snapid3));
  std::cout << snapid1 << " vs. " << snapid2 << " vs. " << snapid3 << std::endl;
  ASSERT_GT(snapid1, 0);
  ASSERT_GT(snapid2, 0);
  ASSERT_GT(snapid3, 0);
  // snapshot ids are monotonically increasing
  ASSERT_GT(snapid2, snapid1);
  ASSERT_GT(snapid3, snapid2);

  // define a lambda which verifies snap1/snap2/snap3 listings
  auto verify_snap_listing = [&]()
  {
    {
      string snap_path = test_mount.make_snap_path("snap1");

      std::cout << "---------snap1 listing verification---------" << std::endl;
      vector<string> expected;
      expected.push_back("fileA");
      expected.push_back("fileC");
      expected.push_back("fileD");
      expected.push_back("fileF");
      expected.push_back("fileG");
      expected.push_back("dirA");
      expected.push_back("dirC");
      expected.push_back("dirD");
      ASSERT_EQ(0,
        test_mount.readdir_and_compare(snap_path.c_str(), expected));
    }
    {
      std::cout << "---------snap2 listing verification---------" << std::endl;
      string snap_path = test_mount.make_snap_path("snap2");
      vector<string> expected;
      expected.push_back("fileA");
      expected.push_back("fileB");
      expected.push_back("fileD");
      expected.push_back("fileG");
      expected.push_back("dirA");
      expected.push_back("dirB");
      expected.push_back("dirD");
      ASSERT_EQ(0,
        test_mount.readdir_and_compare(snap_path.c_str(), expected));
    }
    {
      std::cout << "---------snap3 listing verification---------" << std::endl;
      string snap_path = test_mount.make_snap_path("snap3");
      vector<string> expected;
      expected.push_back("fileA");
      expected.push_back("fileB");
      expected.push_back("fileC");
      expected.push_back("fileD");
      expected.push_back("fileE");
      expected.push_back("dirD");
      ASSERT_EQ(0,
        test_mount.readdir_and_compare(snap_path.c_str(), expected));
    }
  };

  // Prepare expected delta for snap1 vs. snap2
  vector<pair<string, uint64_t>> snap1_2_diff_expected;
  snap1_2_diff_expected.emplace_back("fileA", snapid2);
  snap1_2_diff_expected.emplace_back("fileB", snapid2);
  snap1_2_diff_expected.emplace_back("fileC", snapid1);
  snap1_2_diff_expected.emplace_back("fileF", snapid1);
  snap1_2_diff_expected.emplace_back("fileG", snapid2);
  snap1_2_diff_expected.emplace_back("dirA", snapid2);
  snap1_2_diff_expected.emplace_back("dirB", snapid2);
  snap1_2_diff_expected.emplace_back("dirC", snapid1);
  snap1_2_diff_expected.emplace_back("dirD", snapid2);

  // Prepare expected delta for snap1 vs. snap3
  vector<pair<string, uint64_t>> snap1_3_diff_expected;
  snap1_3_diff_expected.emplace_back("fileA", snapid3);
  snap1_3_diff_expected.emplace_back("fileB", snapid3);
  snap1_3_diff_expected.emplace_back("fileC", snapid3);
  snap1_3_diff_expected.emplace_back("fileD", snapid3);
  snap1_3_diff_expected.emplace_back("fileE", snapid3);
  snap1_3_diff_expected.emplace_back("fileF", snapid1);
  snap1_3_diff_expected.emplace_back("fileG", snapid1);
  snap1_3_diff_expected.emplace_back("dirA", snapid1);
  snap1_3_diff_expected.emplace_back("dirC", snapid1);
  snap1_3_diff_expected.emplace_back("dirD", snapid3);

  // Prepare expected delta for snap2 vs. snap3
  vector<pair<string, uint64_t>> snap2_3_diff_expected;
  snap2_3_diff_expected.emplace_back("fileC", snapid3);
  snap2_3_diff_expected.emplace_back("fileD", snapid3);
  snap2_3_diff_expected.emplace_back("fileE", snapid3);
  snap2_3_diff_expected.emplace_back("fileG", snapid2);
  snap2_3_diff_expected.emplace_back("dirA", snapid2);
  snap2_3_diff_expected.emplace_back("dirB", snapid2);
  snap2_3_diff_expected.emplace_back("dirD", snapid3);

  // Check snapshot listings on a cold cache
  verify_snap_listing();

  // Check snapshot listings on a warm cache
  verify_snap_listing(); // served from cache

  // Print snap1 vs. snap2 delta against the root folder
  test_mount.print_snap_diff("", "snap1", "snap2");

  // Verify snap1 vs. snap2 delta for the root
  test_mount.verify_snap_diff(snap1_2_diff_expected, "", "snap1", "snap2");

  // Check snapshot listings on a warm cache once again
  // to make sure it wasn't spoiled by SnapDiff
  verify_snap_listing(); // served from cache

  // Verify snap2 vs. snap1 delta (arguments reversed; the same delta
  // is expected)
  test_mount.verify_snap_diff(snap1_2_diff_expected, "", "snap2", "snap1");

  // Check snapshot listings on a warm cache once again
  // to make sure it wasn't spoiled by SnapDiff
  verify_snap_listing(); // served from cache

  // Verify snap1 vs. snap3 delta for the root
  test_mount.verify_snap_diff(snap1_3_diff_expected, "", "snap1", "snap3");

  // Verify snap2 vs. snap3 delta for the root
  test_mount.verify_snap_diff(snap2_3_diff_expected, "", "snap2", "snap3");

  // Check snapshot listings on a warm cache once again
  // to make sure it wasn't spoiled by SnapDiff
  verify_snap_listing(); // served from cache

  // Print snap1 vs. snap2 delta against /dirA folder
  test_mount.print_snap_diff("dirA", "snap1", "snap2");

  // Verify snap1 vs. snap2 delta for /dirA
  {
    vector<pair<string, uint64_t>> expected;
    expected.emplace_back("fileA", snapid2);
    test_mount.verify_snap_diff(expected, "dirA", "snap1", "snap2");
  }

  // Print snap1 vs. snap2 delta against /dirB folder
  test_mount.print_snap_diff("dirB", "snap1", "snap2");

  // Verify snap1 vs. snap2 delta for /dirB
  {
    vector<pair<string, uint64_t>> expected;
    expected.emplace_back("fileb", snapid2);
    test_mount.verify_snap_diff(expected, "dirB", "snap1", "snap2");
  }

  // Print snap1 vs. snap2 delta against /dirD folder
  test_mount.print_snap_diff("dirD", "snap1", "snap2");

  // Verify snap1 vs. snap2 delta for /dirD (unchanged, empty delta)
  {
    vector<pair<string, uint64_t>> expected;
    test_mount.verify_snap_diff(expected, "dirD", "snap1", "snap2");
  }

  // Check snapshot listings on a warm cache once again
  // to make sure it wasn't spoiled by SnapDiff
  verify_snap_listing(); // served from cache

  // Verify snap1 vs. snap2 delta for the root once again
  test_mount.verify_snap_diff(snap1_2_diff_expected, "", "snap1", "snap2");

  // Verify snap2 vs. snap3 delta for the root once again
  // (arguments reversed; the same delta is expected)
  test_mount.verify_snap_diff(snap2_3_diff_expected, "", "snap3", "snap2");

  // Verify snap1 vs. snap3 delta for the root once again
  test_mount.verify_snap_diff(snap1_3_diff_expected, "", "snap1", "snap3");

  std::cout << "------------- closing -------------" << std::endl;
  ASSERT_EQ(0, test_mount.purge_dir(""));
  ASSERT_EQ(0, test_mount.rmsnap("snap1"));
  ASSERT_EQ(0, test_mount.rmsnap("snap2"));
  ASSERT_EQ(0, test_mount.rmsnap("snap3"));
}
/* The following method creates some files/folders/snapshots layout,
described in the sheet below.
We're to test SnapDiff against that structure.
* where:
- xN denotes file 'x' version N.
- X denotes folder name
- * denotes no/removed file/folder
# snap1 snap2 snap3 head
# a1 | a1 | a3 | a4
# b1 | b2 | b3 | b3
# c1 | * | * | *
# * | d2 | d3 | d3
# f1 | f2 | * | *
# ff1 | ff1 | * | *
# g1 | * | g3 | g3
# * | * | * | h4
# i1 | i1 | i1 | i1
# S | S | S | S
# S/sa1 | S/sa2 | S/sa3 | S/sa3
# * | * | * | S/sh4
# * | T | T | T
# * | T/td2 | T/td3 | T/td3
# C | * | * | *
# C/cc1 | * | * | *
# C/CC | * | * | *
# C/CC/c1| * | * | *
# G | * | G | G
# G/gg1 | * | G/gg3 | G/gg3
# * | k2 | * | *
# * | l2 | l2 | *
# * | K | * | *
# * | K/kk2 | * | *
# * | * | H | H
# * | * | H/hh3 | H/hh3
# I | I | I | *
# I/ii1 | I/ii2 | I/ii3 | *
# I/iii1 | I/iii1 | I/iii3| *
# * | * | I/iiii3| *
# * | I/J | I/J | *
# * | I/J/i2 | I/J/i3 | *
# * | I/J/j2 | I/J/j2 | *
# * | I/J/k2 | * | *
# * | * | I/J/l3 | *
# L | L | L | L
# L/ll1 | L/ll1 | L/ll3 | L/ll3
# L/LL | L/LL | L/LL | L/LL
# * | L/LL/ll2| L/LL/ll3| L/LL/ll4
# * | L/LM | * | *
# * | L/LM/lm2| * | *
# * | L/LN | L/LN | *
*/
// Build the three-snapshot + head layout described in the table above.
// NOTE: files 'e' and '~e' are also created/updated/removed below even
// though they are not listed in the table.
// The statements are strictly ordered — each snapshot captures the
// state produced by everything before it.
void TestMount::prepareSnapDiffLib3Cases()
{
  //************ snap1 *************
  ASSERT_LE(0, write_full("a", "file 'a' v1"));
  ASSERT_LE(0, write_full("b", "file 'b' v1"));
  ASSERT_LE(0, write_full("c", "file 'c' v1"));
  ASSERT_LE(0, write_full("e", "file 'e' v1"));
  ASSERT_LE(0, write_full("~e", "file '~e' v1"));
  ASSERT_LE(0, write_full("f", "file 'f' v1"));
  ASSERT_LE(0, write_full("ff", "file 'ff' v1"));
  ASSERT_LE(0, write_full("g", "file 'g' v1"));
  ASSERT_LE(0, write_full("i", "file 'i' v1"));
  ASSERT_EQ(0, mkdir("S"));
  ASSERT_LE(0, write_full("S/sa", "file 'S/sa' v1"));
  ASSERT_EQ(0, mkdir("C"));
  ASSERT_LE(0, write_full("C/cc", "file 'C/cc' v1"));
  ASSERT_EQ(0, mkdir("C/CC"));
  ASSERT_LE(0, write_full("C/CC/c", "file 'C/CC/c' v1"));
  ASSERT_EQ(0, mkdir("G"));
  ASSERT_LE(0, write_full("G/gg", "file 'G/gg' v1"));
  ASSERT_EQ(0, mkdir("I"));
  ASSERT_LE(0, write_full("I/ii", "file 'I/ii' v1"));
  ASSERT_LE(0, write_full("I/iii", "file 'I/iii' v1"));
  ASSERT_EQ(0, mkdir("L"));
  ASSERT_LE(0, write_full("L/ll", "file 'L/ll' v1"));
  ASSERT_EQ(0, mkdir("L/LL"));
  ASSERT_EQ(0, mksnap("snap1"));

  //************ snap2 *************
  ASSERT_LE(0, write_full("b", "file 'b' v2"));
  ASSERT_EQ(0, unlink("c"));
  ASSERT_LE(0, write_full("d", "file 'd' v2"));
  ASSERT_LE(0, write_full("e", "file 'e' v2"));
  ASSERT_LE(0, write_full("~e", "file '~e' v2"));
  ASSERT_LE(0, write_full("f", "file 'f' v2"));
  ASSERT_EQ(0, unlink("g"));
  ASSERT_LE(0, write_full("S/sa", "file 'S/sa' v2"));
  ASSERT_EQ(0, mkdir("T"));
  ASSERT_LE(0, write_full("T/td", "file 'T/td' v2"));
  // C and G (with their content) disappear at snap2
  ASSERT_EQ(0, purge_dir("C"));
  ASSERT_EQ(0, purge_dir("G"));
  ASSERT_LE(0, write_full("k", "file 'k' v2"));
  ASSERT_LE(0, write_full("l", "file 'l' v2"));
  ASSERT_EQ(0, mkdir("K"));
  ASSERT_LE(0, write_full("K/kk", "file 'K/kk' v2"));
  ASSERT_LE(0, write_full("I/ii", "file 'I/ii' v2"));
  ASSERT_EQ(0, mkdir("I/J"));
  ASSERT_LE(0, write_full("I/J/i", "file 'I/J/i' v2"));
  ASSERT_LE(0, write_full("I/J/j", "file 'I/J/j' v2"));
  ASSERT_LE(0, write_full("I/J/k", "file 'I/J/k' v2"));
  ASSERT_LE(0, write_full("L/LL/ll", "file 'L/LL/ll' v2"));
  ASSERT_EQ(0, mkdir("L/LM"));
  ASSERT_LE(0, write_full("L/LM/lm", "file 'L/LM/lm' v2"));
  ASSERT_EQ(0, mkdir("L/LN"));
  ASSERT_EQ(0, mksnap("snap2"));

  //************ snap3 *************
  ASSERT_LE(0, write_full("a", "file 'a' v3"));
  ASSERT_LE(0, write_full("b", "file 'b' v3"));
  ASSERT_LE(0, write_full("d", "file 'd' v3"));
  ASSERT_EQ(0, unlink("e"));
  ASSERT_EQ(0, unlink("~e"));
  ASSERT_EQ(0, unlink("f"));
  ASSERT_EQ(0, unlink("ff"));
  // g is recovered at snap3, G is re-created
  ASSERT_LE(0, write_full("g", "file 'g' v3"));
  ASSERT_LE(0, write_full("S/sa", "file 'S/sa' v3"));
  ASSERT_LE(0, write_full("T/td", "file 'T/td' v3"));
  ASSERT_EQ(0, mkdir("G"));
  ASSERT_LE(0, write_full("G/gg", "file 'G/gg' v3"));
  ASSERT_EQ(0, unlink("k"));
  ASSERT_EQ(0, purge_dir("K"));
  ASSERT_EQ(0, mkdir("H"));
  ASSERT_LE(0, write_full("H/hh", "file 'H/hh' v3"));
  ASSERT_LE(0, write_full("I/ii", "file 'I/ii' v3"));
  ASSERT_LE(0, write_full("I/iii", "file 'I/iii' v3"));
  ASSERT_LE(0, write_full("I/iiii", "file 'I/iiii' v3"));
  ASSERT_LE(0, write_full("I/J/i", "file 'I/J/i' v3"));
  ASSERT_EQ(0, unlink("I/J/k"));
  ASSERT_LE(0, write_full("I/J/l", "file 'I/J/l' v3"));
  ASSERT_LE(0, write_full("L/ll", "file 'L/ll' v3"));
  ASSERT_LE(0, write_full("L/LL/ll", "file 'L/LL/ll' v3"));
  ASSERT_EQ(0, purge_dir("L/LM"));
  ASSERT_EQ(0, mksnap("snap3"));

  //************ head *************
  ASSERT_LE(0, write_full("a", "file 'a' head"));
  ASSERT_LE(0, write_full("h", "file 'h' head"));
  ASSERT_LE(0, write_full("S/sh", "file 'S/sh' head"));
  ASSERT_EQ(0, unlink("l"));
  ASSERT_EQ(0, purge_dir("I"));
  ASSERT_LE(0, write_full("L/LL/ll", "file 'L/LL/ll' head"));
  ASSERT_EQ(0, purge_dir("L/LN"));
}
//
// This case tests SnapDiff functionality for snap1/snap2 snapshot delta
// It operates against FS layout created by prepareSnapDiffCases() method,
// see relevant table before that function for FS state overview.
//
TEST(LibCephFS, SnapDiffCases1_2)
{
  TestMount test_mount;

  // Create directory tree evolving through a bunch of snapshots
  test_mount.prepareSnapDiffLib3Cases();

  uint64_t snapid1;
  uint64_t snapid2;
  ASSERT_EQ(0, test_mount.get_snapid("snap1", &snapid1));
  ASSERT_EQ(0, test_mount.get_snapid("snap2", &snapid2));
  std::cout << snapid1 << " vs. " << snapid2 << std::endl;
  ASSERT_GT(snapid1, 0);
  ASSERT_GT(snapid2, 0);
  ASSERT_GT(snapid2, snapid1);

  // Print snapshot delta (snap1 vs. snap2) results for root in a
  // human-readable form.
  test_mount.print_snap_diff("", "snap1", "snap2");

  {
    // Make sure the root delta is as expected
    // One should use columns snap1 and snap2 from
    // the table preceding prepareSnapDiffCases() function
    // to learn which names to expect in the delta.
    //
    // - file 'a' is unchanged hence not present in delta
    // - file 'ff' is unchanged hence not present in delta
    // - file 'i' is unchanged hence not present in delta
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("b", snapid2);  // file 'b' is updated in snap2
    expected.emplace_back("c", snapid1);  // file 'c' is removed in snap2
    expected.emplace_back("d", snapid2);  // file 'd' is created in snap2
    expected.emplace_back("e", snapid2);  // file 'e' is updated in snap2
    expected.emplace_back("~e", snapid2); // file '~e' is updated in snap2
    expected.emplace_back("f", snapid2);  // file 'f' is updated in snap2
    expected.emplace_back("g", snapid1);  // file 'g' is removed in snap2
    expected.emplace_back("S", snapid2);  // folder 'S' is present in snap2 hence reported
    expected.emplace_back("T", snapid2);  // folder 'T' is created in snap2
    expected.emplace_back("C", snapid1);  // folder 'C' is removed in snap2
    expected.emplace_back("G", snapid1);  // folder 'G' is removed in snap2
    expected.emplace_back("k", snapid2);  // file 'k' is created in snap2
    expected.emplace_back("l", snapid2);  // file 'l' is created in snap2
    expected.emplace_back("K", snapid2);  // folder 'K' is created in snap2
    expected.emplace_back("I", snapid2);  // folder 'I' is created in snap2
    expected.emplace_back("L", snapid2);  // folder 'L' is present in snap2 but got more
                                          // subfolders
    test_mount.verify_snap_diff(expected, "", "snap1", "snap2");
  }
  {
    //
    // Make sure snapshot delta for /S (existed at both snap1 and snap2)
    // is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("sa", snapid2);
    test_mount.verify_snap_diff(expected, "S", "snap1", "snap2");
  }
  {
    //
    // Make sure snapshot delta for /T (created at snap2)
    // is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("td", snapid2);
    test_mount.verify_snap_diff(expected, "T", "snap1", "snap2");
  }
  {
    //
    // Make sure snapshot delta for /C (removed at snap2)
    // is as expected
    // NOTE(review): snapshot arguments passed in reverse order here
    // and below — presumably to exercise reverse-order queries against
    // removed folders; confirm intent.
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("cc", snapid1);
    expected.emplace_back("CC", snapid1);
    test_mount.verify_snap_diff(expected, "C", "snap2", "snap1");
  }
  {
    //
    // Make sure snapshot delta for /C/CC (removed at snap2)
    // is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("c", snapid1);
    test_mount.verify_snap_diff(expected, "C/CC", "snap2", "snap1");
  }
  {
    //
    // Make sure snapshot delta for /I (created at snap2)
    // is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("ii", snapid2);
    expected.emplace_back("J", snapid2);
    test_mount.verify_snap_diff(expected, "I", "snap1", "snap2");
  }
  {
    //
    // Make sure snapshot delta for /I/J (created at snap2)
    // is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("i", snapid2);
    expected.emplace_back("j", snapid2);
    expected.emplace_back("k", snapid2);
    test_mount.verify_snap_diff(expected, "I/J", "snap1", "snap2");
  }
  {
    //
    // Make sure snapshot delta for /L (extended at snap2)
    // is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("LL", snapid2);
    expected.emplace_back("LM", snapid2);
    expected.emplace_back("LN", snapid2);
    test_mount.verify_snap_diff(expected, "L", "snap1", "snap2");
  }
  {
    //
    // Make sure snapshot delta for /L/LL (updated at snap2)
    // is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("ll", snapid2);
    test_mount.verify_snap_diff(expected, "L/LL", "snap1", "snap2");
  }
  {
    //
    // Make sure snapshot delta for /L/LN (created empty at snap2)
    // is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    test_mount.verify_snap_diff(expected, "L/LN", "snap1", "snap2");
  }
  {
    // Make sure snapshot delta for /L/LM (created at snap2)
    // is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("lm", snapid2);
    test_mount.verify_snap_diff(expected, "L/LM", "snap1", "snap2");
  }
  std::cout << "-------------" << std::endl;

  // best-effort cleanup; return codes deliberately not asserted here
  test_mount.remove_all();
  test_mount.rmsnap("snap1");
  test_mount.rmsnap("snap2");
  test_mount.rmsnap("snap3");
}
//
// This case tests SnapDiff functionality for snap2/snap3 snapshot delta
// retrieved through .snap path-based query API.
// It operates against FS layout created by prepareSnapDiffCases() method,
// see relevant table before that function for FS state overview.
//
TEST(LibCephFS, SnapDiffCases2_3)
{
  TestMount test_mount;
  // Create directory tree evolving through a bunch of snapshots
  test_mount.prepareSnapDiffLib3Cases();
  uint64_t snapid2;
  uint64_t snapid3;
  ASSERT_EQ(0, test_mount.get_snapid("snap2", &snapid2));
  ASSERT_EQ(0, test_mount.get_snapid("snap3", &snapid3));
  std::cout << snapid2 << " vs. " << snapid3 << std::endl;
  // Fixed: the second assertion previously re-checked snapid3 and left
  // snapid2 unvalidated.
  ASSERT_GT(snapid2, 0);
  ASSERT_GT(snapid3, 0);
  ASSERT_GT(snapid3, snapid2);
  // Print snapshot delta (snap2 vs. snap3) results for root in a
  // human-readable form.
  test_mount.print_snap_diff("", "snap2", "snap3");
  {
    // Make sure the root delta is as expected
    // One should use columns snap2 and snap3 from
    // the table preceding prepareSnapDiffCases() function
    // to learn which names to expect in the delta.
    //
    // - file 'c' is removed since snap1 hence not present in delta
    // - file 'l' is unchanged hence not present in delta
    // - file 'i' is unchanged hence not present in delta
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("a", snapid3); // file 'a' is updated in snap3
    expected.emplace_back("b", snapid3); // file 'b' is updated in snap3
    expected.emplace_back("d", snapid3); // file 'd' is updated in snap3
    expected.emplace_back("~e", snapid2); // file '~e' is removed in snap3
    expected.emplace_back("e", snapid2); // file 'e' is removed in snap3
    expected.emplace_back("f", snapid2); // file 'f' is removed in snap3
    expected.emplace_back("ff", snapid2); // file 'ff' is removed in snap3
    expected.emplace_back("g", snapid3); // file 'g' re-appeared in snap3
    expected.emplace_back("S", snapid3); // folder 'S' is present in snap3 hence reported
    expected.emplace_back("T", snapid3); // folder 'T' is present in snap3 hence reported
    expected.emplace_back("G", snapid3); // folder 'G' re-appeared in snap3 hence reported
    expected.emplace_back("k", snapid2); // file 'k' is removed in snap3
    expected.emplace_back("K", snapid2); // folder 'K' is removed in snap3
    expected.emplace_back("H", snapid3); // folder 'H' is created in snap3 hence reported
    expected.emplace_back("I", snapid3); // folder 'I' is present in snap3 hence reported
    expected.emplace_back("L", snapid3); // folder 'L' is present in snap3 hence reported
    test_mount.verify_snap_diff(expected, "", "snap2", "snap3");
  }
  {
    //
    // Make sure snapshot delta for /S (children updated) is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("sa", snapid3);
    test_mount.verify_snap_diff(expected, "S", "snap2", "snap3");
  }
  {
    //
    // Make sure snapshot delta for /T (children updated) is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("td", snapid3);
    test_mount.verify_snap_diff(expected, "T", "snap2", "snap3");
  }
  {
    //
    // Make sure snapshot delta for /G (re-appeared) is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("gg", snapid3);
    test_mount.verify_snap_diff(expected, "G", "snap2", "snap3");
  }
  {
    //
    // Make sure snapshot delta for /K (removed) is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("kk", snapid2);
    test_mount.verify_snap_diff(expected, "K", "snap3", "snap2");
  }
  {
    //
    // Make sure snapshot delta for /H (created) is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("hh", snapid3);
    test_mount.verify_snap_diff(expected, "H", "snap2", "snap3");
  }
  {
    //
    // Make sure snapshot delta for /I (children updated) is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("ii", snapid3);
    expected.emplace_back("iii", snapid3);
    expected.emplace_back("iiii", snapid3);
    expected.emplace_back("J", snapid3);
    test_mount.verify_snap_diff(expected, "I", "snap2", "snap3");
  }
  {
    //
    // Make sure snapshot delta for /I/J (children updated/removed) is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("i", snapid3);
    expected.emplace_back("k", snapid2);
    expected.emplace_back("l", snapid3);
    test_mount.verify_snap_diff(expected, "I/J", "snap2", "snap3");
  }
  {
    //
    // Make sure snapshot delta for /L (children updated/removed) is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("ll", snapid3);
    expected.emplace_back("LL", snapid3);
    expected.emplace_back("LM", snapid2);
    expected.emplace_back("LN", snapid3);
    test_mount.verify_snap_diff(expected, "L", "snap2", "snap3");
  }
  {
    //
    // Make sure snapshot delta for /L/LL (children updated) is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("ll", snapid3);
    test_mount.verify_snap_diff(expected, "L/LL", "snap2", "snap3");
  }
  {
    //
    // Make sure snapshot delta for /L/LM (removed) is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("lm", snapid2);
    test_mount.verify_snap_diff(expected, "L/LM", "snap3", "snap2");
  }
  {
    //
    // Make sure snapshot delta for /L/LN (created empty) is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    test_mount.verify_snap_diff(expected, "L/LN", "snap2", "snap3");
  }
  test_mount.remove_all();
  test_mount.rmsnap("snap1");
  test_mount.rmsnap("snap2");
  test_mount.rmsnap("snap3");
}
//
// This case tests SnapDiff functionality for snap1/snap3 snapshot delta
// retrieved through .snap path-based query API.
// It operates against FS layout created by prepareSnapDiffCases() method,
// see relevant table before that function for FS state overview.
//
TEST(LibCephFS, SnapDiffCases1_3)
{
  TestMount test_mount;
  // Create directory tree evolving through a bunch of snapshots
  test_mount.prepareSnapDiffLib3Cases();
  uint64_t snapid1;
  uint64_t snapid3;
  ASSERT_EQ(0, test_mount.get_snapid("snap1", &snapid1));
  ASSERT_EQ(0, test_mount.get_snapid("snap3", &snapid3));
  std::cout << snapid1 << " vs. " << snapid3 << std::endl;
  // Fixed: the first assertion previously re-checked snapid3 and left
  // snapid1 unvalidated.
  ASSERT_GT(snapid1, 0);
  ASSERT_GT(snapid3, 0);
  ASSERT_GT(snapid3, snapid1);
  // Print snapshot delta (snap1 vs. snap3) results for root in a
  // human-readable form.
  test_mount.print_snap_diff("", "snap1", "snap3");
  {
    // Make sure the root delta is as expected
    // One should use columns snap1 and snap3 from
    // the table preceding prepareSnapDiffCases() function
    // to learn which names to expect in the delta.
    //
    // - file 'i' is unchanged hence not present in delta
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("a", snapid3); // file 'a' is updated in snap3
    expected.emplace_back("b", snapid3); // file 'b' is updated in snap3
    expected.emplace_back("c", snapid1); // file 'c' is removed in snap2
    expected.emplace_back("d", snapid3); // file 'd' is updated in snap3
    expected.emplace_back("~e", snapid1); // file '~e' is removed in snap3
    expected.emplace_back("e", snapid1); // file 'e' is removed in snap3
    expected.emplace_back("f", snapid1); // file 'f' is removed in snap3
    expected.emplace_back("ff", snapid1); // file 'ff' is removed in snap3
    expected.emplace_back("g", snapid3); // file 'g' removed in snap2 and
                                         // re-appeared in snap3
    expected.emplace_back("S", snapid3); // folder 'S' is present in snap3 hence reported
    expected.emplace_back("T", snapid3); // folder 'T' is present in snap3 hence reported
    expected.emplace_back("C", snapid1); // folder 'C' is removed in snap2
    // folder 'G' is removed in snap2 and re-appeared in snap3
    // hence reporting it twice under different snapid
    expected.emplace_back("G", snapid1);
    expected.emplace_back("G", snapid3);
    expected.emplace_back("l", snapid3); // file 'l' is created in snap2
    expected.emplace_back("H", snapid3); // folder 'H' is created in snap3 hence reported
    expected.emplace_back("I", snapid3); // folder 'I' is created in snap3 hence reported
    expected.emplace_back("L", snapid3); // folder 'L' is created in snap3 hence reported
    test_mount.verify_snap_diff(expected, "", "snap3", "snap1");
  }
  {
    //
    // Make sure snapshot delta for /S (children updated) is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("sa", snapid3);
    test_mount.verify_snap_diff(expected, "S", "snap3", "snap1");
  }
  {
    //
    // Make sure snapshot delta for /T (created and children updated) is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("td", snapid3);
    test_mount.verify_snap_diff(expected, "T", "snap3", "snap1");
  }
  {
    //
    // Make sure snapshot delta for /C (removed) is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("cc", snapid1);
    expected.emplace_back("CC", snapid1);
    test_mount.verify_snap_diff(expected, "C", "snap3", "snap1");
  }
  {
    //
    // Make sure snapshot delta for /C/CC (removed) is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("c", snapid1);
    test_mount.verify_snap_diff(expected, "C/CC", "snap3", "snap1");
  }
  {
    //
    // Make sure snapshot delta for /G (removed) is as expected
    // For this case (G@snap1 and G@snap3 are different entries)
    // the order in which snapshot names are provided is crucial.
    // Making G@snap1 vs. snap3 delta returns everything from G@snap1
    // but omits any entries from G/snap3 (since it's a different entry).
    // And making G@snap3 vs. snap1 delta returns everything from G@snap3
    // but nothing from snap1,
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("gg", snapid1);
    test_mount.verify_snap_diff(expected, "G", "snap1", "snap3");
  }
  {
    //
    // Make sure snapshot delta for /G (re-created) is as expected
    // The snapshot names order is important, see above.
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("gg", snapid3);
    test_mount.verify_snap_diff(expected, "G", "snap3", "snap1");
  }
  {
    //
    // Make sure snapshot delta for /H (created) is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("hh", snapid3);
    test_mount.verify_snap_diff(expected, "H", "snap1", "snap3");
  }
  {
    //
    // Make sure snapshot delta for /I (children updated) is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("ii", snapid3);
    expected.emplace_back("iii", snapid3);
    expected.emplace_back("iiii", snapid3);
    expected.emplace_back("J", snapid3);
    test_mount.verify_snap_diff(expected, "I", "snap1", "snap3");
  }
  {
    //
    // Make sure snapshot delta for /I/J (created at snap2) is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("i", snapid3);
    expected.emplace_back("j", snapid3);
    expected.emplace_back("l", snapid3);
    test_mount.verify_snap_diff(expected, "I/J", "snap1", "snap3");
  }
  {
    //
    // Make sure snapshot delta for /L is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("ll", snapid3);
    expected.emplace_back("LL", snapid3);
    expected.emplace_back("LN", snapid3);
    test_mount.verify_snap_diff(expected, "L", "snap1", "snap3");
  }
  {
    //
    // Make sure snapshot delta for /L/LL (children updated) is as expected
    //
    vector<std::pair<string, uint64_t>> expected;
    expected.emplace_back("ll", snapid3);
    test_mount.verify_snap_diff(expected, "L/LL", "snap1", "snap3");
  }
  {
    vector<std::pair<string, uint64_t>> expected;
    test_mount.verify_snap_diff(expected, "L/LN", "snap1", "snap3");
  }
  std::cout << "-------------" << std::endl;
  test_mount.remove_all();
  test_mount.rmsnap("snap1");
  test_mount.rmsnap("snap2");
  test_mount.rmsnap("snap3");
}
/*
* SnapDiff readdir API testing for huge dir
* when delta is minor.
*/
TEST(LibCephFS, HugeSnapDiffSmallDelta)
{
  TestMount test_mount;
  // 10k files is enough to exercise multi-batch readdir while the actual
  // snapshot delta stays tiny (only the prefix/suffix entries differ).
  long int file_count = 10000;
  printf("Seeding %ld files...\n", file_count);
  // Create simple directory tree with a couple of snapshots
  // to test against.
  string name_prefix_start = "aaaa";
  string name_prefix_bulk = "file";
  string name_prefix_end = "zzzz";
  // 'false' -> the bulk "file*" entries are identical in both snapshots,
  // so they must NOT appear in the diff.
  test_mount.prepareHugeSnapDiff(name_prefix_start,
                                 name_prefix_bulk,
                                 name_prefix_end,
                                 file_count,
                                 false);
  uint64_t snapid1;
  uint64_t snapid2;
  // learn snapshot ids and do basic verification
  ASSERT_EQ(0, test_mount.get_snapid("snap1", &snapid1));
  ASSERT_EQ(0, test_mount.get_snapid("snap2", &snapid2));
  ASSERT_GT(snapid1, 0);
  ASSERT_GT(snapid2, 0);
  ASSERT_GT(snapid2, snapid1);
  std::cout << snapid1 << " vs. " << snapid2 << std::endl;
  //
  // Make sure snap1 vs. snap2 delta for the root is as expected
  //
  {
    // Only the sentinel entries at the very start ("aaaa*") and very end
    // ("zzzz*") of the directory differ between the snapshots.
    vector<pair<string, uint64_t>> expected;
    expected.emplace_back(name_prefix_start + "B", snapid1);
    expected.emplace_back(name_prefix_start + "C", snapid2);
    expected.emplace_back(name_prefix_start + "D", snapid2);
    expected.emplace_back(name_prefix_end + "B", snapid1);
    expected.emplace_back(name_prefix_end + "C", snapid2);
    expected.emplace_back(name_prefix_end + "D", snapid2);
    test_mount.verify_snap_diff(expected, "", "snap1", "snap2");
  }
  std::cout << "------------- closing -------------" << std::endl;
  ASSERT_EQ(0, test_mount.purge_dir(""));
  ASSERT_EQ(0, test_mount.rmsnap("snap1"));
  ASSERT_EQ(0, test_mount.rmsnap("snap2"));
}
/*
* SnapDiff readdir API testing for huge dir
* when delta is large
*/
TEST(LibCephFS, HugeSnapDiffLargeDelta)
{
  TestMount test_mount;
  // Calculate amount of files required to have multiple directory fragments
  // using relevant config parameters.
  // file_count = mds_bal_split_size * mds_bal_fragment_fast_factor + 100
  char buf[256];
  int r = test_mount.conf_get("mds_bal_split_size", buf, sizeof(buf));
  ASSERT_TRUE(r >= 0);
  long int file_count = strtol(buf, nullptr, 10);
  // Fixed: the config key previously contained a stray trailing space
  // ("mds_bal_fragment_fast_factor ") which does not match the real option.
  r = test_mount.conf_get("mds_bal_fragment_fast_factor", buf, sizeof(buf));
  ASSERT_TRUE(r >= 0);
  double factor = strtod(buf, nullptr);
  file_count *= factor;
  file_count += 100;
  printf("Seeding %ld files...\n", file_count);
  // Create simple directory tree with a couple of snapshots
  // to test against.
  string name_prefix_start = "aaaa";
  string name_prefix_bulk = "file";
  string name_prefix_end = "zzzz";
  // 'true' -> the bulk "file*" entries differ between snapshots, so the
  // whole bulk shows up in the diff.
  test_mount.prepareHugeSnapDiff(name_prefix_start,
                                 name_prefix_bulk,
                                 name_prefix_end,
                                 file_count,
                                 true);
  uint64_t snapid1;
  uint64_t snapid2;
  // learn snapshot ids and do basic verification
  ASSERT_EQ(0, test_mount.get_snapid("snap1", &snapid1));
  ASSERT_EQ(0, test_mount.get_snapid("snap2", &snapid2));
  ASSERT_GT(snapid1, 0);
  ASSERT_GT(snapid2, 0);
  ASSERT_GT(snapid2, snapid1);
  std::cout << snapid1 << " vs. " << snapid2 << std::endl;
  //
  // Make sure snap1 vs. snap2 delta for the root is as expected
  //
  {
    vector<pair<string, uint64_t>> expected;
    expected.emplace_back(name_prefix_start + "B", snapid1);
    expected.emplace_back(name_prefix_start + "C", snapid2);
    expected.emplace_back(name_prefix_start + "D", snapid2);
    for (size_t i = 0; i < (size_t)file_count; i++) {
      expected.emplace_back(name_prefix_bulk + stringify(i), snapid2);
    }
    expected.emplace_back(name_prefix_end + "B", snapid1);
    expected.emplace_back(name_prefix_end + "C", snapid2);
    expected.emplace_back(name_prefix_end + "D", snapid2);
    test_mount.verify_snap_diff(expected, "", "snap1", "snap2");
  }
  std::cout << "------------- closing -------------" << std::endl;
  ASSERT_EQ(0, test_mount.purge_dir(""));
  ASSERT_EQ(0, test_mount.rmsnap("snap1"));
  ASSERT_EQ(0, test_mount.rmsnap("snap2"));
}
| 57,060 | 32.864095 | 92 | cc |
null | ceph-main/src/test/libcephfs/suidsgid.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2023 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "gtest/gtest.h"
#include "common/ceph_argparse.h"
#include "include/buffer.h"
#include "include/fs_types.h"
#include "include/stringify.h"
#include "include/cephfs/libcephfs.h"
#include "include/rados/librados.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <sys/uio.h>
#include <iostream>
#include <vector>
#include "json_spirit/json_spirit.h"
#ifdef __linux__
#include <limits.h>
#include <sys/xattr.h>
#endif
using namespace std;
struct ceph_mount_info *admin;
struct ceph_mount_info *cmount;
char filename[128];
void run_fallocate_test_case(int mode, int result, bool with_admin=false)
{
struct ceph_statx stx;
int flags = FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE;
ASSERT_EQ(0, ceph_chmod(admin, filename, mode));
struct ceph_mount_info *_cmount = cmount;
if (with_admin) {
_cmount = admin;
}
int fd = ceph_open(_cmount, filename, O_RDWR, 0);
ASSERT_LE(0, fd);
ASSERT_EQ(0, ceph_fallocate(_cmount, fd, flags, 1024, 40960));
ASSERT_EQ(ceph_statx(_cmount, filename, &stx, CEPH_STATX_MODE, 0), 0);
std::cout << "After ceph_fallocate, mode: 0" << oct << mode << " -> 0"
<< (stx.stx_mode & 07777) << dec << std::endl;
ASSERT_EQ(stx.stx_mode & (S_ISUID|S_ISGID), result);
ceph_close(_cmount, fd);
}
rados_t cluster;
// Issue a monitor command and extract the "key" field from the first element
// of the JSON reply into *key.
// Returns the rados_mon_command() status, or -CEPHFS_EINVAL when the command
// produced no output payload to parse.
// NOTE: json_spirit::read_or_throw throws on malformed JSON; callers wrap
// this in ASSERT_EQ so a throw fails the test.
int do_mon_command(string s, string *key)
{
  char *outs, *outbuf;
  size_t outs_len, outbuf_len;
  const char *ss = s.c_str();
  int r = rados_mon_command(cluster, (const char **)&ss, 1,
                            0, 0,
                            &outbuf, &outbuf_len,
                            &outs, &outs_len);
  if (outbuf_len) {
    string s(outbuf, outbuf_len);
    std::cout << "out: " << s << std::endl;
    // parse out the key
    json_spirit::mValue v, k;
    json_spirit::read_or_throw(s, v);
    k = v.get_array()[0].get_obj().find("key")->second;
    *key = k.get_str();
    std::cout << "key: " << *key << std::endl;
    free(outbuf);
  } else {
    // Fixed: the original returned here without releasing 'outs',
    // leaking the status buffer on the no-payload path.
    if (outs_len) {
      string es(outs, outs_len);
      std::cout << "outs: " << es << std::endl;
      free(outs);
    }
    return -CEPHFS_EINVAL;
  }
  if (outs_len) {
    string s(outs, outs_len);
    std::cout << "outs: " << s << std::endl;
    free(outs);
  }
  return r;
}
void run_write_test_case(int mode, int result, bool with_admin=false)
{
struct ceph_statx stx;
ASSERT_EQ(0, ceph_chmod(admin, filename, mode));
struct ceph_mount_info *_cmount = cmount;
if (with_admin) {
_cmount = admin;
}
int fd = ceph_open(_cmount, filename, O_RDWR, 0);
ASSERT_LE(0, fd);
ASSERT_EQ(ceph_write(_cmount, fd, "foo", 3, 0), 3);
ASSERT_EQ(ceph_statx(_cmount, filename, &stx, CEPH_STATX_MODE, 0), 0);
std::cout << "After ceph_write, mode: 0" << oct << mode << " -> 0"
<< (stx.stx_mode & 07777) << dec << std::endl;
ASSERT_EQ(stx.stx_mode & (S_ISUID|S_ISGID), result);
ceph_close(_cmount, fd);
}
void run_truncate_test_case(int mode, int result, size_t size, bool with_admin=false)
{
struct ceph_statx stx;
ASSERT_EQ(0, ceph_chmod(admin, filename, mode));
struct ceph_mount_info *_cmount = cmount;
if (with_admin) {
_cmount = admin;
}
int fd = ceph_open(_cmount, filename, O_RDWR, 0);
ASSERT_LE(0, fd);
ASSERT_GE(ceph_ftruncate(_cmount, fd, size), 0);
ASSERT_EQ(ceph_statx(_cmount, filename, &stx, CEPH_STATX_MODE, 0), 0);
std::cout << "After ceph_truncate size " << size << " mode: 0" << oct
<< mode << " -> 0" << (stx.stx_mode & 07777) << dec << std::endl;
ASSERT_EQ(stx.stx_mode & (S_ISUID|S_ISGID), result);
ceph_close(_cmount, fd);
}
// Verify that data-modifying operations (fallocate/write/truncate) clear the
// setuid/setgid bits when performed by an unprivileged user, but leave them
// intact when performed by a privileged (admin) user.
TEST(SuidsgidTest, WriteClearSetuid) {
  ASSERT_EQ(0, ceph_create(&admin, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(admin, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(admin, NULL));
  ASSERT_EQ(0, ceph_mount(admin, "/"));
  // Fixed: bounded formatting and fail fast if the open fails (the fd was
  // previously used unchecked).
  snprintf(filename, sizeof(filename), "/clear_suidsgid_file_%d", getpid());
  int fd = ceph_open(admin, filename, O_CREAT|O_RDWR, 0766);
  ASSERT_LE(0, fd);
  ASSERT_GE(ceph_ftruncate(admin, fd, 10000000), 0);
  ceph_close(admin, fd);
  string user = "clear_suidsgid_" + stringify(rand());
  // create access key
  string key;
  ASSERT_EQ(0, do_mon_command(
      "{\"prefix\": \"auth get-or-create\", \"entity\": \"client." + user + "\", "
      "\"caps\": [\"mon\", \"allow *\", \"osd\", \"allow *\", \"mgr\", \"allow *\", "
      "\"mds\", \"allow *\"], \"format\": \"json\"}", &key));
  ASSERT_EQ(0, ceph_create(&cmount, user.c_str()));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_set(cmount, "key", key.c_str()));
  ASSERT_EQ(ceph_init(cmount), 0);
  // The unprivileged mount acts as uid 123 / gid 456.
  UserPerm *perms = ceph_userperm_new(123, 456, 0, NULL);
  ASSERT_NE(nullptr, perms);
  ASSERT_EQ(0, ceph_mount_perms_set(cmount, perms));
  ceph_userperm_destroy(perms);
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  // 1, Commit to a non-exec file by an unprivileged user clears suid and sgid.
  run_fallocate_test_case(06666, 0); // a+rws
  // 2, Commit to a group-exec file by an unprivileged user clears suid and sgid.
  run_fallocate_test_case(06676, 0); // g+x,a+rws
  // 3, Commit to a user-exec file by an unprivileged user clears suid and sgid.
  run_fallocate_test_case(06766, 0); // u+x,a+rws,g-x
  // 4, Commit to a all-exec file by an unprivileged user clears suid and sgid.
  run_fallocate_test_case(06777, 0); // a+rwxs
  // 5, Commit to a non-exec file by root leaves suid and sgid.
  run_fallocate_test_case(06666, S_ISUID|S_ISGID, true); // a+rws
  // 6, Commit to a group-exec file by root leaves suid and sgid.
  run_fallocate_test_case(06676, S_ISUID|S_ISGID, true); // g+x,a+rws
  // 7, Commit to a user-exec file by root leaves suid and sgid.
  run_fallocate_test_case(06766, S_ISUID|S_ISGID, true); // u+x,a+rws,g-x
  // 8, Commit to a all-exec file by root leaves suid and sgid.
  run_fallocate_test_case(06777, S_ISUID|S_ISGID, true); // a+rwxs
  // 9, Commit to a group-exec file by an unprivileged user clears sgid
  run_fallocate_test_case(02676, 0); // a+rw,g+rwxs
  // 10, Commit to a all-exec file by an unprivileged user clears sgid.
  run_fallocate_test_case(02777, 0); // a+rwx,g+rwxs
  // 11, Write by privileged user leaves the suid and sgid
  run_write_test_case(06766, S_ISUID | S_ISGID, true);
  // 12, Write by unprivileged user clears the suid and sgid
  run_write_test_case(06766, 0);
  // 13, Truncate by privileged user leaves the suid and sgid
  run_truncate_test_case(06766, S_ISUID | S_ISGID, 10000, true);
  // 14, Truncate by unprivileged user clears the suid and sgid
  run_truncate_test_case(06766, 0, 100);
  // clean up
  ceph_shutdown(cmount);
  ceph_shutdown(admin);
}
// Verify that chown/chgrp clears setuid/setgid on executable files (and does
// not on non-executable ones), including the supplementary-group case.
TEST(LibCephFS, ChownClearSetuid) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  Inode *root;
  ASSERT_EQ(ceph_ll_lookup_root(cmount, &root), 0);
  char filename[32];
  // Fixed: bounded formatting into the fixed-size buffer.
  snprintf(filename, sizeof(filename), "clearsetuid%x", getpid());
  Fh *fh;
  Inode *in;
  struct ceph_statx stx;
  const mode_t after_mode = S_IRWXU;
  const mode_t before_mode = S_IRWXU | S_ISUID | S_ISGID;
  const unsigned want = CEPH_STATX_UID|CEPH_STATX_GID|CEPH_STATX_MODE;
  UserPerm *usercred = ceph_mount_perms(cmount);
  // best-effort: remove any leftover from a previous run
  ceph_ll_unlink(cmount, root, filename, usercred);
  ASSERT_EQ(ceph_ll_create(cmount, root, filename, before_mode,
                           O_RDWR|O_CREAT|O_EXCL, &in, &fh, &stx, want, 0,
                           usercred), 0);
  ASSERT_EQ(stx.stx_mode & (mode_t)ALLPERMS, before_mode);
  // chown -- for this we need to be "root"
  UserPerm *rootcred = ceph_userperm_new(0, 0, 0, NULL);
  ASSERT_TRUE(rootcred);
  stx.stx_uid++;
  stx.stx_gid++;
  ASSERT_EQ(ceph_ll_setattr(cmount, in, &stx, CEPH_SETATTR_UID|CEPH_SETATTR_GID, rootcred), 0);
  ASSERT_EQ(ceph_ll_getattr(cmount, in, &stx, CEPH_STATX_MODE, 0, usercred), 0);
  ASSERT_TRUE(stx.stx_mask & CEPH_STATX_MODE);
  ASSERT_EQ(stx.stx_mode & (mode_t)ALLPERMS, after_mode);
  /* test chown with supplementary groups, and chown with/without exe bit */
  uid_t u = 65534;
  gid_t g = 65534;
  gid_t gids[] = {65533,65532};
  UserPerm *altcred = ceph_userperm_new(u, g, sizeof gids / sizeof gids[0], gids);
  stx.stx_uid = u;
  stx.stx_gid = g;
  mode_t m = S_ISGID|S_ISUID|S_IRUSR|S_IWUSR;
  stx.stx_mode = m;
  ASSERT_EQ(ceph_ll_setattr(cmount, in, &stx, CEPH_SETATTR_MODE|CEPH_SETATTR_UID|CEPH_SETATTR_GID, rootcred), 0);
  ASSERT_EQ(ceph_ll_getattr(cmount, in, &stx, CEPH_STATX_MODE, 0, altcred), 0);
  ASSERT_EQ(stx.stx_mode&(mode_t)ALLPERMS, m);
  /* not dropped without exe bit */
  stx.stx_gid = gids[0];
  ASSERT_EQ(ceph_ll_setattr(cmount, in, &stx, CEPH_SETATTR_GID, altcred), 0);
  ASSERT_EQ(ceph_ll_getattr(cmount, in, &stx, CEPH_STATX_MODE, 0, altcred), 0);
  ASSERT_EQ(stx.stx_mode&(mode_t)ALLPERMS, m);
  /* now check dropped with exe bit */
  m = S_ISGID|S_ISUID|S_IRWXU;
  stx.stx_mode = m;
  ASSERT_EQ(ceph_ll_setattr(cmount, in, &stx, CEPH_STATX_MODE, altcred), 0);
  ASSERT_EQ(ceph_ll_getattr(cmount, in, &stx, CEPH_STATX_MODE, 0, altcred), 0);
  ASSERT_EQ(stx.stx_mode&(mode_t)ALLPERMS, m);
  stx.stx_gid = gids[1];
  ASSERT_EQ(ceph_ll_setattr(cmount, in, &stx, CEPH_SETATTR_GID, altcred), 0);
  ASSERT_EQ(ceph_ll_getattr(cmount, in, &stx, CEPH_STATX_MODE, 0, altcred), 0);
  ASSERT_EQ(stx.stx_mode&(mode_t)ALLPERMS, m&(S_IRWXU|S_IRWXG|S_IRWXO));
  ceph_userperm_destroy(altcred);
  // Fixed: rootcred was previously leaked (never destroyed).
  ceph_userperm_destroy(rootcred);
  ASSERT_EQ(ceph_ll_close(cmount, fh), 0);
  ceph_shutdown(cmount);
}
// Relax the root directory mode to 0777 so unprivileged test users can
// create entries under "/". Uses a short-lived admin mount with client
// permission checks disabled. Returns 0 on success, negative error code
// otherwise.
static int update_root_mode()
{
  struct ceph_mount_info *admin;
  int r = ceph_create(&admin, NULL);
  if (r < 0)
    return r;
  ceph_conf_read_file(admin, NULL);
  ceph_conf_parse_env(admin, NULL);
  ceph_conf_set(admin, "client_permissions", "false");
  r = ceph_mount(admin, "/");
  if (r >= 0)
    r = ceph_chmod(admin, "/", 0777);
  ceph_shutdown(admin);
  return r;
}
// Test entry point: relax the root mode, seed the RNG, stand up a rados
// handle (used by do_mon_command for auth key creation), run the suite,
// then tear the cluster handle down.
int main(int argc, char **argv)
{
  if (update_root_mode() < 0)
    exit(1);
  ::testing::InitGoogleTest(&argc, argv);
  srand(getpid());
  if (rados_create(&cluster, NULL) < 0)
    exit(1);
  if (rados_conf_read_file(cluster, NULL) < 0)
    exit(1);
  rados_conf_parse_env(cluster);
  if (rados_connect(cluster) < 0)
    exit(1);
  int r = RUN_ALL_TESTS();
  rados_shutdown(cluster);
  return r;
}
| 10,664 | 31.123494 | 113 | cc |
null | ceph-main/src/test/libcephfs/test.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/compat.h"
#include "gtest/gtest.h"
#include "include/cephfs/libcephfs.h"
#include "mds/mdstypes.h"
#include "include/stat.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <sys/uio.h>
#include <sys/time.h>
#ifndef _WIN32
#include <sys/resource.h>
#endif
#include "common/Clock.h"
#ifdef __linux__
#include <limits.h>
#include <sys/xattr.h>
#endif
#include <fmt/format.h>
#include <map>
#include <vector>
#include <thread>
#include <regex>
using namespace std;
// Verify that a path containing an empty component ("dir//file") resolves
// correctly for both create and re-open, including across a remount.
TEST(LibCephFS, OpenEmptyComponent) {
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  char c_dir[1024];
  // Fixed: use bounded snprintf instead of sprintf.
  snprintf(c_dir, sizeof(c_dir), "/open_test_%d", mypid);
  struct ceph_dir_result *dirp;
  ASSERT_EQ(0, ceph_mkdirs(cmount, c_dir, 0777));
  ASSERT_EQ(0, ceph_opendir(cmount, c_dir, &dirp));
  char c_path[1024];
  // note the deliberate "//" — the empty component is what this test checks
  snprintf(c_path, sizeof(c_path), "/open_test_%d//created_file_%d", mypid, mypid);
  int fd = ceph_open(cmount, c_path, O_RDONLY|O_CREAT, 0666);
  ASSERT_LT(0, fd);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ASSERT_EQ(0, ceph_closedir(cmount, dirp));
  ceph_shutdown(cmount);
  // remount and make sure the file can still be opened via the same path
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  fd = ceph_open(cmount, c_path, O_RDONLY, 0666);
  ASSERT_LT(0, fd);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  // cleanup
  ASSERT_EQ(0, ceph_unlink(cmount, c_path));
  ASSERT_EQ(0, ceph_rmdir(cmount, c_dir));
  ceph_shutdown(cmount);
}
// A handle opened O_RDONLY must reject ftruncate with EBADF.
TEST(LibCephFS, OpenReadTruncate) {
  struct ceph_mount_info *mnt;
  ASSERT_EQ(0, ceph_create(&mnt, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(mnt, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(mnt, NULL));
  ASSERT_EQ(0, ceph_mount(mnt, "/"));

  // seed the file with some data through a write-only handle
  const auto path = fmt::format("test_open_rdt_{}", getpid());
  const std::string payload("hello world");
  int fd = ceph_open(mnt, path.c_str(), O_WRONLY|O_CREAT, 0666);
  ASSERT_LE(0, fd);
  ASSERT_EQ(ceph_write(mnt, fd, payload.c_str(), payload.size(), 0),
            (int)payload.size());
  ASSERT_EQ(0, ceph_close(mnt, fd));

  // re-open read-only: any truncation attempt must fail with EBADF
  fd = ceph_open(mnt, path.c_str(), O_RDONLY, 0);
  ASSERT_LE(0, fd);
  ASSERT_EQ(ceph_ftruncate(mnt, fd, 0), -CEPHFS_EBADF);
  ASSERT_EQ(ceph_ftruncate(mnt, fd, 1), -CEPHFS_EBADF);
  ASSERT_EQ(0, ceph_close(mnt, fd));

  ceph_shutdown(mnt);
}
// Verify read/write permission enforcement per open mode:
// O_WRONLY rejects reads, O_RDONLY rejects writes, O_RDWR allows both.
TEST(LibCephFS, OpenReadWrite) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  char c_path[1024];
  // Fixed: use bounded snprintf instead of sprintf.
  snprintf(c_path, sizeof(c_path), "test_open_rdwr_%d", getpid());
  int fd = ceph_open(cmount, c_path, O_WRONLY|O_CREAT, 0666);
  ASSERT_LT(0, fd);
  const char *out_buf = "hello world";
  size_t size = strlen(out_buf);
  char in_buf[100];
  // write-only: write succeeds, read fails
  ASSERT_EQ(ceph_write(cmount, fd, out_buf, size, 0), (int)size);
  ASSERT_EQ(ceph_read(cmount, fd, in_buf, sizeof(in_buf), 0), -CEPHFS_EBADF);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  // read-only: write fails, read succeeds
  fd = ceph_open(cmount, c_path, O_RDONLY, 0);
  ASSERT_LT(0, fd);
  ASSERT_EQ(ceph_write(cmount, fd, out_buf, size, 0), -CEPHFS_EBADF);
  ASSERT_EQ(ceph_read(cmount, fd, in_buf, sizeof(in_buf), 0), (int)size);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  // read-write: both succeed
  fd = ceph_open(cmount, c_path, O_RDWR, 0);
  ASSERT_LT(0, fd);
  ASSERT_EQ(ceph_write(cmount, fd, out_buf, size, 0), (int)size);
  ASSERT_EQ(ceph_read(cmount, fd, in_buf, sizeof(in_buf), 0), (int)size);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, MountNonExist) {
  // Mounting a path that does not exist must fail (no implicit creation).
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_NE(0, ceph_mount(cmount, "/non-exist"));
  ceph_shutdown(cmount);
}
// Mounting an already-mounted handle must fail with EISCONN.
TEST(LibCephFS, MountDouble) {
  struct ceph_mount_info *mnt;
  ASSERT_EQ(0, ceph_create(&mnt, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(mnt, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(mnt, NULL));
  ASSERT_EQ(0, ceph_mount(mnt, "/"));
  ASSERT_EQ(-CEPHFS_EISCONN, ceph_mount(mnt, "/"));
  ceph_shutdown(mnt);
}
// Unmounting and re-mounting must keep the same CephContext on the handle.
TEST(LibCephFS, MountRemount) {
  struct ceph_mount_info *mnt;
  ASSERT_EQ(0, ceph_create(&mnt, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(mnt, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(mnt, NULL));

  CephContext *ctx_before = ceph_get_mount_context(mnt);
  ASSERT_EQ(0, ceph_mount(mnt, "/"));
  ASSERT_EQ(0, ceph_unmount(mnt));
  ASSERT_EQ(0, ceph_mount(mnt, "/"));
  ASSERT_EQ(ctx_before, ceph_get_mount_context(mnt));

  ceph_shutdown(mnt);
}
TEST(LibCephFS, UnmountUnmounted) {
  // Unmounting a handle that was never mounted must report ENOTCONN.
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(-CEPHFS_ENOTCONN, ceph_unmount(cmount));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, ReleaseUnmounted) {
  // Releasing a created-but-never-mounted handle must succeed.
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_release(cmount));
}
// Releasing a handle while it is still mounted must fail with EISCONN;
// after a clean unmount the release must succeed.
TEST(LibCephFS, ReleaseMounted) {
  struct ceph_mount_info *mnt;
  ASSERT_EQ(0, ceph_create(&mnt, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(mnt, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(mnt, NULL));
  ASSERT_EQ(0, ceph_mount(mnt, "/"));
  ASSERT_EQ(-CEPHFS_EISCONN, ceph_release(mnt));
  ASSERT_EQ(0, ceph_unmount(mnt));
  ASSERT_EQ(0, ceph_release(mnt));
}
// The normal teardown sequence: mount, unmount, then release the handle.
TEST(LibCephFS, UnmountRelease) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_parse_env(cmount, NULL), 0);
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  ASSERT_EQ(ceph_unmount(cmount), 0);
  ASSERT_EQ(ceph_release(cmount), 0);
}
// Basic smoke test: a full create/mount/shutdown cycle works, and works
// again on a brand-new handle after the first one is shut down.
TEST(LibCephFS, Mount) {
  for (int cycle = 0; cycle < 2; ++cycle) {
    struct ceph_mount_info *cmount;
    ASSERT_EQ(0, ceph_create(&cmount, NULL));
    ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
    ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
    // NULL root mounts the filesystem default root.
    ASSERT_EQ(0, ceph_mount(cmount, NULL));
    ceph_shutdown(cmount);
  }
}
// Exercise ceph_open_layout(): valid layouts, layout changes on non-empty
// files, invalid layout parameters, and data-pool selection.
TEST(LibCephFS, OpenLayout) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  /* valid layout: stripe_unit = 1 MiB, stripe_count = 7, object_size = 1 MiB,
   * default data pool (NULL) */
  char test_layout_file[256];
  sprintf(test_layout_file, "test_layout_%d_b", getpid());
  int fd = ceph_open_layout(cmount, test_layout_file, O_CREAT|O_WRONLY, 0666, (1<<20), 7, (1<<20), NULL);
  ASSERT_GT(fd, 0);
  char poolname[80];
  // Returns the pool-name length; a zero-sized buffer still reports the length.
  ASSERT_LT(0, ceph_get_file_pool_name(cmount, fd, poolname, sizeof(poolname)));
  ASSERT_LT(0, ceph_get_file_pool_name(cmount, fd, poolname, 0));

  /* changing the layout of an already-written file must fail (CEPHFS_ENOTEMPTY) */
  ceph_write(cmount, fd, "hello world", 11, 0);
  ceph_close(cmount, fd);

  char xattrk[128];
  char xattrv[128];
  sprintf(xattrk, "ceph.file.layout.stripe_unit");
  sprintf(xattrv, "65536");
  ASSERT_EQ(-CEPHFS_ENOTEMPTY, ceph_setxattr(cmount, test_layout_file, xattrk, (void *)xattrv, 5, 0));

  /* invalid layout: object_size (19) smaller than stripe_unit (1 MiB) */
  sprintf(test_layout_file, "test_layout_%d_c", getpid());
  fd = ceph_open_layout(cmount, test_layout_file, O_CREAT, 0666, (1<<20), 1, 19, NULL);
  ASSERT_EQ(fd, -CEPHFS_EINVAL);

  /* with an explicit data pool (the one reported above) */
  sprintf(test_layout_file, "test_layout_%d_d", getpid());
  fd = ceph_open_layout(cmount, test_layout_file, O_CREAT, 0666, (1<<20), 7, (1<<20), poolname);
  ASSERT_GT(fd, 0);
  ceph_close(cmount, fd);

  /* the metadata pool is not a valid data pool */
  sprintf(test_layout_file, "test_layout_%d_e", getpid());
  fd = ceph_open_layout(cmount, test_layout_file, O_CREAT, 0666, (1<<20), 7, (1<<20), "metadata");
  ASSERT_EQ(fd, -CEPHFS_EINVAL);

  /* a pool that does not exist at all is also rejected */
  sprintf(test_layout_file, "test_layout_%d_f", getpid());
  fd = ceph_open_layout(cmount, test_layout_file, O_CREAT, 0666, (1<<20), 7, (1<<20), "asdfjasdfjasdf");
  ASSERT_EQ(fd, -CEPHFS_EINVAL);

  ceph_shutdown(cmount);
}
// Exhaustive directory-listing test: readdir, telldir/seekdir, rewinddir,
// getdents, readdir_r and readdirplus_r must all agree on the same set of
// entries in a directory with a randomized number of files.
TEST(LibCephFS, DirLs) {
  pid_t mypid = getpid();

  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);

  // Opening a directory that does not exist yet must fail with ENOENT.
  struct ceph_dir_result *ls_dir = NULL;
  char foostr[256];
  sprintf(foostr, "dir_ls%d", mypid);
  ASSERT_EQ(ceph_opendir(cmount, foostr, &ls_dir), -CEPHFS_ENOENT);

  ASSERT_EQ(ceph_mkdir(cmount, foostr, 0777), 0);
  struct ceph_statx stx;
  ASSERT_EQ(ceph_statx(cmount, foostr, &stx, 0, 0), 0);
  ASSERT_NE(S_ISDIR(stx.stx_mode), 0);

  char barstr[256];
  sprintf(barstr, "dir_ls2%d", mypid);
  ASSERT_EQ(ceph_statx(cmount, barstr, &stx, 0, AT_SYMLINK_NOFOLLOW), -CEPHFS_ENOENT);

  // insert files into directory and test open
  // r = number of files; overridable via LIBCEPHFS_RAND for reproducibility.
  char bazstr[256];
  int i = 0, r = rand() % 4096;
  if (getenv("LIBCEPHFS_RAND")) {
    r = atoi(getenv("LIBCEPHFS_RAND"));
  }
  printf("rand: %d\n", r);
  for(; i < r; ++i) {
    sprintf(bazstr, "dir_ls%d/dirf%d", mypid, i);
    int fd  = ceph_open(cmount, bazstr, O_CREAT|O_RDONLY, 0666);
    ASSERT_GT(fd, 0);
    ASSERT_EQ(ceph_close(cmount, fd), 0);

    // set file sizes for readdirplus
    // (file "dirf<i>" is truncated to size i, checked again below)
    ceph_truncate(cmount, bazstr, i);
  }

  ASSERT_EQ(ceph_opendir(cmount, foostr, &ls_dir), 0);

  // not guaranteed to get . and .. first, but its a safe assumption in this case
  struct dirent *result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, ".");

  result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, "..");

  // Read every entry, remembering the telldir() offset *before* each entry
  // so we can seekdir() back to it later.
  std::vector<std::string> entries;
  std::map<std::string, int64_t> offset_map;
  int64_t offset = ceph_telldir(cmount, ls_dir);
  for(i = 0; i < r; ++i) {
    result = ceph_readdir(cmount, ls_dir);
    ASSERT_TRUE(result != NULL);
    entries.push_back(result->d_name);
    offset_map[result->d_name] = offset;
    offset = ceph_telldir(cmount, ls_dir);
  }
  ASSERT_TRUE(ceph_readdir(cmount, ls_dir) == NULL);
  // offset now marks end-of-directory.
  offset = ceph_telldir(cmount, ls_dir);

  ASSERT_EQ(offset_map.size(), entries.size());
  for(i = 0; i < r; ++i) {
    sprintf(bazstr, "dirf%d", i);
    ASSERT_TRUE(offset_map.count(bazstr) == 1);
  }

  // test seekdir: seeking to the end-of-dir offset yields no entry; seeking
  // to each saved per-entry offset yields exactly that entry again.
  ceph_seekdir(cmount, ls_dir, offset);
  ASSERT_TRUE(ceph_readdir(cmount, ls_dir) == NULL);

  for (auto p = offset_map.begin(); p != offset_map.end(); ++p) {
    ceph_seekdir(cmount, ls_dir, p->second);
    result = ceph_readdir(cmount, ls_dir);
    ASSERT_TRUE(result != NULL);
    std::string d_name(result->d_name);
    ASSERT_EQ(p->first, d_name);
  }

  // test rewinddir: listing restarts from "." / "..".
  ceph_rewinddir(cmount, ls_dir);

  result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, ".");

  result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, "..");

  ceph_rewinddir(cmount, ls_dir);

  int t = ceph_telldir(cmount, ls_dir);
  ASSERT_GT(t, -1);

  ASSERT_TRUE(ceph_readdir(cmount, ls_dir) != NULL);

  // test seekdir - move back to the beginning
  ceph_seekdir(cmount, ls_dir, t);

  // test getdents: buffer sized for all r entries plus "." and "..".
  struct dirent *getdents_entries;
  size_t getdents_entries_len = (r + 2) * sizeof(*getdents_entries);
  getdents_entries = (struct dirent *)malloc(getdents_entries_len);

  int count = 0;
  std::vector<std::string> found;
  while (true) {
    int len = ceph_getdents(cmount, ls_dir, (char *)getdents_entries, getdents_entries_len);
    if (len == 0)
      break;
    ASSERT_GT(len, 0);
    ASSERT_TRUE((len % sizeof(*getdents_entries)) == 0);
    int n = len / sizeof(*getdents_entries);
    int j;
    if (count == 0) {
      // The first batch starts with "." and ".."; skip them.
      ASSERT_STREQ(getdents_entries[0].d_name, ".");
      ASSERT_STREQ(getdents_entries[1].d_name, "..");
      j = 2;
    } else {
      j = 0;
    }
    count += n;
    // NOTE(review): ++i here looks vestigial — i is not read inside this loop
    // and is reinitialized before its next use.
    for(; j < n; ++i, ++j) {
      const char *name = getdents_entries[j].d_name;
      found.push_back(name);
    }
  }
  ASSERT_EQ(found, entries);
  free(getdents_entries);

  // test readdir_r: same entry set, one entry per call.
  ceph_rewinddir(cmount, ls_dir);

  result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, ".");

  result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, "..");

  found.clear();
  while (true) {
    struct dirent rdent;
    int len = ceph_readdir_r(cmount, ls_dir, &rdent);
    if (len == 0)
      break;
    ASSERT_EQ(len, 1);
    found.push_back(rdent.d_name);
  }
  ASSERT_EQ(found, entries);

  // test readdirplus: entries plus statx data; sizes were set by the
  // truncate calls above (file "dirf<i>" has size i).
  ceph_rewinddir(cmount, ls_dir);

  result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, ".");

  result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, "..");

  found.clear();
  while (true) {
    struct dirent rdent;
    struct ceph_statx stx;
    int len = ceph_readdirplus_r(cmount, ls_dir, &rdent, &stx,
				 CEPH_STATX_SIZE, AT_STATX_DONT_SYNC, NULL);
    if (len == 0)
      break;
    ASSERT_EQ(len, 1);
    const char *name = rdent.d_name;
    found.push_back(name);
    int size;
    sscanf(name, "dirf%d", &size);
    ASSERT_TRUE(stx.stx_mask & CEPH_STATX_SIZE);
    ASSERT_EQ(stx.stx_size, (size_t)size);
    // On Windows, dirent uses long (4B) inodes, which get trimmed
    // and can't be used.
    // TODO: consider defining ceph_dirent.
#ifndef _WIN32
    ASSERT_EQ(stx.stx_ino, rdent.d_ino);
#endif
    //ASSERT_EQ(st.st_mode, (mode_t)0666);
  }
  ASSERT_EQ(found, entries);

  ASSERT_EQ(ceph_closedir(cmount, ls_dir), 0);

  // cleanup
  for(i = 0; i < r; ++i) {
    sprintf(bazstr, "dir_ls%d/dirf%d", mypid, i);
    ASSERT_EQ(0, ceph_unlink(cmount, bazstr));
  }
  ASSERT_EQ(0, ceph_rmdir(cmount, foostr));

  ceph_shutdown(cmount);
}
// Create a 42-level-deep chain of "a" directories, walk down it with chdir
// while listing each level, then walk back up removing every level.
TEST(LibCephFS, ManyNestedDirs) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  // 42 components of "a".
  const char *many_path = "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a";

  ASSERT_EQ(ceph_mkdirs(cmount, many_path, 0755), 0);

  // Descend 39 levels; at each level the only visible entry besides "."
  // and ".." must be the next "a".
  int i = 0;

  for(; i < 39; ++i) {
    ASSERT_EQ(ceph_chdir(cmount, "a"), 0);

    struct ceph_dir_result *dirp;
    ASSERT_EQ(ceph_opendir(cmount, "a", &dirp), 0);
    struct dirent *dent = ceph_readdir(cmount, dirp);
    ASSERT_TRUE(dent != NULL);
    ASSERT_STREQ(dent->d_name, ".");
    dent = ceph_readdir(cmount, dirp);
    ASSERT_TRUE(dent != NULL);
    ASSERT_STREQ(dent->d_name, "..");
    dent = ceph_readdir(cmount, dirp);
    ASSERT_TRUE(dent != NULL);
    ASSERT_STREQ(dent->d_name, "a");
    ASSERT_EQ(ceph_closedir(cmount, dirp), 0);
  }

  // After 39 chdirs the cwd is 39 levels deep.
  ASSERT_STREQ(ceph_getcwd(cmount), "/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a");

  // Jump to the deepest level (42), then remove one level per iteration
  // while stepping back up: 39 rmdirs, leaving /a/a/a.
  ASSERT_EQ(ceph_chdir(cmount, "a/a/a"), 0);

  for(i = 0; i < 39; ++i) {
    ASSERT_EQ(ceph_chdir(cmount, ".."), 0);
    ASSERT_EQ(ceph_rmdir(cmount, "a"), 0);
  }

  // Remove the remaining three levels in one call from the root.
  ASSERT_EQ(ceph_chdir(cmount, "/"), 0);

  ASSERT_EQ(ceph_rmdir(cmount, "a/a/a"), 0);

  ceph_shutdown(cmount);
}
// Set 26 user xattrs (user.test_xattr_a .. _z), verify listxattr buffer
// semantics, walk the NUL-separated listing checking each value, then
// remove them all.
TEST(LibCephFS, Xattrs) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  char test_xattr_file[256];
  sprintf(test_xattr_file, "test_xattr_%d", getpid());
  int fd = ceph_open(cmount, test_xattr_file, O_CREAT, 0666);
  ASSERT_GT(fd, 0);

  // test removing non-existent xattr
  ASSERT_EQ(-CEPHFS_ENODATA, ceph_removexattr(cmount, test_xattr_file, "user.nosuchxattr"));

  // Create one xattr per letter: key "user.test_xattr_<c>", value "testxattr<c>".
  char i = 'a';
  char xattrk[128];
  char xattrv[128];
  for(; i < 'a'+26; ++i) {
    sprintf(xattrk, "user.test_xattr_%c", i);
    int len = sprintf(xattrv, "testxattr%c", i);
    ASSERT_EQ(ceph_setxattr(cmount, test_xattr_file, xattrk, (void *) xattrv, len, XATTR_CREATE), 0);
  }

  // zero size should return required buffer length
  int len_needed = ceph_listxattr(cmount, test_xattr_file, NULL, 0);
  ASSERT_GT(len_needed, 0);

  // buffer size smaller than needed should fail
  char xattrlist[128*26];
  ASSERT_GT(sizeof(xattrlist), (size_t)len_needed);
  int len = ceph_listxattr(cmount, test_xattr_file, xattrlist, len_needed - 1);
  ASSERT_EQ(-CEPHFS_ERANGE, len);

  len = ceph_listxattr(cmount, test_xattr_file, xattrlist, sizeof(xattrlist));
  ASSERT_EQ(len, len_needed);

  // Walk the packed list of NUL-terminated names.  The listing is expected
  // to come back in insertion order, so 'i' tracks the expected letter.
  char *p = xattrlist;
  char *n;
  i = 'a';
  while (len > 0) {
    // ceph.* xattrs should not be listed
    ASSERT_NE(strncmp(p, "ceph.", 5), 0);

    sprintf(xattrk, "user.test_xattr_%c", i);
    ASSERT_STREQ(p, xattrk);

    char gxattrv[128];
    std::cout << "getting attr " << p << std::endl;
    int alen = ceph_getxattr(cmount, test_xattr_file, p, (void *) gxattrv, 128);
    ASSERT_GT(alen, 0);
    sprintf(xattrv, "testxattr%c", i);
    ASSERT_TRUE(!strncmp(xattrv, gxattrv, alen));

    // Advance past this name's terminating NUL to the next entry.
    n = strchr(p, '\0');
    n++;
    len -= (n - p);
    p = n;
    ++i;
  }

  // Remove all 26 xattrs again.
  i = 'a';
  for(i = 'a'; i < 'a'+26; ++i) {
    sprintf(xattrk, "user.test_xattr_%c", i);
    ASSERT_EQ(ceph_removexattr(cmount, test_xattr_file, xattrk), 0);
  }

  ceph_close(cmount, fd);
  ceph_shutdown(cmount);
}
// Low-level (inode-handle) xattr round-trip: set an xattr via
// ceph_ll_setxattr and read it back via ceph_ll_getxattr.
TEST(LibCephFS, Xattrs_ll) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  // Create the target file through the path-based API, then close it;
  // the rest of the test works on inode handles only.
  char test_xattr_file[256];
  sprintf(test_xattr_file, "test_xattr_%d", getpid());
  int fd = ceph_open(cmount, test_xattr_file, O_CREAT, 0666);
  ASSERT_GT(fd, 0);
  ceph_close(cmount, fd);

  // Resolve the root inode and then the test file's inode.
  // NOTE(review): neither inode reference is released (no ceph_ll_put)
  // and the test file is never unlinked — presumably acceptable for a
  // test that ends with ceph_shutdown; verify against libcephfs ll rules.
  Inode *root = NULL;
  Inode *existent_file_handle = NULL;

  int res = ceph_ll_lookup_root(cmount, &root);
  ASSERT_EQ(res, 0);

  UserPerm *perms = ceph_mount_perms(cmount);
  struct ceph_statx stx;

  res = ceph_ll_lookup(cmount, root, test_xattr_file, &existent_file_handle,
		       &stx, 0, 0, perms);
  ASSERT_EQ(res, 0);

  const char *valid_name = "user.attrname";
  const char *value = "attrvalue";
  char value_buf[256] = { 0 };

  res = ceph_ll_setxattr(cmount, existent_file_handle, valid_name, value, strlen(value), 0, perms);
  ASSERT_EQ(res, 0);

  res = ceph_ll_getxattr(cmount, existent_file_handle, valid_name, value_buf, 256, perms);
  ASSERT_EQ(res, (int)strlen(value));

  // getxattr does not NUL-terminate; terminate before the string compare.
  value_buf[res] = '\0';
  ASSERT_STREQ(value_buf, value);

  ceph_shutdown(cmount);
}
// statx with AT_SYMLINK_NOFOLLOW must accept both "/." and "." paths.
TEST(LibCephFS, LstatSlashdot) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, NULL));

  struct ceph_statx stx;
  // Absolute and relative spellings of the current directory.
  ASSERT_EQ(0, ceph_statx(cmount, "/.", &stx, 0, AT_SYMLINK_NOFOLLOW));
  ASSERT_EQ(0, ceph_statx(cmount, ".", &stx, 0, AT_SYMLINK_NOFOLLOW));

  ceph_shutdown(cmount);
}
// Verify directory nlink accounting: an empty directory has nlink 2
// ("." plus the parent's entry), each immediate subdirectory adds one,
// grandchildren do not, and an unlinked-but-open directory reports 0.
TEST(LibCephFS, StatDirNlink) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  char test_dir1[256];
  sprintf(test_dir1, "dir1_symlinks_%d", getpid());
  ASSERT_EQ(ceph_mkdir(cmount, test_dir1, 0700), 0);

  // Keep an fd on the directory so we can fstatx it after it is removed.
  int fd = ceph_open(cmount, test_dir1, O_DIRECTORY|O_RDONLY, 0);
  ASSERT_GT(fd, 0);
  struct ceph_statx stx;
  ASSERT_EQ(ceph_fstatx(cmount, fd, &stx, CEPH_STATX_NLINK, AT_SYMLINK_NOFOLLOW), 0);
  ASSERT_EQ(stx.stx_nlink, 2u);

  {
    // "dir/." resolves to the same inode and reports the same nlink.
    char test_dir2[296];
    sprintf(test_dir2, "%s/.", test_dir1);
    ASSERT_EQ(ceph_statx(cmount, test_dir2, &stx, CEPH_STATX_NLINK, AT_SYMLINK_NOFOLLOW), 0);
    ASSERT_EQ(stx.stx_nlink, 2u);
  }

  {
    char test_dir2[296];

    // dir/1 -> child dir has nlink 2, parent goes to 3.
    sprintf(test_dir2, "%s/1", test_dir1);
    ASSERT_EQ(ceph_mkdir(cmount, test_dir2, 0700), 0);
    ASSERT_EQ(ceph_statx(cmount, test_dir2, &stx, CEPH_STATX_NLINK, AT_SYMLINK_NOFOLLOW), 0);
    ASSERT_EQ(stx.stx_nlink, 2u);
      ASSERT_EQ(ceph_statx(cmount, test_dir1, &stx, CEPH_STATX_NLINK, AT_SYMLINK_NOFOLLOW), 0);
      ASSERT_EQ(stx.stx_nlink, 3u);

    // dir/2 -> parent goes to 4.
    sprintf(test_dir2, "%s/2", test_dir1);
    ASSERT_EQ(ceph_mkdir(cmount, test_dir2, 0700), 0);
      ASSERT_EQ(ceph_statx(cmount, test_dir1, &stx, CEPH_STATX_NLINK, AT_SYMLINK_NOFOLLOW), 0);
      ASSERT_EQ(stx.stx_nlink, 4u);

    // dir/1/1 -> a grandchild does not change the top directory's nlink.
    sprintf(test_dir2, "%s/1/1", test_dir1);
    ASSERT_EQ(ceph_mkdir(cmount, test_dir2, 0700), 0);
      ASSERT_EQ(ceph_statx(cmount, test_dir1, &stx, CEPH_STATX_NLINK, AT_SYMLINK_NOFOLLOW), 0);
      ASSERT_EQ(stx.stx_nlink, 4u);

    // Removing the grandchild leaves the top directory's nlink at 4.
    ASSERT_EQ(ceph_rmdir(cmount, test_dir2), 0);
      ASSERT_EQ(ceph_statx(cmount, test_dir1, &stx, CEPH_STATX_NLINK, AT_SYMLINK_NOFOLLOW), 0);
      ASSERT_EQ(stx.stx_nlink, 4u);

    // Removing dir/1 drops it to 3, removing dir/2 drops it back to 2.
    sprintf(test_dir2, "%s/1", test_dir1);
    ASSERT_EQ(ceph_rmdir(cmount, test_dir2), 0);
      ASSERT_EQ(ceph_statx(cmount, test_dir1, &stx, CEPH_STATX_NLINK, AT_SYMLINK_NOFOLLOW), 0);
      ASSERT_EQ(stx.stx_nlink, 3u);

    sprintf(test_dir2, "%s/2", test_dir1);
    ASSERT_EQ(ceph_rmdir(cmount, test_dir2), 0);
      ASSERT_EQ(ceph_statx(cmount, test_dir1, &stx, CEPH_STATX_NLINK, AT_SYMLINK_NOFOLLOW), 0);
      ASSERT_EQ(stx.stx_nlink, 2u);
  }

  // After rmdir, fstatx through the still-open fd reports nlink 0.
  ASSERT_EQ(ceph_rmdir(cmount, test_dir1), 0);
  ASSERT_EQ(ceph_fstatx(cmount, fd, &stx, CEPH_STATX_NLINK, AT_SYMLINK_NOFOLLOW), 0);
  ASSERT_EQ(stx.stx_nlink, 0u);

  ceph_close(cmount, fd);
  ceph_shutdown(cmount);
}
// chmod a file read-only and verify open/read/write behavior, then chmod
// it back to writable and verify the mode and write access are restored.
TEST(LibCephFS, DoubleChmod) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  char test_file[256];
  sprintf(test_file, "test_perms_%d", getpid());

  int fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR, 0666);
  ASSERT_GT(fd, 0);

  // write some stuff
  const char *bytes = "foobarbaz";
  ASSERT_EQ(ceph_write(cmount, fd, bytes, strlen(bytes), 0), (int)strlen(bytes));

  ceph_close(cmount, fd);

  // set perms to read but can't write
  ASSERT_EQ(ceph_chmod(cmount, test_file, 0400), 0);

  // O_RDWR is now denied, O_RDONLY still works.
  fd = ceph_open(cmount, test_file, O_RDWR, 0);
  ASSERT_EQ(fd, -CEPHFS_EACCES);

  fd = ceph_open(cmount, test_file, O_RDONLY, 0);
  ASSERT_GT(fd, -1);

  char buf[100];
  int ret = ceph_read(cmount, fd, buf, 100, 0);
  ASSERT_EQ(ret, (int)strlen(bytes));
  buf[ret] = '\0';
  ASSERT_STREQ(buf, bytes);

  // Writing through a read-only fd fails with EBADF (not EACCES).
  ASSERT_EQ(ceph_write(cmount, fd, bytes, strlen(bytes), 0), -CEPHFS_EBADF);

  ceph_close(cmount, fd);

  // reset back to writeable
  ASSERT_EQ(ceph_chmod(cmount, test_file, 0600), 0);

  // ensure perms are correct
  struct ceph_statx stx;
  ASSERT_EQ(ceph_statx(cmount, test_file, &stx, CEPH_STATX_MODE, AT_SYMLINK_NOFOLLOW), 0);
  // 0100600 = S_IFREG | 0600.
  ASSERT_EQ(stx.stx_mode, 0100600U);

  fd = ceph_open(cmount, test_file, O_RDWR, 0);
  ASSERT_GT(fd, 0);
  ASSERT_EQ(ceph_write(cmount, fd, bytes, strlen(bytes), 0), (int)strlen(bytes));
  ceph_close(cmount, fd);

  ceph_shutdown(cmount);
}
// fchmod to read-only while an O_RDWR fd is open: the existing fd keeps
// its write access; only new opens see the new permissions.
TEST(LibCephFS, Fchmod) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  char test_file[256];
  sprintf(test_file, "test_perms_%d", getpid());

  int fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR, 0666);
  ASSERT_GT(fd, 0);

  // write some stuff
  const char *bytes = "foobarbaz";
  ASSERT_EQ(ceph_write(cmount, fd, bytes, strlen(bytes), 0), (int)strlen(bytes));

  // set perms to read but can't write
  ASSERT_EQ(ceph_fchmod(cmount, fd, 0400), 0);

  char buf[100];
  int ret = ceph_read(cmount, fd, buf, 100, 0);
  ASSERT_EQ(ret, (int)strlen(bytes));
  buf[ret] = '\0';
  ASSERT_STREQ(buf, bytes);

  // The fd was opened O_RDWR before the fchmod, so writes still succeed.
  ASSERT_EQ(ceph_write(cmount, fd, bytes, strlen(bytes), 0), (int)strlen(bytes));

  ceph_close(cmount, fd);

  // A fresh O_RDWR open now fails against the 0400 mode.
  ASSERT_EQ(ceph_open(cmount, test_file, O_RDWR, 0), -CEPHFS_EACCES);

  // reset back to writeable
  ASSERT_EQ(ceph_chmod(cmount, test_file, 0600), 0);

  fd = ceph_open(cmount, test_file, O_RDWR, 0);
  ASSERT_GT(fd, 0);

  ASSERT_EQ(ceph_write(cmount, fd, bytes, strlen(bytes), 0), (int)strlen(bytes));
  ceph_close(cmount, fd);

  ceph_shutdown(cmount);
}
// lchmod changes the mode of the symlink itself (not its target): stat the
// link with AT_SYMLINK_NOFOLLOW before and after and compare permission bits.
TEST(LibCephFS, Lchmod) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  char test_file[256];
  sprintf(test_file, "test_perms_lchmod_%d", getpid());

  int fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR, 0666);
  ASSERT_GT(fd, 0);

  // write some stuff
  const char *bytes = "foobarbaz";
  ASSERT_EQ(ceph_write(cmount, fd, bytes, strlen(bytes), 0), (int)strlen(bytes));
  ceph_close(cmount, fd);

  // Create symlink
  char test_symlink[256];
  sprintf(test_symlink, "test_lchmod_sym_%d", getpid());
  ASSERT_EQ(ceph_symlink(cmount, test_file, test_symlink), 0);

  // get symlink stat - lstat
  struct ceph_statx stx_orig1;
  ASSERT_EQ(ceph_statx(cmount, test_symlink, &stx_orig1, CEPH_STATX_ALL_STATS, AT_SYMLINK_NOFOLLOW), 0);

  // Change mode on symlink file
  ASSERT_EQ(ceph_lchmod(cmount, test_symlink, 0400), 0);
  struct ceph_statx stx_orig2;
  ASSERT_EQ(ceph_statx(cmount, test_symlink, &stx_orig2, CEPH_STATX_ALL_STATS, AT_SYMLINK_NOFOLLOW), 0);

  // Compare modes: the symlink's permission bits went from 0777 to 0400.
  ASSERT_NE(stx_orig1.stx_mode, stx_orig2.stx_mode);
  static const int permbits = S_IRWXU|S_IRWXG|S_IRWXO;
  ASSERT_EQ(permbits&stx_orig1.stx_mode, 0777);
  ASSERT_EQ(permbits&stx_orig2.stx_mode, 0400);

  ceph_shutdown(cmount);
}
// fchown a 0600 file to nobody (uid/gid 65534); a subsequent open by the
// original user must then fail with EACCES (except on Windows, where the
// ownership change is ignored).
TEST(LibCephFS, Fchown) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  char test_file[256];
  sprintf(test_file, "test_fchown_%d", getpid());

  int fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR, 0666);
  ASSERT_GT(fd, 0);

  // set perms to readable and writeable only by owner
  ASSERT_EQ(ceph_fchmod(cmount, fd, 0600), 0);

  // change ownership to nobody -- we assume nobody exists and id is always 65534
  // (client_permissions is disabled around the fchown so the current user
  // is allowed to give the file away)
  ASSERT_EQ(ceph_conf_set(cmount, "client_permissions", "0"), 0);
  ASSERT_EQ(ceph_fchown(cmount, fd, 65534, 65534), 0);
  ASSERT_EQ(ceph_conf_set(cmount, "client_permissions", "1"), 0);

  ceph_close(cmount, fd);

  // "nobody" will be ignored on Windows
#ifndef _WIN32
  fd = ceph_open(cmount, test_file, O_RDWR, 0);
  ASSERT_EQ(fd, -CEPHFS_EACCES);
#endif

  ceph_shutdown(cmount);
}
#if defined(__linux__) && defined(O_PATH)
// O_PATH semantics: the fd references the path only — data and metadata
// operations through it must fail with EBADF, while fstatx still works.
TEST(LibCephFS, FlagO_PATH) {
  struct ceph_mount_info *cmount;

  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, NULL));

  char test_file[PATH_MAX];
  sprintf(test_file, "test_oflag_%d", getpid());

  // O_CREAT combined with O_PATH cannot create a missing file.
  int fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR|O_PATH, 0666);
  ASSERT_EQ(-CEPHFS_ENOENT, fd);

  fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR, 0666);
  ASSERT_GT(fd, 0);
  ASSERT_EQ(0, ceph_close(cmount, fd));

  // ok, the file has been created. perform real checks now
  fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR|O_PATH, 0666);
  ASSERT_GT(fd, 0);

  char buf[128];
  ASSERT_EQ(-CEPHFS_EBADF, ceph_read(cmount, fd, buf, sizeof(buf), 0));
  ASSERT_EQ(-CEPHFS_EBADF, ceph_write(cmount, fd, buf, sizeof(buf), 0));

  // set perms to readable and writeable only by owner
  ASSERT_EQ(-CEPHFS_EBADF, ceph_fchmod(cmount, fd, 0600));

  // change ownership to nobody -- we assume nobody exists and id is always 65534
  ASSERT_EQ(-CEPHFS_EBADF, ceph_fchown(cmount, fd, 65534, 65534));

  // try to sync
  ASSERT_EQ(-CEPHFS_EBADF, ceph_fsync(cmount, fd, false));

  // fstatx is the one operation that is allowed on an O_PATH fd.
  struct ceph_statx stx;
  ASSERT_EQ(0, ceph_fstatx(cmount, fd, &stx, 0, 0));

  ASSERT_EQ(0, ceph_close(cmount, fd));
  ceph_shutdown(cmount);
}
#endif /* __linux */
// Symlink behavior: O_NOFOLLOW open fails with ELOOP, following statx of
// link and target agree byte-for-byte, and lstat reports S_IFLNK.
// Exercised for both relative and absolute target paths.
TEST(LibCephFS, Symlinks) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  char test_file[256];
  sprintf(test_file, "test_symlinks_%d", getpid());

  int fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR, 0666);
  ASSERT_GT(fd, 0);

  ceph_close(cmount, fd);

  char test_symlink[256];
  sprintf(test_symlink, "test_symlinks_sym_%d", getpid());
  ASSERT_EQ(ceph_symlink(cmount, test_file, test_symlink), 0);

  // test the O_NOFOLLOW case
  fd = ceph_open(cmount, test_symlink, O_NOFOLLOW, 0);
  ASSERT_EQ(fd, -CEPHFS_ELOOP);

  // stat the original file
  struct ceph_statx stx_orig;
  ASSERT_EQ(ceph_statx(cmount, test_file, &stx_orig, CEPH_STATX_ALL_STATS, 0), 0);
  // stat the symlink
  struct ceph_statx stx_symlink_orig;
  ASSERT_EQ(ceph_statx(cmount, test_symlink, &stx_symlink_orig, CEPH_STATX_ALL_STATS, 0), 0);
  // ensure the statx bufs are equal
  ASSERT_EQ(memcmp(&stx_orig, &stx_symlink_orig, sizeof(stx_orig)), 0);

  // Repeat with an absolute target path.
  sprintf(test_file, "/test_symlinks_abs_%d", getpid());

  fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR, 0666);
  ASSERT_GT(fd, 0);

  ceph_close(cmount, fd);

  sprintf(test_symlink, "/test_symlinks_abs_sym_%d", getpid());
  ASSERT_EQ(ceph_symlink(cmount, test_file, test_symlink), 0);
  // stat the original file
  ASSERT_EQ(ceph_statx(cmount, test_file, &stx_orig, CEPH_STATX_ALL_STATS, 0), 0);
  // stat the symlink
  ASSERT_EQ(ceph_statx(cmount, test_symlink, &stx_symlink_orig, CEPH_STATX_ALL_STATS, 0), 0);
  // ensure the statx bufs are equal
  ASSERT_TRUE(!memcmp(&stx_orig, &stx_symlink_orig, sizeof(stx_orig)));

  // test lstat: without following, the link itself is an S_IFLNK inode.
  ASSERT_EQ(ceph_statx(cmount, test_symlink, &stx_orig, CEPH_STATX_ALL_STATS, AT_SYMLINK_NOFOLLOW), 0);
  ASSERT_TRUE(S_ISLNK(stx_orig.stx_mode));

  ceph_shutdown(cmount);
}
// Creating a file through a symlinked directory must land a regular file
// in the target directory (the symlink is followed during path traversal).
TEST(LibCephFS, DirSyms) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  const pid_t pid = getpid();

  // Real directory plus a symlink pointing at it.
  char real_dir[256];
  sprintf(real_dir, "dir1_symlinks_%d", pid);
  ASSERT_EQ(ceph_mkdir(cmount, real_dir, 0700), 0);

  char link_dir[256];
  sprintf(link_dir, "symdir_symlinks_%d", pid);
  ASSERT_EQ(ceph_symlink(cmount, real_dir, link_dir), 0);

  // Create a file via the symlinked path.
  char file_via_link[256];
  sprintf(file_via_link, "/symdir_symlinks_%d/test_symdir_file", pid);
  int fd = ceph_open(cmount, file_via_link, O_CREAT|O_RDWR, 0600);
  ASSERT_GT(fd, 0);
  ceph_close(cmount, fd);

  // ensure that its a file not a directory we get back
  struct ceph_statx stx;
  ASSERT_EQ(ceph_statx(cmount, file_via_link, &stx, 0, AT_SYMLINK_NOFOLLOW), 0);
  ASSERT_TRUE(S_ISREG(stx.stx_mode));

  ceph_shutdown(cmount);
}
// Symlink loop detection: both a self-referential symlink and a three-link
// cycle (a -> b -> c -> a) must make open() fail with ELOOP.
TEST(LibCephFS, LoopSyms) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  char test_dir1[256];
  sprintf(test_dir1, "dir1_loopsym_%d", getpid());
  ASSERT_EQ(ceph_mkdir(cmount, test_dir1, 0700), 0);

  char test_dir2[256];
  sprintf(test_dir2, "/dir1_loopsym_%d/loop_dir", getpid());
  ASSERT_EQ(ceph_mkdir(cmount, test_dir2, 0700), 0);

  // symlink it itself: /path/to/mysym -> /path/to/mysym
  char test_symdir[256];
  sprintf(test_symdir, "/dir1_loopsym_%d/loop_dir/symdir", getpid());
  ASSERT_EQ(ceph_symlink(cmount, test_symdir, test_symdir), 0);

  // Resolving a path through the self-referential link must hit ELOOP.
  char test_file[256];
  sprintf(test_file, "/dir1_loopsym_%d/loop_dir/symdir/test_loopsym_file", getpid());
  int fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR, 0600);
  ASSERT_EQ(fd, -CEPHFS_ELOOP);

  // loop: /a -> /b, /b -> /c, /c -> /a
  char a[264], b[264], c[264];
  sprintf(a, "/%s/a", test_dir1);
  sprintf(b, "/%s/b", test_dir1);
  sprintf(c, "/%s/c", test_dir1);
  ASSERT_EQ(ceph_symlink(cmount, a, b), 0);
  ASSERT_EQ(ceph_symlink(cmount, b, c), 0);
  ASSERT_EQ(ceph_symlink(cmount, c, a), 0);
  ASSERT_EQ(ceph_open(cmount, a, O_RDWR, 0), -CEPHFS_ELOOP);

  ceph_shutdown(cmount);
}
// Create a hard link, unlink the original name, and shut the client down;
// a fresh mount must still be able to use and remove the surviving link
// (exercises orphaned-primary-link handling across client restart).
TEST(LibCephFS, HardlinkNoOriginal) {

  int mypid = getpid();

  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  char dir[256];
  sprintf(dir, "/test_rmdirfail%d", mypid);
  ASSERT_EQ(ceph_mkdir(cmount, dir, 0777), 0);

  ASSERT_EQ(ceph_chdir(cmount, dir), 0);

  int fd = ceph_open(cmount, "f1", O_CREAT, 0644);
  ASSERT_GT(fd, 0);

  ceph_close(cmount, fd);

  // create hard link
  ASSERT_EQ(ceph_link(cmount, "f1", "hardl1"), 0);

  // remove file link points to
  ASSERT_EQ(ceph_unlink(cmount, "f1"), 0);

  // Deliberate mid-test shutdown: the second mount below starts cold.
  ceph_shutdown(cmount);

  // now cleanup
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  ASSERT_EQ(ceph_chdir(cmount, dir), 0);
  ASSERT_EQ(ceph_unlink(cmount, "hardl1"), 0);
  ASSERT_EQ(ceph_rmdir(cmount, dir), 0);

  ceph_shutdown(cmount);
}
// A zero-length read request is valid and must return 0, not an error.
TEST(LibCephFS, BadArgument) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_parse_env(cmount, NULL), 0);
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  int fd = ceph_open(cmount, "test_file", O_CREAT|O_RDWR, 0666);
  ASSERT_GT(fd, 0);

  char buf[100];
  // Populate the file so the zero-length read has data it could return.
  ASSERT_EQ(ceph_write(cmount, fd, buf, sizeof(buf), 0), (int)sizeof(buf));
  ASSERT_EQ(ceph_read(cmount, fd, buf, 0, 5), 0);

  ceph_close(cmount, fd);
  ASSERT_EQ(ceph_unlink(cmount, "test_file"), 0);
  ceph_shutdown(cmount);
}
// Every fd-based API must reject an invalid file descriptor (-1) with
// -CEPHFS_EBADF rather than crashing or touching the buffer arguments.
TEST(LibCephFS, BadFileDesc) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  ASSERT_EQ(ceph_fchmod(cmount, -1, 0655), -CEPHFS_EBADF);
  ASSERT_EQ(ceph_close(cmount, -1), -CEPHFS_EBADF);
  ASSERT_EQ(ceph_lseek(cmount, -1, 0, SEEK_SET), -CEPHFS_EBADF);

  // Zero-length arrays (`char buf[0]`) are a compiler extension and
  // ill-formed in standard C++; a one-byte buffer with a requested I/O
  // length of 0 tests the same thing portably.
  char buf[1];
  ASSERT_EQ(ceph_read(cmount, -1, buf, 0, 0), -CEPHFS_EBADF);
  ASSERT_EQ(ceph_write(cmount, -1, buf, 0, 0), -CEPHFS_EBADF);

  ASSERT_EQ(ceph_ftruncate(cmount, -1, 0), -CEPHFS_EBADF);
  ASSERT_EQ(ceph_fsync(cmount, -1, 0), -CEPHFS_EBADF);

  struct ceph_statx stx;
  ASSERT_EQ(ceph_fstatx(cmount, -1, &stx, 0, 0), -CEPHFS_EBADF);

  struct sockaddr_storage addr;
  ASSERT_EQ(ceph_get_file_stripe_address(cmount, -1, 0, &addr, 1), -CEPHFS_EBADF);

  // Layout/striping queries must fail the same way.
  ASSERT_EQ(ceph_get_file_stripe_unit(cmount, -1), -CEPHFS_EBADF);
  ASSERT_EQ(ceph_get_file_pool(cmount, -1), -CEPHFS_EBADF);
  char poolname[80];
  ASSERT_EQ(ceph_get_file_pool_name(cmount, -1, poolname, sizeof(poolname)), -CEPHFS_EBADF);
  ASSERT_EQ(ceph_get_file_replication(cmount, -1), -CEPHFS_EBADF);
  ASSERT_EQ(ceph_get_file_object_size(cmount, -1), -CEPHFS_EBADF);
  int stripe_unit, stripe_count, object_size, pg_pool;
  ASSERT_EQ(ceph_get_file_layout(cmount, -1, &stripe_unit, &stripe_count, &object_size, &pg_pool), -CEPHFS_EBADF);
  ASSERT_EQ(ceph_get_file_stripe_count(cmount, -1), -CEPHFS_EBADF);

  ceph_shutdown(cmount);
}
// Reading a zero-byte file must return 0; forces the client's sync-read
// path via client_debug_force_sync_read.
TEST(LibCephFS, ReadEmptyFile) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  // test the read_sync path in the client for zero files
  ASSERT_EQ(ceph_conf_set(cmount, "client_debug_force_sync_read", "true"), 0);

  int mypid = getpid();
  char testf[256];

  // Create the file empty, then reopen it read-only.
  sprintf(testf, "test_reademptyfile%d", mypid);
  int fd = ceph_open(cmount, testf, O_CREAT|O_TRUNC|O_WRONLY, 0644);
  ASSERT_GT(fd, 0);

  ceph_close(cmount, fd);

  fd = ceph_open(cmount, testf, O_RDONLY, 0);
  ASSERT_GT(fd, 0);

  char buf[4096];
  ASSERT_EQ(ceph_read(cmount, fd, buf, 4096, 0), 0);

  ceph_close(cmount, fd);
  ceph_shutdown(cmount);
}
// Vectored I/O round-trip: pwritev two iovecs at offset 0 and preadv them
// back, verifying byte counts and contents.
TEST(LibCephFS, PreadvPwritev) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  int mypid = getpid();
  char testf[256];

  sprintf(testf, "test_preadvpwritevfile%d", mypid);
  int fd = ceph_open(cmount, testf, O_CREAT|O_RDWR, 0666);
  ASSERT_GT(fd, 0);

  // sizeof() is used deliberately, so each segment includes its
  // terminating NUL byte in the transfer.
  char out0[] = "hello ";
  char out1[] = "world\n";
  struct iovec iov_out[2] = {
	{out0, sizeof(out0)},
	{out1, sizeof(out1)},
  };
  char in0[sizeof(out0)];
  char in1[sizeof(out1)];
  struct iovec iov_in[2] = {
	{in0, sizeof(in0)},
	{in1, sizeof(in1)},
  };
  ssize_t nwritten = iov_out[0].iov_len + iov_out[1].iov_len;
  ssize_t nread = iov_in[0].iov_len + iov_in[1].iov_len;

  ASSERT_EQ(ceph_pwritev(cmount, fd, iov_out, 2, 0), nwritten);
  ASSERT_EQ(ceph_preadv(cmount, fd, iov_in, 2, 0), nread);
  ASSERT_EQ(0, strncmp((const char*)iov_in[0].iov_base, (const char*)iov_out[0].iov_base, iov_out[0].iov_len));
  ASSERT_EQ(0, strncmp((const char*)iov_in[1].iov_base, (const char*)iov_out[1].iov_base, iov_out[1].iov_len));

  ceph_close(cmount, fd);
  // Remove the test file so repeated runs start clean (matches the
  // cleanup done by the other tests in this file).
  ASSERT_EQ(0, ceph_unlink(cmount, testf));
  ceph_shutdown(cmount);
}
// Low-level vectored I/O round-trip through an Fh obtained from
// ceph_ll_create: writev two iovecs and readv them back.
TEST(LibCephFS, LlreadvLlwritev) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  int mypid = getpid();
  char filename[256];

  sprintf(filename, "test_llreadvllwritevfile%u", mypid);

  Inode *root, *file;
  ASSERT_EQ(ceph_ll_lookup_root(cmount, &root), 0);

  Fh *fh;
  struct ceph_statx stx;
  UserPerm *perms = ceph_mount_perms(cmount);

  // Create and open the file in one low-level call; fh is used for all I/O.
  ASSERT_EQ(ceph_ll_create(cmount, root, filename, 0666,
		    O_RDWR|O_CREAT|O_TRUNC, &file, &fh, &stx, 0, 0, perms), 0);

  // sizeof() includes each string's trailing NUL in the transfer lengths.
  char out0[] = "hello ";
  char out1[] = "world\n";
  struct iovec iov_out[2] = {
	{out0, sizeof(out0)},
	{out1, sizeof(out1)},
  };
  char in0[sizeof(out0)];
  char in1[sizeof(out1)];
  struct iovec iov_in[2] = {
	{in0, sizeof(in0)},
	{in1, sizeof(in1)},
  };
  ssize_t nwritten = iov_out[0].iov_len + iov_out[1].iov_len;
  ssize_t nread = iov_in[0].iov_len + iov_in[1].iov_len;

  ASSERT_EQ(ceph_ll_writev(cmount, fh, iov_out, 2, 0), nwritten);
  ASSERT_EQ(ceph_ll_readv(cmount, fh, iov_in, 2, 0), nread);
  ASSERT_EQ(0, strncmp((const char*)iov_in[0].iov_base, (const char*)iov_out[0].iov_base, iov_out[0].iov_len));
  ASSERT_EQ(0, strncmp((const char*)iov_in[1].iov_base, (const char*)iov_out[1].iov_base, iov_out[1].iov_len));

  // NOTE(review): the Inode handles (root, file) are not ceph_ll_put() and
  // the file is not unlinked — presumably cleaned up by ceph_shutdown;
  // confirm against libcephfs low-level reference rules.
  ceph_ll_close(cmount, fh);
  ceph_shutdown(cmount);
}
// The filesystem must advertise a positive stripe-unit granularity.
TEST(LibCephFS, StripeUnitGran) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_parse_env(cmount, NULL), 0);
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  // Any positive value is acceptable; zero or negative is a failure.
  ASSERT_GT(ceph_get_stripe_unit_granularity(cmount), 0);
  ceph_shutdown(cmount);
}
// Exercise ceph_rename(): rename a file, verify the destination exists, the
// source is gone, and that renaming a now non-existent source fails ENOENT.
TEST(LibCephFS, Rename) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  int mypid = getpid();
  char path_src[256];
  char path_dst[256];

  /* make a source file */
  // snprintf bounds the write to the buffer (sprintf has no such guard).
  snprintf(path_src, sizeof(path_src), "test_rename_src%d", mypid);
  int fd = ceph_open(cmount, path_src, O_CREAT|O_TRUNC|O_WRONLY, 0777);
  ASSERT_GT(fd, 0);
  ASSERT_EQ(0, ceph_close(cmount, fd));

  /* rename to a new dest path */
  snprintf(path_dst, sizeof(path_dst), "test_rename_dst%d", mypid);
  ASSERT_EQ(0, ceph_rename(cmount, path_src, path_dst));

  /* test that dest path exists */
  struct ceph_statx stx;
  ASSERT_EQ(0, ceph_statx(cmount, path_dst, &stx, 0, 0));

  /* test that src path doesn't exist */
  ASSERT_EQ(-CEPHFS_ENOENT, ceph_statx(cmount, path_src, &stx, 0, AT_SYMLINK_NOFOLLOW));

  /* rename with non-existent source path */
  ASSERT_EQ(-CEPHFS_ENOENT, ceph_rename(cmount, path_src, path_dst));

  ASSERT_EQ(0, ceph_unlink(cmount, path_dst));
  ceph_shutdown(cmount);
}
// Every entry point must fail with -CEPHFS_ENOTCONN when the handle has been
// created and configured but never mounted, without crashing on the unusable
// handle.
TEST(LibCephFS, UseUnmounted) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));

  struct statvfs stvfs;
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_statfs(cmount, "/", &stvfs));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_local_osd(cmount));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_chdir(cmount, "/"));

  // Initialize to NULL: ceph_opendir() fails below and never sets dirp, so
  // without this the subsequent calls would receive an uninitialized (wild)
  // pointer.
  struct ceph_dir_result *dirp = NULL;
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_opendir(cmount, "/", &dirp));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_closedir(cmount, dirp));

  // ceph_readdir() reports failure via errno, not a return code.
  ceph_readdir(cmount, dirp);
  EXPECT_EQ(CEPHFS_ENOTCONN, errno);

  struct dirent rdent;
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_readdir_r(cmount, dirp, &rdent));

  struct ceph_statx stx;
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_readdirplus_r(cmount, dirp, &rdent, &stx, 0, 0, NULL));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_getdents(cmount, dirp, NULL, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_getdnames(cmount, dirp, NULL, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_telldir(cmount, dirp));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_link(cmount, "/", "/link"));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_unlink(cmount, "/path"));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_rename(cmount, "/path", "/path"));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_mkdir(cmount, "/", 0655));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_mkdirs(cmount, "/", 0655));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_rmdir(cmount, "/path"));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_readlink(cmount, "/path", NULL, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_symlink(cmount, "/path", "/path"));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_statx(cmount, "/path", &stx, 0, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_setattrx(cmount, "/path", &stx, 0, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_getxattr(cmount, "/path", "name", NULL, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_lgetxattr(cmount, "/path", "name", NULL, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_listxattr(cmount, "/path", NULL, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_llistxattr(cmount, "/path", NULL, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_removexattr(cmount, "/path", "name"));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_lremovexattr(cmount, "/path", "name"));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_setxattr(cmount, "/path", "name", NULL, 0, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_lsetxattr(cmount, "/path", "name", NULL, 0, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_fsetattrx(cmount, 0, &stx, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_chmod(cmount, "/path", 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_fchmod(cmount, 0, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_chown(cmount, "/path", 0, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_lchown(cmount, "/path", 0, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_fchown(cmount, 0, 0, 0));

  struct utimbuf utb;
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_utime(cmount, "/path", &utb));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_truncate(cmount, "/path", 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_mknod(cmount, "/path", 0, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_open(cmount, "/path", 0, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_open_layout(cmount, "/path", 0, 0, 0, 0, 0, "pool"));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_close(cmount, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_lseek(cmount, 0, 0, SEEK_SET));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_read(cmount, 0, NULL, 0, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_write(cmount, 0, NULL, 0, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_ftruncate(cmount, 0, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_fsync(cmount, 0, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_fstatx(cmount, 0, &stx, 0, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_sync_fs(cmount));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_file_stripe_unit(cmount, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_file_stripe_count(cmount, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_file_layout(cmount, 0, NULL, NULL ,NULL ,NULL));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_file_object_size(cmount, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_file_pool(cmount, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_file_pool_name(cmount, 0, NULL, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_file_replication(cmount, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_path_replication(cmount, "/path"));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_path_layout(cmount, "/path", NULL, NULL, NULL, NULL));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_path_object_size(cmount, "/path"));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_path_stripe_count(cmount, "/path"));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_path_stripe_unit(cmount, "/path"));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_path_pool(cmount, "/path"));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_path_pool_name(cmount, "/path", NULL, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_pool_name(cmount, 0, NULL, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_file_stripe_address(cmount, 0, 0, NULL, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_localize_reads(cmount, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_debug_get_fd_caps(cmount, 0));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_debug_get_file_caps(cmount, "/path"));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_stripe_unit_granularity(cmount));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_pool_id(cmount, "data"));
  EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_pool_replication(cmount, 1));

  // Never mounted, so release (not shutdown) the handle.
  ceph_release(cmount);
}
// ceph_get_pool_id() must resolve the root directory's pool name to a valid
// (non-negative) id and fail with -CEPHFS_ENOENT for an unknown pool name.
TEST(LibCephFS, GetPoolId) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
char name[80];
memset(name, 0, sizeof(name));
// Look up the data pool backing "/" to get a name that certainly exists.
ASSERT_LE(0, ceph_get_path_pool_name(cmount, "/", name, sizeof(name)));
ASSERT_GE(ceph_get_pool_id(cmount, name), 0);
ASSERT_EQ(ceph_get_pool_id(cmount, "weflkjwelfjwlkejf"), -CEPHFS_ENOENT);
ceph_shutdown(cmount);
}
// ceph_get_pool_replication() must reject invalid pool ids with ENOENT and
// report a positive replica count for the pool backing "/".
TEST(LibCephFS, GetPoolReplication) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
/* negative pools */
ASSERT_EQ(ceph_get_pool_replication(cmount, -10), -CEPHFS_ENOENT);
/* valid pool */
int pool_id;
int stripe_unit, stripe_count, object_size;
// The layout of "/" yields a pool id that is guaranteed to exist.
ASSERT_EQ(0, ceph_get_path_layout(cmount, "/", &stripe_unit, &stripe_count,
&object_size, &pool_id));
ASSERT_GE(pool_id, 0);
ASSERT_GT(ceph_get_pool_replication(cmount, pool_id), 0);
ceph_shutdown(cmount);
}
// ceph_get_file_extent_osds() must report the extent length up to the next
// stripe-unit boundary and the OSDs holding it, and fail ENOTCONN before
// mount and ERANGE when the output array is too small.
TEST(LibCephFS, GetExtentOsds) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_file_extent_osds(cmount, 0, 0, NULL, NULL, 0));
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
int stripe_unit = (1<<18);
/* make a file! */
char test_file[256];
sprintf(test_file, "test_extent_osds_%d", getpid());
// Custom layout: 256 KiB stripe unit, 2 stripes, 512 KiB objects.
int fd = ceph_open_layout(cmount, test_file, O_CREAT|O_RDWR, 0666,
stripe_unit, 2, stripe_unit*2, NULL);
ASSERT_GT(fd, 0);
/* get back how many osds > 0 */
// A NULL output array just queries the required array size.
int ret = ceph_get_file_extent_osds(cmount, fd, 0, NULL, NULL, 0);
EXPECT_GT(ret, 0);
int64_t len;
// NOTE: variable-length array — a GCC/Clang extension, not standard C++.
int osds[ret];
/* full stripe extent */
EXPECT_EQ(ret, ceph_get_file_extent_osds(cmount, fd, 0, &len, osds, ret));
EXPECT_EQ(len, (int64_t)stripe_unit);
/* half stripe extent */
// Offsets inside a stripe unit shorten the extent to the unit boundary.
EXPECT_EQ(ret, ceph_get_file_extent_osds(cmount, fd, stripe_unit/2, &len, osds, ret));
EXPECT_EQ(len, (int64_t)stripe_unit/2);
/* 1.5 stripe unit offset -1 byte */
EXPECT_EQ(ret, ceph_get_file_extent_osds(cmount, fd, 3*stripe_unit/2-1, &len, osds, ret));
EXPECT_EQ(len, (int64_t)stripe_unit/2+1);
/* 1.5 stripe unit offset +1 byte */
EXPECT_EQ(ret, ceph_get_file_extent_osds(cmount, fd, 3*stripe_unit/2+1, &len, osds, ret));
EXPECT_EQ(len, (int64_t)stripe_unit/2-1);
/* only when more than 1 osd */
if (ret > 1) {
EXPECT_EQ(-CEPHFS_ERANGE, ceph_get_file_extent_osds(cmount, fd, 0, NULL, osds, 1));
}
ceph_close(cmount, fd);
ceph_shutdown(cmount);
}
// ceph_get_osd_crush_location() must validate its arguments (ENOTCONN,
// EINVAL, ENOENT, ERANGE) and return a buffer of NUL-separated
// type/name pairs describing each OSD's CRUSH position.
TEST(LibCephFS, GetOsdCrushLocation) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_osd_crush_location(cmount, 0, NULL, 0));
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
// NULL buffer with non-zero length is invalid.
ASSERT_EQ(ceph_get_osd_crush_location(cmount, 0, NULL, 1), -CEPHFS_EINVAL);
char path[256];
ASSERT_EQ(ceph_get_osd_crush_location(cmount, 9999999, path, 0), -CEPHFS_ENOENT);
ASSERT_EQ(ceph_get_osd_crush_location(cmount, -1, path, 0), -CEPHFS_EINVAL);
char test_file[256];
sprintf(test_file, "test_osds_loc_%d", getpid());
int fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR, 0666);
ASSERT_GT(fd, 0);
/* get back how many osds > 0 */
int ret = ceph_get_file_extent_osds(cmount, fd, 0, NULL, NULL, 0);
EXPECT_GT(ret, 0);
/* full stripe extent */
// NOTE: variable-length array — a GCC/Clang extension, not standard C++.
int osds[ret];
EXPECT_EQ(ret, ceph_get_file_extent_osds(cmount, fd, 0, NULL, osds, ret));
// Zero length queries the required buffer size; length 1 is too small.
ASSERT_GT(ceph_get_osd_crush_location(cmount, 0, path, 0), 0);
ASSERT_EQ(ceph_get_osd_crush_location(cmount, 0, path, 1), -CEPHFS_ERANGE);
for (int i = 0; i < ret; i++) {
int len = ceph_get_osd_crush_location(cmount, osds[i], path, sizeof(path));
ASSERT_GT(len, 0);
// Walk the buffer: alternating NUL-terminated type and name strings.
int pos = 0;
while (pos < len) {
std::string type(path + pos);
ASSERT_GT((int)type.size(), 0);
pos += type.size() + 1;
std::string name(path + pos);
ASSERT_GT((int)name.size(), 0);
pos += name.size() + 1;
}
}
ceph_close(cmount, fd);
ceph_shutdown(cmount);
}
// ceph_get_osd_addr() must fail ENOTCONN before mount, EINVAL on a NULL
// output, ENOENT for unknown OSD ids, and succeed for osd.0.
TEST(LibCephFS, GetOsdAddr) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_osd_addr(cmount, 0, NULL));
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
ASSERT_EQ(-CEPHFS_EINVAL, ceph_get_osd_addr(cmount, 0, NULL));
struct sockaddr_storage addr;
ASSERT_EQ(-CEPHFS_ENOENT, ceph_get_osd_addr(cmount, -1, &addr));
ASSERT_EQ(-CEPHFS_ENOENT, ceph_get_osd_addr(cmount, 9999999, &addr));
ASSERT_EQ(0, ceph_get_osd_addr(cmount, 0, &addr));
ceph_shutdown(cmount);
}
// Deliberately leave a directory handle and a file descriptor open:
// ceph_shutdown() must clean them up rather than crash or hang.
TEST(LibCephFS, OpenNoClose) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
pid_t mypid = getpid();
char str_buf[256];
sprintf(str_buf, "open_no_close_dir%d", mypid);
ASSERT_EQ(0, ceph_mkdirs(cmount, str_buf, 0777));
struct ceph_dir_result *ls_dir = NULL;
ASSERT_EQ(ceph_opendir(cmount, str_buf, &ls_dir), 0);
// str_buf is reused for the file name; the dir handle is already open.
sprintf(str_buf, "open_no_close_file%d", mypid);
int fd = ceph_open(cmount, str_buf, O_RDONLY|O_CREAT, 0666);
ASSERT_LT(0, fd);
// shutdown should force close opened file/dir
ceph_shutdown(cmount);
}
// Link count bookkeeping via the low-level API: a fresh file has nlink 1,
// hard-linking raises it to 2, and unlinking the link drops it back to 1.
TEST(LibCephFS, Nlink) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
Inode *root, *dir, *file;
ASSERT_EQ(ceph_ll_lookup_root(cmount, &root), 0);
char dirname[32], filename[32], linkname[32];
sprintf(dirname, "nlinkdir%x", getpid());
sprintf(filename, "nlinkorig%x", getpid());
sprintf(linkname, "nlinklink%x", getpid());
struct ceph_statx stx;
Fh *fh;
UserPerm *perms = ceph_mount_perms(cmount);
ASSERT_EQ(ceph_ll_mkdir(cmount, root, dirname, 0755, &dir, &stx, 0, 0, perms), 0);
// Create with CEPH_STATX_NLINK so stx carries the initial link count.
ASSERT_EQ(ceph_ll_create(cmount, dir, filename, 0666, O_RDWR|O_CREAT|O_EXCL,
&file, &fh, &stx, CEPH_STATX_NLINK, 0, perms), 0);
ASSERT_EQ(ceph_ll_close(cmount, fh), 0);
ASSERT_EQ(stx.stx_nlink, (nlink_t)1);
ASSERT_EQ(ceph_ll_link(cmount, file, dir, linkname, perms), 0);
ASSERT_EQ(ceph_ll_getattr(cmount, file, &stx, CEPH_STATX_NLINK, 0, perms), 0);
ASSERT_EQ(stx.stx_nlink, (nlink_t)2);
ASSERT_EQ(ceph_ll_unlink(cmount, dir, linkname, perms), 0);
ASSERT_EQ(ceph_ll_lookup(cmount, dir, filename, &file, &stx,
CEPH_STATX_NLINK, 0, perms), 0);
ASSERT_EQ(stx.stx_nlink, (nlink_t)1);
ceph_shutdown(cmount);
}
// "." and ".." resolution: at the filesystem root they are the same inode,
// ".." of an unlinked cwd still resolves, and the same semantics hold when a
// subdirectory is mounted as the root.
TEST(LibCephFS, SlashDotDot) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);

  struct ceph_statx stx;
  ASSERT_EQ(ceph_statx(cmount, "/.", &stx, CEPH_STATX_INO, 0), 0);

  ino_t ino = stx.stx_ino;
  ASSERT_EQ(ceph_statx(cmount, "/..", &stx, CEPH_STATX_INO, 0), 0);

  /* At root, "." and ".." should be the same inode */
  ASSERT_EQ(ino, stx.stx_ino);

  /* Test accessing the parent of an unlinked directory */
  char dir1[32], dir2[56];
  // snprintf bounds the writes; dir2 embeds dir1, hence its larger buffer.
  snprintf(dir1, sizeof(dir1), "/sldotdot%x", getpid());
  snprintf(dir2, sizeof(dir2), "%s/sub%x", dir1, getpid());

  ASSERT_EQ(ceph_mkdir(cmount, dir1, 0755), 0);
  ASSERT_EQ(ceph_mkdir(cmount, dir2, 0755), 0);
  ASSERT_EQ(ceph_chdir(cmount, dir2), 0);

  /* Test behavior when unlinking cwd */
  struct ceph_dir_result *rdir;
  ASSERT_EQ(ceph_opendir(cmount, ".", &rdir), 0);
  ASSERT_EQ(ceph_rmdir(cmount, dir2), 0);

  /* get "." entry */
  struct dirent *result = ceph_readdir(cmount, rdir);
  ASSERT_TRUE(result != NULL);  // guard the dereference below
  ino = result->d_ino;

  /* get ".." entry */
  result = ceph_readdir(cmount, rdir);
  ASSERT_TRUE(result != NULL);
  ASSERT_EQ(ino, result->d_ino);
  ceph_closedir(cmount, rdir);

  /* Make sure it works same way when mounting subtree */
  ASSERT_EQ(ceph_unmount(cmount), 0);
  ASSERT_EQ(ceph_mount(cmount, dir1), 0);
  ASSERT_EQ(ceph_statx(cmount, "/..", &stx, CEPH_STATX_INO, 0), 0);

  /* Test readdir behavior */
  ASSERT_EQ(ceph_opendir(cmount, "/", &rdir), 0);
  result = ceph_readdir(cmount, rdir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, ".");
  ino = result->d_ino;
  result = ceph_readdir(cmount, rdir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, "..");
  ASSERT_EQ(ino, result->d_ino);
  ceph_shutdown(cmount);
}
// Exact equality of two timespec values: both the seconds and the
// nanoseconds fields must match.
static inline bool
timespec_eq(timespec const& lhs, timespec const& rhs)
{
  if (lhs.tv_sec != rhs.tv_sec)
    return false;
  return lhs.tv_nsec == rhs.tv_nsec;
}
// Birth-time semantics: on a fresh file btime equals ctime; a later chmod
// must advance ctime while leaving btime untouched.
TEST(LibCephFS, Btime) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
char filename[32];
sprintf(filename, "/getattrx%x", getpid());
// Best-effort cleanup of a leftover from a previous run.
ceph_unlink(cmount, filename);
int fd = ceph_open(cmount, filename, O_RDWR|O_CREAT|O_EXCL, 0666);
ASSERT_LT(0, fd);
/* make sure fstatx works */
struct ceph_statx stx;
ASSERT_EQ(ceph_fstatx(cmount, fd, &stx, CEPH_STATX_CTIME|CEPH_STATX_BTIME, 0), 0);
ASSERT_TRUE(stx.stx_mask & (CEPH_STATX_CTIME|CEPH_STATX_BTIME));
ASSERT_TRUE(timespec_eq(stx.stx_ctime, stx.stx_btime));
ceph_close(cmount, fd);
ASSERT_EQ(ceph_statx(cmount, filename, &stx, CEPH_STATX_CTIME|CEPH_STATX_BTIME, 0), 0);
ASSERT_TRUE(timespec_eq(stx.stx_ctime, stx.stx_btime));
ASSERT_TRUE(stx.stx_mask & (CEPH_STATX_CTIME|CEPH_STATX_BTIME));
struct timespec old_btime = stx.stx_btime;
/* Now sleep, do a chmod and verify that the ctime changed, but btime didn't */
sleep(1);
ASSERT_EQ(ceph_chmod(cmount, filename, 0644), 0);
ASSERT_EQ(ceph_statx(cmount, filename, &stx, CEPH_STATX_CTIME|CEPH_STATX_BTIME, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_BTIME);
ASSERT_TRUE(timespec_eq(stx.stx_btime, old_btime));
ASSERT_FALSE(timespec_eq(stx.stx_ctime, stx.stx_btime));
ceph_shutdown(cmount);
}
// ceph_setattrx() with CEPH_SETATTR_BTIME must persist an explicit birth
// time that a subsequent statx reads back verbatim.
TEST(LibCephFS, SetBtime) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
char filename[32];
sprintf(filename, "/setbtime%x", getpid());
// Best-effort cleanup of a leftover from a previous run.
ceph_unlink(cmount, filename);
int fd = ceph_open(cmount, filename, O_RDWR|O_CREAT|O_EXCL, 0666);
ASSERT_LT(0, fd);
ceph_close(cmount, fd);
struct ceph_statx stx;
struct timespec old_btime = { 1, 2 };
// Only stx_btime is consumed, as selected by the CEPH_SETATTR_BTIME mask.
stx.stx_btime = old_btime;
ASSERT_EQ(ceph_setattrx(cmount, filename, &stx, CEPH_SETATTR_BTIME, 0), 0);
ASSERT_EQ(ceph_statx(cmount, filename, &stx, CEPH_STATX_BTIME, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_BTIME);
ASSERT_TRUE(timespec_eq(stx.stx_btime, old_btime));
ceph_shutdown(cmount);
}
// AT_STATX_DONT_SYNC must serve cached attributes: after another client
// changes the file, a lazy getattr on this client still sees the old ctime.
TEST(LibCephFS, LazyStatx) {
struct ceph_mount_info *cmount1, *cmount2;
ASSERT_EQ(ceph_create(&cmount1, NULL), 0);
ASSERT_EQ(ceph_create(&cmount2, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount1, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount2, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount1, NULL));
ASSERT_EQ(0, ceph_conf_parse_env(cmount2, NULL));
ASSERT_EQ(ceph_mount(cmount1, "/"), 0);
ASSERT_EQ(ceph_mount(cmount2, "/"), 0);
char filename[32];
sprintf(filename, "lazystatx%x", getpid());
Inode *root1, *file1, *root2, *file2;
struct ceph_statx stx;
Fh *fh;
UserPerm *perms1 = ceph_mount_perms(cmount1);
UserPerm *perms2 = ceph_mount_perms(cmount2);
ASSERT_EQ(ceph_ll_lookup_root(cmount1, &root1), 0);
// Best-effort cleanup of a leftover from a previous run.
ceph_ll_unlink(cmount1, root1, filename, perms1);
ASSERT_EQ(ceph_ll_create(cmount1, root1, filename, 0666, O_RDWR|O_CREAT|O_EXCL,
&file1, &fh, &stx, 0, 0, perms1), 0);
ASSERT_EQ(ceph_ll_close(cmount1, fh), 0);
// Prime client 2's attribute cache with the original ctime.
ASSERT_EQ(ceph_ll_lookup_root(cmount2, &root2), 0);
ASSERT_EQ(ceph_ll_lookup(cmount2, root2, filename, &file2, &stx, CEPH_STATX_CTIME, 0, perms2), 0);
struct timespec old_ctime = stx.stx_ctime;
/*
* Now sleep, do a chmod on the first client and the see whether we get a
* different ctime with a statx that uses AT_STATX_DONT_SYNC
*/
sleep(1);
stx.stx_mode = 0644;
ASSERT_EQ(ceph_ll_setattr(cmount1, file1, &stx, CEPH_SETATTR_MODE, perms1), 0);
ASSERT_EQ(ceph_ll_getattr(cmount2, file2, &stx, CEPH_STATX_CTIME, AT_STATX_DONT_SYNC, perms2), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_CTIME);
ASSERT_TRUE(stx.stx_ctime.tv_sec == old_ctime.tv_sec &&
stx.stx_ctime.tv_nsec == old_ctime.tv_nsec);
ceph_shutdown(cmount1);
ceph_shutdown(cmount2);
}
// The change attribute (stx_version) must advance on every metadata or data
// mutation: chmod, write, and truncate each bump it.
TEST(LibCephFS, ChangeAttr) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
char filename[32];
sprintf(filename, "/changeattr%x", getpid());
// Best-effort cleanup of a leftover from a previous run.
ceph_unlink(cmount, filename);
int fd = ceph_open(cmount, filename, O_RDWR|O_CREAT|O_EXCL, 0666);
ASSERT_LT(0, fd);
struct ceph_statx stx;
ASSERT_EQ(ceph_statx(cmount, filename, &stx, CEPH_STATX_VERSION, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
uint64_t old_change_attr = stx.stx_version;
/* do chmod, and check whether change_attr changed */
ASSERT_EQ(ceph_chmod(cmount, filename, 0644), 0);
ASSERT_EQ(ceph_statx(cmount, filename, &stx, CEPH_STATX_VERSION, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
ASSERT_NE(stx.stx_version, old_change_attr);
old_change_attr = stx.stx_version;
/* now do a write and see if it changed again */
ASSERT_EQ(3, ceph_write(cmount, fd, "foo", 3, 0));
ASSERT_EQ(ceph_statx(cmount, filename, &stx, CEPH_STATX_VERSION, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
ASSERT_NE(stx.stx_version, old_change_attr);
old_change_attr = stx.stx_version;
/* Now truncate and check again */
ASSERT_EQ(0, ceph_ftruncate(cmount, fd, 0));
ASSERT_EQ(ceph_statx(cmount, filename, &stx, CEPH_STATX_VERSION, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
ASSERT_NE(stx.stx_version, old_change_attr);
ceph_close(cmount, fd);
ceph_shutdown(cmount);
}
// A directory's change attribute must grow both when only its ctime changes
// (setxattr) and when its mtime+ctime change (file creation inside it).
TEST(LibCephFS, DirChangeAttrCreateFile) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
char dirpath[32], filepath[56];
sprintf(dirpath, "/dirchange%x", getpid());
sprintf(filepath, "%s/foo", dirpath);
ASSERT_EQ(ceph_mkdir(cmount, dirpath, 0755), 0);
struct ceph_statx stx;
ASSERT_EQ(ceph_statx(cmount, dirpath, &stx, CEPH_STATX_VERSION, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
uint64_t old_change_attr = stx.stx_version;
/* Important: Follow an operation that changes the directory's ctime (setxattr)
* with one that changes the directory's mtime and ctime (create).
* Check that directory's change_attr is updated everytime ctime changes.
*/
/* set xattr on dir, and check whether dir's change_attr is incremented */
ASSERT_EQ(ceph_setxattr(cmount, dirpath, "user.name", (void*)"bob", 3, XATTR_CREATE), 0);
ASSERT_EQ(ceph_statx(cmount, dirpath, &stx, CEPH_STATX_VERSION, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
ASSERT_GT(stx.stx_version, old_change_attr);
old_change_attr = stx.stx_version;
/* create a file within dir, and check whether dir's change_attr is incremented */
int fd = ceph_open(cmount, filepath, O_RDWR|O_CREAT|O_EXCL, 0666);
ASSERT_LT(0, fd);
ceph_close(cmount, fd);
ASSERT_EQ(ceph_statx(cmount, dirpath, &stx, CEPH_STATX_VERSION, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
ASSERT_GT(stx.stx_version, old_change_attr);
ASSERT_EQ(0, ceph_unlink(cmount, filepath));
ASSERT_EQ(0, ceph_rmdir(cmount, dirpath));
ceph_shutdown(cmount);
}
// A directory's change attribute must grow both on a ctime-only change
// (chmod of the dir) and on an mtime+ctime change (rename inside it).
TEST(LibCephFS, DirChangeAttrRenameFile) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
char dirpath[32], filepath[56], newfilepath[56];
sprintf(dirpath, "/dirchange%x", getpid());
sprintf(filepath, "%s/foo", dirpath);
ASSERT_EQ(ceph_mkdir(cmount, dirpath, 0755), 0);
int fd = ceph_open(cmount, filepath, O_RDWR|O_CREAT|O_EXCL, 0666);
ASSERT_LT(0, fd);
ceph_close(cmount, fd);
/* Important: Follow an operation that changes the directory's ctime (setattr)
* with one that changes the directory's mtime and ctime (rename).
* Check that directory's change_attr is updated everytime ctime changes.
*/
struct ceph_statx stx;
ASSERT_EQ(ceph_statx(cmount, dirpath, &stx, CEPH_STATX_VERSION, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
uint64_t old_change_attr = stx.stx_version;
/* chmod dir, and check whether dir's change_attr is incremented */
ASSERT_EQ(ceph_chmod(cmount, dirpath, 0777), 0);
ASSERT_EQ(ceph_statx(cmount, dirpath, &stx, CEPH_STATX_VERSION, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
ASSERT_GT(stx.stx_version, old_change_attr);
old_change_attr = stx.stx_version;
/* rename a file within dir, and check whether dir's change_attr is incremented */
sprintf(newfilepath, "%s/bar", dirpath);
ASSERT_EQ(ceph_rename(cmount, filepath, newfilepath), 0);
ASSERT_EQ(ceph_statx(cmount, dirpath, &stx, CEPH_STATX_VERSION, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
ASSERT_GT(stx.stx_version, old_change_attr);
ASSERT_EQ(0, ceph_unlink(cmount, newfilepath));
ASSERT_EQ(0, ceph_rmdir(cmount, dirpath));
ceph_shutdown(cmount);
}
// A directory's change attribute must grow both on a ctime-only change
// (removexattr) and on an mtime+ctime change (unlink of a child file).
TEST(LibCephFS, DirChangeAttrRemoveFile) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
char dirpath[32], filepath[56];
sprintf(dirpath, "/dirchange%x", getpid());
sprintf(filepath, "%s/foo", dirpath);
ASSERT_EQ(ceph_mkdir(cmount, dirpath, 0755), 0);
// Pre-seed an xattr so the test below can remove it.
ASSERT_EQ(ceph_setxattr(cmount, dirpath, "user.name", (void*)"bob", 3, XATTR_CREATE), 0);
int fd = ceph_open(cmount, filepath, O_RDWR|O_CREAT|O_EXCL, 0666);
ASSERT_LT(0, fd);
ceph_close(cmount, fd);
/* Important: Follow an operation that changes the directory's ctime (removexattr)
* with one that changes the directory's mtime and ctime (remove a file).
* Check that directory's change_attr is updated everytime ctime changes.
*/
struct ceph_statx stx;
ASSERT_EQ(ceph_statx(cmount, dirpath, &stx, CEPH_STATX_VERSION, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
uint64_t old_change_attr = stx.stx_version;
/* remove xattr, and check whether dir's change_attr is incremented */
ASSERT_EQ(ceph_removexattr(cmount, dirpath, "user.name"), 0);
ASSERT_EQ(ceph_statx(cmount, dirpath, &stx, CEPH_STATX_VERSION, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
ASSERT_GT(stx.stx_version, old_change_attr);
old_change_attr = stx.stx_version;
/* remove a file within dir, and check whether dir's change_attr is incremented */
ASSERT_EQ(0, ceph_unlink(cmount, filepath));
ASSERT_EQ(ceph_statx(cmount, dirpath, &stx, CEPH_STATX_VERSION, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
ASSERT_GT(stx.stx_version, old_change_attr);
ASSERT_EQ(0, ceph_rmdir(cmount, dirpath));
ceph_shutdown(cmount);
}
// ceph_fsetattrx() with CEPH_SETATTR_SIZE must resize the file; fstatx then
// reads the new size back.
TEST(LibCephFS, SetSize) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
char filename[32];
sprintf(filename, "/setsize%x", getpid());
// Best-effort cleanup of a leftover from a previous run.
ceph_unlink(cmount, filename);
int fd = ceph_open(cmount, filename, O_RDWR|O_CREAT|O_EXCL, 0666);
ASSERT_LT(0, fd);
struct ceph_statx stx;
// Only stx_size is consumed, as selected by the CEPH_SETATTR_SIZE mask.
uint64_t size = 8388608;
stx.stx_size = size;
ASSERT_EQ(ceph_fsetattrx(cmount, fd, &stx, CEPH_SETATTR_SIZE), 0);
ASSERT_EQ(ceph_fstatx(cmount, fd, &stx, CEPH_STATX_SIZE, 0), 0);
ASSERT_EQ(stx.stx_size, size);
ceph_close(cmount, fd);
ceph_shutdown(cmount);
}
// The root directory "/" must reject destructive or creating operations
// with the appropriate errors (EBUSY, EEXIST, EISDIR, ENOENT).
TEST(LibCephFS, OperationsOnRoot)
{
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
char dirname[32];
sprintf(dirname, "/somedir%x", getpid());
ASSERT_EQ(ceph_mkdir(cmount, dirname, 0755), 0);
// Root can never be removed or renamed.
ASSERT_EQ(ceph_rmdir(cmount, "/"), -CEPHFS_EBUSY);
ASSERT_EQ(ceph_link(cmount, "/", "/"), -CEPHFS_EEXIST);
ASSERT_EQ(ceph_link(cmount, dirname, "/"), -CEPHFS_EEXIST);
ASSERT_EQ(ceph_link(cmount, "nonExisitingDir", "/"), -CEPHFS_ENOENT);
ASSERT_EQ(ceph_unlink(cmount, "/"), -CEPHFS_EISDIR);
ASSERT_EQ(ceph_rename(cmount, "/", "/"), -CEPHFS_EBUSY);
ASSERT_EQ(ceph_rename(cmount, dirname, "/"), -CEPHFS_EBUSY);
ASSERT_EQ(ceph_rename(cmount, "nonExistingDir", "/"), -CEPHFS_EBUSY);
ASSERT_EQ(ceph_rename(cmount, "/", dirname), -CEPHFS_EBUSY);
ASSERT_EQ(ceph_rename(cmount, "/", "nonExistingDir"), -CEPHFS_EBUSY);
ASSERT_EQ(ceph_mkdir(cmount, "/", 0777), -CEPHFS_EEXIST);
ASSERT_EQ(ceph_mknod(cmount, "/", 0, 0), -CEPHFS_EEXIST);
ASSERT_EQ(ceph_symlink(cmount, "/", "/"), -CEPHFS_EEXIST);
ASSERT_EQ(ceph_symlink(cmount, dirname, "/"), -CEPHFS_EEXIST);
ASSERT_EQ(ceph_symlink(cmount, "nonExistingDir", "/"), -CEPHFS_EEXIST);
ceph_shutdown(cmount);
}
// no rlimits on Windows
#ifndef _WIN32
static void shutdown_racer_func()
{
const int niter = 32;
struct ceph_mount_info *cmount;
int i;
for (i = 0; i < niter; ++i) {
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
ceph_shutdown(cmount);
}
}
// See tracker #20988
// Hammer mount/shutdown from many threads at once to catch races in client
// setup and teardown.
TEST(LibCephFS, ShutdownRace)
{
const int nthreads = 32;
std::thread threads[nthreads];
// Need a bunch of fd's for this test
struct rlimit rold, rnew;
ASSERT_EQ(getrlimit(RLIMIT_NOFILE, &rold), 0);
rnew = rold;
// Raise the soft limit to the hard limit so 32 concurrent clients can each
// open their sockets and admin fds.
rnew.rlim_cur = rnew.rlim_max;
cout << "Setting RLIMIT_NOFILE from " << rold.rlim_cur <<
" to " << rnew.rlim_cur << std::endl;
ASSERT_EQ(setrlimit(RLIMIT_NOFILE, &rnew), 0);
for (int i = 0; i < nthreads; ++i)
threads[i] = std::thread(shutdown_racer_func);
for (int i = 0; i < nthreads; ++i)
threads[i].join();
/*
* Deliberately do not restore the old open-files limit: the kernel defers
* releasing file descriptors, so lowering the limit again could make the
* process hit it. For details see tracker #43039.
*/
// ASSERT_EQ(setrlimit(RLIMIT_NOFILE, &rold), 0);
}
#endif
// Fill *utb with the current wall-clock time (second resolution) for both
// the access and modification timestamps.
static void get_current_time_utimbuf(struct utimbuf *utb)
{
utime_t t = ceph_clock_now();
utb->actime = t.sec();
utb->modtime = t.sec();
}
// Fill tv[0] (atime) and tv[1] (mtime) with the same current wall-clock
// time, at microsecond resolution.
static void get_current_time_timeval(struct timeval tv[2])
{
utime_t t = ceph_clock_now();
t.copy_to_timeval(&tv[0]);
t.copy_to_timeval(&tv[1]);
}
// Fill ts[0] (atime) and ts[1] (mtime) with the same current wall-clock
// time, at nanosecond resolution.
static void get_current_time_timespec(struct timespec ts[2])
{
utime_t t = ceph_clock_now();
t.to_timespec(&ts[0]);
t.to_timespec(&ts[1]);
}
// ceph_utime() and ceph_futime() must set atime/mtime from a utimbuf, and
// statx must read the same (second-resolution) values back.
TEST(LibCephFS, TestUtime) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
char test_file[256];
sprintf(test_file, "test_utime_file_%d", getpid());
int fd = ceph_open(cmount, test_file, O_CREAT, 0666);
ASSERT_GT(fd, 0);
struct utimbuf utb;
struct ceph_statx stx;
get_current_time_utimbuf(&utb);
// ceph_utime()
EXPECT_EQ(0, ceph_utime(cmount, test_file, &utb));
ASSERT_EQ(ceph_statx(cmount, test_file, &stx,
CEPH_STATX_MTIME|CEPH_STATX_ATIME, 0), 0);
ASSERT_EQ(utime_t(stx.stx_atime), utime_t(utb.actime, 0));
ASSERT_EQ(utime_t(stx.stx_mtime), utime_t(utb.modtime, 0));
get_current_time_utimbuf(&utb);
// ceph_futime()
EXPECT_EQ(0, ceph_futime(cmount, fd, &utb));
ASSERT_EQ(ceph_statx(cmount, test_file, &stx,
CEPH_STATX_MTIME|CEPH_STATX_ATIME, 0), 0);
ASSERT_EQ(utime_t(stx.stx_atime), utime_t(utb.actime, 0));
ASSERT_EQ(utime_t(stx.stx_mtime), utime_t(utb.modtime, 0));
ceph_close(cmount, fd);
ceph_shutdown(cmount);
}
// ceph_utimes() must follow symlinks (updating the target), ceph_lutimes()
// must update the symlink itself, and ceph_futimes() works on an open fd.
TEST(LibCephFS, TestUtimes) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
char test_file[256];
char test_symlink[256];
sprintf(test_file, "test_utimes_file_%d", getpid());
sprintf(test_symlink, "test_utimes_symlink_%d", getpid());
int fd = ceph_open(cmount, test_file, O_CREAT, 0666);
ASSERT_GT(fd, 0);
ASSERT_EQ(ceph_symlink(cmount, test_file, test_symlink), 0);
struct timeval times[2];
struct ceph_statx stx;
get_current_time_timeval(times);
// ceph_utimes() on symlink, validate target file time
EXPECT_EQ(0, ceph_utimes(cmount, test_symlink, times));
ASSERT_EQ(ceph_statx(cmount, test_symlink, &stx,
CEPH_STATX_MTIME|CEPH_STATX_ATIME, 0), 0);
ASSERT_EQ(utime_t(stx.stx_atime), utime_t(times[0]));
ASSERT_EQ(utime_t(stx.stx_mtime), utime_t(times[1]));
get_current_time_timeval(times);
// ceph_lutimes() on symlink, validate symlink time
// AT_SYMLINK_NOFOLLOW makes statx report the link's own timestamps.
EXPECT_EQ(0, ceph_lutimes(cmount, test_symlink, times));
ASSERT_EQ(ceph_statx(cmount, test_symlink, &stx,
CEPH_STATX_MTIME|CEPH_STATX_ATIME, AT_SYMLINK_NOFOLLOW), 0);
ASSERT_EQ(utime_t(stx.stx_atime), utime_t(times[0]));
ASSERT_EQ(utime_t(stx.stx_mtime), utime_t(times[1]));
get_current_time_timeval(times);
// ceph_futimes()
EXPECT_EQ(0, ceph_futimes(cmount, fd, times));
ASSERT_EQ(ceph_statx(cmount, test_file, &stx,
CEPH_STATX_MTIME|CEPH_STATX_ATIME, 0), 0);
ASSERT_EQ(utime_t(stx.stx_atime), utime_t(times[0]));
ASSERT_EQ(utime_t(stx.stx_mtime), utime_t(times[1]));
ceph_close(cmount, fd);
ceph_shutdown(cmount);
}
// ceph_futimens() must set atime/mtime from a timespec pair on an open fd;
// statx then reads the same values back.
TEST(LibCephFS, TestFutimens) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
char test_file[256];
sprintf(test_file, "test_futimens_file_%d", getpid());
int fd = ceph_open(cmount, test_file, O_CREAT, 0666);
ASSERT_GT(fd, 0);
struct timespec times[2];
struct ceph_statx stx;
get_current_time_timespec(times);
// ceph_futimens()
EXPECT_EQ(0, ceph_futimens(cmount, fd, times));
ASSERT_EQ(ceph_statx(cmount, test_file, &stx,
CEPH_STATX_MTIME|CEPH_STATX_ATIME, 0), 0);
ASSERT_EQ(utime_t(stx.stx_atime), utime_t(times[0]));
ASSERT_EQ(utime_t(stx.stx_mtime), utime_t(times[1]));
ceph_close(cmount, fd);
ceph_shutdown(cmount);
}
TEST(LibCephFS, OperationsOnDotDot) {
  // Verify mkdir/rmdir/rename behavior on "." and ".." path components.
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char c_dir[512], c_dir_dot[1024], c_dir_dotdot[1024];
  char c_non_existent_dir[1024], c_non_existent_dirs[1024];
  char c_temp[1024];
  pid_t mypid = getpid();
  sprintf(c_dir, "/oodd_dir_%d", mypid);
  // c_dir already carries the leading '/'; the original code prefixed
  // another one, yielding "//oodd_dir_N/..." paths.
  sprintf(c_dir_dot, "%s/.", c_dir);
  sprintf(c_dir_dotdot, "%s/..", c_dir);
  sprintf(c_non_existent_dir, "%s/../oodd_nonexistent/..", c_dir);
  sprintf(c_non_existent_dirs,
          "%s/../ood_nonexistent1_%d/oodd_nonexistent2_%d", c_dir, mypid, mypid);
  sprintf(c_temp, "/oodd_temp_%d", mypid);
  ASSERT_EQ(0, ceph_mkdir(cmount, c_dir, 0777));
  // "." and ".." always exist, so creating them must fail.
  ASSERT_EQ(-CEPHFS_EEXIST, ceph_mkdir(cmount, c_dir_dot, 0777));
  ASSERT_EQ(-CEPHFS_EEXIST, ceph_mkdir(cmount, c_dir_dotdot, 0777));
  // mkdirs() may traverse ".." components that lead back to real dirs.
  ASSERT_EQ(0, ceph_mkdirs(cmount, c_non_existent_dirs, 0777));
  // removing a directory through "."/".." is refused.
  ASSERT_EQ(-CEPHFS_ENOTEMPTY, ceph_rmdir(cmount, c_dir_dot));
  ASSERT_EQ(-CEPHFS_ENOTEMPTY, ceph_rmdir(cmount, c_dir_dotdot));
  // non existent directory should return -CEPHFS_ENOENT
  ASSERT_EQ(-CEPHFS_ENOENT, ceph_rmdir(cmount, c_non_existent_dir));
  // renaming "." or ".." (source or destination) is refused.
  ASSERT_EQ(-CEPHFS_EBUSY, ceph_rename(cmount, c_dir_dot, c_temp));
  ASSERT_EQ(0, ceph_chdir(cmount, c_dir));
  ASSERT_EQ(0, ceph_mkdir(cmount, c_temp, 0777));
  ASSERT_EQ(-CEPHFS_EBUSY, ceph_rename(cmount, c_temp, ".."));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, Caps_vxattr) {
  // The "ceph.caps" virtual xattr reports the caps held on an inode,
  // formatted like "pAsLsXsxFsxcrwb/0x1fff".
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char test_caps_vxattr_file[256];
  char gxattrv[128];
  int xbuflen = sizeof(gxattrv);
  pid_t mypid = getpid();
  sprintf(test_caps_vxattr_file, "test_caps_vxattr_%d", mypid);
  int fd = ceph_open(cmount, test_caps_vxattr_file, O_CREAT, 0666);
  ASSERT_GT(fd, 0);
  ceph_close(cmount, fd);
  int alen = ceph_getxattr(cmount, test_caps_vxattr_file, "ceph.caps", (void *)gxattrv, xbuflen);
  ASSERT_GT(alen, 0);
  // guard the manual termination below: alen == xbuflen would write
  // one byte past the end of gxattrv
  ASSERT_LT(alen, xbuflen);
  gxattrv[alen] = '\0';
  // fixed char class: the original "[0-9a-fA-f]" range A-f also matched
  // every uppercase letter and several punctuation characters
  char caps_regex[] = "pA[sx]*L[sx]*X[sx]*F[sxcrwbal]*/0x[0-9a-fA-F]+";
  ASSERT_TRUE(regex_match(gxattrv, regex(caps_regex)) == 1);
  ceph_shutdown(cmount);
}
// Verify the "ceph.snap.btime" virtual xattr: present on snapshots (and on
// files inside them, with the snapshot's value), absent on live files,
// monotonically increasing across snapshots, and hidden from listxattr().
TEST(LibCephFS, SnapXattrs) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char test_snap_xattr_file[256];
  char c_temp[PATH_MAX];
  char gxattrv[128];
  char gxattrv2[128];
  int xbuflen = sizeof(gxattrv);
  pid_t mypid = getpid();
  sprintf(test_snap_xattr_file, "test_snap_xattr_%d", mypid);
  int fd = ceph_open(cmount, test_snap_xattr_file, O_CREAT, 0666);
  ASSERT_GT(fd, 0);
  ceph_close(cmount, fd);
  // first snapshot of the fs root
  sprintf(c_temp, "/.snap/test_snap_xattr_snap_%d", mypid);
  ASSERT_EQ(0, ceph_mkdir(cmount, c_temp, 0777));
  int alen = ceph_getxattr(cmount, c_temp, "ceph.snap.btime", (void *)gxattrv, xbuflen);
  // xattr value is secs.nsecs (don't assume zero-term)
  ASSERT_LT(0, alen);
  ASSERT_LT(alen, xbuflen);
  gxattrv[alen] = '\0';
  // locate the '.' separating seconds from nanoseconds;
  // q is a NULL sentinel, so ASSERT_NE(q, s) means strchr succeeded
  char *s = strchr(gxattrv, '.');
  char *q = NULL;
  ASSERT_NE(q, s);
  ASSERT_LT(s, gxattrv + alen);
  ASSERT_EQ('.', *s);
  // temporarily split the buffer at the '.' so the two halves can be
  // parsed independently with strtoull
  *s = '\0';
  utime_t btime = utime_t(strtoull(gxattrv, NULL, 10), strtoull(s + 1, NULL, 10));
  *s = '.'; // restore for later strcmp
  // file within the snapshot should carry the same btime
  sprintf(c_temp, "/.snap/test_snap_xattr_snap_%d/%s", mypid, test_snap_xattr_file);
  int alen2 = ceph_getxattr(cmount, c_temp, "ceph.snap.btime", (void *)gxattrv2, xbuflen);
  ASSERT_EQ(alen, alen2);
  ASSERT_EQ(0, strncmp(gxattrv, gxattrv2, alen));
  // non-snap file shouldn't carry the xattr
  alen = ceph_getxattr(cmount, test_snap_xattr_file, "ceph.snap.btime", (void *)gxattrv2, xbuflen);
  ASSERT_EQ(-CEPHFS_ENODATA, alen);
  // create a second snapshot
  sprintf(c_temp, "/.snap/test_snap_xattr_snap2_%d", mypid);
  ASSERT_EQ(0, ceph_mkdir(cmount, c_temp, 0777));
  // check that the btime for the newer snapshot is > older
  alen = ceph_getxattr(cmount, c_temp, "ceph.snap.btime", (void *)gxattrv2, xbuflen);
  ASSERT_LT(0, alen);
  ASSERT_LT(alen, xbuflen);
  gxattrv2[alen] = '\0';
  // same secs.nsecs parse as above, on the second snapshot's value
  s = strchr(gxattrv2, '.');
  ASSERT_NE(q, s);
  ASSERT_LT(s, gxattrv2 + alen);
  ASSERT_EQ('.', *s);
  *s = '\0';
  utime_t new_btime = utime_t(strtoull(gxattrv2, NULL, 10), strtoull(s + 1, NULL, 10));
#ifndef _WIN32
  // This assertion sometimes fails on Windows, possibly due to the clock precision.
  ASSERT_LT(btime, new_btime);
#endif
  // listxattr() shouldn't return snap.btime vxattr
  char xattrlist[512];
  int len = ceph_listxattr(cmount, test_snap_xattr_file, xattrlist, sizeof(xattrlist));
  ASSERT_GE(sizeof(xattrlist), (size_t)len);
  // walk the NUL-separated name list looking for the vxattr
  char *p = xattrlist;
  int found = 0;
  while (len > 0) {
    if (strcmp(p, "ceph.snap.btime") == 0)
      found++;
    len -= strlen(p) + 1;
    p += strlen(p) + 1;
  }
  ASSERT_EQ(found, 0);
  ceph_shutdown(cmount);
}
TEST(LibCephFS, Lseek) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_parse_env(cmount, NULL), 0);
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);

  // Write a short known payload into a fresh file.
  char c_path[1024];
  sprintf(c_path, "test_lseek_%d", getpid());
  int fd = ceph_open(cmount, c_path, O_RDWR|O_CREAT|O_TRUNC, 0666);
  ASSERT_LT(0, fd);
  const char *out_buf = "hello world";
  size_t size = strlen(out_buf);
  ASSERT_EQ((int)size, ceph_write(cmount, fd, out_buf, size, 0));

  /* basic SEEK_SET/END/CUR tests */
  ASSERT_EQ(0, ceph_lseek(cmount, fd, 0, SEEK_SET));
  ASSERT_EQ(size, ceph_lseek(cmount, fd, 0, SEEK_END));
  ASSERT_EQ(0, ceph_lseek(cmount, fd, -size, SEEK_CUR));

  /* Test basic functionality and out of bounds conditions for SEEK_HOLE/DATA */
#ifdef SEEK_HOLE
  // the file is fully allocated: the only hole is at EOF
  ASSERT_EQ(size, ceph_lseek(cmount, fd, 0, SEEK_HOLE));
  ASSERT_EQ(-CEPHFS_ENXIO, ceph_lseek(cmount, fd, -1, SEEK_HOLE));
  ASSERT_EQ(-CEPHFS_ENXIO, ceph_lseek(cmount, fd, size + 1, SEEK_HOLE));
#endif
#ifdef SEEK_DATA
  // data starts right at offset 0
  ASSERT_EQ(0, ceph_lseek(cmount, fd, 0, SEEK_DATA));
  ASSERT_EQ(-CEPHFS_ENXIO, ceph_lseek(cmount, fd, -1, SEEK_DATA));
  ASSERT_EQ(-CEPHFS_ENXIO, ceph_lseek(cmount, fd, size + 1, SEEK_DATA));
#endif

  ASSERT_EQ(0, ceph_close(cmount, fd));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, SnapInfoOnNonSnapshot) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, NULL));

  // Requesting snapshot info on a live (non-snap) path must be rejected.
  struct snap_info info;
  ASSERT_EQ(-CEPHFS_EINVAL, ceph_get_snap_info(cmount, "/", &info));

  ceph_shutdown(cmount);
}
TEST(LibCephFS, EmptySnapInfo) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, NULL));

  char dir_path[64];
  char snap_path[PATH_MAX];
  sprintf(dir_path, "/dir0_%d", getpid());
  sprintf(snap_path, "%s/.snap/snap0_%d", dir_path, getpid());
  ASSERT_EQ(0, ceph_mkdir(cmount, dir_path, 0755));

  // A snapshot taken without metadata must report a valid id and an
  // empty metadata list.
  ASSERT_EQ(0, ceph_mkdir(cmount, snap_path, 0755));
  struct snap_info info;
  ASSERT_EQ(0, ceph_get_snap_info(cmount, snap_path, &info));
  ASSERT_LT(0, info.id);
  ASSERT_EQ(0, info.nr_snap_metadata);

  // tear down: remove the snapshot first, then its parent
  ASSERT_EQ(0, ceph_rmdir(cmount, snap_path));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, SnapInfo) {
  // Create a snapshot carrying custom key/value metadata and verify that
  // ceph_get_snap_info() returns every pair back.
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char dir_path[64];
  char snap_name[64];
  char snap_path[PATH_MAX];
  sprintf(dir_path, "/dir0_%d", getpid());
  sprintf(snap_name, "snap0_%d", getpid());
  sprintf(snap_path, "%s/.snap/%s", dir_path, snap_name);
  ASSERT_EQ(0, ceph_mkdir(cmount, dir_path, 0755));
  // snapshot with custom metadata
  struct snap_metadata snap_meta[] = {{"foo", "bar"},{"this", "that"},{"abcdefg", "12345"}};
  ASSERT_EQ(0, ceph_mksnap(cmount, dir_path, snap_name, 0755, snap_meta, std::size(snap_meta)));
  struct snap_info info;
  ASSERT_EQ(0, ceph_get_snap_info(cmount, snap_path, &info));
  ASSERT_GT(info.id, 0);
  ASSERT_EQ(info.nr_snap_metadata, std::size(snap_meta));
  // every returned pair must match one of the pairs we supplied
  for (size_t i = 0; i < info.nr_snap_metadata; ++i) {
    auto &k1 = info.snap_metadata[i].key;
    auto &v1 = info.snap_metadata[i].value;
    bool found = false;
    for (size_t j = 0; j < info.nr_snap_metadata; ++j) {
      auto &k2 = snap_meta[j].key;
      auto &v2 = snap_meta[j].value;
      // full strcmp: the original strncmp(k1, k2, strlen(k1)) was a
      // prefix match, which would treat e.g. "foo" and "foobar" as equal
      if (strcmp(k1, k2) == 0 && strcmp(v1, v2) == 0) {
        found = true;
        break;
      }
    }
    ASSERT_TRUE(found);
  }
  ceph_free_snap_info_buffer(&info);
  ASSERT_EQ(0, ceph_rmsnap(cmount, dir_path, snap_name));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, LookupInoMDSDir) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, NULL));

  // MDS-internal inodes must not be resolvable by clients.
  Inode *inode;
  auto ino = inodeno_t(0x100); /* rank 0 ~mdsdir */
  ASSERT_EQ(-CEPHFS_ESTALE, ceph_ll_lookup_inode(cmount, ino, &inode));
  ino = inodeno_t(0x600); /* rank 0 first stray dir */
  ASSERT_EQ(-CEPHFS_ESTALE, ceph_ll_lookup_inode(cmount, ino, &inode));

  ceph_shutdown(cmount);
}
// Record the vinodeno (ino + snapid) of a directory, a file, a snapdir, a
// snapshot and a file inside the snapshot; then remount with a cold cache
// and verify ceph_ll_lookup_vino() can resolve all five.
TEST(LibCephFS, LookupVino) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char dir_path[64];
  char snap_name[64];
  char snapdir_path[128];
  char snap_path[256];
  char file_path[PATH_MAX];
  char snap_file[PATH_MAX];
  sprintf(dir_path, "/dir0_%d", getpid());
  sprintf(snap_name, "snap0_%d", getpid());
  sprintf(file_path, "%s/file_%d", dir_path, getpid());
  sprintf(snapdir_path, "%s/.snap", dir_path);
  sprintf(snap_path, "%s/%s", snapdir_path, snap_name);
  sprintf(snap_file, "%s/file_%d", snap_path, getpid());
  // dir with one file, then snapshot it
  ASSERT_EQ(0, ceph_mkdir(cmount, dir_path, 0755));
  int fd = ceph_open(cmount, file_path, O_WRONLY|O_CREAT, 0666);
  ASSERT_LE(0, fd);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ASSERT_EQ(0, ceph_mksnap(cmount, dir_path, snap_name, 0755, nullptr, 0));
  // record vinos for all of them
  struct ceph_statx stx;
  ASSERT_EQ(0, ceph_statx(cmount, dir_path, &stx, CEPH_STATX_INO, 0));
  vinodeno_t dir_vino(stx.stx_ino, stx.stx_dev);
  ASSERT_EQ(0, ceph_statx(cmount, file_path, &stx, CEPH_STATX_INO, 0));
  vinodeno_t file_vino(stx.stx_ino, stx.stx_dev);
  ASSERT_EQ(0, ceph_statx(cmount, snapdir_path, &stx, CEPH_STATX_INO, 0));
  vinodeno_t snapdir_vino(stx.stx_ino, stx.stx_dev);
  ASSERT_EQ(0, ceph_statx(cmount, snap_path, &stx, CEPH_STATX_INO, 0));
  vinodeno_t snap_vino(stx.stx_ino, stx.stx_dev);
  ASSERT_EQ(0, ceph_statx(cmount, snap_file, &stx, CEPH_STATX_INO, 0));
  vinodeno_t snap_file_vino(stx.stx_ino, stx.stx_dev);
  // Remount so lookups below can't be satisfied from cached dentries
  ASSERT_EQ(0, ceph_unmount(cmount));
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, NULL));
  // Find them all; each successful lookup takes a ref we must drop
  Inode *inode;
  ASSERT_EQ(0, ceph_ll_lookup_vino(cmount, dir_vino, &inode));
  ceph_ll_put(cmount, inode);
  ASSERT_EQ(0, ceph_ll_lookup_vino(cmount, file_vino, &inode));
  ceph_ll_put(cmount, inode);
  ASSERT_EQ(0, ceph_ll_lookup_vino(cmount, snapdir_vino, &inode));
  ceph_ll_put(cmount, inode);
  ASSERT_EQ(0, ceph_ll_lookup_vino(cmount, snap_vino, &inode));
  ceph_ll_put(cmount, inode);
  ASSERT_EQ(0, ceph_ll_lookup_vino(cmount, snap_file_vino, &inode));
  ceph_ll_put(cmount, inode);
  // cleanup
  ASSERT_EQ(0, ceph_rmsnap(cmount, dir_path, snap_name));
  ASSERT_EQ(0, ceph_unlink(cmount, file_path));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, Openat) {
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_parse_env(cmount, NULL), 0);
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);

  // One name relative to "/", one absolute.
  char c_rel_dir[64];
  char c_dir[128];
  sprintf(c_rel_dir, "open_test_%d", mypid);
  sprintf(c_dir, "/%s", c_rel_dir);
  ASSERT_EQ(ceph_mkdir(cmount, c_dir, 0777), 0);

  // Open the directory via an fd anchored at the root.
  int root_fd = ceph_open(cmount, "/", O_DIRECTORY | O_RDONLY, 0);
  ASSERT_LE(0, root_fd);
  int dir_fd = ceph_openat(cmount, root_fd, c_rel_dir, O_DIRECTORY | O_RDONLY, 0);
  ASSERT_LE(0, dir_fd);
  struct ceph_statx stx;
  ASSERT_EQ(0, ceph_statxat(cmount, root_fd, c_rel_dir, &stx, 0, 0));
  ASSERT_EQ(S_IFDIR, stx.stx_mode & S_IFMT);

  // Create a file through the directory fd and confirm it is regular.
  char c_rel_path[64];
  char c_path[256];
  sprintf(c_rel_path, "created_file_%d", mypid);
  sprintf(c_path, "%s/created_file_%d", c_dir, mypid);
  int file_fd = ceph_openat(cmount, dir_fd, c_rel_path, O_RDONLY | O_CREAT, 0666);
  ASSERT_LE(0, file_fd);
  ASSERT_EQ(0, ceph_statxat(cmount, dir_fd, c_rel_path, &stx, 0, 0));
  ASSERT_EQ(S_IFREG, stx.stx_mode & S_IFMT);

  // tear down in reverse order of creation
  ASSERT_EQ(0, ceph_close(cmount, file_fd));
  ASSERT_EQ(0, ceph_close(cmount, dir_fd));
  ASSERT_EQ(0, ceph_close(cmount, root_fd));
  ASSERT_EQ(0, ceph_unlink(cmount, c_path));
  ASSERT_EQ(0, ceph_rmdir(cmount, c_dir));
  ceph_shutdown(cmount);
}
// Exercise ceph_statxat() relative to a root fd and to a directory fd, and
// verify that a held directory fd does NOT resolve names in a directory
// that was deleted and recreated at the same path.
TEST(LibCephFS, Statxat) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char dir_name[64];
  char rel_file_name_1[128];
  char rel_file_name_2[256];
  char dir_path[512];
  char file_path[1024];
  // relative paths for *at() calls
  sprintf(dir_name, "dir0_%d", getpid());
  sprintf(rel_file_name_1, "file_%d", getpid());
  sprintf(rel_file_name_2, "%s/%s", dir_name, rel_file_name_1);
  sprintf(dir_path, "/%s", dir_name);
  sprintf(file_path, "%s/%s", dir_path, rel_file_name_1);
  ASSERT_EQ(0, ceph_mkdir(cmount, dir_path, 0755));
  int fd = ceph_open(cmount, file_path, O_WRONLY|O_CREAT, 0666);
  ASSERT_LE(0, fd);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  struct ceph_statx stx;
  // test relative to root
  fd = ceph_open(cmount, "/", O_DIRECTORY | O_RDONLY, 0);
  ASSERT_LE(0, fd);
  ASSERT_EQ(ceph_statxat(cmount, fd, dir_name, &stx, 0, 0), 0);
  ASSERT_EQ(stx.stx_mode & S_IFMT, S_IFDIR);
  ASSERT_EQ(ceph_statxat(cmount, fd, rel_file_name_2, &stx, 0, 0), 0);
  ASSERT_EQ(stx.stx_mode & S_IFMT, S_IFREG);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  // test relative to dir; this fd stays open across the delete below
  fd = ceph_open(cmount, dir_path, O_DIRECTORY | O_RDONLY, 0);
  ASSERT_LE(0, fd);
  ASSERT_EQ(ceph_statxat(cmount, fd, rel_file_name_1, &stx, 0, 0), 0);
  ASSERT_EQ(stx.stx_mode & S_IFMT, S_IFREG);
  // delete the dirtree, recreate and verify: the recreated dir has a new
  // inode, so statxat via the stale fd must fail with ENOENT
  ASSERT_EQ(0, ceph_unlink(cmount, file_path));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
  ASSERT_EQ(0, ceph_mkdir(cmount, dir_path, 0755));
  int fd1 = ceph_open(cmount, file_path, O_WRONLY|O_CREAT, 0666);
  ASSERT_LE(0, fd1);
  ASSERT_EQ(0, ceph_close(cmount, fd1));
  ASSERT_EQ(ceph_statxat(cmount, fd, rel_file_name_1, &stx, 0, 0), -CEPHFS_ENOENT);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ASSERT_EQ(0, ceph_unlink(cmount, file_path));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, StatxatATFDCWD) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, NULL));

  // relative paths for *at() calls
  char dir_name[64];
  char rel_file_name_1[128];
  char dir_path[512];
  char file_path[1024];
  sprintf(dir_name, "dir0_%d", getpid());
  sprintf(rel_file_name_1, "file_%d", getpid());
  sprintf(dir_path, "/%s", dir_name);
  sprintf(file_path, "%s/%s", dir_path, rel_file_name_1);
  ASSERT_EQ(0, ceph_mkdir(cmount, dir_path, 0755));
  int fd = ceph_open(cmount, file_path, O_WRONLY|O_CREAT, 0666);
  ASSERT_LE(0, fd);
  ASSERT_EQ(0, ceph_close(cmount, fd));

  // With the cwd moved into the dir, CEPHFS_AT_FDCWD resolves the
  // relative file name.
  struct ceph_statx stx;
  ASSERT_EQ(0, ceph_chdir(cmount, dir_path));
  ASSERT_EQ(0, ceph_statxat(cmount, CEPHFS_AT_FDCWD, rel_file_name_1, &stx, 0, 0));
  ASSERT_EQ(S_IFREG, stx.stx_mode & S_IFMT);

  ASSERT_EQ(0, ceph_unlink(cmount, file_path));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, Fdopendir) {
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));

  // Directory containing a single entry named "elif".
  char foostr[256];
  sprintf(foostr, "/dir_ls%d", mypid);
  ASSERT_EQ(0, ceph_mkdir(cmount, foostr, 0777));
  char bazstr[512];
  sprintf(bazstr, "%s/elif", foostr);
  int fd = ceph_open(cmount, bazstr, O_CREAT|O_RDONLY, 0666);
  ASSERT_LE(0, fd);
  ASSERT_EQ(0, ceph_close(cmount, fd));

  // Turn an open directory fd into a dir stream and walk it.
  fd = ceph_open(cmount, foostr, O_DIRECTORY | O_RDONLY, 0);
  ASSERT_LE(0, fd);
  struct ceph_dir_result *ls_dir = NULL;
  ASSERT_EQ(0, ceph_fdopendir(cmount, fd, &ls_dir));
  // not guaranteed to get . and .. first, but its a safe assumption in this case
  struct dirent *result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(".", result->d_name);
  result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ("..", result->d_name);
  result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ("elif", result->d_name);
  // the stream is now exhausted
  ASSERT_TRUE(ceph_readdir(cmount, ls_dir) == NULL);

  ASSERT_EQ(0, ceph_close(cmount, fd));
  ASSERT_EQ(0, ceph_closedir(cmount, ls_dir));
  ASSERT_EQ(0, ceph_unlink(cmount, bazstr));
  ASSERT_EQ(0, ceph_rmdir(cmount, foostr));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, FdopendirATFDCWD) {
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));

  // Directory containing a single entry named "elif".
  char foostr[256];
  sprintf(foostr, "/dir_ls%d", mypid);
  ASSERT_EQ(0, ceph_mkdir(cmount, foostr, 0777));
  char bazstr[512];
  sprintf(bazstr, "%s/elif", foostr);
  int fd = ceph_open(cmount, bazstr, O_CREAT|O_RDONLY, 0666);
  ASSERT_LE(0, fd);
  ASSERT_EQ(0, ceph_close(cmount, fd));

  // With the cwd set, CEPHFS_AT_FDCWD opens the current directory.
  ASSERT_EQ(0, ceph_chdir(cmount, foostr));
  struct ceph_dir_result *ls_dir = NULL;
  ASSERT_EQ(0, ceph_fdopendir(cmount, CEPHFS_AT_FDCWD, &ls_dir));
  // not guaranteed to get . and .. first, but its a safe assumption in this case
  struct dirent *result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(".", result->d_name);
  result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ("..", result->d_name);
  result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ("elif", result->d_name);
  // the stream is now exhausted
  ASSERT_TRUE(ceph_readdir(cmount, ls_dir) == NULL);

  ASSERT_EQ(0, ceph_closedir(cmount, ls_dir));
  ASSERT_EQ(0, ceph_unlink(cmount, bazstr));
  ASSERT_EQ(0, ceph_rmdir(cmount, foostr));
  ceph_shutdown(cmount);
}
// Open a dir stream from an fd, then unlink the file and rmdir the directory
// BEFORE reading: the held fd keeps the (now unlinked) directory readable,
// and "." / ".." must still come back from readdir.
TEST(LibCephFS, FdopendirReaddirTestWithDelete) {
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  char foostr[256];
  sprintf(foostr, "/dir_ls%d", mypid);
  ASSERT_EQ(ceph_mkdir(cmount, foostr, 0777), 0);
  char bazstr[512];
  sprintf(bazstr, "%s/elif", foostr);
  int fd = ceph_open(cmount, bazstr, O_CREAT|O_RDONLY, 0666);
  ASSERT_LE(0, fd);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  fd = ceph_open(cmount, foostr, O_DIRECTORY | O_RDONLY, 0);
  ASSERT_LE(0, fd);
  struct ceph_dir_result *ls_dir = NULL;
  ASSERT_EQ(ceph_fdopendir(cmount, fd, &ls_dir), 0);
  // remove everything while both the fd and the dir stream are held open
  ASSERT_EQ(0, ceph_unlink(cmount, bazstr));
  ASSERT_EQ(0, ceph_rmdir(cmount, foostr));
  // not guaranteed to get . and .. first, but its a safe assumption
  // in this case. also, note that we may or may not get other
  // entries.
  struct dirent *result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, ".");
  result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, "..");
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ASSERT_EQ(0, ceph_closedir(cmount, ls_dir));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, FdopendirOnNonDir) {
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));

  char foostr[256];
  sprintf(foostr, "/dir_ls%d", mypid);
  ASSERT_EQ(0, ceph_mkdir(cmount, foostr, 0777));
  char bazstr[512];
  sprintf(bazstr, "%s/file", foostr);

  // fdopendir() on a regular-file fd must be rejected with ENOTDIR.
  int fd = ceph_open(cmount, bazstr, O_CREAT|O_RDONLY, 0666);
  ASSERT_LE(0, fd);
  struct ceph_dir_result *ls_dir = NULL;
  ASSERT_EQ(-CEPHFS_ENOTDIR, ceph_fdopendir(cmount, fd, &ls_dir));
  ASSERT_EQ(0, ceph_close(cmount, fd));

  ASSERT_EQ(0, ceph_unlink(cmount, bazstr));
  ASSERT_EQ(0, ceph_rmdir(cmount, foostr));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, Mkdirat) {
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));

  char dir_name[128];
  char dir_path1[256];
  sprintf(dir_name, "dir_%d", mypid);
  sprintf(dir_path1, "/%s", dir_name);
  char dir_path2[512];
  char rel_dir_path2[512];
  sprintf(dir_path2, "%s/dir_%d", dir_path1, mypid);
  sprintf(rel_dir_path2, "%s/dir_%d", dir_name, mypid);

  // Create a dir and a nested dir, both relative to a root fd.
  int fd = ceph_open(cmount, "/", O_DIRECTORY | O_RDONLY, 0);
  ASSERT_LE(0, fd);
  ASSERT_EQ(0, ceph_mkdirat(cmount, fd, dir_name, 0777));
  ASSERT_EQ(0, ceph_mkdirat(cmount, fd, rel_dir_path2, 0666));
  ASSERT_EQ(0, ceph_close(cmount, fd));

  // tear down: innermost first
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path2));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path1));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, MkdiratATFDCWD) {
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));

  char dir_name[128];
  char dir_path1[256];
  sprintf(dir_name, "dir_%d", mypid);
  sprintf(dir_path1, "/%s", dir_name);
  char dir_path2[512];
  sprintf(dir_path2, "%s/dir_%d", dir_path1, mypid);

  // First mkdirat is relative to the initial cwd ("/"); after chdir the
  // same relative name creates the nested directory.
  ASSERT_EQ(0, ceph_mkdirat(cmount, CEPHFS_AT_FDCWD, dir_name, 0777));
  ASSERT_EQ(0, ceph_chdir(cmount, dir_path1));
  ASSERT_EQ(0, ceph_mkdirat(cmount, CEPHFS_AT_FDCWD, dir_name, 0666));

  // tear down: innermost first
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path2));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path1));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, Readlinkat) {
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));

  // Layout: /dir_<pid>/elif and /linkfile_<pid> -> dir_<pid>/elif
  char dir_name[128];
  char dir_path[256];
  sprintf(dir_name, "dir_%d", mypid);
  sprintf(dir_path, "/%s", dir_name);
  ASSERT_EQ(0, ceph_mkdir(cmount, dir_path, 0777));
  char file_path[512];
  char rel_file_path[512];
  sprintf(rel_file_path, "%s/elif", dir_name);
  sprintf(file_path, "%s/elif", dir_path);
  int fd = ceph_open(cmount, file_path, O_CREAT|O_RDONLY, 0666);
  ASSERT_LE(0, fd);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  char link_path[128];
  char rel_link_path[64];
  sprintf(rel_link_path, "linkfile_%d", mypid);
  sprintf(link_path, "/%s", rel_link_path);
  ASSERT_EQ(0, ceph_symlink(cmount, rel_file_path, link_path));

  // Resolve the link name relative to a root fd and compare the target.
  fd = ceph_open(cmount, "/", O_DIRECTORY | O_RDONLY, 0);
  ASSERT_LE(0, fd);
  size_t target_len = strlen(rel_file_path);
  char target[target_len+1];
  ASSERT_EQ(target_len, ceph_readlinkat(cmount, fd, rel_link_path, target, target_len));
  target[target_len] = '\0';
  ASSERT_STREQ(rel_file_path, target);
  ASSERT_EQ(0, ceph_close(cmount, fd));

  ASSERT_EQ(0, ceph_unlink(cmount, link_path));
  ASSERT_EQ(0, ceph_unlink(cmount, file_path));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, ReadlinkatATFDCWD) {
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));

  // Layout: /dir_<pid>/elif and /dir_<pid>/./linkfile_<pid> -> ./elif
  char dir_name[128];
  char dir_path[256];
  sprintf(dir_name, "dir_%d", mypid);
  sprintf(dir_path, "/%s", dir_name);
  ASSERT_EQ(0, ceph_mkdir(cmount, dir_path, 0777));
  char file_path[512];
  char rel_file_path[512] = "./elif";
  sprintf(file_path, "%s/elif", dir_path);
  int fd = ceph_open(cmount, file_path, O_CREAT|O_RDONLY, 0666);
  ASSERT_LE(0, fd);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  char link_path[PATH_MAX];
  char rel_link_path[1024];
  sprintf(rel_link_path, "./linkfile_%d", mypid);
  sprintf(link_path, "%s/%s", dir_path, rel_link_path);
  ASSERT_EQ(0, ceph_symlink(cmount, rel_file_path, link_path));

  // With the cwd set, CEPHFS_AT_FDCWD resolves the relative link name.
  ASSERT_EQ(0, ceph_chdir(cmount, dir_path));
  size_t target_len = strlen(rel_file_path);
  char target[target_len+1];
  ASSERT_EQ(target_len, ceph_readlinkat(cmount, CEPHFS_AT_FDCWD, rel_link_path, target, target_len));
  target[target_len] = '\0';
  ASSERT_STREQ(rel_file_path, target);

  ASSERT_EQ(0, ceph_unlink(cmount, link_path));
  ASSERT_EQ(0, ceph_unlink(cmount, file_path));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, Symlinkat) {
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));

  // Target: dir_<pid>/elif (relative form used as the link contents).
  char dir_name[128];
  char dir_path[256];
  sprintf(dir_name, "dir_%d", mypid);
  sprintf(dir_path, "/%s", dir_name);
  ASSERT_EQ(0, ceph_mkdir(cmount, dir_path, 0777));
  char file_path[512];
  char rel_file_path[512];
  sprintf(rel_file_path, "%s/elif", dir_name);
  sprintf(file_path, "%s/elif", dir_path);
  int fd = ceph_open(cmount, file_path, O_CREAT|O_RDONLY, 0666);
  ASSERT_LE(0, fd);
  ASSERT_EQ(0, ceph_close(cmount, fd));

  // Create the symlink relative to a root fd, then read it back the
  // same way and compare the stored target.
  char link_path[128];
  char rel_link_path[64];
  sprintf(rel_link_path, "linkfile_%d", mypid);
  sprintf(link_path, "/%s", rel_link_path);
  fd = ceph_open(cmount, "/", O_DIRECTORY | O_RDONLY, 0);
  ASSERT_LE(0, fd);
  ASSERT_EQ(0, ceph_symlinkat(cmount, rel_file_path, fd, rel_link_path));
  size_t target_len = strlen(rel_file_path);
  char target[target_len+1];
  ASSERT_EQ(target_len, ceph_readlinkat(cmount, fd, rel_link_path, target, target_len));
  target[target_len] = '\0';
  ASSERT_STREQ(rel_file_path, target);
  ASSERT_EQ(0, ceph_close(cmount, fd));

  ASSERT_EQ(0, ceph_unlink(cmount, link_path));
  ASSERT_EQ(0, ceph_unlink(cmount, file_path));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, SymlinkatATFDCWD) {
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));

  // Target: /dir_<pid>/elif, referenced as "./elif" from inside the dir.
  char dir_name[128];
  char dir_path[256];
  sprintf(dir_name, "dir_%d", mypid);
  sprintf(dir_path, "/%s", dir_name);
  ASSERT_EQ(0, ceph_mkdir(cmount, dir_path, 0777));
  char file_path[512];
  char rel_file_path[512] = "./elif";
  sprintf(file_path, "%s/elif", dir_path);
  int fd = ceph_open(cmount, file_path, O_CREAT|O_RDONLY, 0666);
  ASSERT_LE(0, fd);
  ASSERT_EQ(0, ceph_close(cmount, fd));

  // Create the symlink relative to the cwd, read it back the same way.
  char link_path[PATH_MAX];
  char rel_link_path[1024];
  sprintf(rel_link_path, "./linkfile_%d", mypid);
  sprintf(link_path, "%s/%s", dir_path, rel_link_path);
  ASSERT_EQ(0, ceph_chdir(cmount, dir_path));
  ASSERT_EQ(0, ceph_symlinkat(cmount, rel_file_path, CEPHFS_AT_FDCWD, rel_link_path));
  size_t target_len = strlen(rel_file_path);
  char target[target_len+1];
  ASSERT_EQ(target_len, ceph_readlinkat(cmount, CEPHFS_AT_FDCWD, rel_link_path, target, target_len));
  target[target_len] = '\0';
  ASSERT_STREQ(rel_file_path, target);

  ASSERT_EQ(0, ceph_unlink(cmount, link_path));
  ASSERT_EQ(0, ceph_unlink(cmount, file_path));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, Unlinkat) {
  // ceph_unlinkat(): AT_REMOVEDIR is required for dirs and rejected for
  // regular files.
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  char dir_name[128];
  char dir_path[256];
  sprintf(dir_name, "dir_%d", mypid);
  sprintf(dir_path, "/%s", dir_name);
  ASSERT_EQ(ceph_mkdir(cmount, dir_path, 0777), 0);
  char file_path[512];
  char rel_file_path[512] = "elif";
  sprintf(file_path, "%s/elif", dir_path);
  int fd = ceph_open(cmount, file_path, O_CREAT|O_RDONLY, 0666);
  ASSERT_LE(0, fd);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  // remove the file via a dir fd: AT_REMOVEDIR on a file is ENOTDIR
  fd = ceph_open(cmount, dir_path, O_DIRECTORY | O_RDONLY, 0);
  ASSERT_LE(0, fd);
  ASSERT_EQ(-CEPHFS_ENOTDIR, ceph_unlinkat(cmount, fd, rel_file_path, AT_REMOVEDIR));
  ASSERT_EQ(0, ceph_unlinkat(cmount, fd, rel_file_path, 0));
  ASSERT_EQ(0, ceph_close(cmount, fd));
  // remove the dir via a root fd: without AT_REMOVEDIR it is EISDIR
  fd = ceph_open(cmount, "/", O_DIRECTORY | O_RDONLY, 0);
  // fix: validate the fd BEFORE using it (original asserted after the
  // unlinkat calls)
  ASSERT_LE(0, fd);
  ASSERT_EQ(-CEPHFS_EISDIR, ceph_unlinkat(cmount, fd, dir_name, 0));
  ASSERT_EQ(0, ceph_unlinkat(cmount, fd, dir_name, AT_REMOVEDIR));
  // fix: the root fd was previously leaked
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, UnlinkatATFDCWD) {
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));

  char dir_name[128];
  char dir_path[256];
  sprintf(dir_name, "dir_%d", mypid);
  sprintf(dir_path, "/%s", dir_name);
  ASSERT_EQ(0, ceph_mkdir(cmount, dir_path, 0777));
  char file_path[512];
  char rel_file_path[512] = "elif";
  sprintf(file_path, "%s/elif", dir_path);
  int fd = ceph_open(cmount, file_path, O_CREAT|O_RDONLY, 0666);
  ASSERT_LE(0, fd);
  ASSERT_EQ(0, ceph_close(cmount, fd));

  // From inside the dir: AT_REMOVEDIR on a file is ENOTDIR.
  ASSERT_EQ(0, ceph_chdir(cmount, dir_path));
  ASSERT_EQ(-CEPHFS_ENOTDIR, ceph_unlinkat(cmount, CEPHFS_AT_FDCWD, rel_file_path, AT_REMOVEDIR));
  ASSERT_EQ(0, ceph_unlinkat(cmount, CEPHFS_AT_FDCWD, rel_file_path, 0));

  // From the root: removing a dir without AT_REMOVEDIR is EISDIR.
  ASSERT_EQ(0, ceph_chdir(cmount, "/"));
  ASSERT_EQ(-CEPHFS_EISDIR, ceph_unlinkat(cmount, CEPHFS_AT_FDCWD, dir_name, 0));
  ASSERT_EQ(0, ceph_unlinkat(cmount, CEPHFS_AT_FDCWD, dir_name, AT_REMOVEDIR));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, Chownat) {
  // ceph_chownat() relative to a directory fd: after giving the file to
  // "nobody", opening it read-write as the original user must fail.
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  char dir_name[128];
  char dir_path[256];
  sprintf(dir_name, "dir_%d", mypid);
  sprintf(dir_path, "/%s", dir_name);
  ASSERT_EQ(ceph_mkdir(cmount, dir_path, 0777), 0);
  char file_path[512];
  char rel_file_path[512] = "elif";
  sprintf(file_path, "%s/elif", dir_path);
  int fd = ceph_open(cmount, file_path, O_CREAT|O_RDWR, 0666);
  ASSERT_LE(0, fd);
  // set perms to readable and writeable only by owner
  ASSERT_EQ(ceph_fchmod(cmount, fd, 0600), 0);
  ceph_close(cmount, fd);
  fd = ceph_open(cmount, dir_path, O_DIRECTORY | O_RDONLY, 0);
  // fix: the directory fd was used below without ever being validated
  ASSERT_LE(0, fd);
  // change ownership to nobody -- we assume nobody exists and id is always 65534
  ASSERT_EQ(ceph_conf_set(cmount, "client_permissions", "0"), 0);
  ASSERT_EQ(ceph_chownat(cmount, fd, rel_file_path, 65534, 65534, 0), 0);
  ASSERT_EQ(ceph_conf_set(cmount, "client_permissions", "1"), 0);
  ceph_close(cmount, fd);
  // "nobody" will be ignored on Windows
#ifndef _WIN32
  fd = ceph_open(cmount, file_path, O_RDWR, 0);
  ASSERT_EQ(fd, -CEPHFS_EACCES);
#endif
  // permissions must be bypassed again to unlink a file we no longer own
  ASSERT_EQ(ceph_conf_set(cmount, "client_permissions", "0"), 0);
  ASSERT_EQ(0, ceph_unlink(cmount, file_path));
  ASSERT_EQ(ceph_conf_set(cmount, "client_permissions", "1"), 0);
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
  ceph_shutdown(cmount);
}
// Same as Chownat, but the relative path is resolved against the client's
// current working directory (CEPHFS_AT_FDCWD) rather than an explicit
// directory fd.
TEST(LibCephFS, ChownatATFDCWD) {
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  // scratch dir and file named after our pid: /dir_<pid>/elif
  char dir_name[128];
  char dir_path[256];
  sprintf(dir_name, "dir_%d", mypid);
  sprintf(dir_path, "/%s", dir_name);
  ASSERT_EQ(ceph_mkdir(cmount, dir_path, 0777), 0);
  char file_path[512];
  char rel_file_path[512] = "elif";
  sprintf(file_path, "%s/elif", dir_path);
  int fd = ceph_open(cmount, file_path, O_CREAT|O_RDWR, 0666);
  ASSERT_LE(0, fd);
  // set perms to readable and writeable only by owner
  ASSERT_EQ(ceph_fchmod(cmount, fd, 0600), 0);
  ceph_close(cmount, fd);
  // make the scratch dir the CWD so rel_file_path resolves against it
  ASSERT_EQ(0, ceph_chdir(cmount, dir_path));
  // change ownership to nobody -- we assume nobody exists and id is always 65534
  ASSERT_EQ(ceph_conf_set(cmount, "client_permissions", "0"), 0);
  ASSERT_EQ(ceph_chownat(cmount, CEPHFS_AT_FDCWD, rel_file_path, 65534, 65534, 0), 0);
  ASSERT_EQ(ceph_conf_set(cmount, "client_permissions", "1"), 0);
  // "nobody" will be ignored on Windows
#ifndef _WIN32
  // opening for write as the original user must now be denied
  fd = ceph_open(cmount, file_path, O_RDWR, 0);
  ASSERT_EQ(fd, -CEPHFS_EACCES);
#endif
  // bypass permission checks to clean up the chowned file
  ASSERT_EQ(ceph_conf_set(cmount, "client_permissions", "0"), 0);
  ASSERT_EQ(0, ceph_unlink(cmount, file_path));
  ASSERT_EQ(ceph_conf_set(cmount, "client_permissions", "1"), 0);
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, Chmodat) {
  // Verify ceph_chmodat() with a path relative to an open directory fd:
  // dropping the write bit must make an O_RDWR open fail with EACCES and
  // restoring it must make the open succeed again.
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);

  // scratch dir and file named after our pid: /dir_<pid>/elif
  char dir_name[128];
  char dir_path[256];
  sprintf(dir_name, "dir_%d", mypid);
  sprintf(dir_path, "/%s", dir_name);
  ASSERT_EQ(ceph_mkdir(cmount, dir_path, 0777), 0);
  char file_path[512];
  char rel_file_path[512] = "elif";
  sprintf(file_path, "%s/elif", dir_path);
  int fd = ceph_open(cmount, file_path, O_CREAT|O_RDWR, 0666);
  ASSERT_LE(0, fd);
  const char *bytes = "foobarbaz";
  ASSERT_EQ(ceph_write(cmount, fd, bytes, strlen(bytes), 0), (int)strlen(bytes));
  ASSERT_EQ(0, ceph_close(cmount, fd));

  fd = ceph_open(cmount, dir_path, O_DIRECTORY | O_RDONLY, 0);
  ASSERT_LE(0, fd);  // fix: this open result was previously unchecked
  // set perms to read but can't write
  ASSERT_EQ(ceph_chmodat(cmount, fd, rel_file_path, 0400, 0), 0);
  ASSERT_EQ(ceph_open(cmount, file_path, O_RDWR, 0), -CEPHFS_EACCES);
  // reset back to writeable
  ASSERT_EQ(ceph_chmodat(cmount, fd, rel_file_path, 0600, 0), 0);
  int fd2 = ceph_open(cmount, file_path, O_RDWR, 0);
  ASSERT_LE(0, fd2);
  ASSERT_EQ(0, ceph_close(cmount, fd2));
  ASSERT_EQ(0, ceph_close(cmount, fd));

  ASSERT_EQ(0, ceph_unlink(cmount, file_path));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
  ceph_shutdown(cmount);
}
// Same as Chmodat, but the relative path is resolved against the client's
// current working directory (CEPHFS_AT_FDCWD) rather than a directory fd.
TEST(LibCephFS, ChmodatATFDCWD) {
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  // scratch dir and file named after our pid: /dir_<pid>/elif
  char dir_name[128];
  char dir_path[256];
  sprintf(dir_name, "dir_%d", mypid);
  sprintf(dir_path, "/%s", dir_name);
  ASSERT_EQ(ceph_mkdir(cmount, dir_path, 0777), 0);
  char file_path[512];
  char rel_file_path[512] = "elif";
  sprintf(file_path, "%s/elif", dir_path);
  int fd = ceph_open(cmount, file_path, O_CREAT|O_RDWR, 0666);
  ASSERT_LE(0, fd);
  const char *bytes = "foobarbaz";
  ASSERT_EQ(ceph_write(cmount, fd, bytes, strlen(bytes), 0), (int)strlen(bytes));
  ASSERT_EQ(0, ceph_close(cmount, fd));
  // set perms to read but can't write
  ASSERT_EQ(0, ceph_chdir(cmount, dir_path));
  ASSERT_EQ(ceph_chmodat(cmount, CEPHFS_AT_FDCWD, rel_file_path, 0400, 0), 0);
  ASSERT_EQ(ceph_open(cmount, file_path, O_RDWR, 0), -CEPHFS_EACCES);
  // reset back to writeable
  ASSERT_EQ(ceph_chmodat(cmount, CEPHFS_AT_FDCWD, rel_file_path, 0600, 0), 0);
  int fd2 = ceph_open(cmount, file_path, O_RDWR, 0);
  ASSERT_LE(0, fd2);
  ASSERT_EQ(0, ceph_close(cmount, fd2));
  ASSERT_EQ(0, ceph_unlink(cmount, file_path));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, Utimensat) {
  // Verify ceph_utimensat() with a path relative to an open directory fd:
  // the atime/mtime we set must be reflected by a subsequent statx.
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  // scratch dir and file named after our pid: /dir_<pid>/elif
  char dir_name[128];
  char dir_path[256];
  sprintf(dir_name, "dir_%d", mypid);
  sprintf(dir_path, "/%s", dir_name);
  ASSERT_EQ(ceph_mkdir(cmount, dir_path, 0777), 0);
  char file_path[512];
  char rel_file_path[512] = "elif";
  sprintf(file_path, "%s/elif", dir_path);
  int fd = ceph_open(cmount, file_path, O_CREAT|O_RDWR, 0666);
  ASSERT_LE(0, fd);
  // fix: close the file before reusing `fd` for the directory below --
  // the original leaked this descriptor
  ASSERT_EQ(0, ceph_close(cmount, fd));

  // times[0] = atime, times[1] = mtime
  struct timespec times[2];
  get_current_time_timespec(times);

  fd = ceph_open(cmount, dir_path, O_DIRECTORY | O_RDONLY, 0);
  ASSERT_LE(0, fd);
  EXPECT_EQ(0, ceph_utimensat(cmount, fd, rel_file_path, times, 0));
  ASSERT_EQ(0, ceph_close(cmount, fd));

  struct ceph_statx stx;
  ASSERT_EQ(ceph_statx(cmount, file_path, &stx,
                       CEPH_STATX_MTIME|CEPH_STATX_ATIME, 0), 0);
  ASSERT_EQ(utime_t(stx.stx_atime), utime_t(times[0]));
  ASSERT_EQ(utime_t(stx.stx_mtime), utime_t(times[1]));

  ASSERT_EQ(0, ceph_unlink(cmount, file_path));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, UtimensatATFDCWD) {
  // Same as Utimensat, but the relative path is resolved against the
  // client's current working directory (CEPHFS_AT_FDCWD).
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  // scratch dir and file named after our pid: /dir_<pid>/elif
  char dir_name[128];
  char dir_path[256];
  sprintf(dir_name, "dir_%d", mypid);
  sprintf(dir_path, "/%s", dir_name);
  ASSERT_EQ(ceph_mkdir(cmount, dir_path, 0777), 0);
  char file_path[512];
  char rel_file_path[512] = "elif";
  sprintf(file_path, "%s/elif", dir_path);
  int fd = ceph_open(cmount, file_path, O_CREAT|O_RDWR, 0666);
  ASSERT_LE(0, fd);
  // fix: the original never closed this descriptor
  ASSERT_EQ(0, ceph_close(cmount, fd));

  // times[0] = atime, times[1] = mtime
  struct timespec times[2];
  get_current_time_timespec(times);
  ASSERT_EQ(0, ceph_chdir(cmount, dir_path));
  EXPECT_EQ(0, ceph_utimensat(cmount, CEPHFS_AT_FDCWD, rel_file_path, times, 0));

  struct ceph_statx stx;
  ASSERT_EQ(ceph_statx(cmount, file_path, &stx,
                       CEPH_STATX_MTIME|CEPH_STATX_ATIME, 0), 0);
  ASSERT_EQ(utime_t(stx.stx_atime), utime_t(times[0]));
  ASSERT_EQ(utime_t(stx.stx_mtime), utime_t(times[1]));

  ASSERT_EQ(0, ceph_unlink(cmount, file_path));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
  ceph_shutdown(cmount);
}
// Inode numbers below MDS_INO_SYSTEM_BASE are either MDS-private or
// reserved: looking them up via ceph_ll_lookup_inode() must fail with
// ESTALE except for the root, the global snaprealm, and (optionally)
// lost+found.
TEST(LibCephFS, LookupMdsPrivateInos) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  Inode *inode;
  for (int ino = 0; ino < MDS_INO_SYSTEM_BASE; ino++) {
    if (MDS_IS_PRIVATE_INO(ino)) {
      // MDS-internal inos must never resolve for a client
      ASSERT_EQ(-CEPHFS_ESTALE, ceph_ll_lookup_inode(cmount, ino, &inode));
    } else if (ino == CEPH_INO_ROOT || ino == CEPH_INO_GLOBAL_SNAPREALM) {
      ASSERT_EQ(0, ceph_ll_lookup_inode(cmount, ino, &inode));
      ceph_ll_put(cmount, inode);
    } else if (ino == CEPH_INO_LOST_AND_FOUND) {
      // ino 3 will only exist after the recovery tool ran, so
      // it may return -CEPHFS_ESTALE with a fresh fs cluster
      int r = ceph_ll_lookup_inode(cmount, ino, &inode);
      if (r == 0) {
        ceph_ll_put(cmount, inode);
      } else {
        ASSERT_TRUE(r == -CEPHFS_ESTALE);
      }
    } else {
      // currently ino 0 and 4~99 are not used yet
      ASSERT_EQ(-CEPHFS_ESTALE, ceph_ll_lookup_inode(cmount, ino, &inode));
    }
  }
  ceph_shutdown(cmount);
}
// Changing the mount timeout after the filesystem is already mounted is
// rejected with EINVAL.
TEST(LibCephFS, SetMountTimeoutPostMount) {
  struct ceph_mount_info *mnt = nullptr;
  ASSERT_EQ(0, ceph_create(&mnt, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(mnt, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(mnt, NULL));
  ASSERT_EQ(0, ceph_mount(mnt, NULL));
  ASSERT_EQ(-CEPHFS_EINVAL, ceph_set_mount_timeout(mnt, 5));
  ceph_shutdown(mnt);
}
// Setting the mount timeout before mounting is accepted and the mount
// still succeeds.
TEST(LibCephFS, SetMountTimeout) {
  struct ceph_mount_info *mnt = nullptr;
  ASSERT_EQ(0, ceph_create(&mnt, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(mnt, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(mnt, NULL));
  ASSERT_EQ(0, ceph_set_mount_timeout(mnt, 5));
  ASSERT_EQ(0, ceph_mount(mnt, NULL));
  ceph_shutdown(mnt);
}
// The ceph.fscrypt.auth / ceph.fscrypt.file xattrs can be created and
// read back through an fd, and persist across a client remount.
TEST(LibCephFS, FsCrypt) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  char path[NAME_MAX];
  sprintf(path, "test_fscrypt_%d", getpid());
  int fd = ceph_open(cmount, path, O_RDWR|O_CREAT, 0666);
  ASSERT_GT(fd, 0);

  // store both fscrypt xattrs, then read them back on the same fd
  ASSERT_EQ(0, ceph_fsetxattr(cmount, fd, "ceph.fscrypt.auth", "foo", 3, CEPH_XATTR_CREATE));
  ASSERT_EQ(0, ceph_fsetxattr(cmount, fd, "ceph.fscrypt.file", "foo", 3, CEPH_XATTR_CREATE));
  char out[64];
  ASSERT_EQ(3, ceph_fgetxattr(cmount, fd, "ceph.fscrypt.auth", out, sizeof(out)));
  ASSERT_EQ(3, ceph_fgetxattr(cmount, fd, "ceph.fscrypt.file", out, sizeof(out)));
  ASSERT_EQ(0, ceph_close(cmount, fd));

  // remount and verify the xattrs survived
  ASSERT_EQ(0, ceph_unmount(cmount));
  ASSERT_EQ(0, ceph_mount(cmount, NULL));
  fd = ceph_open(cmount, path, O_RDWR, 0666);
  ASSERT_GT(fd, 0);
  ASSERT_EQ(3, ceph_fgetxattr(cmount, fd, "ceph.fscrypt.auth", out, sizeof(out)));
  ASSERT_EQ(3, ceph_fgetxattr(cmount, fd, "ceph.fscrypt.file", out, sizeof(out)));
  ASSERT_EQ(0, ceph_close(cmount, fd));

  ASSERT_EQ(0, ceph_unmount(cmount));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, SnapdirAttrs) {
  // A directory's .snap dir mirrors the parent's mode/uid/gid/atime, while
  // mtime/ctime/version come from the closest snaprealm ancestor (root
  // here).  The invariant must also hold after chowning the parent.
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  char dir_name[128];
  char dir_path[256];
  char snap_dir_path[512];
  pid_t mypid = getpid();
  sprintf(dir_name, "dir_%d", mypid);
  sprintf(dir_path, "/%s", dir_name);
  sprintf(snap_dir_path, "%s/.snap", dir_path);

  Inode *dir, *root;
  struct ceph_statx stx_dir;
  struct ceph_statx stx_snap_dir;
  struct ceph_statx stx_root_snap_dir;
  UserPerm *perms = ceph_mount_perms(cmount);

  // Request every field asserted below.  Fix: the original mask listed
  // CEPH_STATX_MODE twice and omitted CEPH_STATX_UID and CEPH_STATX_CTIME
  // even though stx_uid and stx_ctime are compared.
  const unsigned want = CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_ATIME|
                        CEPH_STATX_MODE|CEPH_STATX_UID|CEPH_STATX_GID|CEPH_STATX_VERSION;

  ASSERT_EQ(ceph_ll_lookup_root(cmount, &root), 0);
  ASSERT_EQ(ceph_ll_mkdir(cmount, root, dir_name, 0755, &dir, &stx_dir, 0, 0, perms), 0);

  ASSERT_EQ(ceph_statx(cmount, dir_path, &stx_dir, want, 0), 0);
  ASSERT_EQ(ceph_statx(cmount, snap_dir_path, &stx_snap_dir, want, 0), 0);
  ASSERT_EQ(ceph_statx(cmount, "/.snap", &stx_root_snap_dir, want, 0), 0);

  // these should match the parent directories attrs
  ASSERT_EQ(stx_dir.stx_mode, stx_snap_dir.stx_mode);
  ASSERT_EQ(stx_dir.stx_uid, stx_snap_dir.stx_uid);
  ASSERT_EQ(stx_dir.stx_gid, stx_snap_dir.stx_gid);
  ASSERT_EQ(utime_t(stx_dir.stx_atime), utime_t(stx_snap_dir.stx_atime));
  // these should match the closest snaprealm ancestor (root in this
  // case) attrs
  ASSERT_EQ(utime_t(stx_root_snap_dir.stx_mtime), utime_t(stx_snap_dir.stx_mtime));
  ASSERT_EQ(utime_t(stx_root_snap_dir.stx_ctime), utime_t(stx_snap_dir.stx_ctime));
  ASSERT_EQ(stx_root_snap_dir.stx_version, stx_snap_dir.stx_version);

  // chown -- for this we need to be "root"
  UserPerm *rootcred = ceph_userperm_new(0, 0, 0, NULL);
  ASSERT_TRUE(rootcred);
  stx_dir.stx_uid++;
  stx_dir.stx_gid++;
  ASSERT_EQ(ceph_ll_setattr(cmount, dir, &stx_dir, CEPH_SETATTR_UID|CEPH_SETATTR_GID, rootcred), 0);

  // re-stat and verify the .snap dir followed the parent's new ownership
  memset(&stx_dir, 0, sizeof(stx_dir));
  memset(&stx_snap_dir, 0, sizeof(stx_snap_dir));
  ASSERT_EQ(ceph_statx(cmount, dir_path, &stx_dir, want, 0), 0);
  ASSERT_EQ(ceph_statx(cmount, snap_dir_path, &stx_snap_dir, want, 0), 0);
  ASSERT_EQ(stx_dir.stx_mode, stx_snap_dir.stx_mode);
  ASSERT_EQ(stx_dir.stx_uid, stx_snap_dir.stx_uid);
  ASSERT_EQ(stx_dir.stx_gid, stx_snap_dir.stx_gid);
  ASSERT_EQ(utime_t(stx_dir.stx_atime), utime_t(stx_snap_dir.stx_atime));
  ASSERT_EQ(utime_t(stx_root_snap_dir.stx_mtime), utime_t(stx_snap_dir.stx_mtime));
  ASSERT_EQ(utime_t(stx_root_snap_dir.stx_ctime), utime_t(stx_snap_dir.stx_ctime));
  ASSERT_EQ(stx_root_snap_dir.stx_version, stx_snap_dir.stx_version);

  ASSERT_EQ(ceph_ll_rmdir(cmount, root, dir_name, rootcred), 0);
  // fix: release resources the original leaked
  ceph_userperm_destroy(rootcred);
  ceph_ll_put(cmount, dir);
  ceph_ll_put(cmount, root);
  ASSERT_EQ(0, ceph_unmount(cmount));
  ceph_shutdown(cmount);
}
// Creating a snapshot (mkdir under dir/.snap) must advance the .snap
// dir's mtime/ctime/version past the values it initially inherited from
// the root snaprealm.
TEST(LibCephFS, SnapdirAttrsOnSnapCreate) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char dir_name[128];
  char dir_path[256];
  char snap_dir_path[512];
  pid_t mypid = getpid();
  sprintf(dir_name, "dir_%d", mypid);
  sprintf(dir_path, "/%s", dir_name);
  sprintf(snap_dir_path, "%s/.snap", dir_path);
  ASSERT_EQ(ceph_mkdir(cmount, dir_path, 0777), 0);
  struct ceph_statx stx_dir;
  struct ceph_statx stx_snap_dir;
  struct ceph_statx stx_root_snap_dir;
  // before any snapshot exists, dir/.snap mirrors /.snap attrs
  ASSERT_EQ(ceph_statx(cmount, dir_path, &stx_dir, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
  ASSERT_EQ(ceph_statx(cmount, snap_dir_path, &stx_snap_dir, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
  ASSERT_EQ(ceph_statx(cmount, "/.snap", &stx_root_snap_dir, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
  ASSERT_EQ(utime_t(stx_root_snap_dir.stx_mtime), utime_t(stx_snap_dir.stx_mtime));
  ASSERT_EQ(utime_t(stx_root_snap_dir.stx_ctime), utime_t(stx_snap_dir.stx_ctime));
  ASSERT_EQ(stx_root_snap_dir.stx_version, stx_snap_dir.stx_version);
  // take a snapshot: mkdir under the .snap directory
  char snap_path[1024];
  sprintf(snap_path, "%s/snap_a", snap_dir_path);
  ASSERT_EQ(ceph_mkdir(cmount, snap_path, 0777), 0);
  // .snap timestamps/version must now have moved forward
  struct ceph_statx stx_snap_dir_1;
  ASSERT_EQ(ceph_statx(cmount, snap_dir_path, &stx_snap_dir_1, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
  ASSERT_LT(utime_t(stx_root_snap_dir.stx_mtime), utime_t(stx_snap_dir_1.stx_mtime));
  ASSERT_LT(utime_t(stx_root_snap_dir.stx_ctime), utime_t(stx_snap_dir_1.stx_ctime));
  ASSERT_LT(stx_root_snap_dir.stx_version, stx_snap_dir_1.stx_version);
  ASSERT_EQ(0, ceph_rmdir(cmount, snap_path));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
  ASSERT_EQ(0, ceph_unmount(cmount));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, SnapdirAttrsOnSnapDelete) {
  // Deleting a snapshot must advance the .snap dir's mtime/ctime/version
  // beyond what they were right after the snapshot was created.
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);

  char dir_name[128];
  char dir_path[256];
  char snap_dir_path[512];
  pid_t mypid = getpid();
  sprintf(dir_name, "dir_%d", mypid);
  sprintf(dir_path, "/%s", dir_name);
  sprintf(snap_dir_path, "%s/.snap", dir_path);
  ASSERT_EQ(ceph_mkdir(cmount, dir_path, 0777), 0);

  // before any snapshot exists, dir/.snap mirrors /.snap attrs
  struct ceph_statx stx_dir;
  struct ceph_statx stx_snap_dir;
  struct ceph_statx stx_root_snap_dir;
  ASSERT_EQ(ceph_statx(cmount, dir_path, &stx_dir, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
  ASSERT_EQ(ceph_statx(cmount, snap_dir_path, &stx_snap_dir, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
  ASSERT_EQ(ceph_statx(cmount, "/.snap", &stx_root_snap_dir, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
  ASSERT_EQ(utime_t(stx_root_snap_dir.stx_mtime), utime_t(stx_snap_dir.stx_mtime));
  // fix: compare ctime against ctime -- the original compared the root
  // snapdir ctime against the snapdir *mtime*, unlike the sibling tests
  ASSERT_EQ(utime_t(stx_root_snap_dir.stx_ctime), utime_t(stx_snap_dir.stx_ctime));
  ASSERT_EQ(stx_root_snap_dir.stx_version, stx_snap_dir.stx_version);

  // take a snapshot and record the bumped .snap attrs
  char snap_path[1024];
  sprintf(snap_path, "%s/snap_a", snap_dir_path);
  ASSERT_EQ(ceph_mkdir(cmount, snap_path, 0777), 0);
  struct ceph_statx stx_snap_dir_1;
  ASSERT_EQ(ceph_statx(cmount, snap_dir_path, &stx_snap_dir_1, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
  ASSERT_LT(utime_t(stx_root_snap_dir.stx_mtime), utime_t(stx_snap_dir_1.stx_mtime));
  ASSERT_LT(utime_t(stx_root_snap_dir.stx_ctime), utime_t(stx_snap_dir_1.stx_ctime));
  ASSERT_LT(stx_root_snap_dir.stx_version, stx_snap_dir_1.stx_version);

  // delete the snapshot; .snap attrs must advance again
  ASSERT_EQ(0, ceph_rmdir(cmount, snap_path));
  struct ceph_statx stx_snap_dir_2;
  ASSERT_EQ(ceph_statx(cmount, snap_dir_path, &stx_snap_dir_2, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
  // Flaky assertion on Windows, potentially due to timestamp precision.
#ifndef _WIN32
  ASSERT_LT(utime_t(stx_snap_dir_1.stx_mtime), utime_t(stx_snap_dir_2.stx_mtime));
  ASSERT_LT(utime_t(stx_snap_dir_1.stx_ctime), utime_t(stx_snap_dir_2.stx_ctime));
#endif
  ASSERT_LT(stx_snap_dir_1.stx_version, stx_snap_dir_2.stx_version);

  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
  ASSERT_EQ(0, ceph_unmount(cmount));
  ceph_shutdown(cmount);
}
// Renaming a snapshot must advance the .snap dir's mtime/ctime/version
// beyond what they were right after the snapshot was created.
TEST(LibCephFS, SnapdirAttrsOnSnapRename) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char dir_name[128];
  char dir_path[256];
  char snap_dir_path[512];
  pid_t mypid = getpid();
  sprintf(dir_name, "dir_%d", mypid);
  sprintf(dir_path, "/%s", dir_name);
  sprintf(snap_dir_path, "%s/.snap", dir_path);
  ASSERT_EQ(ceph_mkdir(cmount, dir_path, 0777), 0);
  struct ceph_statx stx_dir;
  struct ceph_statx stx_snap_dir;
  struct ceph_statx stx_root_snap_dir;
  // before any snapshot exists, dir/.snap mirrors /.snap attrs
  ASSERT_EQ(ceph_statx(cmount, dir_path, &stx_dir, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
  ASSERT_EQ(ceph_statx(cmount, snap_dir_path, &stx_snap_dir, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
  ASSERT_EQ(ceph_statx(cmount, "/.snap", &stx_root_snap_dir, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
  ASSERT_EQ(utime_t(stx_root_snap_dir.stx_mtime), utime_t(stx_snap_dir.stx_mtime));
  ASSERT_EQ(utime_t(stx_root_snap_dir.stx_ctime), utime_t(stx_snap_dir.stx_ctime));
  ASSERT_EQ(stx_root_snap_dir.stx_version, stx_snap_dir.stx_version);
  // take a snapshot and record the bumped .snap attrs
  char snap_path[1024];
  sprintf(snap_path, "%s/snap_a", snap_dir_path);
  ASSERT_EQ(ceph_mkdir(cmount, snap_path, 0777), 0);
  struct ceph_statx stx_snap_dir_1;
  ASSERT_EQ(ceph_statx(cmount, snap_dir_path, &stx_snap_dir_1, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
  ASSERT_LT(utime_t(stx_root_snap_dir.stx_mtime), utime_t(stx_snap_dir_1.stx_mtime));
  ASSERT_LT(utime_t(stx_root_snap_dir.stx_ctime), utime_t(stx_snap_dir_1.stx_ctime));
  ASSERT_LT(stx_root_snap_dir.stx_version, stx_snap_dir_1.stx_version);
  // rename the snapshot; .snap attrs must advance again
  char snap_path_r[1024];
  sprintf(snap_path_r, "%s/snap_b", snap_dir_path);
  ASSERT_EQ(ceph_rename(cmount, snap_path, snap_path_r), 0);
  struct ceph_statx stx_snap_dir_2;
  ASSERT_EQ(ceph_statx(cmount, snap_dir_path, &stx_snap_dir_2, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
  // Flaky assertion on Windows, potentially due to timestamp precision.
#ifndef _WIN32
  ASSERT_LT(utime_t(stx_snap_dir_1.stx_mtime), utime_t(stx_snap_dir_2.stx_mtime));
  ASSERT_LT(utime_t(stx_snap_dir_1.stx_ctime), utime_t(stx_snap_dir_2.stx_ctime));
#endif
  ASSERT_LT(stx_snap_dir_1.stx_version, stx_snap_dir_2.stx_version);
  ASSERT_EQ(0, ceph_rmdir(cmount, snap_path_r));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
  ASSERT_EQ(0, ceph_unmount(cmount));
  ceph_shutdown(cmount);
}
| 127,923 | 32.824432 | 131 | cc |
null | ceph-main/src/test/libcephfs/vxattr.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2021 Red Hat Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/compat.h"
#include "gtest/gtest.h"
#include "include/cephfs/libcephfs.h"
#include "include/fs_types.h"
#include "mds/mdstypes.h"
#include "include/stat.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <string.h>
#ifndef _WIN32
#include <sys/resource.h>
#endif
#include "common/Clock.h"
#include "common/ceph_json.h"
#ifdef __linux__
#include <limits.h>
#include <sys/xattr.h>
#endif
#include <fmt/format.h>
#include <map>
#include <vector>
#include <thread>
#include <regex>
#include <string>
using namespace std;
// The root directory's ceph.dir.layout.json vxattr reports the default
// layout (inheritance == "@default").
TEST(LibCephFS, LayoutVerifyDefaultLayout) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  ASSERT_EQ(0, ceph_mkdirs(cmount, "test/d0/subdir", 0777));

  {
    char buf[1024] = "";
    int rc = ceph_getxattr(cmount, "/", "ceph.dir.layout.json", (void*)buf, sizeof(buf));
    ASSERT_GT(rc, 0);
    ASSERT_LT(rc, sizeof buf);
    std::clog << "layout:" << buf << std::endl;
    // the default layout is marked as such
    ASSERT_STRNE((char*)NULL, strstr(buf, "\"inheritance\": \"@default\""));
  }

  ASSERT_EQ(0, ceph_rmdir(cmount, "test/d0/subdir"));
  ASSERT_EQ(0, ceph_rmdir(cmount, "test/d0"));
  ASSERT_EQ(0, ceph_rmdir(cmount, "test"));
  ceph_shutdown(cmount);
}
// Set an explicit layout on test/d0 via ceph.dir.layout.json, then verify
// the layout reads back as "@set" and that test/d0/subdir reports it as
// "@inherited".
TEST(LibCephFS, LayoutSetAndVerifyNewAndInheritedLayout) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  ASSERT_EQ(0, ceph_mkdirs(cmount, "test/d0/subdir", 0777));
  std::string pool_name_set;
  {
    // read the default layout from "/" to discover a valid pool name
    char value[1024] = "";
    int r = 0;
    r = ceph_getxattr(cmount, "/", "ceph.dir.layout.json", (void*)value, sizeof(value));
    ASSERT_GT(r, 0);
    ASSERT_LT(r, sizeof value);
    JSONParser json_parser;
    ASSERT_EQ(json_parser.parse(value, r), 1);
    ASSERT_EQ(json_parser.is_object(), 1);
    std::string pool_name;
    JSONDecoder::decode_json("pool_name", pool_name, &json_parser, true);
    pool_name_set = pool_name;
    // set a new layout
    std::string new_layout;
    new_layout += "{";
    new_layout += "\"stripe_unit\": 65536, ";
    new_layout += "\"stripe_count\": 1, ";
    new_layout += "\"object_size\": 65536, ";
    new_layout += "\"pool_name\": \"" + pool_name + "\"";
    new_layout += "}";
    ASSERT_EQ(0, ceph_setxattr(cmount, "test/d0", "ceph.dir.layout.json", (void*)new_layout.c_str(), new_layout.length(), XATTR_CREATE));
  }
  {
    // read the layout back from test/d0 and verify every field
    char value[1024] = "";
    int r = 0;
    r = ceph_getxattr(cmount, "test/d0", "ceph.dir.layout.json", (void*)value, sizeof(value));
    ASSERT_GT(r, 0);
    ASSERT_LT(r, sizeof value);
    std::clog << "layout:" << value << std::endl;
    JSONParser json_parser;
    ASSERT_EQ(json_parser.parse(value, r), 1);
    ASSERT_EQ(json_parser.is_object(), 1);
    int64_t object_size;
    int64_t stripe_unit;
    int64_t stripe_count;
    std::string pool_name;
    std::string inheritance;
    JSONDecoder::decode_json("pool_name", pool_name, &json_parser, true);
    JSONDecoder::decode_json("object_size", object_size, &json_parser, true);
    JSONDecoder::decode_json("stripe_unit", stripe_unit, &json_parser, true);
    JSONDecoder::decode_json("stripe_count", stripe_count, &json_parser, true);
    JSONDecoder::decode_json("inheritance", inheritance, &json_parser, true);
    // now verify the layout
    ASSERT_EQ(pool_name.compare(pool_name_set), 0);
    ASSERT_EQ(object_size, 65536);
    ASSERT_EQ(stripe_unit, 65536);
    ASSERT_EQ(stripe_count, 1);
    ASSERT_EQ(inheritance.compare("@set"), 0);
  }
  {
    char value[1024] = "";
    int r = 0;
    JSONParser json_parser;
    std::string inheritance;
    // now check that the subdir layout is inherited
    r = ceph_getxattr(cmount, "test/d0/subdir", "ceph.dir.layout.json", (void*)value, sizeof(value));
    ASSERT_GT(r, 0);
    ASSERT_LT(r, sizeof value);
    std::clog << "layout:" << value << std::endl;
    ASSERT_EQ(json_parser.parse(value, r), 1);
    ASSERT_EQ(json_parser.is_object(), 1);
    JSONDecoder::decode_json("inheritance", inheritance, &json_parser, true);
    ASSERT_EQ(inheritance.compare("@inherited"), 0);
  }
  ASSERT_EQ(0, ceph_rmdir(cmount, "test/d0/subdir"));
  ASSERT_EQ(0, ceph_rmdir(cmount, "test/d0"));
  ASSERT_EQ(0, ceph_rmdir(cmount, "test"));
  ceph_shutdown(cmount);
}
// Malformed JSON passed to ceph.dir.layout.json is rejected with EINVAL.
TEST(LibCephFS, LayoutSetBadJSON) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  ASSERT_EQ(0, ceph_mkdirs(cmount, "test/d0/subdir", 0777));

  {
    // bad json: the opening brace is missing
    const std::string bad_layout =
      "\"stripe_unit\": 65536, "
      "\"stripe_count\": 1, "
      "\"object_size\": 65536, "
      "\"pool_name\": \"cephfs.a.data\", "
      "}";
    ASSERT_EQ(-CEPHFS_EINVAL,
              ceph_setxattr(cmount, "test/d0", "ceph.dir.layout.json",
                            (void*)bad_layout.c_str(), bad_layout.length(),
                            XATTR_CREATE));
  }

  ASSERT_EQ(0, ceph_rmdir(cmount, "test/d0/subdir"));
  ASSERT_EQ(0, ceph_rmdir(cmount, "test/d0"));
  ASSERT_EQ(0, ceph_rmdir(cmount, "test"));
  ceph_shutdown(cmount);
}
// Setting ceph.dir.layout.pool_name to a nonexistent pool is rejected
// with EINVAL.
TEST(LibCephFS, LayoutSetBadPoolName) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  ASSERT_EQ(0, ceph_mkdirs(cmount, "test/d0/subdir", 0777));

  static const char bad_pool[] = "UglyPoolName";
  ASSERT_EQ(-CEPHFS_EINVAL,
            ceph_setxattr(cmount, "test/d0", "ceph.dir.layout.pool_name",
                          (void*)bad_pool, sizeof(bad_pool) - 1, XATTR_CREATE));

  ASSERT_EQ(0, ceph_rmdir(cmount, "test/d0/subdir"));
  ASSERT_EQ(0, ceph_rmdir(cmount, "test/d0"));
  ASSERT_EQ(0, ceph_rmdir(cmount, "test"));
  ceph_shutdown(cmount);
}
// Setting ceph.dir.layout.pool_id to a nonexistent pool id is rejected
// with EINVAL.
TEST(LibCephFS, LayoutSetBadPoolId) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  ASSERT_EQ(0, ceph_mkdirs(cmount, "test/d0/subdir", 0777));

  static const char bad_id[] = "300";
  ASSERT_EQ(-CEPHFS_EINVAL,
            ceph_setxattr(cmount, "test/d0", "ceph.dir.layout.pool_id",
                          (void*)bad_id, sizeof(bad_id) - 1, XATTR_CREATE));

  ASSERT_EQ(0, ceph_rmdir(cmount, "test/d0/subdir"));
  ASSERT_EQ(0, ceph_rmdir(cmount, "test/d0"));
  ASSERT_EQ(0, ceph_rmdir(cmount, "test"));
  ceph_shutdown(cmount);
}
// An unknown ceph.dir.layout.* field name is rejected with ENODATA.
TEST(LibCephFS, LayoutSetInvalidFieldName) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  ASSERT_EQ(0, ceph_mkdirs(cmount, "test/d0/subdir", 0777));

  static const char val[] = "300";
  ASSERT_EQ(-CEPHFS_ENODATA,
            ceph_setxattr(cmount, "test/d0", "ceph.dir.layout.bad_field",
                          (void*)val, sizeof(val) - 1, XATTR_CREATE));

  ASSERT_EQ(0, ceph_rmdir(cmount, "test/d0/subdir"));
  ASSERT_EQ(0, ceph_rmdir(cmount, "test/d0"));
  ASSERT_EQ(0, ceph_rmdir(cmount, "test"));
  ceph_shutdown(cmount);
}
// ceph.dir.pin defaults to "-1" (unpinned) and reads back the rank it
// was set to.
TEST(LibCephFS, GetAndSetDirPin) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  ASSERT_EQ(0, ceph_mkdirs(cmount, "test/d1", 0777));

  {
    // default: not pinned
    char buf[1024] = "";
    int rc = ceph_getxattr(cmount, "test/d1", "ceph.dir.pin", (void*)buf, sizeof(buf));
    ASSERT_GT(rc, 0);
    ASSERT_LT(rc, sizeof buf);
    ASSERT_STREQ("-1", buf);
  }
  {
    // pin to rank 1 and read the value back
    char buf[1024] = "";
    ASSERT_EQ(0, ceph_setxattr(cmount, "test/d1", "ceph.dir.pin", (void*)"1", 1, XATTR_CREATE));
    int rc = ceph_getxattr(cmount, "test/d1", "ceph.dir.pin", (void*)buf, sizeof(buf));
    ASSERT_GT(rc, 0);
    ASSERT_LT(rc, sizeof buf);
    ASSERT_STREQ("1", buf);
  }

  ASSERT_EQ(0, ceph_rmdir(cmount, "test/d1"));
  ASSERT_EQ(0, ceph_rmdir(cmount, "test"));
  ceph_shutdown(cmount);
}
// ceph.dir.pin.distributed defaults to "0" and reads back "1" after
// being enabled.
TEST(LibCephFS, GetAndSetDirDistribution) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  ASSERT_EQ(0, ceph_mkdirs(cmount, "test/d2", 0777));
  {
    // default: distributed pinning disabled
    char value[1024] = "";
    int r = ceph_getxattr(cmount, "test/d2", "ceph.dir.pin.distributed", (void*)value, sizeof(value));
    ASSERT_GT(r, 0);
    ASSERT_LT(r, sizeof value);
    ASSERT_STREQ("0", value);
  }
  {
    // enable distributed pinning and read the value back
    char value[1024] = "";
    int r = -1;
    ASSERT_EQ(0, ceph_setxattr(cmount, "test/d2", "ceph.dir.pin.distributed", (void*)"1", 1, XATTR_CREATE));
    r = ceph_getxattr(cmount, "test/d2", "ceph.dir.pin.distributed", (void*)value, sizeof(value));
    ASSERT_GT(r, 0);
    ASSERT_LT(r, sizeof value);
    ASSERT_STREQ("1", value);
  }
  ASSERT_EQ(0, ceph_rmdir(cmount, "test/d2"));
  ASSERT_EQ(0, ceph_rmdir(cmount, "test"));
  ceph_shutdown(cmount);
}
TEST(LibCephFS, GetAndSetDirRandom) {
  // ceph.dir.pin.random defaults to "0" and reads back the probability
  // it was set to.
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  ASSERT_EQ(0, ceph_mkdirs(cmount, "test/d3", 0777));
  {
    // default: random pinning disabled
    char value[1024] = "";
    int r = ceph_getxattr(cmount, "test/d3", "ceph.dir.pin.random", (void*)value, sizeof(value));
    ASSERT_GT(r, 0);
    ASSERT_LT(r, sizeof value);
    ASSERT_STREQ("0", value);
  }
  {
    // set probability 1/128 and read it back
    double val = (double)1.0/(double)128.0;
    std::stringstream ss;
    ss << val;
    // idiom fix: materialize the string once instead of repeated ss.str()
    // temporaries and strlen() on a std::string's c_str()
    const std::string valstr = ss.str();
    ASSERT_EQ(0, ceph_setxattr(cmount, "test/d3", "ceph.dir.pin.random", (void*)valstr.c_str(), valstr.size(), XATTR_CREATE));
    char value[1024] = "";
    int r = -1;
    r = ceph_getxattr(cmount, "test/d3", "ceph.dir.pin.random", (void*)value, sizeof(value));
    ASSERT_GT(r, 0);
    ASSERT_LT(r, sizeof value);
    ASSERT_STREQ(valstr.c_str(), value);
  }
  ASSERT_EQ(0, ceph_rmdir(cmount, "test/d3"));
  ASSERT_EQ(0, ceph_rmdir(cmount, "test"));
  ceph_shutdown(cmount);
}
| 11,147 | 27.880829 | 141 | cc |
null | ceph-main/src/test/libcephsqlite/main.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2021 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License version 2.1, as published by
* the Free Software Foundation. See file COPYING.
*
*/
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <string_view>
#include <stdlib.h>
#include <string.h>
#include <sqlite3.h>
#include <fmt/format.h>
#include "gtest/gtest.h"
#include "include/uuid.h"
#include "include/rados/librados.hpp"
#include "include/libcephsqlite.h"
#include "SimpleRADOSStriper.h"
#include "common/ceph_argparse.h"
#include "common/ceph_crypto.h"
#include "common/ceph_time.h"
#include "common/common_init.h"
#include "common/debug.h"
#define dout_subsys ceph_subsys_client
#undef dout_prefix
#define dout_prefix *_dout << "unittest_libcephsqlite: "
/* Evaluate the sqlite call S and compare its return value to `code`.
 * On mismatch: log the sqlite error, finalize any in-flight statement,
 * and jump to the enclosing function's `out` label.  Relies on `db`,
 * `stmt`, `rc` and an `out` label being in scope at the expansion site.
 */
#define sqlcatchcode(S, code) \
do {\
  rc = S;\
  if (rc != code) {\
    std::cout << "[" << __FILE__ << ":" << __LINE__ << "]"\
              << " sqlite3 error: " << rc << " `" << sqlite3_errstr(rc)\
              << "': " << sqlite3_errmsg(db) << std::endl;\
    sqlite3_finalize(stmt);\
    stmt = NULL;\
    goto out;\
  }\
} while (0)

/* Shorthand for the common case where success means SQLITE_OK. */
#define sqlcatch(S) sqlcatchcode(S, SQLITE_OK)

// Global CephContext shared by the fixture below; presumably initialized
// in main() outside this chunk -- confirm before relying on it.
static boost::intrusive_ptr<CephContext> cct;
/* Fixture for libcephsqlite tests: each test opens its own database
 * object (named after a fresh random uuid) in the "cephsqlite" RADOS
 * pool through the "ceph" sqlite VFS.
 */
class CephSQLiteTest : public ::testing::Test {
public:
  inline static const std::string pool = "cephsqlite";
  // Create the RADOS pool once for the whole suite; a pre-existing pool
  // (EEXIST) from an earlier run is fine.
  static void SetUpTestSuite() {
    librados::Rados cluster;
    ASSERT_LE(0, cluster.init_with_context(cct.get()));
    ASSERT_LE(0, cluster.connect());
    if (int rc = cluster.pool_create(pool.c_str()); rc < 0 && rc != -EEXIST) {
      ASSERT_EQ(0, rc);
    }
    cluster.shutdown();
    sleep(5); // NOTE(review): presumably lets the new pool settle before use -- confirm
  }
  // Connect to the cluster and open a fresh uuid-named database.
  void SetUp() override {
    uuid.generate_random();
    ASSERT_LE(0, cluster.init_with_context(cct.get()));
    ASSERT_LE(0, cluster.connect());
    ASSERT_LE(0, cluster.wait_for_latest_osdmap());
    ASSERT_EQ(0, db_open());
  }
  void TearDown() override {
    ASSERT_EQ(SQLITE_OK, sqlite3_close(db));
    db = nullptr;
    cluster.shutdown();
    /* Leave database behind for inspection. */
  }
protected:
  // Open the database via the ceph VFS and run the session PRAGMAs plus
  // temp-table setup; returns 0 on success, a sqlite error code otherwise.
  int db_open()
  {
    static const char SQL[] =
      "PRAGMA journal_mode = PERSIST;"
      "PRAGMA page_size = 65536;"
      "PRAGMA cache_size = 32768;"
      "PRAGMA temp_store = memory;"
      "CREATE TEMPORARY TABLE perf (i INTEGER PRIMARY KEY, v TEXT);"
      "CREATE TEMPORARY VIEW p AS"
      " SELECT perf.i, J.*"
      " FROM perf, json_tree(perf.v) AS J;"
      "INSERT INTO perf (v)"
      " VALUES (ceph_perf());"
      ;
    sqlite3_stmt *stmt = NULL;
    const char *current = SQL;
    int rc;
    auto&& name = get_uri();
    // sqlcatch jumps to `out` on any sqlite error
    sqlcatch(sqlite3_open_v2(name.c_str(), &db, SQLITE_OPEN_CREATE|SQLITE_OPEN_READWRITE|SQLITE_OPEN_URI, "ceph"));
    std::cout << "using database: " << name << std::endl;
    std::cout << SQL << std::endl;
    sqlcatch(sqlite3_exec(db, current, NULL, NULL, NULL));
    rc = 0;
out:
    sqlite3_finalize(stmt);
    return rc;
  }
  // URI of the per-test database object ("file:<pool>:/<name>?vfs=ceph").
  virtual std::string get_uri() const {
    auto uri = fmt::format("file:{}:/{}?vfs=ceph", pool, get_name());
    return uri;
  }
  // Object name of the per-test database ("<uuid>.db").
  virtual std::string get_name() const {
    auto name = fmt::format("{}.db", uuid.to_string());
    return name;
  }
  sqlite3* db = nullptr;        // open database handle for the running test
  uuid_d uuid;                  // per-test random identifier
  librados::Rados cluster;      // cluster connection used by the test
};
// Smoke test: a simple CREATE TABLE succeeds through the ceph VFS.
TEST_F(CephSQLiteTest, Create) {
  static const char SQL[] =
    "CREATE TABLE foo (a INT);"
    ;
  sqlite3_stmt *stmt = NULL;
  const char *current = SQL;
  int rc;
  std::cout << SQL << std::endl;
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  rc = 0;
out:
  sqlite3_finalize(stmt);
  ASSERT_EQ(0, rc);
}
// Bulk-insert 1M rows with a 4K page size, then verify the page_size
// PRAGMA round-trips (the smaller page exercises more striper I/O).
TEST_F(CephSQLiteTest, InsertBulk4096) {
  static const char SQL[] =
    "PRAGMA page_size = 4096;"
    "CREATE TABLE foo (a INT);"
    "WITH RECURSIVE c(x) AS"
    "  ("
    "   VALUES(1)"
    "  UNION ALL"
    "   SELECT x+1"
    "   FROM c"
    "  )"
    "INSERT INTO foo (a)"
    "  SELECT RANDOM()"
    "  FROM c"
    "  LIMIT 1000000;"
    "PRAGMA page_size;"
    ;
  int rc;
  const char *current = SQL;
  sqlite3_stmt *stmt = NULL;
  std::cout << SQL << std::endl;
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  // the PRAGMA must report the page size we configured above
  ASSERT_EQ(sqlite3_column_int64(stmt, 0), 4096);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  rc = 0;
out:
  sqlite3_finalize(stmt);
  ASSERT_EQ(0, rc);
}
// Bulk-insert 1M random rows with the default (64K) page size via a
// single sqlite3_exec of the whole script.
TEST_F(CephSQLiteTest, InsertBulk) {
  static const char SQL[] =
    "CREATE TABLE foo (a INT);"
    "WITH RECURSIVE c(x) AS"
    "  ("
    "   VALUES(1)"
    "  UNION ALL"
    "   SELECT x+1"
    "   FROM c"
    "  )"
    "INSERT INTO foo (a)"
    "  SELECT RANDOM()"
    "  FROM c"
    "  LIMIT 1000000;"
    ;
  int rc;
  const char *current = SQL;
  sqlite3_stmt *stmt = NULL;
  std::cout << SQL << std::endl;
  sqlcatch(sqlite3_exec(db, current, NULL, NULL, NULL));
  rc = 0;
out:
  sqlite3_finalize(stmt);
  ASSERT_EQ(0, rc);
}
// Insert 1M sequential values, double each row with a bulk UPDATE, and
// verify the column sum doubled — i.e. the update touched every row.
TEST_F(CephSQLiteTest, UpdateBulk) {
  static const char SQL[] =
    "CREATE TABLE foo (a INT);"
    "WITH RECURSIVE c(x) AS"
    "  ("
    "   VALUES(1)"
    "  UNION ALL"
    "   SELECT x+1"
    "   FROM c"
    "  )"
    "INSERT INTO foo (a)"
    "  SELECT x"
    "  FROM c"
    "  LIMIT 1000000;"
    "SELECT SUM(a) FROM foo;"
    "UPDATE foo"
    "  SET a = a+a;"
    "SELECT SUM(a) FROM foo;"
    ;
  int rc;
  const char *current = SQL;
  sqlite3_stmt *stmt = NULL;
  uint64_t sum, sum2;  // column sums before/after the UPDATE
  std::cout << SQL << std::endl;
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  sum = sqlite3_column_int64(stmt, 0);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  sum2 = sqlite3_column_int64(stmt, 0);
  ASSERT_EQ(sum*2, sum2);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  rc = 0;
out:
  sqlite3_finalize(stmt);
  ASSERT_EQ(0, rc);
}
// Measure single-row insert throughput (each step is its own implicit
// transaction) and print transactions/second.  No assertion on the rate.
TEST_F(CephSQLiteTest, InsertRate) {
  using clock = ceph::coarse_mono_clock;
  using time = ceph::coarse_mono_time;
  static const char SQL[] =
    "CREATE TABLE foo (a INT);"
    "INSERT INTO foo (a) VALUES (RANDOM());"
    ;
  int rc;
  const char *current = SQL;
  sqlite3_stmt *stmt = NULL;
  time t1, t2;
  int count = 100;
  std::cout << SQL << std::endl;
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  t1 = clock::now();
  // repeated sqlite3_step on a DONE statement auto-resets it before re-running
  for (int i = 0; i < count; ++i) {
    sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  }
  t2 = clock::now();
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  {
    auto diff = std::chrono::duration<double>(t2-t1);
    std::cout << "transactions per second: " << count/diff.count() << std::endl;
  }
  rc = 0;
out:
  sqlite3_finalize(stmt);
  ASSERT_EQ(0, rc);
}
// Verify VACUUM actually shrinks the striped RADOS file backing the
// database: stat the striper before and after and require >2x shrinkage.
TEST_F(CephSQLiteTest, DatabaseShrink) {
  static const char SQL[] =
    "CREATE TABLE foo (a INT);"
    "WITH RECURSIVE c(x) AS"
    "  ("
    "   VALUES(1)"
    "  UNION ALL"
    "   SELECT x+1"
    "   FROM c"
    "  )"
    "INSERT INTO foo (a)"
    "  SELECT x"
    "  FROM c"
    "  LIMIT 1000000;"
    "DELETE FROM foo"
    "  WHERE RANDOM()%4 < 3;"
    "VACUUM;"
    ;
  int rc;
  const char *current = SQL;
  sqlite3_stmt *stmt = NULL;
  librados::IoCtx ioctx;
  std::unique_ptr<SimpleRADOSStriper> rs;
  uint64_t size1, size2;  // striper file size before/after VACUUM
  std::cout << SQL << std::endl;
  ASSERT_EQ(0, cluster.ioctx_create(pool.c_str(), ioctx));
  rs = std::make_unique<SimpleRADOSStriper>(ioctx, get_name());
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  // stat under the striper lock for a consistent size
  ASSERT_EQ(0, rs->lock(1000));
  ASSERT_EQ(0, rs->stat(&size1));
  ASSERT_EQ(0, rs->unlock());
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  ASSERT_EQ(0, rs->lock(1000));
  ASSERT_EQ(0, rs->stat(&size2));
  ASSERT_EQ(0, rs->unlock());
  ASSERT_LT(size2, size1/2);
  rc = 0;
out:
  sqlite3_finalize(stmt);
  ASSERT_EQ(0, rc);
}
// Same rate measurement as InsertRate but with EXCLUSIVE locking mode,
// which avoids re-taking the striper lock for every transaction.
TEST_F(CephSQLiteTest, InsertExclusiveRate) {
  using clock = ceph::coarse_mono_clock;
  using time = ceph::coarse_mono_time;
  static const char SQL[] =
    "PRAGMA locking_mode=EXCLUSIVE;"
    "CREATE TABLE foo (a INT);"
    "INSERT INTO foo (a) VALUES (RANDOM());"
    ;
  int rc;
  const char *current = SQL;
  sqlite3_stmt *stmt = NULL;
  time t1, t2;
  int count = 100;
  std::cout << SQL << std::endl;
  // the locking_mode PRAGMA returns a row with the new mode
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  t1 = clock::now();
  for (int i = 0; i < count; ++i) {
    sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  }
  t2 = clock::now();
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  {
    auto diff = std::chrono::duration<double>(t2-t1);
    std::cout << "transactions per second: " << count/diff.count() << std::endl;
  }
  rc = 0;
out:
  sqlite3_finalize(stmt);
  ASSERT_EQ(0, rc);
}
// Rate measurement with EXCLUSIVE locking plus WAL journaling (WAL
// requires exclusive locking with this VFS since it has no shared memory).
TEST_F(CephSQLiteTest, InsertExclusiveWALRate) {
  using clock = ceph::coarse_mono_clock;
  using time = ceph::coarse_mono_time;
  static const char SQL[] =
    "PRAGMA locking_mode=EXCLUSIVE;"
    "PRAGMA journal_mode=WAL;"
    "CREATE TABLE foo (a INT);"
    "INSERT INTO foo (a) VALUES (RANDOM());"
    ;
  int rc;
  const char *current = SQL;
  sqlite3_stmt *stmt = NULL;
  time t1, t2;
  int count = 100;
  std::cout << SQL << std::endl;
  // both PRAGMAs report their new value as a row
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  t1 = clock::now();
  for (int i = 0; i < count; ++i) {
    sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  }
  t2 = clock::now();
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  {
    auto diff = std::chrono::duration<double>(t2-t1);
    std::cout << "transactions per second: " << count/diff.count() << std::endl;
  }
  rc = 0;
out:
  sqlite3_finalize(stmt);
  ASSERT_EQ(0, rc);
}
// In WAL mode a committed transaction should cost exactly one VFS sync:
// snapshot the opf_sync perf counter before/after a transaction and diff
// the two snapshots through the json_tree view `p`.
TEST_F(CephSQLiteTest, WALTransactionSync) {
  static const char SQL[] =
    "PRAGMA locking_mode=EXCLUSIVE;"
    "PRAGMA journal_mode=WAL;"
    "CREATE TABLE foo (a INT);" /* sets up the -wal journal */
    "INSERT INTO perf (v)"
    "  VALUES (ceph_perf());"
    "BEGIN TRANSACTION;"
    "INSERT INTO foo (a) VALUES (RANDOM());"
    "END TRANSACTION;"
    "INSERT INTO perf (v)"
    "  VALUES (ceph_perf());"
    "SELECT a.atom-b.atom"
    "  FROM p AS a, p AS b"
    "  WHERE a.i = ? AND"
    "        b.i = ? AND"
    "        a.fullkey = '$.libcephsqlite_vfs.opf_sync.avgcount' AND"
    "        b.fullkey = '$.libcephsqlite_vfs.opf_sync.avgcount';"
    ;
  int rc;
  const char *current = SQL;
  sqlite3_stmt *stmt = NULL;
  uint64_t id;  // rowid of the second perf snapshot; id-1 is the first
  std::cout << SQL << std::endl;
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  // ten single-row inserts inside the explicit transaction
  for (int i = 0; i < 10; i++) {
    sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  }
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  id = sqlite3_last_insert_rowid(db);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatch(sqlite3_bind_int64(stmt, 1, id));
  sqlcatch(sqlite3_bind_int64(stmt, 2, id-1));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  ASSERT_EQ(sqlite3_column_int64(stmt, 0), 1);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  rc = 0;
out:
  sqlite3_finalize(stmt);
  ASSERT_EQ(0, rc);
}
// In the default PERSIST journal mode a committed transaction should cost
// exactly three VFS syncs (journal, db, journal header); verify via the
// opf_sync perf-counter delta between two ceph_perf() snapshots.
TEST_F(CephSQLiteTest, PersistTransactionSync) {
  static const char SQL[] =
    "BEGIN TRANSACTION;"
    "CREATE TABLE foo (a INT);"
    "INSERT INTO foo (a) VALUES (RANDOM());"
    "END TRANSACTION;"
    "INSERT INTO perf (v)"
    "  VALUES (ceph_perf());"
    "SELECT a.atom-b.atom"
    "  FROM p AS a, p AS b"
    "  WHERE a.i = ? AND"
    "        b.i = ? AND"
    "        a.fullkey = '$.libcephsqlite_vfs.opf_sync.avgcount' AND"
    "        b.fullkey = '$.libcephsqlite_vfs.opf_sync.avgcount';"
    ;
  int rc;
  const char *current = SQL;
  sqlite3_stmt *stmt = NULL;
  uint64_t id;  // rowid of the post-commit perf snapshot; id-1 is db_open's
  std::cout << SQL << std::endl;
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  id = sqlite3_last_insert_rowid(db);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatch(sqlite3_bind_int64(stmt, 1, id));
  sqlcatch(sqlite3_bind_int64(stmt, 2, id-1));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  ASSERT_EQ(sqlite3_column_int64(stmt, 0), 3); /* journal, db, journal header (PERIST) */
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  rc = 0;
out:
  sqlite3_finalize(stmt);
  ASSERT_EQ(0, rc);
}
// With EXCLUSIVE locking an insert performs the 3 VFS lock transitions
// (NONE->SHARED->RESERVED->EXCLUSIVE) but only one real striper lock;
// both counters are checked via perf-snapshot deltas.
TEST_F(CephSQLiteTest, InsertExclusiveLock) {
  static const char SQL[] =
    "PRAGMA locking_mode=EXCLUSIVE;"
    "CREATE TABLE foo (a INT);"
    "INSERT INTO foo (a) VALUES (RANDOM());"
    "INSERT INTO perf (v)"
    "  VALUES (ceph_perf());"
    "SELECT a.atom, b.atom, a.atom-b.atom"
    "  FROM p AS a, p AS b"
    "  WHERE a.i = ? AND"
    "        b.i = ? AND"
    "        a.fullkey = '$.libcephsqlite_vfs.opf_lock.avgcount' AND"
    "        b.fullkey = '$.libcephsqlite_vfs.opf_lock.avgcount';"
    "SELECT a.atom, b.atom, a.atom-b.atom"
    "  FROM p AS a, p AS b"
    "  WHERE a.i = ? AND"
    "        b.i = ? AND"
    "        a.fullkey = '$.libcephsqlite_striper.lock' AND"
    "        b.fullkey = '$.libcephsqlite_striper.lock';"
    ;
  int rc;
  const char *current = SQL;
  sqlite3_stmt *stmt = NULL;
  uint64_t id;  // rowid of the post-insert perf snapshot
  std::cout << SQL << std::endl;
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  id = sqlite3_last_insert_rowid(db);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatch(sqlite3_bind_int64(stmt, 1, id));
  sqlcatch(sqlite3_bind_int64(stmt, 2, id-1));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  ASSERT_GT(sqlite3_column_int64(stmt, 0), 0);
  ASSERT_GT(sqlite3_column_int64(stmt, 1), 0);
  ASSERT_EQ(sqlite3_column_int64(stmt, 2), 3); /* NONE -> SHARED; SHARED -> RESERVED; RESERVED -> EXCLUSIVE */
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatch(sqlite3_bind_int64(stmt, 1, id));
  sqlcatch(sqlite3_bind_int64(stmt, 2, id-1));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  ASSERT_GT(sqlite3_column_int64(stmt, 0), 0);
  ASSERT_GT(sqlite3_column_int64(stmt, 1), 0);
  ASSERT_EQ(sqlite3_column_int64(stmt, 2), 1); /* one actual lock on the striper */
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  rc = 0;
out:
  sqlite3_finalize(stmt);
  ASSERT_EQ(0, rc);
}
// A committed transaction should update the striper's recorded file size
// exactly twice (journal write + db write); check the update_size counter.
TEST_F(CephSQLiteTest, TransactionSizeUpdate) {
  static const char SQL[] =
    "BEGIN TRANSACTION;"
    "CREATE TABLE foo (a INT);"
    "INSERT INTO foo (a) VALUES (RANDOM());"
    "END TRANSACTION;"
    "INSERT INTO perf (v)"
    "  VALUES (ceph_perf());"
    "SELECT a.atom, b.atom, a.atom-b.atom"
    "  FROM p AS a, p AS b"
    "  WHERE a.i = ? AND"
    "        b.i = ? AND"
    "        a.fullkey = '$.libcephsqlite_striper.update_size' AND"
    "        b.fullkey = '$.libcephsqlite_striper.update_size';"
    ;
  int rc;
  const char *current = SQL;
  sqlite3_stmt *stmt = NULL;
  uint64_t id;  // rowid of the post-commit perf snapshot
  std::cout << SQL << std::endl;
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  id = sqlite3_last_insert_rowid(db);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatch(sqlite3_bind_int64(stmt, 1, id));
  sqlcatch(sqlite3_bind_int64(stmt, 2, id-1));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  ASSERT_GT(sqlite3_column_int64(stmt, 0), 0);
  ASSERT_GT(sqlite3_column_int64(stmt, 1), 0);
  ASSERT_EQ(sqlite3_column_int64(stmt, 2), 2); /* once for journal write and db write (but not journal header clear!) */
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  rc = 0;
out:
  sqlite3_finalize(stmt);
  ASSERT_EQ(0, rc);
}
// Writing ~1GB of blobs should grow the striper's allocated space in a
// bounded number of steps (max_growth is 128MB), checked via the
// update_allocated counter delta.
TEST_F(CephSQLiteTest, AllocatedGrowth) {
  static const char SQL[] =
    "CREATE TABLE foo (a BLOB);"
    "WITH RECURSIVE c(x) AS"
    "  ("
    "   VALUES(1)"
    "  UNION ALL"
    "   SELECT x+1"
    "   FROM c"
    "  )"
    "INSERT INTO foo (a)"
    "  SELECT RANDOMBLOB(1<<20)"
    "  FROM c"
    "  LIMIT 1024;"
    "INSERT INTO perf (v)"
    "  VALUES (ceph_perf());"
    "SELECT a.atom, b.atom, a.atom-b.atom"
    "  FROM p AS a, p AS b"
    "  WHERE a.i = ? AND"
    "        b.i = ? AND"
    "        a.fullkey = '$.libcephsqlite_striper.update_allocated' AND"
    "        b.fullkey = '$.libcephsqlite_striper.update_allocated';"
    ;
  int rc;
  const char *current = SQL;
  sqlite3_stmt *stmt = NULL;
  uint64_t id;  // rowid of the post-insert perf snapshot
  std::cout << SQL << std::endl;
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  id = sqlite3_last_insert_rowid(db);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatch(sqlite3_bind_int64(stmt, 1, id));
  sqlcatch(sqlite3_bind_int64(stmt, 2, id-1));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  ASSERT_GT(sqlite3_column_int64(stmt, 2), 8); /* max_growth = 128MB, 1024MB of data */
  ASSERT_LT(sqlite3_column_int64(stmt, 2), 12);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  rc = 0;
out:
  sqlite3_finalize(stmt);
  ASSERT_EQ(0, rc);
}
// Bulk delete roughly half of 1M rows; exercises large free-list updates.
TEST_F(CephSQLiteTest, DeleteBulk) {
  static const char SQL[] =
    "CREATE TABLE foo (a INT);"
    "WITH RECURSIVE c(x) AS"
    "  ("
    "   VALUES(1)"
    "  UNION ALL"
    "   SELECT x+1"
    "   FROM c"
    "  )"
    "INSERT INTO foo (a)"
    "  SELECT x"
    "  FROM c"
    "  LIMIT 1000000;"
    "DELETE FROM foo"
    "  WHERE RANDOM()%2 == 0;"
    ;
  int rc;
  const char *current = SQL;
  sqlite3_stmt *stmt = NULL;
  std::cout << SQL << std::endl;
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  rc = 0;
out:
  sqlite3_finalize(stmt);
  ASSERT_EQ(0, rc);
}
// Dropping a ~1GB table and vacuuming must shrink the striper file: the
// shrink counter increases and shrink_bytes grows by more than 512MB.
TEST_F(CephSQLiteTest, DropMassive) {
  static const char SQL[] =
    "CREATE TABLE foo (a BLOB);"
    "WITH RECURSIVE c(x) AS"
    "  ("
    "   VALUES(1)"
    "  UNION ALL"
    "   SELECT x+1"
    "   FROM c"
    "  )"
    "INSERT INTO foo (a)"
    "  SELECT RANDOMBLOB(1<<20)"
    "  FROM c"
    "  LIMIT 1024;"
    "DROP TABLE foo;"
    "VACUUM;"
    "INSERT INTO perf (v)"
    "  VALUES (ceph_perf());"
    "SELECT a.atom, b.atom"
    "  FROM p AS a, p AS b"
    "  WHERE a.i = ? AND"
    "        b.i = ? AND"
    "        a.fullkey = '$.libcephsqlite_striper.shrink' AND"
    "        b.fullkey = '$.libcephsqlite_striper.shrink';"
    "SELECT a.atom-b.atom"
    "  FROM p AS a, p AS b"
    "  WHERE a.i = ? AND"
    "        b.i = ? AND"
    "        a.fullkey = '$.libcephsqlite_striper.shrink_bytes' AND"
    "        b.fullkey = '$.libcephsqlite_striper.shrink_bytes';"
    ;
  int rc;
  const char *current = SQL;
  sqlite3_stmt *stmt = NULL;
  uint64_t id;  // rowid of the post-VACUUM perf snapshot
  std::cout << SQL << std::endl;
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  id = sqlite3_last_insert_rowid(db);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatch(sqlite3_bind_int64(stmt, 1, id));
  sqlcatch(sqlite3_bind_int64(stmt, 2, id-1));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  ASSERT_GT(sqlite3_column_int64(stmt, 0), sqlite3_column_int64(stmt, 1));
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatch(sqlite3_bind_int64(stmt, 1, id));
  sqlcatch(sqlite3_bind_int64(stmt, 2, id-1));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  ASSERT_LT(512*(1<<20), sqlite3_column_int64(stmt, 0));
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  rc = 0;
out:
  sqlite3_finalize(stmt);
  ASSERT_EQ(0, rc);
}
// Data-integrity test: copy 1GB of random blobs from a temporary
// (memory-backed) table into a ceph-backed table and verify per-row SHA1
// digests match after the round trip through the VFS.
TEST_F(CephSQLiteTest, InsertMassiveVerify) {
  static const char SQL[] =
    "CREATE TABLE foo (a BLOB);"
    "CREATE TEMPORARY TABLE bar (a BLOB);"
    "WITH RECURSIVE c(x) AS"
    "  ("
    "   VALUES(1)"
    "  UNION ALL"
    "   SELECT x+1"
    "   FROM c"
    "  )"
    "INSERT INTO bar (a)"
    "  SELECT RANDOMBLOB(1<<20)"
    "  FROM c"
    "  LIMIT 1024;"
    "SELECT a FROM bar;"
    "INSERT INTO foo (a)"
    "  SELECT a FROM bar;"
    "SELECT a FROM foo;"
    ;
  int rc;
  const char *current = SQL;
  sqlite3_stmt *stmt = NULL;
  std::vector<std::string> hashes1, hashes2;  // digests from bar then foo
  std::cout << SQL << std::endl;
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  // hash every source blob as it streams out of the temp table
  while ((rc = sqlite3_step(stmt)) == SQLITE_ROW) {
    const void* blob = sqlite3_column_blob(stmt, 0);
    ceph::bufferlist bl;
    bl.append(std::string_view((const char*)blob, (size_t)sqlite3_column_bytes(stmt, 0)));
    auto digest = ceph::crypto::digest<ceph::crypto::SHA1>(bl);
    hashes1.emplace_back(digest.to_str());
  }
  sqlcatchcode(rc, SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  // hash every blob read back from the ceph-backed table
  while ((rc = sqlite3_step(stmt)) == SQLITE_ROW) {
    const void* blob = sqlite3_column_blob(stmt, 0);
    ceph::bufferlist bl;
    bl.append(std::string_view((const char*)blob, (size_t)sqlite3_column_bytes(stmt, 0)));
    auto digest = ceph::crypto::digest<ceph::crypto::SHA1>(bl);
    hashes2.emplace_back(digest.to_str());
  }
  sqlcatchcode(rc, SQLITE_DONE);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  ASSERT_EQ(hashes1, hashes2);
  rc = 0;
out:
  sqlite3_finalize(stmt);
  ASSERT_EQ(0, rc);
}
// ceph_perf() must return syntactically valid JSON.
TEST_F(CephSQLiteTest, PerfValid) {
  static const char SQL[] =
    "SELECT json_valid(ceph_perf());"
    ;
  int rc;
  const char *current = SQL;
  sqlite3_stmt *stmt = NULL;
  std::cout << SQL << std::endl;
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  ASSERT_EQ(sqlite3_column_int64(stmt, 0), 1);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  rc = 0;
out:
  sqlite3_finalize(stmt);
  ASSERT_EQ(0, rc);
}
// ceph_status() must return syntactically valid JSON.
TEST_F(CephSQLiteTest, StatusValid) {
  static const char SQL[] =
    "SELECT json_valid(ceph_status());"
    ;
  int rc;
  const char *current = SQL;
  sqlite3_stmt *stmt = NULL;
  std::cout << SQL << std::endl;
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  ASSERT_EQ(sqlite3_column_int64(stmt, 0), 1);
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  rc = 0;
out:
  sqlite3_finalize(stmt);
  ASSERT_EQ(0, rc);
}
// Verify SQLite's notion of "now" (provided through the VFS current-time
// hook) agrees with the system clock to within 5 seconds.
TEST_F(CephSQLiteTest, CurrentTime) {
  static const char SQL[] =
    "SELECT strftime('%s', 'now');"
    ;
  int rc;
  const char *current = SQL;
  sqlite3_stmt *stmt = NULL;
  std::cout << SQL << std::endl;
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  {
    time_t now = time(0);
    auto t = sqlite3_column_int64(stmt, 0);
    /* The difference is 64-bit (time_t minus sqlite3_int64); plain abs()
     * takes an int and would silently truncate, so use llabs(). */
    ASSERT_LT(llabs(now-t), 5);
  }
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  rc = 0;
out:
  sqlite3_finalize(stmt);
  ASSERT_EQ(0, rc);
}
// ceph_status() must expose an address string and a positive client id.
TEST_F(CephSQLiteTest, StatusFields) {
  static const char SQL[] =
    "SELECT json_extract(ceph_status(), '$.addr');"
    "SELECT json_extract(ceph_status(), '$.id');"
    ;
  int rc;
  const char *current = SQL;
  sqlite3_stmt *stmt = NULL;
  std::cout << SQL << std::endl;
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  {
    // printed only for inspection; the address format is not asserted
    auto addr = sqlite3_column_text(stmt, 0);
    std::cout << addr << std::endl;
  }
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  {
    auto id = sqlite3_column_int64(stmt, 0);
    std::cout << id << std::endl;
    ASSERT_GT(id, 0);
  }
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  rc = 0;
out:
  sqlite3_finalize(stmt);
  ASSERT_EQ(0, rc);
}
// Initialize CephContext from the command line, register the cephsqlite
// sqlite3 extension, sanity-check it loads, then run the gtest suite.
int main(int argc, char **argv) {
  auto args = argv_to_vec(argc, argv);
  std::string conf_file_list;
  std::string cluster;
  CephInitParameters iparams = ceph_argparse_early_args(args, CEPH_ENTITY_TYPE_CLIENT, &cluster, &conf_file_list);
  cct = boost::intrusive_ptr<CephContext>(common_preinit(iparams, CODE_ENVIRONMENT_UTILITY, 0), false);
  cct->_conf.parse_config_files(conf_file_list.empty() ? nullptr : conf_file_list.c_str(), &std::cerr, 0);
  cct->_conf.parse_env(cct->get_module_type()); // environment variables override
  cct->_conf.parse_argv(args);
  cct->_conf.apply_changes(nullptr);
  common_init_finish(cct.get());
  ldout(cct, 1) << "sqlite3 version: " << sqlite3_libversion() << dendl;
  // URI filenames are required for the file:pool:/name?vfs=ceph syntax
  if (int rc = sqlite3_config(SQLITE_CONFIG_URI, 1); rc) {
    lderr(cct) << "sqlite3 config failed: " << rc << dendl;
    exit(EXIT_FAILURE);
  }
  sqlite3_auto_extension((void (*)())sqlite3_cephsqlite_init);
  // open/close a throwaway in-memory db to force the extension to load now
  sqlite3* db = nullptr;
  if (int rc = sqlite3_open_v2(":memory:", &db, SQLITE_OPEN_READWRITE, nullptr); rc == SQLITE_OK) {
    sqlite3_close(db);
  } else {
    lderr(cct) << "could not open sqlite3: " << rc << dendl;
    exit(EXIT_FAILURE);
  }
  if (int rc = cephsqlite_setcct(cct.get(), nullptr); rc < 0) {
    lderr(cct) << "could not set cct: " << rc << dendl;
    exit(EXIT_FAILURE);
  }
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
| 33,382 | 28.542478 | 120 | cc |
null | ceph-main/src/test/librados/TestCase.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <errno.h>
#include <fmt/format.h>
#include "test/librados/test.h"
#include "test/librados/TestCase.h"
#include "include/scope_guard.h"
#include "crimson_utils.h"
std::string RadosTestNS::pool_name;
rados_t RadosTestNS::s_cluster = NULL;
void RadosTestNS::SetUpTestCase()
{
auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_case()->name());
pool_name = get_temp_pool_name(pool_prefix);
ASSERT_EQ("", create_one_pool(pool_name, &s_cluster));
}
// Destroy the case-wide pool and its cluster handle.
void RadosTestNS::TearDownTestCase()
{
  ASSERT_EQ(0, destroy_one_pool(pool_name, &s_cluster));
}
// Per-test: open an ioctx on the shared pool and confirm it is a
// replicated pool (no alignment requirement).
void RadosTestNS::SetUp()
{
  cluster = RadosTestNS::s_cluster;
  ASSERT_EQ(0, rados_ioctx_create(cluster, pool_name.c_str(), &ioctx));
  int req;
  ASSERT_EQ(0, rados_ioctx_pool_requires_alignment2(ioctx, &req));
  ASSERT_FALSE(req);
}
// Per-test: optionally purge all objects, then release the ioctx.
void RadosTestNS::TearDown()
{
  if (cleanup)
    cleanup_all_objects(ioctx);
  rados_ioctx_destroy(ioctx);
}
void RadosTestNS::cleanup_all_objects(rados_ioctx_t ioctx)
{
  // Purge every object in every namespace so later tests start clean.
  rados_ioctx_snap_set_read(ioctx, LIBRADOS_SNAP_HEAD);
  rados_ioctx_set_namespace(ioctx, LIBRADOS_ALL_NSPACES);
  rados_list_ctx_t cursor;
  ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &cursor));
  auto closer = make_scope_guard([&] { rados_nobjects_list_close(cursor); });
  const char *oid = NULL;
  const char *locator = NULL;
  const char *ns = NULL;
  for (;;) {
    const int ret = rados_nobjects_list_next(cursor, &oid, &locator, &ns);
    if (ret == -ENOENT)
      break;  // listing exhausted
    ASSERT_EQ(0, ret);
    // removal must target the object's own locator key and namespace
    rados_ioctx_locator_set_key(ioctx, locator);
    rados_ioctx_set_namespace(ioctx, ns);
    ASSERT_EQ(0, rados_remove(ioctx, oid));
  }
}
// Shared EC pool/cluster handle for all RadosTestECNS tests in a case.
std::string RadosTestECNS::pool_name;
rados_t RadosTestECNS::s_cluster = NULL;
// Create one erasure-coded pool for the case (skipped under crimson).
void RadosTestECNS::SetUpTestCase()
{
  SKIP_IF_CRIMSON();
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_case()->name());
  pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_ec_pool(pool_name, &s_cluster));
}
// Destroy the case-wide EC pool (skipped under crimson).
void RadosTestECNS::TearDownTestCase()
{
  SKIP_IF_CRIMSON();
  ASSERT_EQ(0, destroy_one_ec_pool(pool_name, &s_cluster));
}
// Per-test: open an ioctx on the EC pool and record its required
// write alignment (EC pools must report a non-zero alignment).
void RadosTestECNS::SetUp()
{
  SKIP_IF_CRIMSON();
  cluster = RadosTestECNS::s_cluster;
  ASSERT_EQ(0, rados_ioctx_create(cluster, pool_name.c_str(), &ioctx));
  int req;
  ASSERT_EQ(0, rados_ioctx_pool_requires_alignment2(ioctx, &req));
  ASSERT_TRUE(req);
  ASSERT_EQ(0, rados_ioctx_pool_required_alignment2(ioctx, &alignment));
  ASSERT_NE(0U, alignment);
}
// Per-test: optionally purge all objects, then release the ioctx.
void RadosTestECNS::TearDown()
{
  SKIP_IF_CRIMSON();
  if (cleanup)
    cleanup_all_objects(ioctx);
  rados_ioctx_destroy(ioctx);
}
// Shared pool/cluster handle for all RadosTest tests in a case.
std::string RadosTest::pool_name;
rados_t RadosTest::s_cluster = NULL;
// Create one replicated pool (named after the test case) for the whole case.
void RadosTest::SetUpTestCase()
{
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_case()->name());
  pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_pool(pool_name, &s_cluster));
}
// Destroy the case-wide pool and its cluster handle.
void RadosTest::TearDownTestCase()
{
  ASSERT_EQ(0, destroy_one_pool(pool_name, &s_cluster));
}
// Per-test: open an ioctx on the shared pool, switch to a unique
// per-test namespace, and confirm no alignment requirement.
void RadosTest::SetUp()
{
  cluster = RadosTest::s_cluster;
  ASSERT_EQ(0, rados_ioctx_create(cluster, pool_name.c_str(), &ioctx));
  nspace = get_temp_pool_name();  // reused as a unique namespace name
  rados_ioctx_set_namespace(ioctx, nspace.c_str());
  int req;
  ASSERT_EQ(0, rados_ioctx_pool_requires_alignment2(ioctx, &req));
  ASSERT_FALSE(req);
}
// Per-test: optionally purge the default and per-test namespaces,
// then release the ioctx.
void RadosTest::TearDown()
{
  if (cleanup) {
    cleanup_default_namespace(ioctx);
    cleanup_namespace(ioctx, nspace);
  }
  rados_ioctx_destroy(ioctx);
}
// Convenience wrapper: purge objects in the default ("") namespace.
void RadosTest::cleanup_default_namespace(rados_ioctx_t ioctx)
{
  // remove all objects from the default namespace to avoid polluting
  // other tests
  cleanup_namespace(ioctx, "");
}
void RadosTest::cleanup_namespace(rados_ioctx_t ioctx, std::string ns)
{
  // Remove every object in the given namespace so later tests start clean.
  rados_ioctx_snap_set_read(ioctx, LIBRADOS_SNAP_HEAD);
  rados_ioctx_set_namespace(ioctx, ns.c_str());
  rados_list_ctx_t cursor;
  ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &cursor));
  auto closer = make_scope_guard([&] { rados_nobjects_list_close(cursor); });
  const char *oid = NULL;
  const char *locator = NULL;
  for (;;) {
    const int ret = rados_nobjects_list_next(cursor, &oid, &locator, NULL);
    if (ret == -ENOENT)
      break;  // listing exhausted
    ASSERT_EQ(0, ret);
    // removal must target the object's own locator key
    rados_ioctx_locator_set_key(ioctx, locator);
    ASSERT_EQ(0, rados_remove(ioctx, oid));
  }
}
// Shared EC pool/cluster handle for all RadosTestEC tests in a case.
std::string RadosTestEC::pool_name;
rados_t RadosTestEC::s_cluster = NULL;
// Create one erasure-coded pool for the case (skipped under crimson).
void RadosTestEC::SetUpTestCase()
{
  SKIP_IF_CRIMSON();
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_case()->name());
  pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_ec_pool(pool_name, &s_cluster));
}
// One-time teardown: destroy the shared EC pool.
void RadosTestEC::TearDownTestCase()
{
  SKIP_IF_CRIMSON();
  ASSERT_EQ(0, destroy_one_ec_pool(pool_name, &s_cluster));
}
// Per-test setup for the EC fixture: open an ioctx on the shared EC pool,
// switch it to a unique namespace, and record the pool's write alignment.
void RadosTestEC::SetUp()
{
  SKIP_IF_CRIMSON();
  cluster = RadosTestEC::s_cluster;
  ASSERT_EQ(0, rados_ioctx_create(cluster, pool_name.c_str(), &ioctx));
  nspace = get_temp_pool_name();
  rados_ioctx_set_namespace(ioctx, nspace.c_str());
  // EC pools must report a non-zero required write alignment; tests use
  // 'alignment' to size their writes.
  int req;
  ASSERT_EQ(0, rados_ioctx_pool_requires_alignment2(ioctx, &req));
  ASSERT_TRUE(req);
  ASSERT_EQ(0, rados_ioctx_pool_required_alignment2(ioctx, &alignment));
  ASSERT_NE(0U, alignment);
}
// Per-test teardown for the EC fixture: optionally scrub the default and
// per-test namespaces, then release the ioctx.
void RadosTestEC::TearDown()
{
  SKIP_IF_CRIMSON();
  if (cleanup) {
    cleanup_default_namespace(ioctx);
    cleanup_namespace(ioctx, nspace);
  }
  rados_ioctx_destroy(ioctx);
}
| 5,553 | 26.22549 | 106 | cc |
null | ceph-main/src/test/librados/TestCase.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_RADOS_TESTCASE_H
#define CEPH_TEST_RADOS_TESTCASE_H
#include "include/rados/librados.h"
#include "gtest/gtest.h"
#include <string>
/**
* These test cases create a temporary pool that lives as long as the
 * test case. We initially use the default namespace and assume each
 * test will use whatever namespaces it wants. After each test all objects
* are removed.
*
* Since pool creation and deletion is slow, this allows many tests to
* run faster.
*/
// Fixture for tests that run against a shared replicated pool through the
// default namespace. Pass c = true to have TearDown remove all objects.
class RadosTestNS : public ::testing::Test {
public:
  RadosTestNS(bool c=false) : cleanup(c) {}
  ~RadosTestNS() override {}
protected:
  static void SetUpTestCase();
  static void TearDownTestCase();
  static void cleanup_all_objects(rados_ioctx_t ioctx);
  static rados_t s_cluster;        // cluster handle shared by the test case
  static std::string pool_name;    // pool shared by the test case
  void SetUp() override;
  void TearDown() override;
  rados_t cluster = nullptr;       // per-test cluster handle
  rados_ioctx_t ioctx = nullptr;   // per-test ioctx on the shared pool
  bool cleanup;                    // remove all objects in TearDown?
};
// Variant of RadosTestNS that enables object cleanup in TearDown.
struct RadosTestNSCleanup : public RadosTestNS {
  RadosTestNSCleanup() : RadosTestNS(true) {}
};
class RadosTestECNS : public RadosTestNS {
public:
RadosTestECNS(bool c=false) : cleanup(c) {}
~RadosTestECNS() override {}
protected:
static void SetUpTestCase();
static void TearDownTestCase();
static rados_t s_cluster;
static std::string pool_name;
void SetUp() override;
void TearDown() override;
rados_t cluster = nullptr;
rados_ioctx_t ioctx = nullptr;
uint64_t alignment = 0;
bool cleanup;
};
// Variant of RadosTestECNS that enables object cleanup in TearDown.
struct RadosTestECNSCleanup : public RadosTestECNS {
  RadosTestECNSCleanup() : RadosTestECNS(true) {}
};
/**
* These test cases create a temporary pool that lives as long as the
* test case. Each test within a test case gets a new ioctx set to a
* unique namespace within the pool.
*
* Since pool creation and deletion is slow, this allows many tests to
* run faster.
*/
// Fixture that gives each test its own namespace within one shared
// replicated pool. Pass c = true to have TearDown remove the test's objects.
class RadosTest : public ::testing::Test {
public:
  RadosTest(bool c=false) : cleanup(c) {}
  ~RadosTest() override {}
protected:
  static void SetUpTestCase();
  static void TearDownTestCase();
  static void cleanup_default_namespace(rados_ioctx_t ioctx);
  static void cleanup_namespace(rados_ioctx_t ioctx, std::string ns);
  static rados_t s_cluster;        // cluster handle shared by the test case
  static std::string pool_name;    // pool shared by the test case
  void SetUp() override;
  void TearDown() override;
  rados_t cluster = nullptr;       // per-test cluster handle
  rados_ioctx_t ioctx = nullptr;   // per-test ioctx, set to 'nspace'
  std::string nspace;              // unique namespace for this test
  bool cleanup;                    // scrub namespaces in TearDown?
};
class RadosTestEC : public RadosTest {
public:
RadosTestEC(bool c=false) : cleanup(c) {}
~RadosTestEC() override {}
protected:
static void SetUpTestCase();
static void TearDownTestCase();
static rados_t s_cluster;
static std::string pool_name;
void SetUp() override;
void TearDown() override;
rados_t cluster = nullptr;
rados_ioctx_t ioctx = nullptr;
bool cleanup;
std::string nspace;
uint64_t alignment = 0;
};
/**
* Test case without creating a temporary pool in advance.
* This is necessary for scenarios such that we need to
* manually create a pool, start some long-runing tasks and
* then the related pool is suddenly gone.
*/
// Bare fixture with no pre-created pool; see the comment block above.
class RadosTestNP: public ::testing::Test {
public:
  RadosTestNP() {}
  ~RadosTestNP() override {}
};
#endif
| 3,255 | 25.048 | 71 | h |
null | ceph-main/src/test/librados/aio.cc | #include <errno.h>
#include <fcntl.h>
#include <string>
#include <sstream>
#include <utility>
#include <boost/scoped_ptr.hpp>
#include <fmt/format.h>
#include "include/err.h"
#include "include/rados/librados.h"
#include "include/types.h"
#include "include/stringify.h"
#include "include/scope_guard.h"
#include "common/errno.h"
#include "gtest/gtest.h"
#include "test.h"
#include "crimson_utils.h"
using std::ostringstream;
// Per-test helper that owns a temporary pool and an ioctx on it.
// Call init() first; the destructor tears both down iff init() succeeded.
class AioTestData
{
public:
  AioTestData()
    : m_cluster(NULL),
      m_ioctx(NULL),
      m_init(false)
  {
  }

  ~AioTestData()
  {
    // Only tear down what init() fully set up.
    if (m_init) {
      rados_ioctx_destroy(m_ioctx);
      destroy_one_pool(m_pool_name, &m_cluster);
    }
  }

  // Create a uniquely named pool and open an ioctx on it.
  // Returns "" on success, otherwise a human-readable error string.
  std::string init()
  {
    int ret;
    auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
    m_pool_name = get_temp_pool_name(pool_prefix);
    std::string err = create_one_pool(m_pool_name, &m_cluster);
    if (!err.empty()) {
      ostringstream oss;
      oss << "create_one_pool(" << m_pool_name << ") failed: error " << err;
      return oss.str();
    }
    ret = rados_ioctx_create(m_cluster, m_pool_name.c_str(), &m_ioctx);
    if (ret) {
      // Don't leak the pool if the ioctx could not be opened.
      destroy_one_pool(m_pool_name, &m_cluster);
      ostringstream oss;
      oss << "rados_ioctx_create failed: error " << ret;
      return oss.str();
    }
    m_init = true;
    return "";
  }

  rados_t m_cluster;        // cluster handle owned by this object
  rados_ioctx_t m_ioctx;    // ioctx on m_pool_name, owned by this object
  std::string m_pool_name;  // name of the temporary pool
  bool m_init;              // true once init() succeeded
};
// Oversized async I/O requests must be rejected immediately with -E2BIG
// rather than being queued against the completion.
TEST(LibRadosAio, TooBig) {
  AioTestData test_data;
  rados_completion_t completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr, &completion));
  char payload[128];
  memset(payload, 0xcc, sizeof(payload));
  // A UINT_MAX length exceeds the limit for every write-style op.
  ASSERT_EQ(-E2BIG, rados_aio_write(test_data.m_ioctx, "foo", completion,
                                    payload, UINT_MAX, 0));
  ASSERT_EQ(-E2BIG, rados_aio_write_full(test_data.m_ioctx, "foo", completion,
                                         payload, UINT_MAX));
  ASSERT_EQ(-E2BIG, rados_aio_append(test_data.m_ioctx, "foo", completion,
                                     payload, UINT_MAX));
  rados_aio_release(completion);
}
// Basic async write to the default namespace and to "nspace"; completions
// are released via scope guards even if an assertion bails out early.
TEST(LibRadosAio, SimpleWrite) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  auto sg = make_scope_guard([&] { rados_aio_release(my_completion); });
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));

  // Repeat the write in a non-default namespace.
  rados_ioctx_set_namespace(test_data.m_ioctx, "nspace");
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  auto sg2 = make_scope_guard([&] { rados_aio_release(my_completion2); });
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion2, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
}
// Write asynchronously and wait for completion.
// NOTE(review): despite the name this waits for "complete", not "safe" —
// presumably a leftover from when the two states were distinct; confirm.
TEST(LibRadosAio, WaitForSafe) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  // Alarm guards the wait against hanging forever.
  TestAlarm alarm;
  ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  rados_aio_release(my_completion);
}
// Async write/read round trip; the read buffer (256B) is larger than the
// object (128B), so the read must return exactly the written length.
TEST(LibRadosAio, RoundTrip) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[256];
  memset(buf2, 0, sizeof(buf2));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion2, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  // Short read: only sizeof(buf) bytes exist.
  ASSERT_EQ((int)sizeof(buf), rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
// Async write/read round trip with a read buffer of exactly the written
// size; the read must return the full length and matching bytes.
TEST(LibRadosAio, RoundTrip2) {
  AioTestData test_data;
  ASSERT_EQ("", test_data.init());
  char outbuf[128];
  memset(outbuf, 0xcc, sizeof(outbuf));
  rados_completion_t write_done;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr, &write_done));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo", write_done,
                               outbuf, sizeof(outbuf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(write_done));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(write_done));
  char inbuf[128];
  memset(inbuf, 0, sizeof(inbuf));
  rados_completion_t read_done;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr, &read_done));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo", read_done,
                              inbuf, sizeof(inbuf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(read_done));
  }
  ASSERT_EQ((int)sizeof(outbuf), rados_aio_get_return_value(read_done));
  ASSERT_EQ(0, memcmp(outbuf, inbuf, sizeof(outbuf)));
  rados_aio_release(write_done);
  rados_aio_release(read_done);
}
// Round trip via write/read *ops* with fadvise flags, plus an in-op CRC32C
// checksum that is verified against bufferlist::crc32c of the payload.
TEST(LibRadosAio, RoundTrip3) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  // Compound write op: write + alloc hint with DONTNEED fadvise.
  rados_write_op_t op1 = rados_create_write_op();
  rados_write_op_write(op1, buf, sizeof(buf), 0);
  rados_write_op_set_alloc_hint2(op1, 0, 0, LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
  ASSERT_EQ(0, rados_aio_write_op_operate(op1, test_data.m_ioctx, my_completion,
					  "foo", NULL, 0));
  // The op can be released as soon as it has been submitted.
  rados_release_write_op(op1);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  rados_aio_release(my_completion);
  char buf2[128];
  memset(buf2, 0, sizeof(buf2));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  // Compound read op: read + CRC32C checksum in the same round trip.
  rados_read_op_t op2 = rados_create_read_op();
  rados_read_op_read(op2, 0, sizeof(buf2), buf2, NULL, NULL);
  rados_read_op_set_flags(op2, LIBRADOS_OP_FLAG_FADVISE_NOCACHE |
			       LIBRADOS_OP_FLAG_FADVISE_RANDOM);
  ceph_le32 init_value(-1);
  // checksum[0] = number of checksum results, checksum[1] = first CRC value.
  ceph_le32 checksum[2];
  rados_read_op_checksum(op2, LIBRADOS_CHECKSUM_TYPE_CRC32C,
			 reinterpret_cast<char *>(&init_value),
			 sizeof(init_value), 0, 0, 0,
			 reinterpret_cast<char *>(&checksum),
			 sizeof(checksum), NULL);
  ASSERT_EQ(0, rados_aio_read_op_operate(op2, test_data.m_ioctx, my_completion2,
					 "foo", 0));
  rados_release_read_op(op2);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  rados_aio_release(my_completion2);
  // Cross-check the OSD-computed CRC against a locally computed one.
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  ASSERT_EQ(1U, checksum[0]);
  ASSERT_EQ(bl.crc32c(-1), checksum[1]);
}
// Two async appends to the same object, then one read that must return the
// concatenation of both payloads in order.
TEST(LibRadosAio, RoundTripAppend) {
  AioTestData test_data;
  rados_completion_t my_completion, my_completion2, my_completion3;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_append(test_data.m_ioctx, "foo",
			        my_completion, buf, sizeof(buf)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[128];
  memset(buf2, 0xdd, sizeof(buf2));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_append(test_data.m_ioctx, "foo",
			        my_completion2, buf2, sizeof(buf2)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  // Read back both appends in one shot.
  char buf3[sizeof(buf) + sizeof(buf2)];
  memset(buf3, 0, sizeof(buf3));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion3, buf3, sizeof(buf3), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  ASSERT_EQ((int)sizeof(buf3), rados_aio_get_return_value(my_completion3));
  ASSERT_EQ(0, memcmp(buf3, buf, sizeof(buf)));
  ASSERT_EQ(0, memcmp(buf3 + sizeof(buf), buf2, sizeof(buf2)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(my_completion3);
}
// Async remove of an existing object; a subsequent synchronous read must
// fail with -ENOENT.
TEST(LibRadosAio, RemoveTest) {
  char buf[128];
  char buf2[sizeof(buf)];
  rados_completion_t my_completion;
  AioTestData test_data;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(test_data.m_ioctx, "foo", buf, sizeof(buf)));
  ASSERT_EQ(0, rados_aio_remove(test_data.m_ioctx, "foo", my_completion));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  memset(buf2, 0, sizeof(buf2));
  ASSERT_EQ(-ENOENT, rados_read(test_data.m_ioctx, "foo", buf2, sizeof(buf2), 0));
  rados_aio_release(my_completion);
}
// Async xattr round trip: getxattr on a missing attr yields -ENODATA,
// then setxattr + getxattr returns the stored value.
TEST(LibRadosAio, XattrsRoundTrip) {
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  // append
  AioTestData test_data;
  ASSERT_EQ("", test_data.init());
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(test_data.m_ioctx, "foo", buf, sizeof(buf)));
  // async getxattr of a not-yet-set attribute -> -ENODATA
  rados_completion_t my_completion;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  ASSERT_EQ(0, rados_aio_getxattr(test_data.m_ioctx, "foo", my_completion, attr1, buf, sizeof(buf)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(-ENODATA, rados_aio_get_return_value(my_completion));
  rados_aio_release(my_completion);
  // async setxattr
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_setxattr(test_data.m_ioctx, "foo", my_completion2, attr1, attr1_buf, sizeof(attr1_buf)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  rados_aio_release(my_completion2);
  // async getxattr; return value is the attribute length on success
  rados_completion_t my_completion3;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_getxattr(test_data.m_ioctx, "foo", my_completion3, attr1, buf, sizeof(buf)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  ASSERT_EQ((int)sizeof(attr1_buf), rados_aio_get_return_value(my_completion3));
  rados_aio_release(my_completion3);
  // check content of attribute (getxattr above reused 'buf' as its sink)
  ASSERT_EQ(0, memcmp(attr1_buf, buf, sizeof(attr1_buf)));
}
// Async rmxattr: removing an existing attr succeeds and later getxattr
// yields -ENODATA; rmxattr on a removed *object* yields -ENOENT.
TEST(LibRadosAio, RmXattr) {
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  // append
  memset(buf, 0xaa, sizeof(buf));
  AioTestData test_data;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_append(test_data.m_ioctx, "foo", buf, sizeof(buf)));
  // async setxattr
  rados_completion_t my_completion;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  ASSERT_EQ(0, rados_aio_setxattr(test_data.m_ioctx, "foo", my_completion, attr1, attr1_buf, sizeof(attr1_buf)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  rados_aio_release(my_completion);
  // async rmxattr
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_rmxattr(test_data.m_ioctx, "foo", my_completion2, attr1));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  rados_aio_release(my_completion2);
  // async getxattr after deletion -> -ENODATA
  rados_completion_t my_completion3;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_getxattr(test_data.m_ioctx, "foo", my_completion3, attr1, buf, sizeof(buf)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  ASSERT_EQ(-ENODATA, rados_aio_get_return_value(my_completion3));
  rados_aio_release(my_completion3);
  // Test rmxattr on a removed object
  char buf2[128];
  char attr2[] = "attr2";
  char attr2_buf[] = "foo bar baz";
  memset(buf2, 0xbb, sizeof(buf2));
  ASSERT_EQ(0, rados_write(test_data.m_ioctx, "foo_rmxattr", buf2, sizeof(buf2), 0));
  // asynx setxattr
  rados_completion_t my_completion4;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion4));
  ASSERT_EQ(0, rados_aio_setxattr(test_data.m_ioctx, "foo_rmxattr", my_completion4, attr2, attr2_buf, sizeof(attr2_buf)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion4));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion4));
  rados_aio_release(my_completion4);
  // remove object
  ASSERT_EQ(0, rados_remove(test_data.m_ioctx, "foo_rmxattr"));
  // async rmxattr on non existing object -> -ENOENT
  rados_completion_t my_completion5;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion5));
  ASSERT_EQ(0, rados_aio_rmxattr(test_data.m_ioctx, "foo_rmxattr", my_completion5, attr2));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion5));
  }
  ASSERT_EQ(-ENOENT, rados_aio_get_return_value(my_completion5));
  rados_aio_release(my_completion5);
}
// Async getxattrs: set two attributes, fetch them through the async
// iterator API, and verify exactly those two name/value pairs come back.
// Fix: the completion handle was previously never released (leak).
TEST(LibRadosAio, XattrIter) {
  AioTestData test_data;
  ASSERT_EQ("", test_data.init());
  // Create an object with 2 attributes
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  char attr2[] = "attr2";
  char attr2_buf[256];
  for (size_t j = 0; j < sizeof(attr2_buf); ++j) {
    attr2_buf[j] = j % 0xff;
  }
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(test_data.m_ioctx, "foo", buf, sizeof(buf)));
  ASSERT_EQ(0, rados_setxattr(test_data.m_ioctx, "foo", attr1, attr1_buf, sizeof(attr1_buf)));
  ASSERT_EQ(0, rados_setxattr(test_data.m_ioctx, "foo", attr2, attr2_buf, sizeof(attr2_buf)));
  // call async version of getxattrs and wait for completion
  rados_completion_t my_completion;
  ASSERT_EQ(0, rados_aio_create_completion2((void*)&test_data,
	      nullptr, &my_completion));
  rados_xattrs_iter_t iter;
  ASSERT_EQ(0, rados_aio_getxattrs(test_data.m_ioctx, "foo", my_completion, &iter));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  // The completion is done with; release it (it used to be leaked here).
  rados_aio_release(my_completion);
  // loop over attributes; NULL name terminates the iteration
  int num_seen = 0;
  while (true) {
    const char *name;
    const char *val;
    size_t len;
    ASSERT_EQ(0, rados_getxattrs_next(iter, &name, &val, &len));
    if (name == NULL) {
      break;
    }
    ASSERT_LT(num_seen, 2);
    if ((strcmp(name, attr1) == 0) && (val != NULL) && (memcmp(val, attr1_buf, len) == 0)) {
      num_seen++;
      continue;
    }
    else if ((strcmp(name, attr2) == 0) && (val != NULL) && (memcmp(val, attr2_buf, len) == 0)) {
      num_seen++;
      continue;
    }
    else {
      // Unexpected attribute name or mismatched value.
      ASSERT_EQ(0, 1);
    }
  }
  rados_getxattrs_end(iter);
}
// Exercise rados_aio_is_complete by polling instead of blocking on the wait
// call; once it reports complete, the read result must be valid.
TEST(LibRadosAio, IsComplete) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[128];
  memset(buf2, 0, sizeof(buf2));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion2, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;

    // Busy-wait until the AIO completes.
    // Normally we wouldn't do this, but we want to test rados_aio_is_complete.
    while (true) {
      int is_complete = rados_aio_is_complete(my_completion2);
      if (is_complete)
	break;
    }
  }
  ASSERT_EQ((int)sizeof(buf), rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
// Exercise rados_aio_is_safe by polling the write completion, then confirm
// the data is readable.
TEST(LibRadosAio, IsSafe) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;

    // Busy-wait until the AIO completes.
    // Normally we wouldn't do this, but we want to test rados_aio_is_safe.
    while (true) {
      int is_safe = rados_aio_is_safe(my_completion);
      if (is_safe)
	break;
    }
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[128];
  memset(buf2, 0, sizeof(buf2));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion2, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ((int)sizeof(buf), rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
// Reading a nonexistent object must surface -ENOENT through the
// completion's return value (the submit itself succeeds).
TEST(LibRadosAio, ReturnValue) {
  AioTestData test_data;
  ASSERT_EQ("", test_data.init());
  rados_completion_t completion;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr, &completion));
  char readbuf[128];
  memset(readbuf, 0, sizeof(readbuf));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "nonexistent",
                              completion, readbuf, sizeof(readbuf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(completion));
  }
  ASSERT_EQ(-ENOENT, rados_aio_get_return_value(completion));
  rados_aio_release(completion);
}
// rados_aio_flush must block until in-flight writes complete, so the
// completion's return value is valid right after flush returns.
TEST(LibRadosAio, Flush) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xee, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  // No explicit wait: flush is the synchronization point.
  ASSERT_EQ(0, rados_aio_flush(test_data.m_ioctx));
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[128];
  memset(buf2, 0, sizeof(buf2));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion2, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ((int)sizeof(buf2), rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
// rados_aio_flush_async: once the flush completion fires, every previously
// submitted AIO (the write) must also be complete.
TEST(LibRadosAio, FlushAsync) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  rados_completion_t flush_completion;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr, &flush_completion));
  char buf[128];
  memset(buf, 0xee, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  ASSERT_EQ(0, rados_aio_flush_async(test_data.m_ioctx, flush_completion));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(flush_completion));
  }
  // Flush completion implies the earlier write completed too.
  ASSERT_EQ(1, rados_aio_is_complete(my_completion));
  ASSERT_EQ(1, rados_aio_is_complete(flush_completion));
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[128];
  memset(buf2, 0, sizeof(buf2));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion2, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ((int)sizeof(buf2), rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(flush_completion);
}
// write_full must truncate: after writing 128 bytes then write_full of 64,
// a 192-byte read returns only the 64 new bytes.
TEST(LibRadosAio, RoundTripWriteFull) {
  AioTestData test_data;
  rados_completion_t my_completion, my_completion2, my_completion3;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  // Replace the whole object with a smaller payload.
  char buf2[64];
  memset(buf2, 0xdd, sizeof(buf2));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_write_full(test_data.m_ioctx, "foo",
				    my_completion2, buf2, sizeof(buf2)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  char buf3[sizeof(buf) + sizeof(buf2)];
  memset(buf3, 0, sizeof(buf3));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion3, buf3, sizeof(buf3), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  // Only the write_full payload remains.
  ASSERT_EQ((int)sizeof(buf2), rados_aio_get_return_value(my_completion3));
  ASSERT_EQ(0, memcmp(buf3, buf2, sizeof(buf2)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(my_completion3);
}
// writesame: replicate a 32-byte pattern across the full 128-byte object
// and verify every 32-byte slice of the read-back equals the pattern.
TEST(LibRadosAio, RoundTripWriteSame) {
  AioTestData test_data;
  rados_completion_t my_completion, my_completion2, my_completion3;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char full[128];
  memset(full, 0xcc, sizeof(full));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, full, sizeof(full), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  /* write the same buf four times */
  char buf[32];
  size_t ws_write_len = sizeof(full);
  memset(buf, 0xdd, sizeof(buf));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_writesame(test_data.m_ioctx, "foo",
				   my_completion2, buf, sizeof(buf),
				   ws_write_len, 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion3, full, sizeof(full), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  ASSERT_EQ((int)sizeof(full), rados_aio_get_return_value(my_completion3));
  // Each 32-byte chunk of the object must match the replicated pattern.
  for (char *cmp = full; cmp < full + sizeof(full); cmp += sizeof(buf)) {
    ASSERT_EQ(0, memcmp(cmp, buf, sizeof(buf)));
  }
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(my_completion3);
}
// Async stat of a freshly written object must report the written size.
TEST(LibRadosAio, SimpleStat) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  uint64_t psize;
  time_t pmtime;
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_stat(test_data.m_ioctx, "foo",
			      my_completion2, &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(sizeof(buf), psize);
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
// aio_write_op_operate with an explicit time_t mtime: the object's stat
// must report exactly that mtime (with zero nanoseconds).
TEST(LibRadosAio, OperateMtime)
{
  AioTestData test_data;
  ASSERT_EQ("", test_data.init());

  time_t set_mtime = 1457129052;
  {
    rados_write_op_t op = rados_create_write_op();
    rados_write_op_create(op, LIBRADOS_CREATE_IDEMPOTENT, nullptr);
    rados_completion_t completion;
    ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr, &completion));
    ASSERT_EQ(0, rados_aio_write_op_operate(op, test_data.m_ioctx, completion,
                                            "foo", &set_mtime, 0));
    {
      TestAlarm alarm;
      ASSERT_EQ(0, rados_aio_wait_for_complete(completion));
    }
    ASSERT_EQ(0, rados_aio_get_return_value(completion));
    rados_aio_release(completion);
    rados_release_write_op(op);
  }
  {
    uint64_t size;
    timespec mtime;
    ASSERT_EQ(0, rados_stat2(test_data.m_ioctx, "foo", &size, &mtime));
    EXPECT_EQ(0, size);
    // The time_t-based API carries seconds only.
    EXPECT_EQ(set_mtime, mtime.tv_sec);
    EXPECT_EQ(0, mtime.tv_nsec);
  }
}
// aio_write_op_operate2 with a full timespec mtime: stat must round-trip
// both seconds and nanoseconds.
TEST(LibRadosAio, Operate2Mtime)
{
  AioTestData test_data;
  ASSERT_EQ("", test_data.init());

  timespec set_mtime{1457129052, 123456789};
  {
    rados_write_op_t op = rados_create_write_op();
    rados_write_op_create(op, LIBRADOS_CREATE_IDEMPOTENT, nullptr);
    rados_completion_t completion;
    ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr, &completion));
    ASSERT_EQ(0, rados_aio_write_op_operate2(op, test_data.m_ioctx, completion,
                                             "foo", &set_mtime, 0));
    {
      TestAlarm alarm;
      ASSERT_EQ(0, rados_aio_wait_for_complete(completion));
    }
    ASSERT_EQ(0, rados_aio_get_return_value(completion));
    rados_aio_release(completion);
    rados_release_write_op(op);
  }
  {
    uint64_t size;
    timespec mtime;
    ASSERT_EQ(0, rados_stat2(test_data.m_ioctx, "foo", &size, &mtime));
    EXPECT_EQ(0, size);
    EXPECT_EQ(set_mtime.tv_sec, mtime.tv_sec);
    EXPECT_EQ(set_mtime.tv_nsec, mtime.tv_nsec);
  }
}
// Write "foo" with different sizes in the default namespace and "nspace",
// then stat each and verify namespace isolation of the sizes.
// Fix: the first completion handle was overwritten by the second
// rados_aio_create_completion2 without being released (leak).
TEST(LibRadosAio, SimpleStatNS) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  // 128-byte "foo" in the default namespace.
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  // Release before reusing the handle variable (previously leaked here).
  rados_aio_release(my_completion);
  rados_ioctx_set_namespace(test_data.m_ioctx, "nspace");
  char buf2[64];
  memset(buf2, 0xbb, sizeof(buf2));
  // 64-byte "foo" in namespace "nspace".
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  uint64_t psize;
  time_t pmtime;
  rados_completion_t my_completion2;
  rados_ioctx_set_namespace(test_data.m_ioctx, "");
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_stat(test_data.m_ioctx, "foo",
			      my_completion2, &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  // Default namespace sees the 128-byte object.
  ASSERT_EQ(sizeof(buf), psize);
  rados_ioctx_set_namespace(test_data.m_ioctx, "nspace");
  rados_completion_t my_completion3;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_stat(test_data.m_ioctx, "foo",
			      my_completion3, &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion3));
  // "nspace" sees the 64-byte object.
  ASSERT_EQ(sizeof(buf2), psize);
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(my_completion3);
}
// Stat an object, async-remove it, then stat again; the second stat must
// fail with -ENOENT.
TEST(LibRadosAio, StatRemove) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  uint64_t psize;
  time_t pmtime;
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_stat(test_data.m_ioctx, "foo",
			      my_completion2, &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(sizeof(buf), psize);
  rados_completion_t my_completion3;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_remove(test_data.m_ioctx, "foo", my_completion3));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion3));
  // Stat after removal must report the object gone.
  uint64_t psize2;
  time_t pmtime2;
  rados_completion_t my_completion4;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion4));
  ASSERT_EQ(0, rados_aio_stat(test_data.m_ioctx, "foo",
			      my_completion4, &psize2, &pmtime2));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion4));
  }
  ASSERT_EQ(-ENOENT, rados_aio_get_return_value(my_completion4));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(my_completion3);
  rados_aio_release(my_completion4);
}
// Write an object, then asynchronously invoke the "hello"/"say_hello" object
// class method on it and verify the returned payload ("Hello, world!").
TEST(LibRadosAio, ExecuteClass) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    // BUG FIX: the original inspected the return value without waiting for
    // the write to complete; wait first (as every sibling test does), then
    // check the result.
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  char out[128];
  // The completion's return value carries the number of bytes the class
  // method wrote into 'out'.
  ASSERT_EQ(0, rados_aio_exec(test_data.m_ioctx, "foo", my_completion2,
			      "hello", "say_hello", NULL, 0, out, sizeof(out)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(13, rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, strncmp("Hello, world!", out, 13));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
using std::string;
using std::map;
using std::set;
// Two async writes at adjacent offsets (0 and 128), then one async read that
// must return the concatenation of both buffers.
TEST(LibRadosAio, MultiWrite) {
  AioTestData test_data;
  rados_completion_t my_completion, my_completion2, my_completion3;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  // First write: 128 bytes at offset 0.
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;  // watchdog against hangs
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[64];
  memset(buf2, 0xdd, sizeof(buf2));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  // Second write: 64 bytes immediately after the first write.
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion2, buf2, sizeof(buf2), sizeof(buf)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  // Read buffer is deliberately oversized; the return value tells us how
  // many bytes the object actually held.
  char buf3[(sizeof(buf) + sizeof(buf2)) * 3];
  memset(buf3, 0, sizeof(buf3));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion3, buf3, sizeof(buf3), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  ASSERT_EQ((int)(sizeof(buf) + sizeof(buf2)), rados_aio_get_return_value(my_completion3));
  ASSERT_EQ(0, memcmp(buf3, buf, sizeof(buf)));
  ASSERT_EQ(0, memcmp(buf3 + sizeof(buf), buf2, sizeof(buf2)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(my_completion3);
}
// Take an exclusive lock, release it asynchronously, and verify the lock can
// be re-acquired afterwards (which only succeeds if the unlock took effect).
TEST(LibRadosAio, AioUnlock) {
  AioTestData test_data;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_lock_exclusive(test_data.m_ioctx, "foo", "TestLock", "Cookie", "", NULL, 0));
  rados_completion_t my_completion;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  ASSERT_EQ(0, rados_aio_unlock(test_data.m_ioctx, "foo", "TestLock", "Cookie", my_completion));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  // BUG FIX: the completion was previously leaked; release it once its
  // result has been inspected, as every sibling test does.
  rados_aio_release(my_completion);
  ASSERT_EQ(0, rados_lock_exclusive(test_data.m_ioctx, "foo", "TestLock", "Cookie", "", NULL, 0));
}
// EC test cases
// Per-test fixture for the erasure-coded (EC) pool AIO tests: init() creates
// a uniquely named EC pool and an ioctx on it; the destructor tears both
// down only if init() fully succeeded.
class AioTestDataEC
{
public:
  AioTestDataEC()
    : m_cluster(NULL),
      m_ioctx(NULL),
      m_init(false)
  {
  }

  ~AioTestDataEC()
  {
    // Nothing to clean up unless init() reached the point of success.
    if (m_init) {
      rados_ioctx_destroy(m_ioctx);
      destroy_one_ec_pool(m_pool_name, &m_cluster);
    }
  }

  // Returns "" on success, or a human-readable error description on failure.
  std::string init()
  {
    int ret;
    // Prefix the pool name with the running test's name so concurrently
    // running tests do not collide.
    auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
    m_pool_name = get_temp_pool_name(pool_prefix);
    std::string err = create_one_ec_pool(m_pool_name, &m_cluster);
    if (!err.empty()) {
      ostringstream oss;
      oss << "create_one_ec_pool(" << m_pool_name << ") failed: error " << err;
      return oss.str();
    }
    ret = rados_ioctx_create(m_cluster, m_pool_name.c_str(), &m_ioctx);
    if (ret) {
      // Roll back pool creation so the destructor does not double-free.
      destroy_one_ec_pool(m_pool_name, &m_cluster);
      ostringstream oss;
      oss << "rados_ioctx_create failed: error " << ret;
      return oss.str();
    }
    m_init = true;
    return "";
  }

  rados_t m_cluster;        // cluster handle (owned once init() succeeds)
  rados_ioctx_t m_ioctx;    // ioctx on the temporary EC pool
  std::string m_pool_name;  // name of the temporary EC pool
  bool m_init;              // true iff init() completed successfully
};
// Basic async write on an EC pool: write 128 bytes into the default
// namespace, then again into namespace "nspace"; both must succeed.
TEST(LibRadosAioEC, SimpleWrite) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  // Scope guard releases the completion even if an ASSERT bails out early.
  auto sg = make_scope_guard([&] { rados_aio_release(my_completion); });
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;  // watchdog against hangs
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  // Repeat the same write in a separate namespace.
  rados_ioctx_set_namespace(test_data.m_ioctx, "nspace");
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  auto sg2 = make_scope_guard([&] { rados_aio_release(my_completion2); });
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion2, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
}
// Write 128 bytes asynchronously to an EC pool and block until the operation
// finishes, guarded by a watchdog alarm that fails the test on a hang.
TEST(LibRadosAioEC, WaitForComplete) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  ASSERT_EQ("", test_data.init());
  rados_completion_t completion;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr, &completion));
  char payload[128];
  memset(payload, 0xcc, sizeof(payload));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo", completion,
                               payload, sizeof(payload), 0));
  TestAlarm alarm;
  ASSERT_EQ(0, rados_aio_wait_for_complete(completion));
  ASSERT_EQ(0, rados_aio_get_return_value(completion));
  rados_aio_release(completion);
}
// Async write/read round trip on an EC pool with an oversized (256 byte)
// read buffer; the read must return exactly the 128 bytes written.
TEST(LibRadosAioEC, RoundTrip) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;  // watchdog against hangs
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  // Read back with a buffer larger than the object.
  char buf2[256];
  memset(buf2, 0, sizeof(buf2));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion2, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  // Only the bytes actually written are returned.
  ASSERT_EQ((int)sizeof(buf), rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
// Async write/read round trip on an EC pool with an exactly-sized (128 byte)
// read buffer.
TEST(LibRadosAioEC, RoundTrip2) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;  // watchdog against hangs
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  // Read back into a buffer of exactly the written size.
  char buf2[128];
  memset(buf2, 0, sizeof(buf2));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion2, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ((int)sizeof(buf), rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
// Append round trip on an EC pool.  EC pools require appends aligned to the
// pool's stripe width: a full-width append and a first half-width append
// succeed, but a further append at the now-unaligned tail is rejected with
// -EOPNOTSUPP.  Finally the object is read back and verified.
TEST(LibRadosAioEC, RoundTripAppend) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion, my_completion2, my_completion3, my_completion4;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  // EC pools advertise a required append alignment; use it as block size.
  int req;
  ASSERT_EQ(0, rados_ioctx_pool_requires_alignment2(test_data.m_ioctx, &req));
  ASSERT_NE(0, req);
  uint64_t alignment;
  ASSERT_EQ(0, rados_ioctx_pool_required_alignment2(test_data.m_ioctx, &alignment));
  ASSERT_NE(0U, alignment);
  int bsize = alignment;
  char *buf = new char[bsize];  // redundant (char *) cast removed
  memset(buf, 0xcc, bsize);
  // Aligned append: must succeed.
  ASSERT_EQ(0, rados_aio_append(test_data.m_ioctx, "foo",
			       my_completion, buf, bsize));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  int hbsize = bsize / 2;
  char *buf2 = new char[hbsize];
  memset(buf2, 0xdd, hbsize);
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_append(test_data.m_ioctx, "foo",
			       my_completion2, buf2, hbsize));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_append(test_data.m_ioctx, "foo",
			       my_completion3, buf2, hbsize));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  // The object tail is no longer stripe-aligned, so this append is rejected.
  EXPECT_EQ(-EOPNOTSUPP, rados_aio_get_return_value(my_completion3));
  int tbsize = bsize + hbsize;
  char *buf3 = new char[tbsize];
  memset(buf3, 0, tbsize);
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion4));
  // BUG FIX: read at most the size of buf3.  The original requested
  // bsize * 3 bytes into a buffer of only bsize * 1.5 bytes, which would
  // overflow the buffer if the object ever held more than tbsize bytes.
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion4, buf3, tbsize, 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion4));
  }
  ASSERT_EQ(tbsize, rados_aio_get_return_value(my_completion4));
  ASSERT_EQ(0, memcmp(buf3, buf, bsize));
  ASSERT_EQ(0, memcmp(buf3 + bsize, buf2, hbsize));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(my_completion3);
  rados_aio_release(my_completion4);
  delete[] buf;
  delete[] buf2;
  delete[] buf3;
}
// Exercise rados_aio_is_complete() by polling a read completion instead of
// blocking on rados_aio_wait_for_complete().
TEST(LibRadosAioEC, IsComplete) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;  // watchdog against hangs
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[128];
  memset(buf2, 0, sizeof(buf2));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion2, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;  // the alarm bounds the busy-wait below
    // Busy-wait until the AIO completes.
    // Normally we wouldn't do this, but we want to test rados_aio_is_complete.
    while (true) {
      int is_complete = rados_aio_is_complete(my_completion2);
      if (is_complete)
	break;
    }
  }
  ASSERT_EQ((int)sizeof(buf), rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
// Exercise rados_aio_is_safe() by polling a write completion instead of
// blocking on rados_aio_wait_for_complete(), then verify the data via a read.
TEST(LibRadosAioEC, IsSafe) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;  // the alarm bounds the busy-wait below
    // Busy-wait until the AIO completes.
    // Normally we wouldn't do this, but we want to test rados_aio_is_safe.
    while (true) {
      int is_safe = rados_aio_is_safe(my_completion);
      if (is_safe)
	break;
    }
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  // Read the object back and verify its contents.
  char buf2[128];
  memset(buf2, 0, sizeof(buf2));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion2, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ((int)sizeof(buf), rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
// An async read of an object that does not exist must complete with -ENOENT
// in the completion's return value.
TEST(LibRadosAioEC, ReturnValue) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  ASSERT_EQ("", test_data.init());
  rados_completion_t completion;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr, &completion));
  char readbuf[128];
  memset(readbuf, 0, sizeof(readbuf));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "nonexistent", completion,
                              readbuf, sizeof(readbuf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(completion));
  }
  ASSERT_EQ(-ENOENT, rados_aio_get_return_value(completion));
  rados_aio_release(completion);
}
// Issue an async write and rely on rados_aio_flush() to drive it to
// completion (no explicit wait on the completion), then read back and verify.
TEST(LibRadosAioEC, Flush) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xee, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  // The test assumes flush returns only after the pending write completed,
  // so the return value can be inspected immediately afterwards.
  ASSERT_EQ(0, rados_aio_flush(test_data.m_ioctx));
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[128];
  memset(buf2, 0, sizeof(buf2));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion2, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;  // watchdog against hangs
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ((int)sizeof(buf2), rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
// Like Flush, but using rados_aio_flush_async(): once the flush completion
// fires, the earlier write completion must also be complete.
TEST(LibRadosAioEC, FlushAsync) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  rados_completion_t flush_completion;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr, &flush_completion));
  char buf[128];
  memset(buf, 0xee, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  ASSERT_EQ(0, rados_aio_flush_async(test_data.m_ioctx, flush_completion));
  {
    TestAlarm alarm;  // watchdog against hangs
    ASSERT_EQ(0, rados_aio_wait_for_complete(flush_completion));
  }
  // The flush completing implies the write completed as well.
  ASSERT_EQ(1, rados_aio_is_complete(my_completion));
  ASSERT_EQ(1, rados_aio_is_complete(flush_completion));
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  // Read the object back and verify its contents.
  char buf2[128];
  memset(buf2, 0, sizeof(buf2));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion2, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ((int)sizeof(buf2), rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(flush_completion);
}
// write_full must replace the whole object: after writing 128 bytes and then
// write_full'ing 64 bytes, a read must return only the 64 new bytes.
TEST(LibRadosAioEC, RoundTripWriteFull) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion, my_completion2, my_completion3;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;  // watchdog against hangs
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  // Overwrite the whole object with a smaller buffer.
  char buf2[64];
  memset(buf2, 0xdd, sizeof(buf2));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_write_full(test_data.m_ioctx, "foo",
			       my_completion2, buf2, sizeof(buf2)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  // The read buffer is larger than the object; only buf2's bytes remain.
  char buf3[sizeof(buf) + sizeof(buf2)];
  memset(buf3, 0, sizeof(buf3));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion3, buf3, sizeof(buf3), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  ASSERT_EQ((int)sizeof(buf2), rados_aio_get_return_value(my_completion3));
  ASSERT_EQ(0, memcmp(buf3, buf2, sizeof(buf2)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(my_completion3);
}
// Async stat on an EC pool must report the size of a freshly written object.
TEST(LibRadosAioEC, SimpleStat) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;  // watchdog against hangs
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  uint64_t psize;
  time_t pmtime;
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_stat(test_data.m_ioctx, "foo",
			      my_completion2, &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(sizeof(buf), psize);
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
// Write differently sized objects named "foo" in the default namespace
// (128 bytes) and in "nspace" (64 bytes), then verify async stat reports the
// per-namespace size.
TEST(LibRadosAioEC, SimpleStatNS) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  rados_ioctx_set_namespace(test_data.m_ioctx, "nspace");
  char buf2[64];
  memset(buf2, 0xbb, sizeof(buf2));
  // BUG FIX: release the first completion before reusing the variable for a
  // second operation; the original leaked it (created twice, released once).
  rados_aio_release(my_completion);
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  // Stat in the default namespace must see the 128-byte object.
  uint64_t psize;
  time_t pmtime;
  rados_completion_t my_completion2;
  rados_ioctx_set_namespace(test_data.m_ioctx, "");
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_stat(test_data.m_ioctx, "foo",
			      my_completion2, &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(sizeof(buf), psize);
  // Stat in "nspace" must see the 64-byte object.
  rados_ioctx_set_namespace(test_data.m_ioctx, "nspace");
  rados_completion_t my_completion3;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_stat(test_data.m_ioctx, "foo",
			      my_completion3, &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion3));
  ASSERT_EQ(sizeof(buf2), psize);
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(my_completion3);
}
// EC-pool variant of StatRemove: stat sees the written object; after an
// async remove, stat fails with -ENOENT.
TEST(LibRadosAioEC, StatRemove) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;  // watchdog against hangs
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  // Async stat must report the written size.
  uint64_t psize;
  time_t pmtime;
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_stat(test_data.m_ioctx, "foo",
			      my_completion2, &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(sizeof(buf), psize);
  // Remove the object asynchronously.
  rados_completion_t my_completion3;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_remove(test_data.m_ioctx, "foo", my_completion3));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion3));
  // Stat of the removed object must now fail with -ENOENT.
  uint64_t psize2;
  time_t pmtime2;
  rados_completion_t my_completion4;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion4));
  ASSERT_EQ(0, rados_aio_stat(test_data.m_ioctx, "foo",
			      my_completion4, &psize2, &pmtime2));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion4));
  }
  ASSERT_EQ(-ENOENT, rados_aio_get_return_value(my_completion4));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(my_completion3);
  rados_aio_release(my_completion4);
}
// EC-pool variant of ExecuteClass: write an object, invoke the
// "hello"/"say_hello" object class method on it and verify the payload.
TEST(LibRadosAioEC, ExecuteClass) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;  // watchdog against hangs
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  char out[128];
  // The completion's return value carries the number of bytes written to 'out'.
  ASSERT_EQ(0, rados_aio_exec(test_data.m_ioctx, "foo", my_completion2,
			      "hello", "say_hello", NULL, 0, out, sizeof(out)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(13, rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, strncmp("Hello, world!", out, 13));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
// EC-pool variant of MultiWrite: the second write at a non-zero offset is
// rejected with -EOPNOTSUPP, so the read must return only the first write.
TEST(LibRadosAioEC, MultiWrite) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion, my_completion2, my_completion3;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  // First write at offset 0: succeeds.
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;  // watchdog against hangs
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[64];
  memset(buf2, 0xdd, sizeof(buf2));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  // Second write at offset sizeof(buf): rejected on EC pools.
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion2, buf2, sizeof(buf2), sizeof(buf)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(-EOPNOTSUPP, rados_aio_get_return_value(my_completion2));
  // Only the first 128 bytes exist on the object.
  char buf3[(sizeof(buf) + sizeof(buf2)) * 3];
  memset(buf3, 0, sizeof(buf3));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion3, buf3, sizeof(buf3), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  ASSERT_EQ((int)sizeof(buf), rados_aio_get_return_value(my_completion3));
  ASSERT_EQ(0, memcmp(buf3, buf, sizeof(buf)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(my_completion3);
}
// ===================== ceph-main/src/test/librados/aio_cxx.cc =====================
#include <errno.h>
#include <fcntl.h>
#include <sstream>
#include <string>
#include <utility>
#include <boost/scoped_ptr.hpp>
#include <fmt/format.h>
#include "gtest/gtest.h"
#include "common/errno.h"
#include "include/err.h"
#include "include/rados/librados.hpp"
#include "include/types.h"
#include "include/stringify.h"
#include "include/scope_guard.h"
#include "common/ceph_mutex.h"
#include <fmt/format.h>
#include "test_cxx.h"
#include "crimson_utils.h"
using namespace std;
using namespace librados;
// Per-test fixture for the C++ (librados::Rados/IoCtx) AIO tests: init()
// creates a uniquely named pool and opens an IoCtx on it; the destructor
// tears both down only if init() fully succeeded.
class AioTestDataPP
{
public:
  AioTestDataPP()
    : m_init(false),
      m_oid("foo")
  {
  }

  ~AioTestDataPP()
  {
    // Nothing to clean up unless init() reached the point of success.
    if (m_init) {
      m_ioctx.close();
      destroy_one_pool_pp(m_pool_name, m_cluster);
    }
  }

  std::string init()
  {
    return init({});
  }

  // Returns "" on success, or a human-readable error description on failure.
  // 'config' is forwarded to pool creation as extra cluster configuration.
  std::string init(const std::map<std::string, std::string> &config)
  {
    int ret;
    // Derive pool and object names from the running test so concurrently
    // running tests do not collide.
    auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
    m_pool_name = get_temp_pool_name(pool_prefix);
    std::string err = create_one_pool_pp(m_pool_name, m_cluster, config);
    if (!err.empty()) {
      ostringstream oss;
      oss << "create_one_pool(" << m_pool_name << ") failed: error " << err;
      return oss.str();
    }
    ret = m_cluster.ioctx_create(m_pool_name.c_str(), m_ioctx);
    if (ret) {
      // Roll back pool creation so the destructor does not double-free.
      destroy_one_pool_pp(m_pool_name, m_cluster);
      ostringstream oss;
      oss << "rados_ioctx_create failed: error " << ret;
      return oss.str();
    }
    m_oid = fmt::format("oid_{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
    m_init = true;
    return "";
  }

  Rados m_cluster;          // cluster handle (owned once init() succeeds)
  IoCtx m_ioctx;            // ioctx on the temporary pool
  std::string m_pool_name;  // name of the temporary pool
  bool m_init;              // true iff init() completed successfully
  std::string m_oid;        // per-test object name used by the tests
};
// Async writes/appends whose length exceeds what the API can represent must
// be rejected up front with -E2BIG.
TEST(LibRadosAio, TooBigPP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  bufferlist empty_bl;
  auto completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_EQ(-E2BIG, test_data.m_ioctx.aio_write(test_data.m_oid, completion.get(),
                                                empty_bl, UINT_MAX, 0));
  ASSERT_EQ(-E2BIG, test_data.m_ioctx.aio_append(test_data.m_oid, completion.get(),
                                                 empty_bl, UINT_MAX));
  // ioctx.aio_write_full no way to overflow bl.length()
}
// Verify pool quota enforcement: with OPERATION_FULL_TRY, writes start
// failing with -EDQUOT once the 4 KiB quota is exceeded; without FULL_TRY a
// write to a full pool blocks instead of failing.
TEST(LibRadosAio, PoolQuotaPP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
  string p = get_temp_pool_name(pool_prefix);
  ASSERT_EQ(0, test_data.m_cluster.pool_create(p.c_str()));
  IoCtx ioctx;
  ASSERT_EQ(0, test_data.m_cluster.ioctx_create(p.c_str(), ioctx));
  ioctx.application_enable("rados", true);

  // Cap the pool at 4096 bytes via a mon command.
  bufferlist inbl;
  ASSERT_EQ(0, test_data.m_cluster.mon_command(
      "{\"prefix\": \"osd pool set-quota\", \"pool\": \"" + p +
      "\", \"field\": \"max_bytes\", \"val\": \"4096\"}",
      inbl, NULL, NULL));

  // Write 4 KiB objects until the quota kicks in; FULL_TRY makes the op
  // fail with -EDQUOT instead of blocking.
  bufferlist bl;
  bufferptr z(4096);
  bl.append(z);
  int n;
  for (n = 0; n < 1024; ++n) {
    ObjectWriteOperation op;
    op.write_full(bl);
    auto completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
    ASSERT_EQ(0, ioctx.aio_operate(test_data.m_oid + stringify(n),
				   completion.get(), &op,
				   librados::OPERATION_FULL_TRY));
    completion->wait_for_complete();
    int r = completion->get_return_value();
    if (r == -EDQUOT)
      break;
    ASSERT_EQ(0, r);
    sleep(1);  // give the OSDs time to propagate pool fullness
  }
  ASSERT_LT(n, 1024);  // the quota must have been hit before 1024 writes

  // make sure we have latest map that marked the pool full
  test_data.m_cluster.wait_for_latest_osdmap();

  // make sure we block without FULL_TRY
  {
    ObjectWriteOperation op;
    op.write_full(bl);
    auto completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
    ASSERT_EQ(0, ioctx.aio_operate("bar", completion.get(), &op, 0));
    sleep(5);
    // Still pending: the write is blocked on the full pool.
    ASSERT_FALSE(completion->is_complete());
  }

  ioctx.close();
  ASSERT_EQ(0, test_data.m_cluster.pool_delete(p.c_str()));
}
// Basic async write via the C++ API: once in the default namespace, once in
// namespace "nspace"; both must succeed.
TEST(LibRadosAio, SimpleWritePP) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  {
    AioTestDataPP test_data;
    ASSERT_EQ("", test_data.init());
    auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
    ASSERT_TRUE(my_completion);
    ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					     bl1, sizeof(buf), 0));
    {
      TestAlarm alarm;  // watchdog against hangs
      ASSERT_EQ(0, my_completion->wait_for_complete());
    }
    ASSERT_EQ(0, my_completion->get_return_value());
  }

  // Repeat the write in a separate namespace (fresh pool/fixture).
  {
    AioTestDataPP test_data;
    ASSERT_EQ("", test_data.init());
    test_data.m_ioctx.set_namespace("nspace");
    auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
    ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					     bl1, sizeof(buf), 0));
    {
      TestAlarm alarm;
      ASSERT_EQ(0, my_completion->wait_for_complete());
    }
    ASSERT_EQ(0, my_completion->get_return_value());
  }
}
// Write 128 bytes asynchronously via the C++ API and block until the
// operation completes, guarded by a watchdog alarm.
TEST(LibRadosAio, WaitForSafePP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(completion);
  char payload[128];
  memset(payload, 0xcc, sizeof(payload));
  bufferlist wbl;
  wbl.append(payload, sizeof(payload));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, completion.get(),
                                           wbl, sizeof(payload), 0));
  TestAlarm alarm;
  ASSERT_EQ(0, completion->wait_for_complete());
  ASSERT_EQ(0, completion->get_return_value());
}
// Async write/read round trip via the C++ API; the read must return exactly
// the 128 bytes written.
TEST(LibRadosAio, RoundTripPP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;  // watchdog against hangs
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  // Read the object back and verify length and contents.
  bufferlist bl2;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion2.get(),
					  &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ((int)sizeof(buf), my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), bl2.length());
  ASSERT_EQ(0, memcmp(buf, bl2.c_str(), sizeof(buf)));
}
// Second write/read round trip via the C++ API (same flow as RoundTripPP).
TEST(LibRadosAio, RoundTripPP2) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;  // watchdog against hangs
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  // Read the object back and verify length and contents.
  bufferlist bl2;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion2.get(),
					  &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ((int)sizeof(buf), my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), bl2.length());
  ASSERT_EQ(0, memcmp(buf, bl2.c_str(), sizeof(buf)));
}
//using ObjectWriteOperation/ObjectReadOperation with iohint
// Round trip through compound operations with fadvise I/O hints, and
// additionally verify a CRC32C checksum computed server-side via
// ObjectReadOperation::checksum against a locally computed crc32c.
TEST(LibRadosAio, RoundTripPP3)
{
  Rados cluster;
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
  std::string pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_pool_pp(pool_name, cluster));
  IoCtx ioctx;
  cluster.ioctx_create(pool_name.c_str(), ioctx);
  auto my_completion1 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ObjectWriteOperation op;
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  op.write(0, bl);
  op.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
  ioctx.aio_operate("test_obj", my_completion1.get(), &op);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion1->wait_for_complete());
  }
  EXPECT_EQ(0, my_completion1->get_return_value());
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  bl.clear();
  ObjectReadOperation op1;
  op1.read(0, sizeof(buf), &bl, NULL);
  op1.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED|LIBRADOS_OP_FLAG_FADVISE_RANDOM);
  // The checksum op takes an encoded init value (-1 matches bl.crc32c(-1)).
  bufferlist init_value_bl;
  encode(static_cast<int32_t>(-1), init_value_bl);
  bufferlist csum_bl;
  // offset 0, length 0, chunk_size 0 => checksum the whole object as one chunk.
  op1.checksum(LIBRADOS_CHECKSUM_TYPE_CRC32C, init_value_bl,
	       0, 0, 0, &csum_bl, nullptr);
  ioctx.aio_operate("test_obj", my_completion2.get(), &op1, 0);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  EXPECT_EQ(0, my_completion2->get_return_value());
  ASSERT_EQ(0, memcmp(buf, bl.c_str(), sizeof(buf)));
  // Result encoding: a u32 count followed by one u32 checksum per chunk.
  ASSERT_EQ(8U, csum_bl.length());
  auto csum_bl_it = csum_bl.cbegin();
  uint32_t csum_count;
  uint32_t csum;
  decode(csum_count, csum_bl_it);
  ASSERT_EQ(1U, csum_count);
  decode(csum, csum_bl_it);
  ASSERT_EQ(bl.crc32c(-1), csum);
  ioctx.remove("test_obj");
  destroy_one_pool_pp(pool_name, cluster);
}
// Write an object, then read it back with aio_sparse_read and verify the
// returned extent map plus data match the written buffer.
TEST(LibRadosAio, RoundTripSparseReadPP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  // Sparse read returns an offset->length extent map alongside the data.
  std::map<uint64_t, uint64_t> extents;
  bufferlist bl2;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_sparse_read(test_data.m_oid, my_completion2.get(),
						 &extents, &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  // Shared helper: checks the extent map is consistent with bl1's contents.
  assert_eq_sparse(bl1, extents, bl2);
}
TEST(LibRadosAioPP, ReadIntoBufferlist) {
  // here we test reading into a non-empty bufferlist referencing existing
  // buffers
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  // bl2 wraps the stack buffer buf2 without copying (create_static), so the
  // read should land directly in buf2; verified via memcmp on buf2 below.
  bufferlist bl2;
  char buf2[sizeof(buf)];
  memset(buf2, 0xbb, sizeof(buf2));
  bl2.append(buffer::create_static(sizeof(buf2), buf2));
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion2.get(),
					  &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ((int)sizeof(buf), my_completion2->get_return_value());
  // Compare the backing buffer itself, proving the read filled it in place.
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
}
// Async xattr round trip: getxattr on a fresh object yields -ENODATA,
// then aio_setxattr followed by aio_getxattr returns the stored value.
TEST(LibRadosAioPP, XattrsRoundTripPP) {
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  memset(buf, 0xaa, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, test_data.m_ioctx.append(test_data.m_oid, bl1, sizeof(buf)));
  bufferlist bl2;
  // async getxattr
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_EQ(0, test_data.m_ioctx.aio_getxattr(test_data.m_oid, my_completion.get(), attr1, bl2));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  // No xattr set yet, so the op must report -ENODATA.
  ASSERT_EQ(-ENODATA, my_completion->get_return_value());
  // append
  bufferlist bl3;
  bl3.append(attr1_buf, sizeof(attr1_buf));
  // async setxattr
  // NOTE(review): test_data2/test_data3 below create extra clusters/pools
  // that are never used (all ops go through test_data) — presumably
  // leftover scaffolding; candidate for cleanup.
  AioTestDataPP test_data2;
  ASSERT_EQ("", test_data2.init());
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_EQ(0, test_data.m_ioctx.aio_setxattr(test_data.m_oid, my_completion2.get(), attr1, bl3));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  // async getxattr
  bufferlist bl4;
  AioTestDataPP test_data3;
  ASSERT_EQ("", test_data3.init());
  auto my_completion3 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_EQ(0, test_data.m_ioctx.aio_getxattr(test_data.m_oid, my_completion3.get(), attr1, bl4));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion3->wait_for_complete());
  }
  // getxattr's return value is the attribute length.
  ASSERT_EQ((int)sizeof(attr1_buf), my_completion3->get_return_value());
  // check content of attribute
  ASSERT_EQ(0, memcmp(bl4.c_str(), attr1_buf, sizeof(attr1_buf)));
}
// Async rmxattr coverage: set an xattr, remove it, confirm a subsequent
// getxattr reports -ENODATA; then verify rmxattr on a removed object
// yields -ENOENT.
TEST(LibRadosAioPP, RmXattrPP) {
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  memset(buf, 0xaa, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, test_data.m_ioctx.append(test_data.m_oid, bl1, sizeof(buf)));
  // async setxattr
  bufferlist bl2;
  bl2.append(attr1_buf, sizeof(attr1_buf));
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_EQ(0, test_data.m_ioctx.aio_setxattr(test_data.m_oid, my_completion.get(), attr1, bl2));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  // async rmxattr
  // NOTE(review): test_data2..test_data5 create extra clusters/pools that
  // are never used (all ops go through test_data); kept for behavior
  // parity but candidates for cleanup.
  AioTestDataPP test_data2;
  ASSERT_EQ("", test_data2.init());
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_EQ(0, test_data.m_ioctx.aio_rmxattr(test_data.m_oid, my_completion2.get(), attr1));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  // async getxattr: attribute is gone, expect -ENODATA
  AioTestDataPP test_data3;
  ASSERT_EQ("", test_data3.init());
  auto my_completion3 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  bufferlist bl3;
  ASSERT_EQ(0, test_data.m_ioctx.aio_getxattr(test_data.m_oid, my_completion3.get(), attr1, bl3));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion3->wait_for_complete());
  }
  ASSERT_EQ(-ENODATA, my_completion3->get_return_value());
  // Test rmxattr on a removed object
  char buf2[128];
  char attr2[] = "attr2";
  char attr2_buf[] = "foo bar baz";
  memset(buf2, 0xbb, sizeof(buf2));
  bufferlist bl21;
  // Fix: append buf2 (the buffer prepared just above), not buf — the
  // original appended buf, leaving buf2's contents unused.
  bl21.append(buf2, sizeof(buf2));
  ASSERT_EQ(0, test_data.m_ioctx.write("foo_rmxattr", bl21, sizeof(buf2), 0));
  bufferlist bl22;
  bl22.append(attr2_buf, sizeof(attr2_buf));
  // async setxattr
  AioTestDataPP test_data4;
  ASSERT_EQ("", test_data4.init());
  auto my_completion4 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_EQ(0, test_data.m_ioctx.aio_setxattr("foo_rmxattr", my_completion4.get(), attr2, bl22));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion4->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion4->get_return_value());
  // remove object
  ASSERT_EQ(0, test_data.m_ioctx.remove("foo_rmxattr"));
  // async rmxattr on non existing object
  AioTestDataPP test_data5;
  ASSERT_EQ("", test_data5.init());
  auto my_completion5 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_EQ(0, test_data.m_ioctx.aio_rmxattr("foo_rmxattr", my_completion5.get(), attr2));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion5->wait_for_complete());
  }
  ASSERT_EQ(-ENOENT, my_completion5->get_return_value());
}
// Set two xattrs synchronously, list them via aio_getxattrs, and verify
// each returned attribute's bytes; any unexpected key fails the test.
TEST(LibRadosIoPP, XattrListPP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  // create an object with 2 attributes
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  char attr2[] = "attr2";
  char attr2_buf[256];
  for (size_t j = 0; j < sizeof(attr2_buf); ++j) {
    attr2_buf[j] = j % 0xff;
  }
  memset(buf, 0xaa, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.append(test_data.m_oid, bl1, sizeof(buf)));
  bufferlist bl2;
  bl2.append(attr1_buf, sizeof(attr1_buf));
  ASSERT_EQ(0, test_data.m_ioctx.setxattr(test_data.m_oid, attr1, bl2));
  bufferlist bl3;
  bl3.append(attr2_buf, sizeof(attr2_buf));
  ASSERT_EQ(0, test_data.m_ioctx.setxattr(test_data.m_oid, attr2, bl3));
  // call async version of getxattrs
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  std::map<std::string, bufferlist> attrset;
  ASSERT_EQ(0, test_data.m_ioctx.aio_getxattrs(test_data.m_oid, my_completion.get(), attrset));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  for (std::map<std::string, bufferlist>::iterator i = attrset.begin();
       i != attrset.end(); ++i) {
    if (i->first == string(attr1)) {
      ASSERT_EQ(0, memcmp(i->second.c_str(), attr1_buf, sizeof(attr1_buf)));
    }
    else if (i->first == string(attr2)) {
      ASSERT_EQ(0, memcmp(i->second.c_str(), attr2_buf, sizeof(attr2_buf)));
    }
    else {
      // Unexpected attribute name: force a failure.
      ASSERT_EQ(0, 1);
    }
  }
}
// Exercise AioCompletion::is_complete() by busy-polling a read instead of
// calling wait_for_complete().
TEST(LibRadosAio, IsCompletePP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  bufferlist bl2;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion2.get(),
					  &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    // Busy-wait until the AIO completes.
    // Normally we wouldn't do this, but we want to test is_complete.
    while (true) {
      int is_complete = my_completion2->is_complete();
      if (is_complete)
	break;
    }
  }
  ASSERT_EQ((int)sizeof(buf), my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), bl2.length());
  ASSERT_EQ(0, memcmp(buf, bl2.c_str(), sizeof(buf)));
}
// Busy-poll completion of a write, then read the data back normally.
// NOTE(review): the inline comment mentions rados_aio_is_safe but the code
// polls is_complete(); presumably updated after is_safe was retired in
// favor of is_complete — confirm against librados history.
TEST(LibRadosAio, IsSafePP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    // Busy-wait until the AIO completes.
    // Normally we wouldn't do this, but we want to test rados_aio_is_safe.
    while (true) {
      int is_complete = my_completion->is_complete();
      if (is_complete)
	break;
    }
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  bufferlist bl2;
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion2.get(),
					  &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ((int)sizeof(buf), my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), bl2.length());
  ASSERT_EQ(0, memcmp(buf, bl2.c_str(), sizeof(buf)));
}
// Reading a nonexistent object must surface -ENOENT through the
// completion's return value (the submit itself still succeeds).
TEST(LibRadosAio, ReturnValuePP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(completion);
  bufferlist out_bl;
  ASSERT_EQ(0, test_data.m_ioctx.aio_read("nonexistent", completion.get(),
                                          &out_bl, 128, 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, completion->wait_for_complete());
  }
  ASSERT_EQ(-ENOENT, completion->get_return_value());
}
// Issue an async write, then use the blocking aio_flush() instead of
// waiting on the completion; after flush the write must be complete.
TEST(LibRadosAio, FlushPP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xee, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  // aio_flush blocks until all outstanding aio on this ioctx completes,
  // so the completion can be queried directly afterwards.
  ASSERT_EQ(0, test_data.m_ioctx.aio_flush());
  ASSERT_EQ(0, my_completion->get_return_value());
  bufferlist bl2;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion2.get(),
					  &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ((int)sizeof(buf), my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), bl2.length());
  ASSERT_EQ(0, memcmp(buf, bl2.c_str(), sizeof(buf)));
}
// Like FlushPP, but using aio_flush_async: waiting on the flush completion
// must imply that the earlier write completion has also fired.
TEST(LibRadosAio, FlushAsyncPP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  auto flush_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xee, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  ASSERT_EQ(0, test_data.m_ioctx.aio_flush_async(flush_completion.get()));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, flush_completion->wait_for_complete());
  }
  // The flush completing guarantees the pending write completed too.
  ASSERT_EQ(1, my_completion->is_complete());
  ASSERT_EQ(1, flush_completion->is_complete());
  ASSERT_EQ(0, my_completion->get_return_value());
  bufferlist bl2;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion2.get(),
					  &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ((int)sizeof(buf), my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), bl2.length());
  ASSERT_EQ(0, memcmp(buf, bl2.c_str(), sizeof(buf)));
}
// Write 128 bytes, then aio_write_full with a smaller 64-byte buffer:
// write_full truncates the object, so a subsequent read of 128 bytes must
// return only the 64 new bytes.
TEST(LibRadosAio, RoundTripWriteFullPP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  char buf2[64];
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_write_full(test_data.m_oid, my_completion2.get(), bl2));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  bufferlist bl3;
  auto my_completion3 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion3);
  // Request the original 128 bytes; only 64 remain after write_full.
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion3.get(),
					  &bl3, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion3->wait_for_complete());
  }
  ASSERT_EQ((int)sizeof(buf2), my_completion3->get_return_value());
  ASSERT_EQ(sizeof(buf2), bl3.length());
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf2, sizeof(buf2)));
}
//using ObjectWriteOperation/ObjectReadOperation with iohint
// write_full via a compound op with fadvise hints, then read back and
// compare the contents.
TEST(LibRadosAio, RoundTripWriteFullPP2)
{
  Rados cluster;
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
  std::string pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_pool_pp(pool_name, cluster));
  IoCtx ioctx;
  cluster.ioctx_create(pool_name.c_str(), ioctx);
  auto my_completion1 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ObjectWriteOperation op;
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  // Fix: append with an explicit length. buf is filled with 0xcc and has
  // no NUL terminator, so the single-argument append (which treats its
  // argument as a C string) would read past the end of the array.
  bl.append(buf, sizeof(buf));
  op.write_full(bl);
  op.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
  ioctx.aio_operate("test_obj", my_completion1.get(), &op);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion1->wait_for_complete());
  }
  EXPECT_EQ(0, my_completion1->get_return_value());
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  bl.clear();
  ObjectReadOperation op1;
  op1.read(0, sizeof(buf), &bl, NULL);
  op1.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED|LIBRADOS_OP_FLAG_FADVISE_RANDOM);
  ioctx.aio_operate("test_obj", my_completion2.get(), &op1, 0);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  EXPECT_EQ(0, my_completion2->get_return_value());
  ASSERT_EQ(0, memcmp(buf, bl.c_str(), sizeof(buf)));
  ioctx.remove("test_obj");
  destroy_one_pool_pp(pool_name, cluster);
}
// aio_writesame: replicate a 32-byte pattern across 128 bytes, then read
// the full object and verify each 32-byte chunk matches the pattern.
TEST(LibRadosAio, RoundTripWriteSamePP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char full[128];
  memset(full, 0xcc, sizeof(full));
  bufferlist bl1;
  bl1.append(full, sizeof(full));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(full), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  /* write the same buf four times */
  char buf[32];
  size_t ws_write_len = sizeof(full);
  memset(buf, 0xdd, sizeof(buf));
  bufferlist bl2;
  bl2.append(buf, sizeof(buf));
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_writesame(test_data.m_oid, my_completion2.get(), bl2,
					       ws_write_len, 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  bufferlist bl3;
  auto my_completion3 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion3);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion3.get(),
					  &bl3, sizeof(full), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion3->wait_for_complete());
  }
  ASSERT_EQ((int)sizeof(full), my_completion3->get_return_value());
  ASSERT_EQ(sizeof(full), bl3.length());
  // Every 32-byte chunk of the object must equal the writesame pattern.
  for (char *cmp = bl3.c_str(); cmp < bl3.c_str() + bl3.length();
       cmp += sizeof(buf)) {
    ASSERT_EQ(0, memcmp(cmp, buf, sizeof(buf)));
  }
}
// writesame via ObjectWriteOperation with fadvise hints: replicate a
// 128-byte pattern four times, read back 512 bytes and verify each chunk.
TEST(LibRadosAio, RoundTripWriteSamePP2)
{
  Rados cluster;
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
  std::string pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_pool_pp(pool_name, cluster));
  IoCtx ioctx;
  cluster.ioctx_create(pool_name.c_str(), ioctx);
  auto wr_cmpl = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ObjectWriteOperation op;
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  op.writesame(0, sizeof(buf) * 4, bl);
  op.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
  ioctx.aio_operate("test_obj", wr_cmpl.get(), &op);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, wr_cmpl->wait_for_complete());
  }
  EXPECT_EQ(0, wr_cmpl->get_return_value());
  boost::scoped_ptr<AioCompletion>
			rd_cmpl(cluster.aio_create_completion(0, 0));
  char *cmp;
  char full[sizeof(buf) * 4];
  memset(full, 0, sizeof(full));
  bufferlist fl;
  fl.append(full, sizeof(full));
  ObjectReadOperation op1;
  op1.read(0, sizeof(full), &fl, NULL);
  op1.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
  ioctx.aio_operate("test_obj", rd_cmpl.get(), &op1, 0);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rd_cmpl->wait_for_complete());
  }
  EXPECT_EQ(0, rd_cmpl->get_return_value());
  // Each 128-byte chunk of the 512-byte object must equal the pattern.
  for (cmp = fl.c_str(); cmp < fl.c_str() + fl.length(); cmp += sizeof(buf)) {
    ASSERT_EQ(0, memcmp(cmp, buf, sizeof(buf)));
  }
  ioctx.remove("test_obj");
  destroy_one_pool_pp(pool_name, cluster);
}
// Write an object, then aio_stat it and verify the reported size matches
// the number of bytes written.
TEST(LibRadosAio, SimpleStatPPNS) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  char payload[128];
  memset(payload, 0xcc, sizeof(payload));
  bufferlist write_bl;
  write_bl.append(payload, sizeof(payload));
  auto write_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(write_completion);
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, write_completion.get(),
                                           write_bl, sizeof(payload), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, write_completion->wait_for_complete());
  }
  ASSERT_EQ(0, write_completion->get_return_value());
  auto stat_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(stat_completion);
  uint64_t psize;
  time_t pmtime;
  ASSERT_EQ(0, test_data.m_ioctx.aio_stat(test_data.m_oid, stat_completion.get(),
                                          &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, stat_completion->wait_for_complete());
  }
  ASSERT_EQ(0, stat_completion->get_return_value());
  ASSERT_EQ(sizeof(payload), psize);
}
// Write an object, then aio_stat it and check the reported size (same
// sequence as SimpleStatPPNS; kept as a separate named case).
TEST(LibRadosAio, SimpleStatPP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  uint64_t psize;
  time_t pmtime;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_stat(test_data.m_oid, my_completion2.get(),
					  &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), psize);
}
// Create an object with an explicit mtime (second resolution) via
// ObjectWriteOperation::mtime, then stat2 and verify seconds match and
// nanoseconds are zero.
TEST(LibRadosAio, OperateMtime)
{
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  time_t set_mtime = 1457129052;
  {
    auto c = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
    librados::ObjectWriteOperation op;
    op.mtime(&set_mtime);
    op.create(false);
    ASSERT_EQ(0, test_data.m_ioctx.aio_operate(test_data.m_oid, c.get(), &op));
    {
      TestAlarm alarm;
      ASSERT_EQ(0, c->wait_for_complete());
    }
    ASSERT_EQ(0, c->get_return_value());
  }
  {
    uint64_t size;
    timespec mtime;
    ASSERT_EQ(0, test_data.m_ioctx.stat2(test_data.m_oid, &size, &mtime));
    EXPECT_EQ(0, size); // create(false) with no data => empty object
    EXPECT_EQ(set_mtime, mtime.tv_sec);
    EXPECT_EQ(0, mtime.tv_nsec);
  }
}
// Same as OperateMtime but with mtime2/timespec: nanosecond-resolution
// mtimes must round-trip through stat2 exactly.
TEST(LibRadosAio, OperateMtime2)
{
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  timespec set_mtime{1457129052, 123456789};
  {
    auto c = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
    librados::ObjectWriteOperation op;
    op.mtime2(&set_mtime);
    op.create(false);
    ASSERT_EQ(0, test_data.m_ioctx.aio_operate(test_data.m_oid, c.get(), &op));
    {
      TestAlarm alarm;
      ASSERT_EQ(0, c->wait_for_complete());
    }
    ASSERT_EQ(0, c->get_return_value());
  }
  {
    uint64_t size;
    timespec mtime;
    ASSERT_EQ(0, test_data.m_ioctx.stat2(test_data.m_oid, &size, &mtime));
    EXPECT_EQ(0, size);
    EXPECT_EQ(set_mtime.tv_sec, mtime.tv_sec);
    EXPECT_EQ(set_mtime.tv_nsec, mtime.tv_nsec);
  }
}
// Write, stat, aio_remove, then stat again: the second stat must report
// -ENOENT once the object is gone.
TEST(LibRadosAio, StatRemovePP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  uint64_t psize;
  time_t pmtime;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_stat(test_data.m_oid, my_completion2.get(),
					  &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), psize);
  uint64_t psize2;
  time_t pmtime2;
  // Remove the object asynchronously.
  auto my_completion3 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion3);
  ASSERT_EQ(0, test_data.m_ioctx.aio_remove(test_data.m_oid, my_completion3.get()));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion3->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion3->get_return_value());
  // Stat after removal must fail with -ENOENT.
  auto my_completion4 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion4);
  ASSERT_EQ(0, test_data.m_ioctx.aio_stat(test_data.m_oid, my_completion4.get(),
					  &psize2, &pmtime2));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion4->wait_for_complete());
  }
  ASSERT_EQ(-ENOENT, my_completion4->get_return_value());
}
// aio_exec: invoke the "hello" object class's "say_hello" method on an
// existing object and verify the returned payload.
TEST(LibRadosAio, ExecuteClassPP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  bufferlist in, out;
  // Class "hello", method "say_hello", empty input; output lands in `out`.
  ASSERT_EQ(0, test_data.m_ioctx.aio_exec(test_data.m_oid, my_completion2.get(),
					  "hello", "say_hello", in, &out));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  ASSERT_EQ(std::string("Hello, world!"), std::string(out.c_str(), out.length()));
}
using std::string;
using std::map;
using std::set;
// End-to-end omap coverage via compound ops: set keys + header, a failing
// omap_cmp assertion, paged/filtered key and value listing, key removal,
// omap_clear, and finally that omap_clear removes both keys and header.
TEST(LibRadosAio, OmapPP) {
  Rados cluster;
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
  std::string pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_pool_pp(pool_name, cluster));
  IoCtx ioctx;
  cluster.ioctx_create(pool_name.c_str(), ioctx);
  string header_str = "baz";
  bufferptr bp(header_str.c_str(), header_str.size() + 1);
  bufferlist header_to_set;
  header_to_set.push_back(bp);
  map<string, bufferlist> to_set;
  // Step 1: set three keys ("foo", "foo2", "qfoo3") and the omap header.
  {
    boost::scoped_ptr<AioCompletion> my_completion(cluster.aio_create_completion(0, 0));
    ObjectWriteOperation op;
    to_set["foo"] = header_to_set;
    to_set["foo2"] = header_to_set;
    to_set["qfoo3"] = header_to_set;
    op.omap_set(to_set);
    op.omap_set_header(header_to_set);
    ioctx.aio_operate("test_obj", my_completion.get(), &op);
    {
      TestAlarm alarm;
      ASSERT_EQ(0, my_completion->wait_for_complete());
    }
    EXPECT_EQ(0, my_completion->get_return_value());
  }
  // Step 2: omap_cmp with a value that does not match must cancel the op
  // (-ECANCELED both on the completion and in the per-op rval).
  {
    boost::scoped_ptr<AioCompletion> my_completion(cluster.aio_create_completion(0, 0));
    ObjectReadOperation op;
    map<string, pair<bufferlist, int> > assertions;
    bufferlist val;
    val.append(string("bar"));
    assertions["foo"] = pair<bufferlist, int>(val, CEPH_OSD_CMPXATTR_OP_EQ);
    int r;
    op.omap_cmp(assertions, &r);
    ioctx.aio_operate("test_obj", my_completion.get(), &op, 0);
    {
      TestAlarm alarm;
      ASSERT_EQ(0, my_completion->wait_for_complete());
    }
    EXPECT_EQ(-ECANCELED, my_completion->get_return_value());
    ASSERT_EQ(-ECANCELED, r);
  }
  // Step 3: listing — first key only, keys after "foo", lookup by key set,
  // the header, and keys after "foo2" filtered by prefix "q".
  {
    boost::scoped_ptr<AioCompletion> my_completion(cluster.aio_create_completion(0, 0));
    ObjectReadOperation op;
    set<string> set_got;
    map<string, bufferlist> map_got;
    set<string> to_get;
    map<string, bufferlist> got3;
    map<string, bufferlist> got4;
    bufferlist header;
    op.omap_get_keys2("", 1, &set_got, nullptr, 0);
    op.omap_get_vals2("foo", 1, &map_got, nullptr, 0);
    to_get.insert("foo");
    to_get.insert("qfoo3");
    op.omap_get_vals_by_keys(to_get, &got3, 0);
    op.omap_get_header(&header, 0);
    op.omap_get_vals2("foo2", "q", 1, &got4, nullptr, 0);
    ioctx.aio_operate("test_obj", my_completion.get(), &op, 0);
    {
      TestAlarm alarm;
      ASSERT_EQ(0, my_completion->wait_for_complete());
    }
    EXPECT_EQ(0, my_completion->get_return_value());
    ASSERT_EQ(header.length(), header_to_set.length());
    ASSERT_EQ(set_got.size(), (unsigned)1);
    ASSERT_EQ(*set_got.begin(), "foo");
    ASSERT_EQ(map_got.size(), (unsigned)1);
    ASSERT_EQ(map_got.begin()->first, "foo2");
    ASSERT_EQ(got3.size(), (unsigned)2);
    ASSERT_EQ(got3.begin()->first, "foo");
    ASSERT_EQ(got3.rbegin()->first, "qfoo3");
    ASSERT_EQ(got4.size(), (unsigned)1);
    ASSERT_EQ(got4.begin()->first, "qfoo3");
  }
  // Step 4: remove key "foo2".
  {
    boost::scoped_ptr<AioCompletion> my_completion(cluster.aio_create_completion(0, 0));
    ObjectWriteOperation op;
    set<string> to_remove;
    to_remove.insert("foo2");
    op.omap_rm_keys(to_remove);
    ioctx.aio_operate("test_obj", my_completion.get(), &op);
    {
      TestAlarm alarm;
      ASSERT_EQ(0, my_completion->wait_for_complete());
    }
    EXPECT_EQ(0, my_completion->get_return_value());
  }
  // Step 5: two keys should remain ("foo" and "qfoo3").
  {
    boost::scoped_ptr<AioCompletion> my_completion(cluster.aio_create_completion(0, 0));
    ObjectReadOperation op;
    set<string> set_got;
    op.omap_get_keys2("", -1, &set_got, nullptr, 0);
    ioctx.aio_operate("test_obj", my_completion.get(), &op, 0);
    {
      TestAlarm alarm;
      ASSERT_EQ(0, my_completion->wait_for_complete());
    }
    EXPECT_EQ(0, my_completion->get_return_value());
    ASSERT_EQ(set_got.size(), (unsigned)2);
  }
  // Step 6: omap_clear leaves no keys.
  {
    boost::scoped_ptr<AioCompletion> my_completion(cluster.aio_create_completion(0, 0));
    ObjectWriteOperation op;
    op.omap_clear();
    ioctx.aio_operate("test_obj", my_completion.get(), &op);
    {
      TestAlarm alarm;
      ASSERT_EQ(0, my_completion->wait_for_complete());
    }
    EXPECT_EQ(0, my_completion->get_return_value());
  }
  {
    boost::scoped_ptr<AioCompletion> my_completion(cluster.aio_create_completion(0, 0));
    ObjectReadOperation op;
    set<string> set_got;
    op.omap_get_keys2("", -1, &set_got, nullptr, 0);
    ioctx.aio_operate("test_obj", my_completion.get(), &op, 0);
    {
      TestAlarm alarm;
      ASSERT_EQ(0, my_completion->wait_for_complete());
    }
    EXPECT_EQ(0, my_completion->get_return_value());
    ASSERT_EQ(set_got.size(), (unsigned)0);
  }
  // omap_clear clears header *and* keys
  {
    boost::scoped_ptr<AioCompletion> my_completion(cluster.aio_create_completion(0, 0));
    ObjectWriteOperation op;
    bufferlist bl;
    bl.append("some data");
    map<string,bufferlist> to_set;
    to_set["foo"] = bl;
    to_set["foo2"] = bl;
    to_set["qfoo3"] = bl;
    op.omap_set(to_set);
    op.omap_set_header(bl);
    ioctx.aio_operate("foo3", my_completion.get(), &op);
    {
      TestAlarm alarm;
      ASSERT_EQ(0, my_completion->wait_for_complete());
    }
    EXPECT_EQ(0, my_completion->get_return_value());
  }
  {
    boost::scoped_ptr<AioCompletion> my_completion(cluster.aio_create_completion(0, 0));
    ObjectWriteOperation op;
    op.omap_clear();
    ioctx.aio_operate("foo3", my_completion.get(), &op);
    {
      TestAlarm alarm;
      ASSERT_EQ(0, my_completion->wait_for_complete());
    }
    EXPECT_EQ(0, my_completion->get_return_value());
  }
  {
    boost::scoped_ptr<AioCompletion> my_completion(cluster.aio_create_completion(0, 0));
    ObjectReadOperation op;
    set<string> set_got;
    bufferlist hdr;
    op.omap_get_keys2("", -1, &set_got, nullptr, 0);
    op.omap_get_header(&hdr, NULL);
    ioctx.aio_operate("foo3", my_completion.get(), &op, 0);
    {
      TestAlarm alarm;
      ASSERT_EQ(0, my_completion->wait_for_complete());
    }
    EXPECT_EQ(0, my_completion->get_return_value());
    ASSERT_EQ(set_got.size(), (unsigned)0);
    ASSERT_EQ(hdr.length(), 0u);
  }
  ioctx.remove("test_obj");
  destroy_one_pool_pp(pool_name, cluster);
}
// Two async writes at adjacent offsets (128 bytes at 0, 64 bytes at 128);
// a larger read must return exactly the 192 written bytes in order.
TEST(LibRadosAio, MultiWritePP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  char buf2[64];
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  // Second write starts where the first ended.
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion2.get(),
					   bl2, sizeof(buf2), sizeof(buf)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  bufferlist bl3;
  auto my_completion3 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion3);
  // Over-read on purpose; only buf + buf2 bytes exist.
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion3.get(),
					  &bl3, (sizeof(buf) + sizeof(buf2) * 3), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion3->wait_for_complete());
  }
  ASSERT_EQ((int)(sizeof(buf) + sizeof(buf2)), my_completion3->get_return_value());
  ASSERT_EQ(sizeof(buf) + sizeof(buf2), bl3.length());
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf, sizeof(buf)));
  ASSERT_EQ(0, memcmp(bl3.c_str() + sizeof(buf), buf2, sizeof(buf2)));
}
// Take an exclusive lock, release it asynchronously with aio_unlock, and
// verify the lock can be re-acquired afterwards.
TEST(LibRadosAio, AioUnlockPP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, test_data.m_ioctx.lock_exclusive(test_data.m_oid, "TestLock", "Cookie", "", NULL, 0));
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  ASSERT_EQ(0, test_data.m_ioctx.aio_unlock(test_data.m_oid, "TestLock", "Cookie", my_completion.get()));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  // Re-locking must succeed now that the lock was released.
  ASSERT_EQ(0, test_data.m_ioctx.lock_exclusive(test_data.m_oid, "TestLock", "Cookie", "", NULL, 0));
}
// Per-test fixture for the erasure-coded (EC) AIO tests: creates a
// temporary EC pool named after the running test, opens an IoCtx on it,
// and tears both down in the destructor once init() has succeeded.
class AioTestDataECPP
{
public:
  AioTestDataECPP()
    : m_init(false),
      m_oid("foo")
  {}

  ~AioTestDataECPP()
  {
    // Only clean up if init() completed; otherwise nothing was created.
    if (m_init) {
      m_ioctx.close();
      destroy_one_ec_pool_pp(m_pool_name, m_cluster);
    }
  }

  // Create the EC pool and IoCtx.  Returns "" on success, or a
  // human-readable error string (used with ASSERT_EQ("", ...)) on failure.
  std::string init()
  {
      int ret;
      auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
      m_pool_name = get_temp_pool_name(pool_prefix);
      std::string err = create_one_ec_pool_pp(m_pool_name, m_cluster);
      if (!err.empty()) {
	ostringstream oss;
	oss << "create_one_ec_pool(" << m_pool_name << ") failed: error " << err;
	return oss.str();
      }
      ret = m_cluster.ioctx_create(m_pool_name.c_str(), m_ioctx);
      if (ret) {
	// Pool was created above but the IoCtx failed; destroy it here
	// because m_init stays false and the destructor will not.
	destroy_one_ec_pool_pp(m_pool_name, m_cluster);
	ostringstream oss;
	oss << "rados_ioctx_create failed: error " << ret;
	return oss.str();
      }
      m_oid = fmt::format("oid_{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
      m_init = true;
      return "";
  }

  Rados m_cluster;          // cluster handle owning the temp pool
  IoCtx m_ioctx;            // IoCtx bound to m_pool_name
  std::string m_pool_name;  // name of the temporary EC pool
  bool m_init;              // true once init() fully succeeded
  std::string m_oid;        // per-test object name
};
// EC test cases
// Basic EC AIO write: once in the default namespace, once in the
// "nspace" namespace, each against a fresh pool.
TEST(LibRadosAioEC, SimpleWritePP) {
  SKIP_IF_CRIMSON();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  {
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  }

  {
  // Same write, but into a non-default namespace.
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  test_data.m_ioctx.set_namespace("nspace");
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  }
}
// Issue a single EC AIO write and block on wait_for_complete(); the
// completion must report success.
TEST(LibRadosAioEC, WaitForSafePP) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto comp = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(comp);
  char payload[128];
  memset(payload, 0xcc, sizeof(payload));
  bufferlist wbl;
  wbl.append(payload, sizeof(payload));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, comp.get(),
                                           wbl, sizeof(payload), 0));
  TestAlarm alarm;
  ASSERT_EQ(0, comp->wait_for_complete());
  ASSERT_EQ(0, comp->get_return_value());
}
// EC round trip: aio_write 128 bytes, aio_read them back, compare.
TEST(LibRadosAioEC, RoundTripPP) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  bufferlist bl2;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion2.get(),
					  &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  // Read return value is the number of bytes read.
  ASSERT_EQ((int)sizeof(buf), my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), bl2.length());
  ASSERT_EQ(0, memcmp(buf, bl2.c_str(), sizeof(buf)));
}
// Second copy of the EC write/read round trip (kept as a separate test
// case; body matches RoundTripPP).
TEST(LibRadosAioEC, RoundTripPP2) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  bufferlist bl2;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion2.get(),
					  &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ((int)sizeof(buf), my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), bl2.length());
  ASSERT_EQ(0, memcmp(buf, bl2.c_str(), sizeof(buf)));
}
//using ObjectWriteOperation/ObjectReadOperation with iohint
// Round-trip a 128-byte buffer through ObjectWriteOperation /
// ObjectReadOperation while exercising FADVISE io hints.
TEST(LibRadosAioEC, RoundTripPP3)
{
  SKIP_IF_CRIMSON();
  Rados cluster;
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
  std::string pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_pool_pp(pool_name, cluster));
  IoCtx ioctx;
  ASSERT_EQ(0, cluster.ioctx_create(pool_name.c_str(), ioctx));
  auto my_completion1 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ObjectWriteOperation op;
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  // Pass the length explicitly: buf is not NUL-terminated, so the
  // strlen-based append(const char*) overload would read past the array.
  bl.append(buf, sizeof(buf));
  op.write(0, bl);
  op.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
  ioctx.aio_operate("test_obj", my_completion1.get(), &op);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion1->wait_for_complete());
  }
  EXPECT_EQ(0, my_completion1->get_return_value());

  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  bl.clear();
  ObjectReadOperation op1;
  op1.read(0, sizeof(buf), &bl, NULL);
  op1.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED|LIBRADOS_OP_FLAG_FADVISE_RANDOM);
  ioctx.aio_operate("test_obj", my_completion2.get(), &op1, 0);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  EXPECT_EQ(0, my_completion2->get_return_value());
  ASSERT_EQ(0, memcmp(buf, bl.c_str(), sizeof(buf)));

  ioctx.remove("test_obj");
  destroy_one_pool_pp(pool_name, cluster);
}
// Two AIO appends of 128 bytes each, then read the whole object back and
// verify both halves.
TEST(LibRadosAio, RoundTripAppendPP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_append(test_data.m_oid, my_completion.get(),
					    bl1, sizeof(buf)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  char buf2[128];
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_append(test_data.m_oid, my_completion2.get(),
					    bl2, sizeof(buf2)))
;
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  bufferlist bl3;
  auto my_completion3 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion3);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion3.get(),
					  &bl3, 2 * sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion3->wait_for_complete());
  }
  ASSERT_EQ((int)(sizeof(buf) * 2), my_completion3->get_return_value());
  ASSERT_EQ(sizeof(buf) * 2, bl3.length());
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf, sizeof(buf)));
  ASSERT_EQ(0, memcmp(bl3.c_str() + sizeof(buf), buf2, sizeof(buf2)));
}
// Seed an object synchronously, delete it with aio_remove, then verify a
// subsequent synchronous read reports -ENOENT.
TEST(LibRadosAioPP, RemoveTestPP) {
  char payload[128];
  memset(payload, 0xaa, sizeof(payload));
  bufferlist wbl;
  wbl.append(payload, sizeof(payload));
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, test_data.m_ioctx.append(test_data.m_oid, wbl, sizeof(payload)));
  auto comp = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_EQ(0, test_data.m_ioctx.aio_remove(test_data.m_oid, comp.get()));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, comp->wait_for_complete());
  }
  ASSERT_EQ(0, comp->get_return_value());
  bufferlist rbl;
  ASSERT_EQ(-ENOENT, test_data.m_ioctx.read(test_data.m_oid, rbl, sizeof(payload), 0));
}
// Write 128 bytes, then sparse-read them back; assert_eq_sparse checks
// the returned extent map against the written data.
TEST(LibRadosAioEC, RoundTripSparseReadPP) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  map<uint64_t, uint64_t> extents;
  bufferlist bl2;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_sparse_read(test_data.m_oid, my_completion2.get(),
						 &extents, &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  assert_eq_sparse(bl1, extents, bl2);
}
// EC pools require appends aligned to the pool's required alignment.
// Append one full-alignment chunk, then a half chunk, verify that a third
// (now unaligned) append fails with EOPNOTSUPP, and read everything back.
TEST(LibRadosAioEC, RoundTripAppendPP) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  bool req;
  ASSERT_EQ(0, test_data.m_ioctx.pool_requires_alignment2(&req));
  ASSERT_TRUE(req);
  uint64_t alignment;
  ASSERT_EQ(0, test_data.m_ioctx.pool_required_alignment2(&alignment));
  ASSERT_NE((unsigned)0, alignment);
  int bsize = alignment;
  // unique_ptr instead of raw new[]: the buffers are freed even when an
  // ASSERT_* below returns early out of the test body (the raw
  // new[]/delete[] version leaked in that case).
  auto buf = std::make_unique<char[]>(bsize);
  memset(buf.get(), 0xcc, bsize);
  bufferlist bl1;
  bl1.append(buf.get(), bsize);
  ASSERT_EQ(0, test_data.m_ioctx.aio_append(test_data.m_oid, my_completion.get(),
					    bl1, bsize));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());

  int hbsize = bsize / 2;
  auto buf2 = std::make_unique<char[]>(hbsize);
  memset(buf2.get(), 0xdd, hbsize);
  bufferlist bl2;
  bl2.append(buf2.get(), hbsize);
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_append(test_data.m_oid, my_completion2.get(),
					    bl2, hbsize));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());

  // The object is now bsize + hbsize long, which is not a multiple of
  // the alignment, so this append must be rejected.
  auto my_completion3 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion3);
  ASSERT_EQ(0, test_data.m_ioctx.aio_append(test_data.m_oid, my_completion3.get(),
					    bl2, hbsize));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion3->wait_for_complete());
  }
  EXPECT_EQ(-EOPNOTSUPP, my_completion3->get_return_value());

  // Over-read; the return value reports the actual object size.
  bufferlist bl3;
  auto my_completion4 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion4);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion4.get(),
					  &bl3, bsize * 3, 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion4->wait_for_complete());
  }
  int tbsize = bsize + hbsize;
  ASSERT_EQ(tbsize, my_completion4->get_return_value());
  ASSERT_EQ((unsigned)tbsize, bl3.length());
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf.get(), bsize));
  ASSERT_EQ(0, memcmp(bl3.c_str() + bsize, buf2.get(), hbsize));
}
// Exercise AioCompletion::is_complete() by polling it instead of calling
// wait_for_complete() on the read.
TEST(LibRadosAioEC, IsCompletePP) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  bufferlist bl2;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion2.get(),
					  &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;

    // Busy-wait until the AIO completes.
    // Normally we wouldn't do this, but we want to test is_complete.
    while (true) {
      int is_complete = my_completion2->is_complete();
      if (is_complete)
	break;
    }
  }
  ASSERT_EQ((int)sizeof(buf), my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), bl2.length());
  ASSERT_EQ(0, memcmp(buf, bl2.c_str(), sizeof(buf)));
}
// Poll is_complete() on the *write* completion (historically this tested
// the separate "safe" state), then read the data back normally.
TEST(LibRadosAioEC, IsSafePP) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;

    // Busy-wait until the AIO completes.
    // Normally we wouldn't do this, but we want to test rados_aio_is_safe.
    while (true) {
      int is_complete = my_completion->is_complete();
      if (is_complete)
	break;
    }
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  bufferlist bl2;
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion2.get(),
					  &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ((int)sizeof(buf), my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), bl2.length());
  ASSERT_EQ(0, memcmp(buf, bl2.c_str(), sizeof(buf)));
}
// Reading an object that was never written must surface -ENOENT through
// the completion's return value.
TEST(LibRadosAioEC, ReturnValuePP) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto comp = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(comp);
  bufferlist rbl;
  ASSERT_EQ(0, test_data.m_ioctx.aio_read("nonexistent", comp.get(),
                                          &rbl, 128, 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, comp->wait_for_complete());
  }
  ASSERT_EQ(-ENOENT, comp->get_return_value());
}
// aio_flush() must block until the outstanding write has completed, so
// the completion's return value can be checked without waiting on it.
TEST(LibRadosAioEC, FlushPP) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xee, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  // No wait_for_complete() here on purpose: aio_flush does the waiting.
  ASSERT_EQ(0, test_data.m_ioctx.aio_flush());
  ASSERT_EQ(0, my_completion->get_return_value());
  bufferlist bl2;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion2.get(),
					  &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ((int)sizeof(buf), my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), bl2.length());
  ASSERT_EQ(0, memcmp(buf, bl2.c_str(), sizeof(buf)));
}
// aio_flush_async(): once the flush completion fires, the earlier write
// completion must already be complete as well.
TEST(LibRadosAioEC, FlushAsyncPP) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  auto flush_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xee, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  ASSERT_EQ(0, test_data.m_ioctx.aio_flush_async(flush_completion.get()));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, flush_completion->wait_for_complete());
  }
  // Flush completion implies the write completed first.
  ASSERT_EQ(1, my_completion->is_complete());
  ASSERT_EQ(1, flush_completion->is_complete());
  ASSERT_EQ(0, my_completion->get_return_value());
  bufferlist bl2;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion2.get(),
					  &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ((int)sizeof(buf), my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), bl2.length());
  ASSERT_EQ(0, memcmp(buf, bl2.c_str(), sizeof(buf)));
}
// aio_write_full must replace the whole object: write 128 bytes, then
// write_full 64 different bytes, and verify only the 64 remain.
TEST(LibRadosAioEC, RoundTripWriteFullPP) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  char buf2[64];
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_write_full(test_data.m_oid, my_completion2.get(), bl2));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  bufferlist bl3;
  auto my_completion3 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion3);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion3.get(),
					  &bl3, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion3->wait_for_complete());
  }
  // Only the write_full payload should remain.
  ASSERT_EQ((int)sizeof(buf2), my_completion3->get_return_value());
  ASSERT_EQ(sizeof(buf2), bl3.length());
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf2, sizeof(buf2)));
}
//using ObjectWriteOperation/ObjectReadOperation with iohint
// write_full via ObjectWriteOperation with FADVISE io hints, then read
// it back via ObjectReadOperation and compare.
TEST(LibRadosAioEC, RoundTripWriteFullPP2)
{
  SKIP_IF_CRIMSON();
  Rados cluster;
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
  std::string pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_pool_pp(pool_name, cluster));
  IoCtx ioctx;
  ASSERT_EQ(0, cluster.ioctx_create(pool_name.c_str(), ioctx));
  auto my_completion1 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ObjectWriteOperation op;
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  // Pass the length explicitly: buf is not NUL-terminated, so the
  // strlen-based append(const char*) overload would read past the array.
  bl.append(buf, sizeof(buf));
  op.write_full(bl);
  op.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_NOCACHE);
  ioctx.aio_operate("test_obj", my_completion1.get(), &op);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion1->wait_for_complete());
  }
  EXPECT_EQ(0, my_completion1->get_return_value());

  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  bl.clear();
  ObjectReadOperation op1;
  op1.read(0, sizeof(buf), &bl, NULL);
  op1.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_NOCACHE|LIBRADOS_OP_FLAG_FADVISE_RANDOM);
  ioctx.aio_operate("test_obj", my_completion2.get(), &op1, 0);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  EXPECT_EQ(0, my_completion2->get_return_value());
  ASSERT_EQ(0, memcmp(buf, bl.c_str(), sizeof(buf)));

  ioctx.remove("test_obj");
  destroy_one_pool_pp(pool_name, cluster);
}
// Write an object, then aio_stat it and check the reported size.
TEST(LibRadosAioEC, SimpleStatPP) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  uint64_t psize;
  time_t pmtime;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_stat(test_data.m_oid, my_completion2.get(),
					  &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), psize);
}
// Namespace variant of SimpleStatPP: write and stat the object inside a
// non-default namespace.  Without the set_namespace() call this test was
// an exact duplicate of SimpleStatPP (cf. SimpleWritePP above, which
// uses "nspace" for its namespaced pass).
TEST(LibRadosAioEC, SimpleStatPPNS) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  test_data.m_ioctx.set_namespace("nspace");
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  uint64_t psize;
  time_t pmtime;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_stat(test_data.m_oid, my_completion2.get(),
					  &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), psize);
}
// Write, stat (size must match), aio_remove, then stat again and expect
// -ENOENT.
TEST(LibRadosAioEC, StatRemovePP) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  uint64_t psize;
  time_t pmtime;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_stat(test_data.m_oid, my_completion2.get(),
					  &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), psize);
  uint64_t psize2;
  time_t pmtime2;
  auto my_completion3 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion3);
  ASSERT_EQ(0, test_data.m_ioctx.aio_remove(test_data.m_oid, my_completion3.get()));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion3->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion3->get_return_value());
  // Stat after removal must fail.
  auto my_completion4 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion4);
  ASSERT_EQ(0, test_data.m_ioctx.aio_stat(test_data.m_oid, my_completion4.get(),
					  &psize2, &pmtime2));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion4->wait_for_complete());
  }
  ASSERT_EQ(-ENOENT, my_completion4->get_return_value());
}
// Invoke the "hello" object-class method "say_hello" via aio_exec and
// check its output.
TEST(LibRadosAioEC, ExecuteClassPP) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  bufferlist in, out;
  ASSERT_EQ(0, test_data.m_ioctx.aio_exec(test_data.m_oid, my_completion2.get(),
					  "hello", "say_hello", in, &out));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  ASSERT_EQ(std::string("Hello, world!"), std::string(out.c_str(), out.length()));
}
// Omap operations are not supported on EC pools: the combined
// omap_set/omap_set_header write op must fail with EOPNOTSUPP.
TEST(LibRadosAioEC, OmapPP) {
  SKIP_IF_CRIMSON();
  Rados cluster;
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
  std::string pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_ec_pool_pp(pool_name, cluster));
  IoCtx ioctx;
  cluster.ioctx_create(pool_name.c_str(), ioctx);

  string header_str = "baz";
  // +1 so the trailing NUL is included in the header payload.
  bufferptr bp(header_str.c_str(), header_str.size() + 1);
  bufferlist header_to_set;
  header_to_set.push_back(bp);
  map<string, bufferlist> to_set;
  {
    auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
    ObjectWriteOperation op;
    to_set["foo"] = header_to_set;
    to_set["foo2"] = header_to_set;
    to_set["qfoo3"] = header_to_set;
    op.omap_set(to_set);

    op.omap_set_header(header_to_set);
    ioctx.aio_operate("test_obj", my_completion.get(), &op);
    {
      TestAlarm alarm;
      ASSERT_EQ(0, my_completion->wait_for_complete());
    }
    EXPECT_EQ(-EOPNOTSUPP, my_completion->get_return_value());
  }
  ioctx.remove("test_obj");
  destroy_one_ec_pool_pp(pool_name, cluster);
}
// On an EC pool the second (overwrite/append-style) write at a non-zero
// offset is rejected with EOPNOTSUPP; only the first 128 bytes survive.
TEST(LibRadosAioEC, MultiWritePP) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());

  char buf2[64];
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion2.get(),
					   bl2, sizeof(buf2), sizeof(buf)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  // EC pool rejects the partial overwrite.
  ASSERT_EQ(-EOPNOTSUPP, my_completion2->get_return_value());

  bufferlist bl3;
  auto my_completion3 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion3);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion3.get(),
					  &bl3, (sizeof(buf) + sizeof(buf2) * 3), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion3->wait_for_complete());
  }
  // Only the first write's data exists.
  ASSERT_EQ((int)sizeof(buf), my_completion3->get_return_value());
  ASSERT_EQ(sizeof(buf), bl3.length());
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf, sizeof(buf)));
}
// Race a remove against a write with objecter_retry_writes_after_first_reply
// enabled: the remove of the not-yet-existing object fails with ENOENT,
// the write succeeds, and the object exists afterwards.
TEST(LibRadosAio, RacingRemovePP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init({{"objecter_retry_writes_after_first_reply", "true"}}));
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  // Queue the remove first, then the write, without waiting in between.
  ASSERT_EQ(0, test_data.m_ioctx.aio_remove(test_data.m_oid, my_completion2.get()));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
                                           bl, sizeof(buf), 0));
  {
    TestAlarm alarm;
    my_completion2->wait_for_complete();
    my_completion->wait_for_complete();
  }
  ASSERT_EQ(-ENOENT, my_completion2->get_return_value());
  ASSERT_EQ(0, my_completion->get_return_value());
  ASSERT_EQ(0, test_data.m_ioctx.stat(test_data.m_oid, nullptr, nullptr));
}
// aio_cmpext: a matching compare returns 0; a mismatching compare at
// offset 0 returns -MAX_ERRNO (i.e. -MAX_ERRNO - mismatch_offset).
TEST(LibRadosAio, RoundTripCmpExtPP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char full[128];
  memset(full, 0xcc, sizeof(full));
  bufferlist bl1;
  bl1.append(full, sizeof(full));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(full), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());

  /* compare with match */
  bufferlist cbl;
  cbl.append(full, sizeof(full));
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_EQ(0, test_data.m_ioctx.aio_cmpext(test_data.m_oid, my_completion2.get(), 0, cbl));

  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());

  /* compare with mismatch */
  memset(full, 0xdd, sizeof(full));
  cbl.clear();
  cbl.append(full, sizeof(full));
  auto my_completion3 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_EQ(0, test_data.m_ioctx.aio_cmpext(test_data.m_oid, my_completion3.get(), 0, cbl));

  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion3->wait_for_complete());
  }
  ASSERT_EQ(-MAX_ERRNO, my_completion3->get_return_value());
}
// cmpext via ObjectWriteOperation and ObjectReadOperation: for each path,
// first a matching compare (rc 0), then a mismatching one (-MAX_ERRNO,
// mismatch at offset 0).  The out-param `ret` must mirror the completion
// return value.
TEST(LibRadosAio, RoundTripCmpExtPP2)
{
  int ret;
  char buf[128];
  char miscmp_buf[128];
  bufferlist cbl;
  Rados cluster;
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
  std::string pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_pool_pp(pool_name, cluster));
  IoCtx ioctx;
  cluster.ioctx_create(pool_name.c_str(), ioctx);

  auto wr_cmpl = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ObjectWriteOperation wr_op;
  memset(buf, 0xcc, sizeof(buf));
  memset(miscmp_buf, 0xdd, sizeof(miscmp_buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  wr_op.write_full(bl);
  wr_op.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
  ioctx.aio_operate("test_obj", wr_cmpl.get(), &wr_op);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, wr_cmpl->wait_for_complete());
  }
  EXPECT_EQ(0, wr_cmpl->get_return_value());

  /* cmpext as write op. first match then mismatch */
  // NOTE: wr_op is reused for each subsequent aio_operate call below.
  auto wr_cmpext_cmpl = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  cbl.append(buf, sizeof(buf));
  ret = 0;
  wr_op.cmpext(0, cbl, &ret);
  wr_op.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
  ioctx.aio_operate("test_obj", wr_cmpext_cmpl.get(), &wr_op);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, wr_cmpext_cmpl->wait_for_complete());
  }
  EXPECT_EQ(0, wr_cmpext_cmpl->get_return_value());
  EXPECT_EQ(0, ret);

  auto wr_cmpext_cmpl2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  cbl.clear();
  cbl.append(miscmp_buf, sizeof(miscmp_buf));
  ret = 0;
  wr_op.cmpext(0, cbl, &ret);
  wr_op.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
  ioctx.aio_operate("test_obj", wr_cmpext_cmpl2.get(), &wr_op);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, wr_cmpext_cmpl2->wait_for_complete());
  }
  EXPECT_EQ(-MAX_ERRNO, wr_cmpext_cmpl2->get_return_value());
  EXPECT_EQ(-MAX_ERRNO, ret);

  /* cmpext as read op */
  auto rd_cmpext_cmpl = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ObjectReadOperation rd_op;
  cbl.clear();
  cbl.append(buf, sizeof(buf));
  ret = 0;
  rd_op.cmpext(0, cbl, &ret);
  rd_op.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
  ioctx.aio_operate("test_obj", rd_cmpext_cmpl.get(), &rd_op, 0);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rd_cmpext_cmpl->wait_for_complete());
  }
  EXPECT_EQ(0, rd_cmpext_cmpl->get_return_value());
  EXPECT_EQ(0, ret);

  auto rd_cmpext_cmpl2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  cbl.clear();
  cbl.append(miscmp_buf, sizeof(miscmp_buf));
  ret = 0;
  rd_op.cmpext(0, cbl, &ret);
  rd_op.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
  ioctx.aio_operate("test_obj", rd_cmpext_cmpl2.get(), &rd_op, 0);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rd_cmpext_cmpl2->wait_for_complete());
  }
  EXPECT_EQ(-MAX_ERRNO, rd_cmpext_cmpl2->get_return_value());
  EXPECT_EQ(-MAX_ERRNO, ret);

  ioctx.remove("test_obj");
  destroy_one_pool_pp(pool_name, cluster);
}
// Shared state for the PoolEIOFlag test; all of it is guarded by my_lock.
ceph::mutex my_lock = ceph::make_mutex("my_lock");
set<unsigned> inflight;   // sequence numbers of writes not yet completed
unsigned max_success = 0; // highest sequence number that completed with r == 0
unsigned min_failed = 0;  // lowest sequence number that failed (0 = none yet)
// Per-write bookkeeping passed as the completion callback argument.
struct io_info {
  unsigned i;       // sequence number of this write
  AioCompletion *c; // completion for this write
};
// Completion callback for PoolEIOFlag: records the result of write `i` in
// the shared max_success/min_failed/inflight state under my_lock.
// NOTE(review): neither `info` nor `info->c` is freed here or by the caller,
// so each write leaks an io_info and an AioCompletion; tolerable for a
// short-lived test process, but worth confirming/cleaning up.
void pool_io_callback(completion_t cb, void *arg /* Actually AioCompletion* */)
{
  io_info *info = (io_info *)arg;
  unsigned long i = info->i;
  {
    TestAlarm alarm;
    ASSERT_EQ(0, info->c->wait_for_complete());
  }
  int r = info->c->get_return_value();
  //cout << "finish " << i << " r = " << r << std::endl;
  std::scoped_lock l(my_lock);
  inflight.erase(i);
  if (r == 0) {
    if (i > max_success) {
      max_success = i;
    }
  } else {
    if (!min_failed || i < min_failed) {
      min_failed = i;
    }
  }
}
// Issue a stream of async writes, flip the pool's "eio" flag halfway
// through from a helper thread, and verify that successes and failures
// form two contiguous runs (every write after the first failure fails).
TEST(LibRadosAio, PoolEIOFlag) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());

  bufferlist bl;
  bl.append("some data");
  std::thread *t = nullptr;

  unsigned max = 100;
  unsigned timeout = max * 10;  // give up if nothing fails after this many writes
  unsigned long i = 1;
  my_lock.lock();
  for (; min_failed == 0 && i <= timeout; ++i) {
    io_info *info = new io_info;
    info->i = i;
    info->c = Rados::aio_create_completion();
    info->c->set_complete_callback((void*)info, pool_io_callback);
    inflight.insert(i);
    my_lock.unlock();
    int r = test_data.m_ioctx.aio_write(test_data.m_oid, info->c, bl, bl.length(), 0);
    //cout << "start " << i << " r = " << r << std::endl;

    // Halfway to `max`, set the pool EIO flag asynchronously so that the
    // flip races with in-flight writes.
    if (i == max / 2) {
      cout << "setting pool EIO" << std::endl;
      t = new std::thread(
	[&] {
	  bufferlist empty;
	  ASSERT_EQ(0, test_data.m_cluster.mon_command(
	    fmt::format(R"({{
        "prefix": "osd pool set",
        "pool": "{}",
        "var": "eio",
        "val": "true"
      }})", test_data.m_pool_name),
	    empty, nullptr, nullptr));
	});
    }

    std::this_thread::sleep_for(10ms);
    my_lock.lock();
    if (r < 0) {
      inflight.erase(i);
      break;
    }
  }
  // The EIO thread is only spawned once iteration max/2 is reached; if the
  // loop exited earlier, `t` is still nullptr — guard before joining
  // (previously this dereferenced a null pointer unconditionally).
  if (t) {
    t->join();
    delete t;
  }

  // wait for ios to finish
  for (; !inflight.empty(); ++i) {
    cout << "waiting for " << inflight.size() << std::endl;
    my_lock.unlock();
    sleep(1);
    my_lock.lock();
  }

  cout << "max_success " << max_success << ", min_failed " << min_failed << std::endl;
  // No gap between the last success and the first failure.
  ASSERT_TRUE(max_success + 1 == min_failed);
  my_lock.unlock();
}
// This test case reproduces https://tracker.ceph.com/issues/57152
TEST(LibRadosAio, MultiReads) {
  // Exercise many concurrent aio reads against a single object.
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());

  // Seed the object "foo" with a known 128-byte pattern.
  char expected[128];
  memset(expected, 0xcc, sizeof(expected));
  bufferlist seed;
  seed.append(expected, sizeof(expected));
  auto write_cmpl = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(write_cmpl);
  ASSERT_EQ(0, test_data.m_ioctx.aio_write("foo", write_cmpl.get(),
                                           seed, sizeof(expected), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, write_cmpl->wait_for_complete());
  }
  ASSERT_EQ(0, write_cmpl->get_return_value());

  // Don't use std::vector to store bufferlists (e.g for parallelizing
  // aio_reads), as they are being moved whenever the vector resizes and
  // will cause invalidated references.  std::deque::emplace_back() is
  // obliged to preserve references to previously inserted elements
  // (unlike insert() or erase()).
  std::deque<std::pair<bufferlist, std::unique_ptr<AioCompletion>>> pending;
  for (int idx = 0; idx < 100; ++idx) {
    auto& [read_bl, completion] = pending.emplace_back();
    completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
    ASSERT_TRUE(completion);
    ASSERT_EQ(0, test_data.m_ioctx.aio_read("foo", completion.get(),
                                            &read_bl, sizeof(expected), 0));
  }

  // Wait for every read and verify the returned bytes match the pattern.
  for (auto& [read_bl, completion] : pending) {
    {
      TestAlarm alarm;
      ASSERT_EQ(0, completion->wait_for_complete());
    }
    ASSERT_EQ((int)sizeof(expected), completion->get_return_value());
    ASSERT_EQ(0, memcmp(expected, read_bl.c_str(), sizeof(expected)));
  }
}
| 82,588 | 32.463938 | 107 | cc |
null | ceph-main/src/test/librados/asio.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#include "librados/librados_asio.h"
#include <gtest/gtest.h>
#include "common/ceph_argparse.h"
#include "common/debug.h"
#include "common/errno.h"
#include "global/global_init.h"
#include <boost/range/begin.hpp>
#include <boost/range/end.hpp>
#include <spawn/spawn.hpp>
#include <boost/asio/use_future.hpp>
#define dout_subsys ceph_subsys_rados
#define dout_context g_ceph_context
using namespace std;
// test fixture for global setup/teardown
// Test fixture for the librados asio bindings: connects once per suite,
// creates/opens the test pool, and seeds an object named "exist" whose
// contents are "hello".
class AsioRados : public ::testing::Test {
  static constexpr auto poolname = "ceph_test_rados_api_asio";
 protected:
  static librados::Rados rados;
  static librados::IoCtx io;
  // writes to snapio fail immediately with -EROFS. this is used to test errors
  // that come from inside the initiating function, rather than passed to the
  // AioCompletion callback
  static librados::IoCtx snapio;
 public:
  static void SetUpTestCase() {
    ASSERT_EQ(0, rados.init_with_context(g_ceph_context))
    ASSERT_EQ(0, rados.connect());
    // open/create test pool
    int r = rados.ioctx_create(poolname, io);
    if (r == -ENOENT) {
      r = rados.pool_create(poolname);
      if (r == -EEXIST) {
        r = 0;
      } else if (r == 0) {
        r = rados.ioctx_create(poolname, io);
      }
    }
    ASSERT_EQ(0, r);
    ASSERT_EQ(0, rados.ioctx_create(poolname, snapio));
    // reading from snap 1 makes all writes on snapio fail with -EROFS
    snapio.snap_set_read(1);
    // initialize the "exist" object
    bufferlist bl;
    bl.append("hello");
    ASSERT_EQ(0, io.write_full("exist", bl));
  }
  static void TearDownTestCase() {
    rados.shutdown();
  }
};
// Out-of-line definitions for the fixture's static members.
librados::Rados AsioRados::rados;
librados::IoCtx AsioRados::io;
librados::IoCtx AsioRados::snapio;
// Completion-handler flavor of async_read: success returns the object's
// contents; a missing object reports ENOENT.
TEST_F(AsioRados, AsyncReadCallback)
{
  boost::asio::io_service service;

  auto on_success = [&] (boost::system::error_code ec, bufferlist bl) {
    EXPECT_FALSE(ec);
    EXPECT_EQ("hello", bl.to_str());
  };
  librados::async_read(service, io, "exist", 256, 0, on_success);

  auto on_error = [&] (boost::system::error_code ec, bufferlist bl) {
    EXPECT_EQ(boost::system::errc::no_such_file_or_directory, ec);
  };
  librados::async_read(service, io, "noexist", 256, 0, on_error);

  service.run();
}
// Future flavor of async_read: success yields the data; failure surfaces
// as a boost::system::system_error thrown from future::get().
TEST_F(AsioRados, AsyncReadFuture)
{
  boost::asio::io_service service;
  std::future<bufferlist> f1 = librados::async_read(service, io, "exist", 256,
                                                    0, boost::asio::use_future);
  std::future<bufferlist> f2 = librados::async_read(service, io, "noexist", 256,
                                                    0, boost::asio::use_future);
  service.run();
  EXPECT_NO_THROW({
    auto bl = f1.get();
    EXPECT_EQ("hello", bl.to_str());
  });
  EXPECT_THROW(f2.get(), boost::system::system_error);
}
// Stackful-coroutine flavor of async_read via spawn::yield_context; the
// error_code form (yield[ec]) reports failure without throwing.
TEST_F(AsioRados, AsyncReadYield)
{
  boost::asio::io_service service;
  auto success_cr = [&] (spawn::yield_context yield) {
    boost::system::error_code ec;
    auto bl = librados::async_read(service, io, "exist", 256, 0, yield[ec]);
    EXPECT_FALSE(ec);
    EXPECT_EQ("hello", bl.to_str());
  };
  spawn::spawn(service, success_cr);
  auto failure_cr = [&] (spawn::yield_context yield) {
    boost::system::error_code ec;
    auto bl = librados::async_read(service, io, "noexist", 256, 0, yield[ec]);
    EXPECT_EQ(boost::system::errc::no_such_file_or_directory, ec);
  };
  spawn::spawn(service, failure_cr);
  service.run();
}
// Completion-handler flavor of async_write; writes through snapio fail
// with EROFS (see the fixture comment on snapio).
TEST_F(AsioRados, AsyncWriteCallback)
{
  boost::asio::io_service service;
  bufferlist bl;
  bl.append("hello");
  auto success_cb = [&] (boost::system::error_code ec) {
    EXPECT_FALSE(ec);
  };
  librados::async_write(service, io, "exist", bl, bl.length(), 0,
                        success_cb);
  auto failure_cb = [&] (boost::system::error_code ec) {
    EXPECT_EQ(boost::system::errc::read_only_file_system, ec);
  };
  librados::async_write(service, snapio, "exist", bl, bl.length(), 0,
                        failure_cb);
  service.run();
}
// Future flavor of async_write; the EROFS failure from snapio surfaces as
// an exception from future::get().
TEST_F(AsioRados, AsyncWriteFuture)
{
  boost::asio::io_service service;
  bufferlist bl;
  bl.append("hello");
  auto f1 = librados::async_write(service, io, "exist", bl, bl.length(), 0,
                                  boost::asio::use_future);
  auto f2 = librados::async_write(service, snapio, "exist", bl, bl.length(), 0,
                                  boost::asio::use_future);
  service.run();
  EXPECT_NO_THROW(f1.get());
  EXPECT_THROW(f2.get(), boost::system::system_error);
}
// Coroutine flavor of async_write via spawn::yield_context.
TEST_F(AsioRados, AsyncWriteYield)
{
  boost::asio::io_service service;
  bufferlist bl;
  bl.append("hello");
  auto success_cr = [&] (spawn::yield_context yield) {
    boost::system::error_code ec;
    librados::async_write(service, io, "exist", bl, bl.length(), 0,
                          yield[ec]);
    EXPECT_FALSE(ec);
    // NOTE(review): this asserts on the *input* buffer, not on the object's
    // stored contents — trivially true; looks like a leftover from the read
    // test. Confirm intent.
    EXPECT_EQ("hello", bl.to_str());
  };
  spawn::spawn(service, success_cr);
  auto failure_cr = [&] (spawn::yield_context yield) {
    boost::system::error_code ec;
    librados::async_write(service, snapio, "exist", bl, bl.length(), 0,
                          yield[ec]);
    EXPECT_EQ(boost::system::errc::read_only_file_system, ec);
  };
  spawn::spawn(service, failure_cr);
  service.run();
}
// async_operate with an ObjectReadOperation, completion-handler flavor.
TEST_F(AsioRados, AsyncReadOperationCallback)
{
  boost::asio::io_service service;
  {
    librados::ObjectReadOperation op;
    op.read(0, 0, nullptr, nullptr); // len 0 = read the whole object
    auto success_cb = [&] (boost::system::error_code ec, bufferlist bl) {
      EXPECT_FALSE(ec);
      EXPECT_EQ("hello", bl.to_str());
    };
    librados::async_operate(service, io, "exist", &op, 0, success_cb);
  }
  {
    librados::ObjectReadOperation op;
    op.read(0, 0, nullptr, nullptr);
    auto failure_cb = [&] (boost::system::error_code ec, bufferlist bl) {
      EXPECT_EQ(boost::system::errc::no_such_file_or_directory, ec);
    };
    librados::async_operate(service, io, "noexist", &op, 0, failure_cb);
  }
  service.run();
}
// async_operate with an ObjectReadOperation, future flavor.
TEST_F(AsioRados, AsyncReadOperationFuture)
{
  boost::asio::io_service service;
  std::future<bufferlist> f1;
  {
    librados::ObjectReadOperation op;
    op.read(0, 0, nullptr, nullptr);
    f1 = librados::async_operate(service, io, "exist", &op, 0,
                                 boost::asio::use_future);
  }
  std::future<bufferlist> f2;
  {
    librados::ObjectReadOperation op;
    op.read(0, 0, nullptr, nullptr);
    f2 = librados::async_operate(service, io, "noexist", &op, 0,
                                 boost::asio::use_future);
  }
  service.run();
  EXPECT_NO_THROW({
    auto bl = f1.get();
    EXPECT_EQ("hello", bl.to_str());
  });
  EXPECT_THROW(f2.get(), boost::system::system_error);
}
// async_operate with an ObjectReadOperation, coroutine flavor.
TEST_F(AsioRados, AsyncReadOperationYield)
{
  boost::asio::io_service service;
  auto success_cr = [&] (spawn::yield_context yield) {
    librados::ObjectReadOperation op;
    op.read(0, 0, nullptr, nullptr);
    boost::system::error_code ec;
    auto bl = librados::async_operate(service, io, "exist", &op, 0,
                                      yield[ec]);
    EXPECT_FALSE(ec);
    EXPECT_EQ("hello", bl.to_str());
  };
  spawn::spawn(service, success_cr);
  auto failure_cr = [&] (spawn::yield_context yield) {
    librados::ObjectReadOperation op;
    op.read(0, 0, nullptr, nullptr);
    boost::system::error_code ec;
    auto bl = librados::async_operate(service, io, "noexist", &op, 0,
                                      yield[ec]);
    EXPECT_EQ(boost::system::errc::no_such_file_or_directory, ec);
  };
  spawn::spawn(service, failure_cr);
  service.run();
}
// async_operate with an ObjectWriteOperation, completion-handler flavor;
// snapio writes fail with EROFS.
TEST_F(AsioRados, AsyncWriteOperationCallback)
{
  boost::asio::io_service service;
  bufferlist bl;
  bl.append("hello");
  {
    librados::ObjectWriteOperation op;
    op.write_full(bl);
    auto success_cb = [&] (boost::system::error_code ec) {
      EXPECT_FALSE(ec);
    };
    librados::async_operate(service, io, "exist", &op, 0, success_cb);
  }
  {
    librados::ObjectWriteOperation op;
    op.write_full(bl);
    auto failure_cb = [&] (boost::system::error_code ec) {
      EXPECT_EQ(boost::system::errc::read_only_file_system, ec);
    };
    librados::async_operate(service, snapio, "exist", &op, 0, failure_cb);
  }
  service.run();
}
// async_operate with an ObjectWriteOperation, future flavor.
TEST_F(AsioRados, AsyncWriteOperationFuture)
{
  boost::asio::io_service service;
  bufferlist bl;
  bl.append("hello");
  std::future<void> f1;
  {
    librados::ObjectWriteOperation op;
    op.write_full(bl);
    f1 = librados::async_operate(service, io, "exist", &op, 0,
                                 boost::asio::use_future);
  }
  std::future<void> f2;
  {
    librados::ObjectWriteOperation op;
    op.write_full(bl);
    f2 = librados::async_operate(service, snapio, "exist", &op, 0,
                                 boost::asio::use_future);
  }
  service.run();
  EXPECT_NO_THROW(f1.get());
  EXPECT_THROW(f2.get(), boost::system::system_error);
}
// async_operate with an ObjectWriteOperation, coroutine flavor.
TEST_F(AsioRados, AsyncWriteOperationYield)
{
  boost::asio::io_service service;
  bufferlist bl;
  bl.append("hello");
  auto success_cr = [&] (spawn::yield_context yield) {
    librados::ObjectWriteOperation op;
    op.write_full(bl);
    boost::system::error_code ec;
    librados::async_operate(service, io, "exist", &op, 0, yield[ec]);
    EXPECT_FALSE(ec);
  };
  spawn::spawn(service, success_cr);
  auto failure_cr = [&] (spawn::yield_context yield) {
    librados::ObjectWriteOperation op;
    op.write_full(bl);
    boost::system::error_code ec;
    librados::async_operate(service, snapio, "exist", &op, 0, yield[ec]);
    EXPECT_EQ(boost::system::errc::read_only_file_system, ec);
  };
  spawn::spawn(service, failure_cr);
  service.run();
}
// Custom test entry point: initializes the Ceph global context before
// running the gtest suite (the fixture needs g_ceph_context).
int main(int argc, char **argv)
{
  auto args = argv_to_vec(argc, argv);
  env_to_vec(args);
  auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
                         CODE_ENVIRONMENT_UTILITY, 0);
  common_init_finish(cct.get());
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
| 10,242 | 26.683784 | 80 | cc |
null | ceph-main/src/test/librados/c_read_operations.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// Tests for the C API coverage of atomic read operations
#include <cstring> // For memcpy
#include <errno.h>
#include <string>
#include "include/buffer.h"
#include "include/denc.h"
#include "include/err.h"
#include "include/rados/librados.h"
#include "include/rbd/features.h" // For RBD_FEATURES_ALL
#include "include/scope_guard.h"
#include "test/librados/TestCase.h"
#include "test/librados/test.h"
// Fixed payload and object name shared by all CReadOpsTest cases.
const char *data = "testdata";
const char *obj = "testobj";
const size_t len = strlen(data);
// Fixture with helpers for exercising the C read-op API: object setup and
// teardown, cmpxattr wrappers, and omap/xattr iterator verification.
class CReadOpsTest : public RadosTest {
protected:
  void write_object() {
    // Create an object and write to it
    ASSERT_EQ(0, rados_write(ioctx, obj, data, len, 0));
  }
  void remove_object() {
    ASSERT_EQ(0, rados_remove(ioctx, obj));
  }
  // Run a single cmpxattr read op against `obj` and return the op's result
  // (librados returns > 0 on a successful comparison).
  int cmp_xattr(const char *xattr, const char *value, size_t value_len,
		uint8_t cmp_op)
  {
    rados_read_op_t op = rados_create_read_op();
    rados_read_op_cmpxattr(op, xattr, cmp_op, value, value_len);
    int r = rados_read_op_operate(op, ioctx, obj, 0);
    rados_release_read_op(op);
    return r;
  }
  // Fetch omap entries via three query styles (all vals, all keys, vals by
  // key) in one op and verify each iterator against the expected k/v set.
  void fetch_and_verify_omap_vals(char const* const* keys,
				  char const* const* vals,
				  const size_t *lens,
				  size_t len)
  {
    rados_omap_iter_t iter_vals, iter_keys, iter_vals_by_key;
    int r_vals, r_keys, r_vals_by_key;
    rados_read_op_t op = rados_create_read_op();
    rados_read_op_omap_get_vals2(op, NULL, NULL, 100, &iter_vals, NULL, &r_vals);
    rados_read_op_omap_get_keys2(op, NULL, 100, &iter_keys, NULL, &r_keys);
    rados_read_op_omap_get_vals_by_keys(op, keys, len,
					&iter_vals_by_key, &r_vals_by_key);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    rados_release_read_op(op);
    ASSERT_EQ(0, r_vals);
    ASSERT_EQ(0, r_keys);
    ASSERT_EQ(0, r_vals_by_key);
    // The keys-only iterator must report NULL/zero-length values.
    const char *zeros[len];
    size_t zero_lens[len];
    memset(zeros, 0, sizeof(zeros));
    memset(zero_lens, 0, sizeof(zero_lens));
    compare_omap_vals(keys, vals, lens, len, iter_vals);
    compare_omap_vals(keys, zeros, zero_lens, len, iter_keys);
    compare_omap_vals(keys, vals, lens, len, iter_vals_by_key);
  }
  // Walk `iter` and assert it yields exactly the expected key/value pairs,
  // in order, then that it is exhausted.
  void compare_omap_vals(char const* const* keys,
			 char const* const* vals,
			 const size_t *lens,
			 size_t len,
			 rados_omap_iter_t iter)
  {
    size_t i = 0;
    char *key = NULL;
    char *val = NULL;
    size_t val_len = 0;
    ASSERT_EQ(len, rados_omap_iter_size(iter));
    while (i < len) {
      ASSERT_EQ(0, rados_omap_get_next(iter, &key, &val, &val_len));
      if (val_len == 0 && key == NULL && val == NULL)
	break;
      if (key)
	EXPECT_EQ(std::string(keys[i]), std::string(key));
      else
	EXPECT_EQ(keys[i], key);
      ASSERT_EQ(0, memcmp(vals[i], val, val_len));
      ASSERT_EQ(lens[i], val_len);
      ++i;
    }
    ASSERT_EQ(i, len);
    // Past the end, the iterator must return NULL key/val and length 0.
    ASSERT_EQ(0, rados_omap_get_next(iter, &key, &val, &val_len));
    ASSERT_EQ((char*)NULL, key);
    ASSERT_EQ((char*)NULL, val);
    ASSERT_EQ(0u, val_len);
    rados_omap_get_end(iter);
  }
  // these two used to test omap funcs that accept length for both keys and vals
  void fetch_and_verify_omap_vals2(char const* const* keys,
				   char const* const* vals,
				   const size_t *keylens,
				   const size_t *vallens,
				   size_t len)
  {
    rados_omap_iter_t iter_vals_by_key;
    int r_vals_by_key;
    rados_read_op_t op = rados_create_read_op();
    rados_read_op_omap_get_vals_by_keys2(op, keys, len, keylens,
                                         &iter_vals_by_key, &r_vals_by_key);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    rados_release_read_op(op);
    ASSERT_EQ(0, r_vals_by_key);
    compare_omap_vals2(keys, vals, keylens, vallens, len, iter_vals_by_key);
  }
  // Like compare_omap_vals, but with explicit key lengths (keys may embed
  // NUL bytes).
  void compare_omap_vals2(char const* const* keys,
                          char const* const* vals,
                          const size_t *keylens,
                          const size_t *vallens,
                          size_t len,
                          rados_omap_iter_t iter)
  {
    size_t i = 0;
    char *key = NULL;
    char *val = NULL;
    size_t key_len = 0;
    size_t val_len = 0;
    ASSERT_EQ(len, rados_omap_iter_size(iter));
    while (i < len) {
      ASSERT_EQ(0, rados_omap_get_next2(iter, &key, &val, &key_len, &val_len));
      if (key_len == 0 && val_len == 0 && key == NULL && val == NULL)
        break;
      if (key)
        EXPECT_EQ(std::string(keys[i], keylens[i]), std::string(key, key_len));
      else
        EXPECT_EQ(keys[i], key);
      ASSERT_EQ(val_len, vallens[i]);
      ASSERT_EQ(key_len, keylens[i]);
      ASSERT_EQ(0, memcmp(vals[i], val, val_len));
      ++i;
    }
    ASSERT_EQ(i, len);
    ASSERT_EQ(0, rados_omap_get_next2(iter, &key, &val, &key_len, &val_len));
    ASSERT_EQ((char*)NULL, key);
    ASSERT_EQ((char*)NULL, val);
    ASSERT_EQ(0u, key_len);
    ASSERT_EQ(0u, val_len);
    rados_omap_get_end(iter);
  }
  // Walk an xattr iterator and assert it yields the expected name/value
  // pairs, then that it is exhausted.
  void compare_xattrs(char const* const* keys,
		      char const* const* vals,
		      const size_t *lens,
		      size_t len,
		      rados_xattrs_iter_t iter)
  {
    size_t i = 0;
    char *key = NULL;
    char *val = NULL;
    size_t val_len = 0;
    while (i < len) {
      ASSERT_EQ(0, rados_getxattrs_next(iter, (const char**) &key,
					(const char**) &val, &val_len));
      if (key == NULL)
	break;
      EXPECT_EQ(std::string(keys[i]), std::string(key));
      if (val != NULL) {
        EXPECT_EQ(0, memcmp(vals[i], val, val_len));
      }
      EXPECT_EQ(lens[i], val_len);
      ++i;
    }
    ASSERT_EQ(i, len);
    ASSERT_EQ(0, rados_getxattrs_next(iter, (const char**)&key,
				      (const char**)&val, &val_len));
    ASSERT_EQ((char*)NULL, key);
    ASSERT_EQ((char*)NULL, val);
    ASSERT_EQ(0u, val_len);
    rados_getxattrs_end(iter);
  }
};
TEST_F(CReadOpsTest, NewDelete) {
  // A freshly created read op must be non-null and releasable unused.
  rados_read_op_t read_op = rados_create_read_op();
  ASSERT_TRUE(read_op);
  rados_release_read_op(read_op);
}
// FAILOK lets the whole op succeed even though the class-method call fails
// (-EIO from calling rbd.get_id on a non-rbd object); the sub-op rval
// still reports the failure.
TEST_F(CReadOpsTest, SetOpFlags) {
  write_object();
  rados_read_op_t op = rados_create_read_op();
  size_t bytes_read = 0;
  char *out = NULL;
  int rval = 0;
  rados_read_op_exec(op, "rbd", "get_id", NULL, 0, &out,
		     &bytes_read, &rval);
  rados_read_op_set_flags(op, LIBRADOS_OP_FLAG_FAILOK);
  EXPECT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
  EXPECT_EQ(-EIO, rval);
  EXPECT_EQ(0u, bytes_read);
  EXPECT_EQ((char*)NULL, out);
  rados_release_read_op(op);
  remove_object();
}
// assert_exists must fail with -ENOENT (sync and async paths) before the
// object exists, and succeed afterward.
TEST_F(CReadOpsTest, AssertExists) {
  rados_read_op_t op = rados_create_read_op();
  rados_read_op_assert_exists(op);
  ASSERT_EQ(-ENOENT, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);
  // same check through the async interface
  op = rados_create_read_op();
  rados_read_op_assert_exists(op);
  rados_completion_t completion;
  ASSERT_EQ(0, rados_aio_create_completion(NULL, NULL, NULL, &completion));
  auto sg = make_scope_guard([&] { rados_aio_release(completion); });
  ASSERT_EQ(0, rados_aio_read_op_operate(op, ioctx, completion, obj, 0));
  rados_aio_wait_for_complete(completion);
  ASSERT_EQ(-ENOENT, rados_aio_get_return_value(completion));
  rados_release_read_op(op);
  write_object();
  op = rados_create_read_op();
  rados_read_op_assert_exists(op);
  ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);
  remove_object();
}
// assert_version: -EOVERFLOW when asserting a newer version, -ERANGE when
// asserting an older one, success on an exact match.
TEST_F(CReadOpsTest, AssertVersion) {
  write_object();
  // Write to the object a second time to guarantee that its
  // version number is greater than 0
  write_object();
  uint64_t v = rados_get_last_version(ioctx);
  rados_read_op_t op = rados_create_read_op();
  rados_read_op_assert_version(op, v+1);
  ASSERT_EQ(-EOVERFLOW, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);
  op = rados_create_read_op();
  rados_read_op_assert_version(op, v-1);
  ASSERT_EQ(-ERANGE, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);
  op = rados_create_read_op();
  rados_read_op_assert_version(op, v);
  ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);
  remove_object();
}
// cmpxattr: matched comparisons return 1, failed ones -ECANCELED.  Covers
// equal, shorter, and greater comparison values, plus embedded NUL bytes.
TEST_F(CReadOpsTest, CmpXattr) {
  write_object();
  char buf[len];
  memset(buf, 0xcc, sizeof(buf));
  const char *xattr = "test";
  rados_setxattr(ioctx, obj, xattr, buf, sizeof(buf));
  // equal value
  EXPECT_EQ(1, cmp_xattr(xattr, buf, sizeof(buf), LIBRADOS_CMPXATTR_OP_EQ));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, sizeof(buf), LIBRADOS_CMPXATTR_OP_NE));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, sizeof(buf), LIBRADOS_CMPXATTR_OP_GT));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, sizeof(buf), LIBRADOS_CMPXATTR_OP_GTE));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, sizeof(buf), LIBRADOS_CMPXATTR_OP_LT));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, sizeof(buf), LIBRADOS_CMPXATTR_OP_LTE));
  // < value
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, sizeof(buf) - 1, LIBRADOS_CMPXATTR_OP_EQ));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, sizeof(buf) - 1, LIBRADOS_CMPXATTR_OP_NE));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, sizeof(buf) - 1, LIBRADOS_CMPXATTR_OP_GT));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, sizeof(buf) - 1, LIBRADOS_CMPXATTR_OP_GTE));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, sizeof(buf) - 1, LIBRADOS_CMPXATTR_OP_LT));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, sizeof(buf) - 1, LIBRADOS_CMPXATTR_OP_LTE));
  // > value
  memset(buf, 0xcd, sizeof(buf));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, sizeof(buf), LIBRADOS_CMPXATTR_OP_EQ));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, sizeof(buf), LIBRADOS_CMPXATTR_OP_NE));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, sizeof(buf), LIBRADOS_CMPXATTR_OP_GT));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, sizeof(buf), LIBRADOS_CMPXATTR_OP_GTE));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, sizeof(buf), LIBRADOS_CMPXATTR_OP_LT));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, sizeof(buf), LIBRADOS_CMPXATTR_OP_LTE));
  // check that null bytes are compared correctly
  rados_setxattr(ioctx, obj, xattr, "\0\0", 2);
  buf[0] = '\0';
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, 2, LIBRADOS_CMPXATTR_OP_EQ));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, 2, LIBRADOS_CMPXATTR_OP_NE));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, 2, LIBRADOS_CMPXATTR_OP_GT));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, 2, LIBRADOS_CMPXATTR_OP_GTE));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, 2, LIBRADOS_CMPXATTR_OP_LT));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, 2, LIBRADOS_CMPXATTR_OP_LTE));
  buf[1] = '\0';
  EXPECT_EQ(1, cmp_xattr(xattr, buf, 2, LIBRADOS_CMPXATTR_OP_EQ));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, 2, LIBRADOS_CMPXATTR_OP_NE));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, 2, LIBRADOS_CMPXATTR_OP_GT));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, 2, LIBRADOS_CMPXATTR_OP_GTE));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, 2, LIBRADOS_CMPXATTR_OP_LT));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, 2, LIBRADOS_CMPXATTR_OP_LTE));
  remove_object();
}
// Full-object read through the read-op API with every combination of the
// optional bytes_read/rval out parameters, plus a fadvise-flagged read.
TEST_F(CReadOpsTest, Read) {
  write_object();
  char buf[len];
  // check that using read_ops returns the same data with
  // or without bytes_read and rval out params
  {
    rados_read_op_t op = rados_create_read_op();
    rados_read_op_read(op, 0, len, buf, NULL, NULL);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    ASSERT_EQ(0, memcmp(data, buf, len));
    rados_release_read_op(op);
  }
  {
    rados_read_op_t op = rados_create_read_op();
    int rval;
    rados_read_op_read(op, 0, len, buf, NULL, &rval);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    ASSERT_EQ(0, rval);
    ASSERT_EQ(0, memcmp(data, buf, len));
    rados_release_read_op(op);
  }
  {
    rados_read_op_t op = rados_create_read_op();
    size_t bytes_read = 0;
    rados_read_op_read(op, 0, len, buf, &bytes_read, NULL);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    ASSERT_EQ(len, bytes_read);
    ASSERT_EQ(0, memcmp(data, buf, len));
    rados_release_read_op(op);
  }
  {
    rados_read_op_t op = rados_create_read_op();
    size_t bytes_read = 0;
    int rval;
    rados_read_op_read(op, 0, len, buf, &bytes_read, &rval);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    ASSERT_EQ(len, bytes_read);
    ASSERT_EQ(0, rval);
    ASSERT_EQ(0, memcmp(data, buf, len));
    rados_release_read_op(op);
  }
  {
    // fadvise flags must not change the result
    rados_read_op_t op = rados_create_read_op();
    size_t bytes_read = 0;
    int rval;
    rados_read_op_read(op, 0, len, buf, &bytes_read, &rval);
    rados_read_op_set_flags(op, LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    ASSERT_EQ(len, bytes_read);
    ASSERT_EQ(0, rval);
    ASSERT_EQ(0, memcmp(data, buf, len));
    rados_release_read_op(op);
  }
  remove_object();
}
// Checksum read op: xxhash64/xxhash32 and crc32c, with and without
// per-chunk splitting.  The result buffer starts with a count of checksum
// values, followed by the values themselves.
TEST_F(CReadOpsTest, Checksum) {
  write_object();
  {
    rados_read_op_t op = rados_create_read_op();
    ceph_le64 init_value(-1);
    rados_read_op_checksum(op, LIBRADOS_CHECKSUM_TYPE_XXHASH64,
			   reinterpret_cast<char *>(&init_value),
			   sizeof(init_value), 0, len, 0, NULL, 0, NULL);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    rados_release_read_op(op);
  }
  {
    // single crc32c over the whole object: crc[0] = count, crc[1] = value
    ceph_le32 init_value(-1);
    ceph_le32 crc[2];
    rados_read_op_t op = rados_create_read_op();
    rados_read_op_checksum(op, LIBRADOS_CHECKSUM_TYPE_CRC32C,
			   reinterpret_cast<char *>(&init_value),
			   sizeof(init_value), 0, len, 0,
			   reinterpret_cast<char *>(&crc), sizeof(crc),
			   nullptr);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    ASSERT_EQ(1U, crc[0]);
    uint32_t expected_crc = ceph_crc32c(
      -1, reinterpret_cast<const uint8_t*>(data), static_cast<uint32_t>(len));
    ASSERT_EQ(expected_crc, crc[1]);
    rados_release_read_op(op);
  }
  {
    ceph_le32 init_value(-1);
    int rval;
    rados_read_op_t op = rados_create_read_op();
    rados_read_op_checksum(op, LIBRADOS_CHECKSUM_TYPE_XXHASH32,
			   reinterpret_cast<char *>(&init_value),
			   sizeof(init_value), 0, len, 0, nullptr, 0, &rval);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    ASSERT_EQ(0, rval);
    rados_release_read_op(op);
  }
  {
    // chunk_size = 4 splits the 8-byte object into two checksummed chunks
    ceph_le32 init_value(-1);
    ceph_le32 crc[3];
    int rval;
    rados_read_op_t op = rados_create_read_op();
    rados_read_op_checksum(op, LIBRADOS_CHECKSUM_TYPE_CRC32C,
			   reinterpret_cast<char *>(&init_value),
			   sizeof(init_value), 0, len, 4,
			   reinterpret_cast<char *>(&crc), sizeof(crc), &rval);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    ASSERT_EQ(2U, crc[0]);
    uint32_t expected_crc[2];
    expected_crc[0] = ceph_crc32c(
      -1, reinterpret_cast<const uint8_t*>(data), 4U);
    expected_crc[1] = ceph_crc32c(
      -1, reinterpret_cast<const uint8_t*>(data + 4), 4U);
    ASSERT_EQ(expected_crc[0], crc[1]);
    ASSERT_EQ(expected_crc[1], crc[2]);
    ASSERT_EQ(0, rval);
    rados_release_read_op(op);
  }
  remove_object();
}
// A read submitted with ORDER_READS_WRITES must still return the object's
// full contents.
TEST_F(CReadOpsTest, RWOrderedRead) {
  write_object();

  char out[len];
  size_t nread = 0;
  int op_rval;
  rados_read_op_t read_op = rados_create_read_op();
  rados_read_op_read(read_op, 0, len, out, &nread, &op_rval);
  rados_read_op_set_flags(read_op, LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
  ASSERT_EQ(0, rados_read_op_operate(read_op, ioctx, obj,
				     LIBRADOS_OPERATION_ORDER_READS_WRITES));
  ASSERT_EQ(len, nread);
  ASSERT_EQ(0, op_rval);
  ASSERT_EQ(0, memcmp(data, out, len));
  rados_release_read_op(read_op);

  remove_object();
}
// Reading past the end of the object succeeds and returns only the bytes
// that exist; bytes_read reports the actual (short) length.
TEST_F(CReadOpsTest, ShortRead) {
  write_object();
  char buf[len * 2];
  // check that using read_ops returns the same data with
  // or without bytes_read and rval out params
  {
    rados_read_op_t op = rados_create_read_op();
    rados_read_op_read(op, 0, len * 2, buf, NULL, NULL);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    ASSERT_EQ(0, memcmp(data, buf, len));
    rados_release_read_op(op);
  }
  {
    rados_read_op_t op = rados_create_read_op();
    int rval;
    rados_read_op_read(op, 0, len * 2, buf, NULL, &rval);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    ASSERT_EQ(0, rval);
    ASSERT_EQ(0, memcmp(data, buf, len));
    rados_release_read_op(op);
  }
  {
    rados_read_op_t op = rados_create_read_op();
    size_t bytes_read = 0;
    rados_read_op_read(op, 0, len * 2, buf, &bytes_read, NULL);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    ASSERT_EQ(len, bytes_read);
    ASSERT_EQ(0, memcmp(data, buf, len));
    rados_release_read_op(op);
  }
  {
    rados_read_op_t op = rados_create_read_op();
    size_t bytes_read = 0;
    int rval;
    rados_read_op_read(op, 0, len * 2, buf, &bytes_read, &rval);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    ASSERT_EQ(len, bytes_read);
    ASSERT_EQ(0, rval);
    ASSERT_EQ(0, memcmp(data, buf, len));
    rados_release_read_op(op);
  }
  remove_object();
}
// Class-method exec through the read-op API: call rbd.get_all_features and
// decode the returned buffer as a uint64_t feature mask.
TEST_F(CReadOpsTest, Exec) {
  // create object so we don't get -ENOENT
  write_object();
  rados_read_op_t op = rados_create_read_op();
  ASSERT_TRUE(op);
  size_t bytes_read = 0;
  char *out = NULL;
  int rval = 0;
  rados_read_op_exec(op, "rbd", "get_all_features", NULL, 0, &out,
		     &bytes_read, &rval);
  ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);
  EXPECT_EQ(0, rval);
  EXPECT_TRUE(out);
  uint64_t features;
  EXPECT_EQ(sizeof(features), bytes_read);
  // make sure buffer is at least as long as it claims
  bufferlist bl;
  bl.append(out, bytes_read);
  auto it = bl.cbegin();
  ceph::decode(features, it);
  ASSERT_EQ(RBD_FEATURES_ALL, features);
  // exec output buffers are allocated by librados and must be freed by us
  rados_buffer_free(out);
  remove_object();
}
// exec_user_buf variant: results land in a caller-supplied buffer.  A
// too-small buffer yields -ERANGE with zero bytes read; NULL out params
// are accepted.
TEST_F(CReadOpsTest, ExecUserBuf) {
  // create object so we don't get -ENOENT
  write_object();
  rados_read_op_t op = rados_create_read_op();
  size_t bytes_read = 0;
  uint64_t features;
  char out[sizeof(features)];
  int rval = 0;
  rados_read_op_exec_user_buf(op, "rbd", "get_all_features", NULL, 0, out,
			      sizeof(out), &bytes_read, &rval);
  ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);
  EXPECT_EQ(0, rval);
  EXPECT_EQ(sizeof(features), bytes_read);
  // buffer too short
  bytes_read = 1024;
  op = rados_create_read_op();
  rados_read_op_exec_user_buf(op, "rbd", "get_all_features", NULL, 0, out,
			      sizeof(features) - 1, &bytes_read, &rval);
  ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);
  EXPECT_EQ(0u, bytes_read);
  EXPECT_EQ(-ERANGE, rval);
  // input buffer and no rval or bytes_read
  op = rados_create_read_op();
  rados_read_op_exec_user_buf(op, "rbd", "get_all_features", out, sizeof(out),
			      out, sizeof(out), NULL, NULL);
  ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);
  remove_object();
}
// stat read op (time_t mtime variant): missing object fails the op with
// -ENOENT and sets rval to -EIO without touching the outputs; an existing
// object reports its size and the mtime set at write time.
TEST_F(CReadOpsTest, Stat) {
  rados_read_op_t op = rados_create_read_op();
  uint64_t size = 1;
  int rval = 0;
  rados_read_op_stat(op, &size, NULL, &rval);
  EXPECT_EQ(-ENOENT, rados_read_op_operate(op, ioctx, obj, 0));
  EXPECT_EQ(-EIO, rval);
  EXPECT_EQ(1u, size);
  rados_release_read_op(op);
  // write the object with an explicit mtime so we can verify it below
  time_t ts = 1457129052;
  rados_write_op_t wop = rados_create_write_op();
  rados_write_op_write(wop, data, len, 0);
  ASSERT_EQ(0, rados_write_op_operate(wop, ioctx, obj, &ts, 0));
  rados_release_write_op(wop);
  time_t ts2;
  op = rados_create_read_op();
  rados_read_op_stat(op, &size, &ts2, &rval);
  ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
  EXPECT_EQ(0, rval);
  EXPECT_EQ(len, size);
  EXPECT_EQ(ts2, ts);
  rados_release_read_op(op);
  // all-NULL out params are valid
  op = rados_create_read_op();
  rados_read_op_stat(op, NULL, NULL, NULL);
  EXPECT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);
  remove_object();
  op = rados_create_read_op();
  rados_read_op_stat(op, NULL, NULL, NULL);
  EXPECT_EQ(-ENOENT, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);
}
// stat2 read op: same as Stat but with nanosecond-resolution timespec
// mtime, round-tripped through write_op_operate2.
TEST_F(CReadOpsTest, Stat2) {
  rados_read_op_t op = rados_create_read_op();
  uint64_t size = 1;
  int rval = 0;
  rados_read_op_stat2(op, &size, NULL, &rval);
  EXPECT_EQ(-ENOENT, rados_read_op_operate(op, ioctx, obj, 0));
  EXPECT_EQ(-EIO, rval);
  EXPECT_EQ(1u, size);
  rados_release_read_op(op);
  struct timespec ts;
  ts.tv_sec = 1457129052;
  ts.tv_nsec = 123456789;
  rados_write_op_t wop = rados_create_write_op();
  rados_write_op_write(wop, data, len, 0);
  ASSERT_EQ(0, rados_write_op_operate2(wop, ioctx, obj, &ts, 0));
  rados_release_write_op(wop);
  struct timespec ts2 = {};
  op = rados_create_read_op();
  rados_read_op_stat2(op, &size, &ts2, &rval);
  ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
  EXPECT_EQ(0, rval);
  EXPECT_EQ(len, size);
  // nanoseconds must survive the round trip
  EXPECT_EQ(ts2.tv_sec, ts.tv_sec);
  EXPECT_EQ(ts2.tv_nsec, ts.tv_nsec);
  rados_release_read_op(op);
  op = rados_create_read_op();
  rados_read_op_stat2(op, NULL, NULL, NULL);
  EXPECT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);
  remove_object();
  op = rados_create_read_op();
  rados_read_op_stat2(op, NULL, NULL, NULL);
  EXPECT_EQ(-ENOENT, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);
}
// full omap round-trip through the C read/write op APIs: set, list with
// prefix/filter, compare (omap_cmp), guarded removal, and clear.
TEST_F(CReadOpsTest, Omap) {
  char *keys[] = {(char*)"bar",
                  (char*)"foo",
                  (char*)"test1",
                  (char*)"test2"};
  char *vals[] = {(char*)"",
                  (char*)"\0",
                  (char*)"abc",
                  (char*)"va\0lue"};
  // value lengths; vals[3] deliberately contains an embedded NUL byte
  size_t lens[] = {0, 1, 3, 6};
  // check for -ENOENT before the object exists and when it exists
  // with no omap entries
  rados_omap_iter_t iter_vals;
  rados_read_op_t rop = rados_create_read_op();
  rados_read_op_omap_get_vals2(rop, "", "", 10, &iter_vals, NULL, NULL);
  ASSERT_EQ(-ENOENT, rados_read_op_operate(rop, ioctx, obj, 0));
  rados_release_read_op(rop);
  // even after failure the iterator must be valid (and empty)
  compare_omap_vals(NULL, NULL, NULL, 0, iter_vals);
  write_object();
  fetch_and_verify_omap_vals(NULL, NULL, NULL, 0);
  // write and check for the k/v pairs
  rados_write_op_t op = rados_create_write_op();
  rados_write_op_omap_set(op, keys, vals, lens, 4);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, obj, NULL, 0));
  rados_release_write_op(op);
  fetch_and_verify_omap_vals(keys, vals, lens, 4);
  // list with start_after/filter_prefix ("test") and max_return of 1:
  // both iterators should contain only "test1"
  rados_omap_iter_t iter_keys;
  int r_vals = -1, r_keys = -1;
  rop = rados_create_read_op();
  rados_read_op_omap_get_vals2(rop, "", "test", 1, &iter_vals, NULL, &r_vals);
  rados_read_op_omap_get_keys2(rop, "test", 1, &iter_keys, NULL, &r_keys);
  ASSERT_EQ(0, rados_read_op_operate(rop, ioctx, obj, 0));
  rados_release_read_op(rop);
  EXPECT_EQ(0, r_vals);
  EXPECT_EQ(0, r_keys);
  EXPECT_EQ(1u, rados_omap_iter_size(iter_vals));
  EXPECT_EQ(1u, rados_omap_iter_size(iter_keys));
  compare_omap_vals(&keys[2], &vals[2], &lens[2], 1, iter_vals);
  // the keys-only iterator carries no values, so compare against the
  // empty value/length at index 0
  compare_omap_vals(&keys[2], &vals[0], &lens[0], 1, iter_keys);
  // check omap_cmp finds all expected values
  rop = rados_create_read_op();
  int rvals[4];
  for (int i = 0; i < 4; ++i)
    rados_read_op_omap_cmp(rop, keys[i], LIBRADOS_CMPXATTR_OP_EQ,
                           vals[i], lens[i], &rvals[i]);
  EXPECT_EQ(0, rados_read_op_operate(rop, ioctx, obj, 0));
  rados_release_read_op(rop);
  for (int i = 0; i < 4; ++i)
    EXPECT_EQ(0, rvals[i]);
  // try to remove keys with a guard that should fail
  op = rados_create_write_op();
  rados_write_op_omap_cmp(op, keys[2], LIBRADOS_CMPXATTR_OP_LT,
                          vals[2], lens[2], &r_vals);
  rados_write_op_omap_rm_keys(op, keys, 2);
  EXPECT_EQ(-ECANCELED, rados_write_op_operate(op, ioctx, obj, NULL, 0));
  rados_release_write_op(op);
  // see http://tracker.ceph.com/issues/19518
  //ASSERT_EQ(-ECANCELED, r_vals);
  // verifying the keys are still there, and then remove them
  op = rados_create_write_op();
  rados_write_op_omap_cmp(op, keys[0], LIBRADOS_CMPXATTR_OP_EQ,
                          vals[0], lens[0], NULL);
  rados_write_op_omap_cmp(op, keys[1], LIBRADOS_CMPXATTR_OP_EQ,
                          vals[1], lens[1], NULL);
  rados_write_op_omap_rm_keys(op, keys, 2);
  EXPECT_EQ(0, rados_write_op_operate(op, ioctx, obj, NULL, 0));
  rados_release_write_op(op);
  fetch_and_verify_omap_vals(&keys[2], &vals[2], &lens[2], 2);
  // clear the rest and check there are none left
  op = rados_create_write_op();
  rados_write_op_omap_clear(op);
  EXPECT_EQ(0, rados_write_op_operate(op, ioctx, obj, NULL, 0));
  rados_release_write_op(op);
  fetch_and_verify_omap_vals(NULL, NULL, NULL, 0);
  remove_object();
}
// like Omap, but every key and value contains an embedded NUL byte, so the
// length-aware *2 variants (omap_set2/cmp2/rm_keys2) must be used throughout.
TEST_F(CReadOpsTest, OmapNuls) {
  char *keys[] = {(char*)"1\0bar",
                  (char*)"2baar\0",
                  (char*)"3baa\0rr"};
  char *vals[] = {(char*)"_\0var",
                  (char*)"_vaar\0",
                  (char*)"__vaa\0rr"};
  // explicit key/value byte lengths, since strlen() would stop at the NULs
  size_t nklens[] = {5, 6, 7};
  size_t nvlens[] = {5, 6, 8};
  const int paircount = 3;
  // check for -ENOENT before the object exists and when it exists
  // with no omap entries
  rados_omap_iter_t iter_vals;
  rados_read_op_t rop = rados_create_read_op();
  rados_read_op_omap_get_vals2(rop, "", "", 10, &iter_vals, NULL, NULL);
  ASSERT_EQ(-ENOENT, rados_read_op_operate(rop, ioctx, obj, 0));
  rados_release_read_op(rop);
  compare_omap_vals(NULL, NULL, NULL, 0, iter_vals);
  write_object();
  fetch_and_verify_omap_vals(NULL, NULL, NULL, 0);
  // write and check for the k/v pairs
  rados_write_op_t op = rados_create_write_op();
  rados_write_op_omap_set2(op, keys, vals, nklens, nvlens, paircount);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, obj, NULL, 0));
  rados_release_write_op(op);
  fetch_and_verify_omap_vals2(keys, vals, nklens, nvlens, paircount);
  // check omap_cmp finds all expected values
  rop = rados_create_read_op();
  int rvals[4];
  for (int i = 0; i < paircount; ++i)
    rados_read_op_omap_cmp2(rop, keys[i], LIBRADOS_CMPXATTR_OP_EQ,
                            vals[i], nklens[i], nvlens[i], &rvals[i]);
  EXPECT_EQ(0, rados_read_op_operate(rop, ioctx, obj, 0));
  rados_release_read_op(rop);
  for (int i = 0; i < paircount; ++i)
    EXPECT_EQ(0, rvals[i]);
  // try to remove keys with a guard that should fail
  int r_vals = -1;
  op = rados_create_write_op();
  rados_write_op_omap_cmp2(op, keys[2], LIBRADOS_CMPXATTR_OP_LT,
                           vals[2], nklens[2], nvlens[2], &r_vals);
  rados_write_op_omap_rm_keys(op, keys, 2);
  EXPECT_EQ(-ECANCELED, rados_write_op_operate(op, ioctx, obj, NULL, 0));
  rados_release_write_op(op);
  // verifying the keys are still there, and then remove them
  op = rados_create_write_op();
  rados_write_op_omap_cmp2(op, keys[0], LIBRADOS_CMPXATTR_OP_EQ,
                           vals[0], nklens[0], nvlens[0], NULL);
  rados_write_op_omap_cmp2(op, keys[1], LIBRADOS_CMPXATTR_OP_EQ,
                           vals[1], nklens[1], nvlens[1], NULL);
  rados_write_op_omap_rm_keys2(op, keys, nklens, 2);
  EXPECT_EQ(0, rados_write_op_operate(op, ioctx, obj, NULL, 0));
  rados_release_write_op(op);
  fetch_and_verify_omap_vals2(&keys[2], &vals[2], &nklens[2], &nvlens[2], 1);
  // clear the rest and check there are none left
  op = rados_create_write_op();
  rados_write_op_omap_clear(op);
  EXPECT_EQ(0, rados_write_op_operate(op, ioctx, obj, NULL, 0));
  rados_release_write_op(op);
  fetch_and_verify_omap_vals(NULL, NULL, NULL, 0);
  remove_object();
}
// getxattrs via a read op: empty result for a fresh object, then all four
// xattrs (including zero-length and NUL-containing values) after setxattr.
TEST_F(CReadOpsTest, GetXattrs) {
  write_object();
  char *keys[] = {(char*)"bar",
                  (char*)"foo",
                  (char*)"test1",
                  (char*)"test2"};
  char *vals[] = {(char*)"",
                  (char*)"\0",
                  (char*)"abc",
                  (char*)"va\0lue"};
  // value byte lengths; vals[3] contains an embedded NUL
  size_t lens[] = {0, 1, 3, 6};
  int rval = 1;
  rados_read_op_t op = rados_create_read_op();
  rados_xattrs_iter_t it;
  rados_read_op_getxattrs(op, &it, &rval);
  EXPECT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
  EXPECT_EQ(0, rval);
  rados_release_read_op(op);
  // no xattrs set yet, iterator must be empty
  compare_xattrs(keys, vals, lens, 0, it);
  for (int i = 0; i < 4; ++i)
    rados_setxattr(ioctx, obj, keys[i], vals[i], lens[i]);
  rval = 1;
  op = rados_create_read_op();
  rados_read_op_getxattrs(op, &it, &rval);
  EXPECT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
  EXPECT_EQ(0, rval);
  rados_release_read_op(op);
  // all four xattrs come back with their exact lengths
  compare_xattrs(keys, vals, lens, 4, it);
  remove_object();
}
// cmpext in a read op: a matching comparison lets the chained read run;
// a mismatch aborts the op with -MAX_ERRNO - offset_of_first_mismatch.
TEST_F(CReadOpsTest, CmpExt) {
  char buf[len];
  size_t bytes_read = 0;
  int cmpext_val = 0;
  int read_val = 0;
  write_object();
  // cmpext with match should ensure that the following read is successful
  rados_read_op_t op = rados_create_read_op();
  ASSERT_TRUE(op);
  // @obj, @data and @len correspond to object initialised by write_object()
  rados_read_op_cmpext(op, data, len, 0, &cmpext_val);
  rados_read_op_read(op, 0, len, buf, &bytes_read, &read_val);
  ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
  ASSERT_EQ(len, bytes_read);
  ASSERT_EQ(0, memcmp(data, buf, len));
  ASSERT_EQ(cmpext_val, 0);
  rados_release_read_op(op);
  // cmpext with mismatch should fail and fill mismatch_buf accordingly
  memset(buf, 0, sizeof(buf));
  bytes_read = 0;
  cmpext_val = 0;
  read_val = 0;
  op = rados_create_read_op();
  ASSERT_TRUE(op);
  // @obj, @data and @len correspond to object initialised by write_object()
  rados_read_op_cmpext(op, "mismatch", strlen("mismatch"), 0, &cmpext_val);
  rados_read_op_read(op, 0, len, buf, &bytes_read, &read_val);
  // mismatch at byte 0, so the encoded return is exactly -MAX_ERRNO
  ASSERT_EQ(-MAX_ERRNO, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);
  ASSERT_EQ(-MAX_ERRNO, cmpext_val);
  remove_object();
}
| 29,256 | 31.652902 | 90 | cc |
null | ceph-main/src/test/librados/c_write_operations.cc | // Tests for the C API coverage of atomic write operations
#include <errno.h>
#include "gtest/gtest.h"
#include "include/err.h"
#include "include/rados/librados.h"
#include "test/librados/test.h"
// Sanity check: a write op can be allocated and released without ever
// being submitted to a cluster.
TEST(LibradosCWriteOps, NewDelete) {
  rados_write_op_t write_op = rados_create_write_op();
  ASSERT_TRUE(write_op);
  rados_release_write_op(write_op);
}
// assert_exists must fail a write op on a nonexistent object, both through
// the synchronous and the asynchronous operate paths.
TEST(LibRadosCWriteOps, assertExists) {
  rados_t cluster;
  rados_ioctx_t ioctx;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  rados_ioctx_create(cluster, pool_name.c_str(), &ioctx);
  rados_write_op_t op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_assert_exists(op);
  // -2, ENOENT
  ASSERT_EQ(-2, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  rados_release_write_op(op);
  // same check, but via the async path: the error arrives through the
  // completion's return value
  rados_write_op_t op2 = rados_create_write_op();
  ASSERT_TRUE(op2);
  rados_write_op_assert_exists(op2);
  rados_completion_t completion;
  ASSERT_EQ(0, rados_aio_create_completion(NULL, NULL, NULL, &completion));
  ASSERT_EQ(0, rados_aio_write_op_operate(op2, ioctx, completion, "test", NULL, 0));
  rados_aio_wait_for_complete(completion);
  ASSERT_EQ(-2, rados_aio_get_return_value(completion));
  rados_aio_release(completion);
  rados_ioctx_destroy(ioctx);
  rados_release_write_op(op2);
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// assert_version guard: ops asserting a version above/below the object's
// actual version fail with -EOVERFLOW/-ERANGE; the exact version succeeds.
TEST(LibRadosCWriteOps, WriteOpAssertVersion) {
  rados_t cluster;
  rados_ioctx_t ioctx;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  rados_ioctx_create(cluster, pool_name.c_str(), &ioctx);
  rados_write_op_t op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_create(op, LIBRADOS_CREATE_EXCLUSIVE, NULL);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  rados_release_write_op(op);
  // Write to the object a second time to guarantee that its
  // version number is greater than 0
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_write_full(op, "hi", 2);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  rados_release_write_op(op);
  uint64_t v = rados_get_last_version(ioctx);
  // asserting a version newer than the object's fails with -EOVERFLOW
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_assert_version(op, v+1);
  ASSERT_EQ(-EOVERFLOW, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  rados_release_write_op(op);
  // asserting an older version fails with -ERANGE
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_assert_version(op, v-1);
  ASSERT_EQ(-ERANGE, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  rados_release_write_op(op);
  // asserting the exact current version succeeds
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_assert_version(op, v);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  rados_release_write_op(op);
  rados_ioctx_destroy(ioctx);
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// xattr ops inside write ops: set, guarded removal via cmpxattr, and the
// -ECANCELED failure when the cmpxattr guard no longer matches.
TEST(LibRadosCWriteOps, Xattrs) {
  rados_t cluster;
  rados_ioctx_t ioctx;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  rados_ioctx_create(cluster, pool_name.c_str(), &ioctx);
  // Create an object with an xattr
  rados_write_op_t op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_create(op, LIBRADOS_CREATE_EXCLUSIVE, NULL);
  rados_write_op_setxattr(op, "key", "value", 5);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  rados_release_write_op(op);
  // Check that xattr exists, if it does, delete it.
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_create(op, LIBRADOS_CREATE_IDEMPOTENT, NULL);
  rados_write_op_cmpxattr(op, "key", LIBRADOS_CMPXATTR_OP_EQ, "value", 5);
  rados_write_op_rmxattr(op, "key");
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  rados_release_write_op(op);
  // Check the xattr exits, if it does, add it again (will fail) with -125
  // (ECANCELED)
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_cmpxattr(op, "key", LIBRADOS_CMPXATTR_OP_EQ, "value", 5);
  rados_write_op_setxattr(op, "key", "value", 5);
  ASSERT_EQ(-ECANCELED, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  rados_release_write_op(op);
  rados_ioctx_destroy(ioctx);
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// basic data ops chained in write ops: write/write_full, fadvise flags,
// truncate+append, and zero+remove; each step verified with rados_read().
TEST(LibRadosCWriteOps, Write) {
  rados_t cluster;
  rados_ioctx_t ioctx;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  rados_ioctx_create(cluster, pool_name.c_str(), &ioctx);
  // Create an object, write and write full to it
  rados_write_op_t op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_create(op, LIBRADOS_CREATE_EXCLUSIVE, NULL);
  rados_write_op_write(op, "four", 4, 0);
  rados_write_op_write_full(op, "hi", 2);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  // write_full ran last, so the object holds only the 2-byte "hi"
  char hi[4];
  ASSERT_EQ(2, rados_read(ioctx, "test", hi, 4, 0));
  rados_release_write_op(op);
  //create write op with iohint
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_write_full(op, "ceph", 4);
  rados_write_op_set_flags(op, LIBRADOS_OP_FLAG_FADVISE_NOCACHE);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  ASSERT_EQ(4, rados_read(ioctx, "test", hi, 4, 0));
  rados_release_write_op(op);
  // Truncate and append
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_truncate(op, 1);
  rados_write_op_append(op, "hi", 2);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  // 1 byte kept by truncate + 2 appended = 3 bytes total
  ASSERT_EQ(3, rados_read(ioctx, "test", hi, 4, 0));
  rados_release_write_op(op);
  // zero and remove
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_zero(op, 0, 3);
  rados_write_op_remove(op);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  // ENOENT
  ASSERT_EQ(-2, rados_read(ioctx, "test", hi, 4, 0));
  rados_release_write_op(op);
  rados_ioctx_destroy(ioctx);
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// exec inside a write op: invoke the "hello" object class's record_hello
// method and verify the payload it writes into the object.
TEST(LibRadosCWriteOps, Exec) {
  rados_t cluster;
  rados_ioctx_t ioctx;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  rados_ioctx_create(cluster, pool_name.c_str(), &ioctx);
  int rval = 1;
  rados_write_op_t op = rados_create_write_op();
  rados_write_op_exec(op, "hello", "record_hello", "test", 4, &rval);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  rados_release_write_op(op);
  ASSERT_EQ(0, rval);
  // the cls method wrote "Hello, test!" (12 bytes) into the object
  char hi[100];
  ASSERT_EQ(12, rados_read(ioctx, "test", hi, 100, 0));
  hi[12] = '\0';
  ASSERT_EQ(0, strcmp("Hello, test!", hi));
  rados_ioctx_destroy(ioctx);
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// writesame: replicate a 4-byte pattern 4 times (write_len = 4 * data_len)
// and verify the repeated contents on read-back.
TEST(LibRadosCWriteOps, WriteSame) {
  rados_t cluster;
  rados_ioctx_t ioctx;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  rados_ioctx_create(cluster, pool_name.c_str(), &ioctx);
  // Create an object, write to it using writesame
  rados_write_op_t op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_create(op, LIBRADOS_CREATE_EXCLUSIVE, NULL);
  rados_write_op_writesame(op, "four", 4, 4 * 4, 0);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  char hi[4 * 4];
  ASSERT_EQ(sizeof(hi), static_cast<std::size_t>(
	      rados_read(ioctx, "test", hi,sizeof(hi), 0)));
  rados_release_write_op(op);
  ASSERT_EQ(0, memcmp("fourfourfourfour", hi, sizeof(hi)));
  // cleanup
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_remove(op);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  rados_release_write_op(op);
  rados_ioctx_destroy(ioctx);
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// cmpext guard on write ops: a match lets the chained write through; a
// mismatch fails with -MAX_ERRNO - offset_of_first_mismatching_byte, with
// the per-op value reported via LIBRADOS_OPERATION_RETURNVEC.
TEST(LibRadosCWriteOps, CmpExt) {
  rados_t cluster;
  rados_ioctx_t ioctx;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  rados_ioctx_create(cluster, pool_name.c_str(), &ioctx);
  // create an object, write to it using writesame
  rados_write_op_t op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_create(op, LIBRADOS_CREATE_EXCLUSIVE, NULL);
  rados_write_op_write(op, "four", 4, 0);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  rados_release_write_op(op);
  char hi[4];
  ASSERT_EQ(sizeof(hi), static_cast<std::size_t>(rados_read(ioctx, "test", hi, sizeof(hi), 0)));
  ASSERT_EQ(0, memcmp("four", hi, sizeof(hi)));
  // compare and overwrite on (expected) match
  int val = 0;
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_cmpext(op, "four", 4, 0, &val);
  rados_write_op_write(op, "five", 4, 0);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  ASSERT_EQ(0, val);
  rados_release_write_op(op);
  ASSERT_EQ(sizeof(hi), static_cast<std::size_t>(rados_read(ioctx, "test", hi, sizeof(hi), 0)));
  ASSERT_EQ(0, memcmp("five", hi, sizeof(hi)));
  // Check offset return error value
  // object holds "five"; comparing against "four" mismatches at offset 1
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_cmpext(op, "four", 4, 0, &val);
  rados_write_op_write(op, "six ", 4, 0);
  ASSERT_EQ(-MAX_ERRNO - 1, rados_write_op_operate(op, ioctx, "test", NULL,
                                                   LIBRADOS_OPERATION_RETURNVEC));
  ASSERT_EQ(-MAX_ERRNO - 1, val);
  // compare and bail before write due to mismatch
  // do it 1000 times to make sure we are hitting
  // some socket injection
  // each iteration targets a fresh, nonexistent object, so the mismatch is
  // at offset 0 and the encoded return is exactly -MAX_ERRNO
  for (auto i = 0; i < 1000; ++i) {
    val = 0;
    op = rados_create_write_op();
    ASSERT_TRUE(op);
    rados_write_op_cmpext(op, "four", 4, 0, &val);
    rados_write_op_write(op, "six ", 4, 0);
    std::string const s = "test_" + std::to_string(i);
    ASSERT_EQ(-MAX_ERRNO , rados_write_op_operate(op, ioctx, s.c_str(), NULL,
                                                  LIBRADOS_OPERATION_RETURNVEC));
    ASSERT_EQ(-MAX_ERRNO , val);
  }
  // cleanup
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_remove(op);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  rados_ioctx_destroy(ioctx);
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
| 10,088 | 33.316327 | 96 | cc |
null | ceph-main/src/test/librados/cls.cc | #include <errno.h>
#include <map>
#include <sstream>
#include <string>
#include "gtest/gtest.h"
#include "include/rados/librados.hpp"
#include "test/librados/test_cxx.h"
using namespace librados;
using std::map;
using std::ostringstream;
using std::string;
// calling a nonexistent object class, or a nonexistent method on an
// existing class, must return -EOPNOTSUPP rather than crash.
TEST(LibRadosCls, DNE) {
  Rados cluster;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool_pp(pool_name, cluster));
  IoCtx ioctx;
  cluster.ioctx_create(pool_name.c_str(), ioctx);
  // create an object
  string oid = "foo";
  bufferlist bl;
  ASSERT_EQ(0, ioctx.write(oid, bl, bl.length(), 0));
  // call a bogus class
  ASSERT_EQ(-EOPNOTSUPP, ioctx.exec(oid, "doesnotexistasdfasdf", "method", bl, bl));
  // call a bogus method on existent class
  ASSERT_EQ(-EOPNOTSUPP, ioctx.exec(oid, "lock", "doesnotexistasdfasdfasdf", bl, bl));
  ioctx.close();
  ASSERT_EQ(0, destroy_one_pool_pp(pool_name, cluster));
}
| 907 | 23.540541 | 86 | cc |
null | ceph-main/src/test/librados/cls_remote_reads.cc | #include <set>
#include <string>
#include "common/ceph_json.h"
#include "gtest/gtest.h"
#include "test/librados/test_cxx.h"
#include "crimson_utils.h"
using namespace librados;
// Exercises the test_remote_reads object class: writes three source
// objects, then asks the "test_gather" cls method to build a target object
// by reading them back through the "test_read" cls method, and finally
// checks the gathered target's size.
TEST(ClsTestRemoteReads, TestGather) {
  SKIP_IF_CRIMSON();
  Rados cluster;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool_pp(pool_name, cluster));
  IoCtx ioctx;
  cluster.ioctx_create(pool_name.c_str(), ioctx);

  bufferlist in, out;
  int object_size = 4096;
  char buf[object_size];
  memset(buf, 1, sizeof(buf));

  // create source objects from which data are gathered
  // NOTE(review): `in` is never cleared between writes, so each successive
  // source object is written with a progressively larger bufferlist —
  // confirm this matches what the "test_read" cls method reads back.
  in.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write_full("src_object.1", in));
  in.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write_full("src_object.2", in));
  in.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write_full("src_object.3", in));

  // construct JSON request passed to "test_gather" method, and in turn, to
  // "test_read" method.  Stack-allocated formatter: the previous
  // heap-allocated JSONFormatter was never deleted and leaked.
  JSONFormatter formatter(true);
  formatter.open_object_section("foo");
  std::set<std::string> src_objects;
  src_objects.insert("src_object.1");
  src_objects.insert("src_object.2");
  src_objects.insert("src_object.3");
  encode_json("src_objects", src_objects, &formatter);
  encode_json("cls", "test_remote_reads", &formatter);
  encode_json("method", "test_read", &formatter);
  encode_json("pool", pool_name, &formatter);
  formatter.close_section();
  in.clear();
  formatter.flush(in);

  // create target object by combining data gathered from source objects
  // using "test_read" method
  ASSERT_EQ(0, ioctx.exec("tgt_object", "test_remote_reads", "test_gather", in, out));

  // read target object and check its size
  ASSERT_EQ(3*object_size, ioctx.read("tgt_object", out, 0, 0));

  ASSERT_EQ(0, destroy_one_pool_pp(pool_name, cluster));
}
| 1,812 | 31.375 | 97 | cc |
null | ceph-main/src/test/librados/cmd.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/rados/librados.h"
#include "include/rados/librados.hpp"
#include "include/stringify.h"
#include "test/librados/test.h"
#include "gtest/gtest.h"
#include <errno.h>
#include <condition_variable>
#include <map>
#include <sstream>
#include <string>
using std::cout;
using std::list;
using std::map;
using std::ostringstream;
using std::string;
// mon command parsing: get_command_descriptions succeeds; a long series of
// malformed command payloads must each be rejected with -EINVAL; a valid
// quorum_status command returns a non-empty output buffer.
TEST(LibRadosCmd, MonDescribe) {
  rados_t cluster;
  ASSERT_EQ("", connect_cluster(&cluster));
  char *buf, *st;
  size_t buflen, stlen;
  char *cmd[2];
  cmd[1] = NULL;
  // valid JSON command: succeeds with non-empty output
  cmd[0] = (char *)"{\"prefix\":\"get_command_descriptions\"}";
  ASSERT_EQ(0, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  ASSERT_LT(0u, buflen);
  rados_buffer_free(buf);
  rados_buffer_free(st);
  // bare (non-JSON) command string is rejected
  cmd[0] = (char *)"get_command_descriptions";
  ASSERT_EQ(-EINVAL, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  rados_buffer_free(buf);
  rados_buffer_free(st);
  cmd[0] = (char *)"asdfqwer";
  ASSERT_EQ(-EINVAL, rados_mon_command(cluster, (const char **)cmd, 1, "{}", 2, &buf, &buflen, &st, &stlen));
  rados_buffer_free(buf);
  rados_buffer_free(st);
  cmd[0] = (char *)"";
  ASSERT_EQ(-EINVAL, rados_mon_command(cluster, (const char **)cmd, 1, "{}", 2, &buf, &buflen, &st, &stlen));
  rados_buffer_free(buf);
  rados_buffer_free(st);
  // JSON without a "prefix" key is rejected
  cmd[0] = (char *)"{}";
  ASSERT_EQ(-EINVAL, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  rados_buffer_free(buf);
  rados_buffer_free(st);
  cmd[0] = (char *)"{\"abc\":\"something\"}";
  ASSERT_EQ(-EINVAL, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  rados_buffer_free(buf);
  rados_buffer_free(st);
  // empty, blank, punctuation-only, and unknown multi-word prefixes are
  // all rejected as well
  cmd[0] = (char *)"{\"prefix\":\"\"}";
  ASSERT_EQ(-EINVAL, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  rados_buffer_free(buf);
  rados_buffer_free(st);
  cmd[0] = (char *)"{\"prefix\":\"    \"}";
  ASSERT_EQ(-EINVAL, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  rados_buffer_free(buf);
  rados_buffer_free(st);
  cmd[0] = (char *)"{\"prefix\":\";;;,,,;;,,\"}";
  ASSERT_EQ(-EINVAL, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  rados_buffer_free(buf);
  rados_buffer_free(st);
  cmd[0] = (char *)"{\"prefix\":\"extra command\"}";
  ASSERT_EQ(-EINVAL, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  rados_buffer_free(buf);
  rados_buffer_free(st);
  cmd[0] = (char *)"{\"prefix\":\"quorum_status\"}";
  ASSERT_EQ(0, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  ASSERT_LT(0u, buflen);
  //ASSERT_LT(0u, stlen);
  rados_buffer_free(buf);
  rados_buffer_free(st);
  rados_shutdown(cluster);
}
// osd command parsing against osd.0: invalid commands return -EINVAL, a
// valid one returns output; -ENXIO is tolerated if the OSD is down.
TEST(LibRadosCmd, OSDCmd) {
  rados_t cluster;
  ASSERT_EQ("", connect_cluster(&cluster));
  int r;
  char *buf, *st;
  size_t buflen, stlen;
  char *cmd[2];
  cmd[1] = NULL;
  // note: tolerate NXIO here in case the cluster is thrashing out underneath us.
  cmd[0] = (char *)"asdfasdf";
  r = rados_osd_command(cluster, 0, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen);
  rados_buffer_free(buf);
  rados_buffer_free(st);
  ASSERT_TRUE(r == -22 || r == -ENXIO);
  // bare (non-JSON) command string is also invalid
  cmd[0] = (char *)"version";
  r = rados_osd_command(cluster, 0, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen);
  rados_buffer_free(buf);
  rados_buffer_free(st);
  ASSERT_TRUE(r == -22 || r == -ENXIO);
  // proper JSON command succeeds with non-empty output (unless osd.0 is down)
  cmd[0] = (char *)"{\"prefix\":\"version\"}";
  r = rados_osd_command(cluster, 0, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen);
  ASSERT_TRUE((r == 0 && buflen > 0) || (r == -ENXIO && buflen == 0));
  rados_buffer_free(buf);
  rados_buffer_free(st);
  rados_shutdown(cluster);
}
// pg command parsing: an invalid command to a pg returns -EINVAL, and a
// valid "pg query" returns output; thrashing-related errors are tolerated.
TEST(LibRadosCmd, PGCmd) {
  rados_t cluster;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  char *buf, *st;
  size_t buflen, stlen;
  char *cmd[2];
  cmd[1] = NULL;
  int64_t poolid = rados_pool_lookup(cluster, pool_name.c_str());
  ASSERT_LT(0, poolid);
  // target pg 0 of the freshly created pool
  string pgid = stringify(poolid) + ".0";
  cmd[0] = (char *)"asdfasdf";
  // note: tolerate NXIO here in case the cluster is thrashing out underneath us.
  int r = rados_pg_command(cluster, pgid.c_str(), (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen);
  ASSERT_TRUE(r == -22 || r == -ENXIO);
  rados_buffer_free(buf);
  rados_buffer_free(st);
  // make sure the pg exists on the osd before we query it
  // (the stat calls force the client to contact the pg's primary)
  rados_ioctx_t io;
  rados_ioctx_create(cluster, pool_name.c_str(), &io);
  for (int i=0; i<100; i++) {
    string oid = "obj" + stringify(i);
    ASSERT_EQ(-ENOENT, rados_stat(io, oid.c_str(), NULL, NULL));
  }
  rados_ioctx_destroy(io);
  string qstr = "{\"prefix\":\"pg\", \"cmd\":\"query\", \"pgid\":\"" + pgid + "\"}";
  cmd[0] = (char *)qstr.c_str();
  // note: tolerate ENOENT/ENXIO here if the osd is thrashing out underneath us
  r = rados_pg_command(cluster, pgid.c_str(), (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen);
  ASSERT_TRUE(r == 0 || r == -ENOENT || r == -ENXIO);
  ASSERT_LT(0u, buflen);
  rados_buffer_free(buf);
  rados_buffer_free(st);
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// Thread-safe accumulator for cluster log lines received by log_cb();
// WatchLog polls contains() while the callback appends under `lock`.
struct Log {
  std::list<std::string> log;       // raw log lines, in arrival order
  std::condition_variable cond;     // signalled whenever a line is added
  std::mutex lock;                  // guards `log`

  // Return true iff any recorded line contains `str` as a substring.
  bool contains(const std::string& str) {
    std::lock_guard<std::mutex> guard(lock);
    return std::any_of(log.begin(), log.end(),
                       [&str](const std::string& line) {
                         return line.find(str) != std::string::npos;
                       });
  }
};
// rados_monitor_log() callback: records the raw log line in the Log
// instance passed through @arg and wakes any waiters.  Only @line is used;
// the parsed fields (who/stamp/seq/level/msg) are ignored by these tests.
void log_cb(void *arg,
	     const char *line,
	     const char *who, uint64_t stampsec, uint64_t stamp_nsec,
	     uint64_t seq, const char *level,
	     const char *msg) {
  auto *log = static_cast<Log *>(arg);
  std::lock_guard<std::mutex> guard(log->lock);
  log->log.emplace_back(line);
  log->cond.notify_all();
  std::cout << "got: " << line << std::endl;
}
// log watching: lines injected via the "log" mon command are delivered to
// the callback only while subscribed at a level that includes them, and
// stop arriving after unsubscribing.
TEST(LibRadosCmd, WatchLog) {
  rados_t cluster;
  ASSERT_EQ("", connect_cluster(&cluster));
  char *buf, *st;
  char *cmd[2];
  cmd[1] = NULL;
  size_t buflen, stlen;
  Log l;
  // subscribe at "info": injected INF lines should arrive
  ASSERT_EQ(0, rados_monitor_log(cluster, "info", log_cb, &l));
  cmd[0] = (char *)"{\"prefix\":\"log\", \"logtext\":[\"onexx\"]}";
  ASSERT_EQ(0, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  // poll for delivery; give up after ~100 seconds
  for (int i=0; !l.contains("onexx"); i++) {
    ASSERT_TRUE(i<100);
    sleep(1);
  }
  ASSERT_TRUE(l.contains("onexx"));
  // switch subscription to "err": info-level lines must not be delivered
  cmd[0] = (char *)"{\"prefix\":\"log\", \"logtext\":[\"twoxx\"]}";
  ASSERT_EQ(0, rados_monitor_log(cluster, "err", log_cb, &l));
  ASSERT_EQ(0, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  sleep(2);
  ASSERT_FALSE(l.contains("twoxx"));
  // back to "info": delivery resumes
  ASSERT_EQ(0, rados_monitor_log(cluster, "info", log_cb, &l));
  cmd[0] = (char *)"{\"prefix\":\"log\", \"logtext\":[\"threexx\"]}";
  ASSERT_EQ(0, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  for (int i=0; !l.contains("threexx"); i++) {
    ASSERT_TRUE(i<100);
    sleep(1);
  }
  // unsubscribe (NULL callback): nothing further is delivered
  ASSERT_EQ(0, rados_monitor_log(cluster, "info", NULL, NULL));
  cmd[0] = (char *)"{\"prefix\":\"log\", \"logtext\":[\"fourxx\"]}";
  ASSERT_EQ(0, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  sleep(2);
  ASSERT_FALSE(l.contains("fourxx"));
  rados_shutdown(cluster);
}
| 7,556 | 31.856522 | 109 | cc |
null | ceph-main/src/test/librados/cmd_cxx.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <errno.h>
#include <condition_variable>
#include <map>
#include <sstream>
#include <string>
#include "gtest/gtest.h"
#include "include/rados/librados.hpp"
#include "include/stringify.h"
#include "test/librados/test_cxx.h"
using namespace librados;
using std::map;
using std::ostringstream;
using std::string;
// C++ API flavour of MonDescribe: get_command_descriptions must succeed
// and produce a non-empty output buffer.
TEST(LibRadosCmd, MonDescribePP) {
  Rados cluster;
  ASSERT_EQ("", connect_cluster_pp(cluster));
  bufferlist inbl, outbl;
  string outs;
  ASSERT_EQ(0, cluster.mon_command("{\"prefix\": \"get_command_descriptions\"}",
				   inbl, &outbl, &outs));
  ASSERT_LT(0u, outbl.length());
  ASSERT_LE(0u, outs.length());
  cluster.shutdown();
}
// C++ API flavour of OSDCmd: invalid commands to osd.0 return -EINVAL,
// valid JSON returns output; -ENXIO tolerated if the OSD is down.
TEST(LibRadosCmd, OSDCmdPP) {
  Rados cluster;
  ASSERT_EQ("", connect_cluster_pp(cluster));
  int r;
  bufferlist inbl, outbl;
  string outs;
  string cmd;
  // note: tolerate NXIO here in case the cluster is thrashing out underneath us.
  cmd = "asdfasdf";
  r = cluster.osd_command(0, cmd, inbl, &outbl, &outs);
  ASSERT_TRUE(r == -22 || r == -ENXIO);
  // bare (non-JSON) command string is also invalid
  cmd = "version";
  r = cluster.osd_command(0, cmd, inbl, &outbl, &outs);
  ASSERT_TRUE(r == -22 || r == -ENXIO);
  // proper JSON command succeeds with non-empty output (unless osd.0 is down)
  cmd = "{\"prefix\":\"version\"}";
  r = cluster.osd_command(0, cmd, inbl, &outbl, &outs);
  ASSERT_TRUE((r == 0 && outbl.length() > 0) || (r == -ENXIO && outbl.length() == 0));
  cluster.shutdown();
}
// C++ API flavour of PGCmd: an invalid pg command returns -EINVAL, and a
// valid "pg query" returns output; thrashing-related errors are tolerated.
TEST(LibRadosCmd, PGCmdPP) {
  Rados cluster;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool_pp(pool_name, cluster));
  int r;
  bufferlist inbl, outbl;
  string outs;
  string cmd;
  int64_t poolid = cluster.pool_lookup(pool_name.c_str());
  ASSERT_LT(0, poolid);
  // target pg 0 of the freshly created pool
  string pgid = stringify(poolid) + ".0";
  cmd = "asdfasdf";
  // note: tolerate NXIO here in case the cluster is thrashing out underneath us.
  r = cluster.pg_command(pgid.c_str(), cmd, inbl, &outbl, &outs);
  ASSERT_TRUE(r == -22 || r == -ENXIO);
  // make sure the pg exists on the osd before we query it
  // (the stat calls force the client to contact the pg's primary)
  IoCtx io;
  cluster.ioctx_create(pool_name.c_str(), io);
  for (int i=0; i<100; i++) {
    string oid = "obj" + stringify(i);
    ASSERT_EQ(-ENOENT, io.stat(oid, NULL, NULL));
  }
  io.close();
  cmd = "{\"prefix\":\"pg\", \"cmd\":\"query\", \"pgid\":\"" + pgid + "\"}";
  // note: tolerate ENOENT/ENXIO here if the osd is thrashing out underneath us
  r = cluster.pg_command(pgid.c_str(), cmd, inbl, &outbl, &outs);
  ASSERT_TRUE(r == 0 || r == -ENOENT || r == -ENXIO);
  ASSERT_LT(0u, outbl.length());
  ASSERT_EQ(0, destroy_one_pool_pp(pool_name, cluster));
}
| 2,618 | 27.16129 | 86 | cc |
null | ceph-main/src/test/librados/completion_speed.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <atomic>
#include <cassert>

#include "include/rados/librados.hpp"
#include "common/ceph_context.h"
#include "common/Finisher.h"
#include "librados/AioCompletionImpl.h"
// Total number of AIO completions to push through the Finisher.
constexpr int max_completions = 10'000'000;

// Count of completions dispatched so far.  completion_cb() increments it on
// the Finisher's thread while main() polls it, so it must be atomic to
// avoid a data race on the counter.
std::atomic<int> completed{0};

// Global context and Finisher shared by main() and the completion callback.
auto cct = (new CephContext(CEPH_ENTITY_TYPE_CLIENT))->get();
Finisher f(cct);
// Completion callback: frees the just-finished AioCompletion and, until
// max_completions have fired, queues a fresh one on the global Finisher so
// the chain keeps itself running.
void completion_cb(librados::completion_t cb, void* arg) {
  // arg is the AioCompletion this callback was registered on; ownership
  // passes to the callback, which frees it here.
  auto c = static_cast<librados::AioCompletion*>(arg);
  delete c;
  if (++completed < max_completions) {
    auto aio = librados::Rados::aio_create_completion();
    aio->set_complete_callback(static_cast<void*>(aio), &completion_cb);
    // queue the completion directly on the Finisher — no OSD involved, so
    // this measures pure completion-dispatch speed
    f.queue(new librados::C_AioComplete(aio->pc));
  }
}
// Benchmark driver: seed the chain with one completion, start the
// Finisher, and wait until completion_cb() has dispatched them all.
int main(void) {
  auto aio = librados::Rados::aio_create_completion();
  aio->set_complete_callback(static_cast<void*>(aio), &completion_cb);
  f.queue(new librados::C_AioComplete(aio->pc));
  f.start();
  // completion_cb() re-queues itself; poll until the target is reached
  while (completed < max_completions)
    f.wait_for_empty();
  f.stop();
  assert(completed == max_completions);
  cct->put();
}
| 1,075 | 26.589744 | 72 | cc |
null | ceph-main/src/test/librados/crimson_utils.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <cstdlib>
// Returns true when the test run targets a crimson OSD cluster, which is
// signalled by the CRIMSON_COMPAT environment variable being set.
static inline bool is_crimson_cluster() {
  const char* flag = getenv("CRIMSON_COMPAT");
  return flag != nullptr;
}
// Skip the current gtest test case when running against a crimson cluster,
// where the exercised feature is not implemented yet. Must be used inside a
// test body (expands to an if + GTEST_SKIP()).
#define SKIP_IF_CRIMSON() \
  if (is_crimson_cluster()) { \
    GTEST_SKIP() << "Not supported by crimson yet. Skipped"; \
  }
| 371 | 22.25 | 70 | h |
null | ceph-main/src/test/librados/io.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*
// vim: ts=8 sw=2 smarttab
#include <climits>
#include "include/rados/librados.h"
#include "include/encoding.h"
#include "include/err.h"
#include "include/scope_guard.h"
#include "test/librados/test.h"
#include "test/librados/TestCase.h"
#include <errno.h>
#include "gtest/gtest.h"
#include "crimson_utils.h"
using std::string;
typedef RadosTest LibRadosIo;
typedef RadosTestEC LibRadosIoEC;
// Write a small object in the default namespace, then again under an
// explicit namespace; both writes must succeed.
TEST_F(LibRadosIo, SimpleWrite) {
  char payload[128];
  memset(payload, 0xcc, sizeof(payload));
  ASSERT_EQ(0, rados_write(ioctx, "foo", payload, sizeof(payload), 0));
  // same object name, distinct namespace
  rados_ioctx_set_namespace(ioctx, "nspace");
  ASSERT_EQ(0, rados_write(ioctx, "foo", payload, sizeof(payload), 0));
}
// Oversized length arguments must be rejected up front with -E2BIG for every
// write-style entry point.
TEST_F(LibRadosIo, TooBig) {
  char buf[1] = { 0 };
  ASSERT_EQ(-E2BIG, rados_write(ioctx, "A", buf, UINT_MAX, 0));
  ASSERT_EQ(-E2BIG, rados_append(ioctx, "A", buf, UINT_MAX));
  ASSERT_EQ(-E2BIG, rados_write_full(ioctx, "A", buf, UINT_MAX));
  ASSERT_EQ(-E2BIG, rados_writesame(ioctx, "A", buf, sizeof(buf), UINT_MAX, 0));
}
// Reproduces a use-after-timeout hazard: when rados_read() returns
// -ETIMEDOUT (rados_osd_op_timeout expired), librados may keep writing into
// the caller-supplied buffer after the call has already returned. A second
// client is configured with a 1s op timeout and a 2s injected delay so the
// read is guaranteed to time out; the test then watches the buffer for late
// modifications.
TEST_F(LibRadosIo, ReadTimeout) {
  char buf[128];
  memset(buf, 'a', sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));

  {
    // set up a second client
    rados_t cluster;
    rados_ioctx_t ioctx;
    ASSERT_EQ(0, rados_create(&cluster, "admin"));
    ASSERT_EQ(0, rados_conf_read_file(cluster, NULL));
    ASSERT_EQ(0, rados_conf_parse_env(cluster, NULL));
    ASSERT_EQ(0, rados_conf_set(cluster, "rados_osd_op_timeout", "1")); // use any small value that will result in a timeout
    ASSERT_EQ(0, rados_conf_set(cluster, "ms_inject_internal_delays", "2")); // create a 2 second delay
    ASSERT_EQ(0, rados_connect(cluster));
    ASSERT_EQ(0, rados_ioctx_create(cluster, pool_name.c_str(), &ioctx));
    rados_ioctx_set_namespace(ioctx, nspace.c_str());

    // then we show that the buffer is changed after rados_read returned
    // with a timeout
    for (int i=0; i<5; i++) {
      char buf2[sizeof(buf)];
      memset(buf2, 0, sizeof(buf2));
      int err = rados_read(ioctx, "foo", buf2, sizeof(buf2), 0);
      if (err == -ETIMEDOUT) { // named constant instead of magic -110
        int startIndex = 0;
        // find the index until which librados already read the object before the timeout occurred
        for (unsigned b=0; b<sizeof(buf); b++) {
          if (buf2[b] != buf[b]) {
            startIndex = b;
            break;
          }
        }

        // wait some time to give librados a chance to do something
        sleep(1);

        // then check if the buffer was changed after the call
        if (buf2[startIndex] == 'a') {
          // report the byte that actually changed (buf2, not the reference buf)
          printf("byte at index %d was changed after the timeout to %d\n",
                 startIndex, (int)buf2[startIndex]);
          ASSERT_TRUE(0);
          break;
        }
      } else {
        printf("no timeout :/\n");
      }
    }
    rados_ioctx_destroy(ioctx);
    rados_shutdown(cluster);
  }
}
// Write/read round trip at offset 0 and at a non-zero unaligned offset; data
// read back must match what was written.
TEST_F(LibRadosIo, RoundTrip) {
  char buf[128];
  char buf2[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  memset(buf2, 0, sizeof(buf2));
  ASSERT_EQ((int)sizeof(buf2), rados_read(ioctx, "foo", buf2, sizeof(buf2), 0));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));

  uint64_t off = 19;
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "bar", buf, sizeof(buf), off));
  memset(buf2, 0, sizeof(buf2));
  ASSERT_EQ((int)sizeof(buf2), rados_read(ioctx, "bar", buf2, sizeof(buf2), off));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
}
// rados_checksum with CRC32C must return a count of 1 followed by the same
// crc that ceph_crc32c computes locally over the object data.
TEST_F(LibRadosIo, Checksum) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));

  uint32_t expected_crc = ceph_crc32c(-1, reinterpret_cast<const uint8_t*>(buf),
                                      sizeof(buf));
  ceph_le32 init_value(-1);
  // crc[0] holds the checksum count, crc[1] the checksum itself
  ceph_le32 crc[2];
  ASSERT_EQ(0, rados_checksum(ioctx, "foo", LIBRADOS_CHECKSUM_TYPE_CRC32C,
                              reinterpret_cast<char*>(&init_value),
                              sizeof(init_value), sizeof(buf), 0, 0,
                              reinterpret_cast<char*>(&crc), sizeof(crc)));
  ASSERT_EQ(1U, crc[0]);
  ASSERT_EQ(expected_crc, crc[1]);
}
// A smaller write overlapping the start of a larger one must leave the head
// with the new data and the tail with the old data.
TEST_F(LibRadosIo, OverlappingWriteRoundTrip) {
  char buf[128];
  char buf2[64];
  char buf3[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  memset(buf2, 0xdd, sizeof(buf2));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf2, sizeof(buf2), 0));
  memset(buf3, 0xdd, sizeof(buf3));
  ASSERT_EQ((int)sizeof(buf3), rados_read(ioctx, "foo", buf3, sizeof(buf3), 0));
  // head: second (overlapping) write; tail: remainder of the first write
  ASSERT_EQ(0, memcmp(buf3, buf2, sizeof(buf2)));
  ASSERT_EQ(0, memcmp(buf3 + sizeof(buf2), buf, sizeof(buf) - sizeof(buf2)));
}
// rados_write_full replaces the whole object, so the object shrinks to the
// size of the second (smaller) write.
TEST_F(LibRadosIo, WriteFullRoundTrip) {
  char buf[128];
  char buf2[64];
  char buf3[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  memset(buf2, 0xdd, sizeof(buf2));
  ASSERT_EQ(0, rados_write_full(ioctx, "foo", buf2, sizeof(buf2)));
  memset(buf3, 0x00, sizeof(buf3));
  // only sizeof(buf2) bytes remain after the full overwrite
  ASSERT_EQ((int)sizeof(buf2), rados_read(ioctx, "foo", buf3, sizeof(buf3), 0));
  ASSERT_EQ(0, memcmp(buf2, buf3, sizeof(buf2)));
}
// Two appends concatenate; reading the object back must yield both payloads
// in order.
TEST_F(LibRadosIo, AppendRoundTrip) {
  char buf[64];
  char buf2[64];
  char buf3[sizeof(buf) + sizeof(buf2)];
  memset(buf, 0xde, sizeof(buf));
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf, sizeof(buf)));
  memset(buf2, 0xad, sizeof(buf2));
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf2, sizeof(buf2)));
  memset(buf3, 0, sizeof(buf3));
  ASSERT_EQ((int)sizeof(buf3), rados_read(ioctx, "foo", buf3, sizeof(buf3), 0));
  ASSERT_EQ(0, memcmp(buf3, buf, sizeof(buf)));
  ASSERT_EQ(0, memcmp(buf3 + sizeof(buf), buf2, sizeof(buf2)));
}
// A zero-length zero() at offset 0 is a no-op that must still succeed.
TEST_F(LibRadosIo, ZeroLenZero) {
  rados_write_op_t write_op = rados_create_write_op();
  ASSERT_TRUE(write_op);
  rados_write_op_zero(write_op, 0, 0);
  ASSERT_EQ(0, rados_write_op_operate(write_op, ioctx, "foo", NULL, 0));
  rados_release_write_op(write_op);
}
// Truncating to half the size must leave exactly the first half readable.
TEST_F(LibRadosIo, TruncTest) {
  char buf[128];
  char buf2[sizeof(buf)];
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf, sizeof(buf)));
  ASSERT_EQ(0, rados_trunc(ioctx, "foo", sizeof(buf) / 2));
  memset(buf2, 0, sizeof(buf2));
  ASSERT_EQ((int)(sizeof(buf)/2), rados_read(ioctx, "foo", buf2, sizeof(buf2), 0));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)/2));
}
// Removing an object must make subsequent reads fail with -ENOENT.
TEST_F(LibRadosIo, RemoveTest) {
  char buf[128];
  char buf2[sizeof(buf)];
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf, sizeof(buf)));
  ASSERT_EQ(0, rados_remove(ioctx, "foo"));
  memset(buf2, 0, sizeof(buf2));
  ASSERT_EQ(-ENOENT, rados_read(ioctx, "foo", buf2, sizeof(buf2), 0));
}
// Getting a missing xattr yields -ENODATA; after setxattr the stored value
// must round-trip byte for byte.
TEST_F(LibRadosIo, XattrsRoundTrip) {
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf, sizeof(buf)));
  ASSERT_EQ(-ENODATA, rados_getxattr(ioctx, "foo", attr1, buf, sizeof(buf)));
  ASSERT_EQ(0, rados_setxattr(ioctx, "foo", attr1, attr1_buf, sizeof(attr1_buf)));
  ASSERT_EQ((int)sizeof(attr1_buf),
	    rados_getxattr(ioctx, "foo", attr1, buf, sizeof(buf)));
  ASSERT_EQ(0, memcmp(attr1_buf, buf, sizeof(attr1_buf)));
}
// rmxattr deletes an attribute (subsequent get yields -ENODATA), and
// rmxattr on a removed object fails with -ENOENT.
TEST_F(LibRadosIo, RmXattr) {
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf, sizeof(buf)));
  ASSERT_EQ(0,
      rados_setxattr(ioctx, "foo", attr1, attr1_buf, sizeof(attr1_buf)));
  ASSERT_EQ(0, rados_rmxattr(ioctx, "foo", attr1));
  ASSERT_EQ(-ENODATA, rados_getxattr(ioctx, "foo", attr1, buf, sizeof(buf)));

  // Test rmxattr on a removed object
  char buf2[128];
  char attr2[] = "attr2";
  char attr2_buf[] = "foo bar baz";
  memset(buf2, 0xbb, sizeof(buf2));
  ASSERT_EQ(0, rados_write(ioctx, "foo_rmxattr", buf2, sizeof(buf2), 0));
  ASSERT_EQ(0,
      rados_setxattr(ioctx, "foo_rmxattr", attr2, attr2_buf, sizeof(attr2_buf)));
  ASSERT_EQ(0, rados_remove(ioctx, "foo_rmxattr"));
  ASSERT_EQ(-ENOENT, rados_rmxattr(ioctx, "foo_rmxattr", attr2));
}
// The xattr iterator must enumerate exactly the two attributes that were
// set, each with the expected value, in any order.
TEST_F(LibRadosIo, XattrIter) {
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  char attr2[] = "attr2";
  char attr2_buf[256];
  for (size_t j = 0; j < sizeof(attr2_buf); ++j) {
    attr2_buf[j] = j % 0xff;
  }
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf, sizeof(buf)));
  ASSERT_EQ(0, rados_setxattr(ioctx, "foo", attr1, attr1_buf, sizeof(attr1_buf)));
  ASSERT_EQ(0, rados_setxattr(ioctx, "foo", attr2, attr2_buf, sizeof(attr2_buf)));
  rados_xattrs_iter_t iter;
  ASSERT_EQ(0, rados_getxattrs(ioctx, "foo", &iter));
  int num_seen = 0;
  while (true) {
    const char *name;
    const char *val;
    size_t len;
    ASSERT_EQ(0, rados_getxattrs_next(iter, &name, &val, &len));
    if (name == NULL) {
      // NULL name signals end of iteration
      break;
    }
    ASSERT_LT(num_seen, 2);
    if ((strcmp(name, attr1) == 0) && (val != NULL) && (memcmp(val, attr1_buf, len) == 0)) {
      num_seen++;
      continue;
    }
    else if ((strcmp(name, attr2) == 0) && (val != NULL) && (memcmp(val, attr2_buf, len) == 0)) {
      num_seen++;
      continue;
    }
    else {
      // unexpected attribute name/value pair: force a failure
      ASSERT_EQ(0, 1);
    }
  }
  rados_getxattrs_end(iter);
}
// EC-pool variant of SimpleWrite: aligned full-object writes succeed, in the
// default and an explicit namespace.
TEST_F(LibRadosIoEC, SimpleWrite) {
  SKIP_IF_CRIMSON();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  rados_ioctx_set_namespace(ioctx, "nspace");
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
}
// EC pools round-trip writes at offset 0, but reject writes at arbitrary
// offsets with -EOPNOTSUPP.
TEST_F(LibRadosIoEC, RoundTrip) {
  SKIP_IF_CRIMSON();
  char buf[128];
  char buf2[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  memset(buf2, 0, sizeof(buf2));
  ASSERT_EQ((int)sizeof(buf2), rados_read(ioctx, "foo", buf2, sizeof(buf2), 0));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));

  uint64_t off = 19;
  // offset writes are not supported on EC pools
  ASSERT_EQ(-EOPNOTSUPP, rados_write(ioctx, "bar", buf, sizeof(buf), off));
}
// On EC pools an overlapping (partial) overwrite is rejected with
// -EOPNOTSUPP, leaving the original data intact.
TEST_F(LibRadosIoEC, OverlappingWriteRoundTrip) {
  SKIP_IF_CRIMSON();
  int bsize = alignment;
  int dbsize = bsize * 2;
  char *buf = (char *)new char[dbsize];
  char *buf2 = (char *)new char[bsize];
  char *buf3 = (char *)new char[dbsize];
  auto cleanup = [&] {
    delete[] buf;
    delete[] buf2;
    delete[] buf3;
  };
  scope_guard<decltype(cleanup)> sg(std::move(cleanup));
  memset(buf, 0xcc, dbsize);
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, dbsize, 0));
  memset(buf2, 0xdd, bsize);
  // partial overwrite is not supported on EC pools
  ASSERT_EQ(-EOPNOTSUPP, rados_write(ioctx, "foo", buf2, bsize, 0));
  memset(buf3, 0xdd, dbsize);
  ASSERT_EQ(dbsize, rados_read(ioctx, "foo", buf3, dbsize, 0));
  // Read the same as first write
  ASSERT_EQ(0, memcmp(buf3, buf, dbsize));
}
// write_full replaces the whole object even on EC pools; the object shrinks
// to the second write's size.
TEST_F(LibRadosIoEC, WriteFullRoundTrip) {
  SKIP_IF_CRIMSON();
  char buf[128];
  char buf2[64];
  char buf3[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  memset(buf2, 0xdd, sizeof(buf2));
  ASSERT_EQ(0, rados_write_full(ioctx, "foo", buf2, sizeof(buf2)));
  memset(buf3, 0xee, sizeof(buf3));
  ASSERT_EQ((int)sizeof(buf2), rados_read(ioctx, "foo", buf3, sizeof(buf3), 0));
  ASSERT_EQ(0, memcmp(buf3, buf2, sizeof(buf2)));
}
// EC appends must be stripe-aligned: aligned appends round-trip, and a
// second unaligned append (leaving the object unaligned) is rejected with
// -EOPNOTSUPP.
TEST_F(LibRadosIoEC, AppendRoundTrip) {
  SKIP_IF_CRIMSON();
  char *buf = (char *)new char[alignment];
  char *buf2 = (char *)new char[alignment];
  char *buf3 = (char *)new char[alignment *2];
  int uasize = alignment/2;
  char *unalignedbuf = (char *)new char[uasize];
  auto cleanup = [&] {
    delete[] buf;
    delete[] buf2;
    delete[] buf3;
    delete[] unalignedbuf;
  };
  scope_guard<decltype(cleanup)> sg(std::move(cleanup));
  memset(buf, 0xde, alignment);
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf, alignment));
  memset(buf2, 0xad, alignment);
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf2, alignment));
  memset(buf3, 0, alignment*2);
  ASSERT_EQ((int)alignment*2, rados_read(ioctx, "foo", buf3, alignment*2, 0));
  ASSERT_EQ(0, memcmp(buf3, buf, alignment));
  ASSERT_EQ(0, memcmp(buf3 + alignment, buf2, alignment));
  // first unaligned append is accepted (padded); the second, starting from an
  // unaligned offset, is rejected
  memset(unalignedbuf, 0, uasize);
  ASSERT_EQ(0, rados_append(ioctx, "foo", unalignedbuf, uasize));
  ASSERT_EQ(-EOPNOTSUPP, rados_append(ioctx, "foo", unalignedbuf, uasize));
}
// Truncate is not supported on EC pools (-EOPNOTSUPP) and must leave the
// object unchanged.
TEST_F(LibRadosIoEC, TruncTest) {
  SKIP_IF_CRIMSON();
  char buf[128];
  char buf2[sizeof(buf)];
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf, sizeof(buf)));
  ASSERT_EQ(-EOPNOTSUPP, rados_trunc(ioctx, "foo", sizeof(buf) / 2));
  memset(buf2, 0, sizeof(buf2));
  // Same size
  ASSERT_EQ((int)sizeof(buf), rados_read(ioctx, "foo", buf2, sizeof(buf2), 0));
  // No change
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
}
// Removing an object on an EC pool must make subsequent reads fail with
// -ENOENT.
TEST_F(LibRadosIoEC, RemoveTest) {
  SKIP_IF_CRIMSON();
  char buf[128];
  char buf2[sizeof(buf)];
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf, sizeof(buf)));
  ASSERT_EQ(0, rados_remove(ioctx, "foo"));
  memset(buf2, 0, sizeof(buf2));
  ASSERT_EQ(-ENOENT, rados_read(ioctx, "foo", buf2, sizeof(buf2), 0));
}
// xattr round trip on an EC pool: missing attr yields -ENODATA; a set value
// reads back byte for byte.
TEST_F(LibRadosIoEC, XattrsRoundTrip) {
  SKIP_IF_CRIMSON();
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf, sizeof(buf)));
  ASSERT_EQ(-ENODATA, rados_getxattr(ioctx, "foo", attr1, buf, sizeof(buf)));
  ASSERT_EQ(0, rados_setxattr(ioctx, "foo", attr1, attr1_buf, sizeof(attr1_buf)));
  ASSERT_EQ((int)sizeof(attr1_buf),
	    rados_getxattr(ioctx, "foo", attr1, buf, sizeof(buf)));
  ASSERT_EQ(0, memcmp(attr1_buf, buf, sizeof(attr1_buf)));
}
// rmxattr on an EC pool deletes the attribute, and rmxattr on a removed
// object fails with -ENOENT.
TEST_F(LibRadosIoEC, RmXattr) {
  SKIP_IF_CRIMSON();
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf, sizeof(buf)));
  ASSERT_EQ(0,
      rados_setxattr(ioctx, "foo", attr1, attr1_buf, sizeof(attr1_buf)));
  ASSERT_EQ(0, rados_rmxattr(ioctx, "foo", attr1));
  ASSERT_EQ(-ENODATA, rados_getxattr(ioctx, "foo", attr1, buf, sizeof(buf)));

  // Test rmxattr on a removed object
  char buf2[128];
  char attr2[] = "attr2";
  char attr2_buf[] = "foo bar baz";
  memset(buf2, 0xbb, sizeof(buf2));
  ASSERT_EQ(0, rados_write(ioctx, "foo_rmxattr", buf2, sizeof(buf2), 0));
  ASSERT_EQ(0,
      rados_setxattr(ioctx, "foo_rmxattr", attr2, attr2_buf, sizeof(attr2_buf)));
  ASSERT_EQ(0, rados_remove(ioctx, "foo_rmxattr"));
  ASSERT_EQ(-ENOENT, rados_rmxattr(ioctx, "foo_rmxattr", attr2));
}
// The xattr iterator on an EC-pool object must enumerate exactly the two
// attributes that were set, each with the expected value, in any order.
TEST_F(LibRadosIoEC, XattrIter) {
  SKIP_IF_CRIMSON();
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  char attr2[] = "attr2";
  char attr2_buf[256];
  for (size_t j = 0; j < sizeof(attr2_buf); ++j) {
    attr2_buf[j] = j % 0xff;
  }
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf, sizeof(buf)));
  ASSERT_EQ(0, rados_setxattr(ioctx, "foo", attr1, attr1_buf, sizeof(attr1_buf)));
  ASSERT_EQ(0, rados_setxattr(ioctx, "foo", attr2, attr2_buf, sizeof(attr2_buf)));
  rados_xattrs_iter_t iter;
  ASSERT_EQ(0, rados_getxattrs(ioctx, "foo", &iter));
  int num_seen = 0;
  while (true) {
    const char *name;
    const char *val;
    size_t len;
    ASSERT_EQ(0, rados_getxattrs_next(iter, &name, &val, &len));
    if (name == NULL) {
      // NULL name signals end of iteration
      break;
    }
    ASSERT_LT(num_seen, 2);
    if ((strcmp(name, attr1) == 0) && (val != NULL) && (memcmp(val, attr1_buf, len) == 0)) {
      num_seen++;
      continue;
    }
    else if ((strcmp(name, attr2) == 0) && (val != NULL) && (memcmp(val, attr2_buf, len) == 0)) {
      num_seen++;
      continue;
    }
    else {
      // unexpected attribute name/value pair: force a failure
      ASSERT_EQ(0, 1);
    }
  }
  rados_getxattrs_end(iter);
}
| 15,525 | 32.606061 | 124 | cc |
null | ceph-main/src/test/librados/io_cxx.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*
// vim: ts=8 sw=2 smarttab
#include <climits>
#include <errno.h>
#include "gtest/gtest.h"
#include "include/rados/librados.hpp"
#include "include/encoding.h"
#include "include/err.h"
#include "include/scope_guard.h"
#include "test/librados/test_cxx.h"
#include "test/librados/testcase_cxx.h"
#include "crimson_utils.h"
using namespace librados;
using std::string;
typedef RadosTestPP LibRadosIoPP;
typedef RadosTestECPP LibRadosIoECPP;
// Oversized length arguments must be rejected with -E2BIG.
// NOTE(review): the local IoCtx shadows the connected fixture ioctx, so
// these calls run against an unopened context — presumably intentional,
// since the length check fires before the ioctx is used; confirm.
TEST_F(LibRadosIoPP, TooBigPP) {
  IoCtx ioctx;
  bufferlist bl;
  ASSERT_EQ(-E2BIG, ioctx.write("foo", bl, UINT_MAX, 0));
  ASSERT_EQ(-E2BIG, ioctx.append("foo", bl, UINT_MAX));
  // ioctx.write_full no way to overflow bl.length()
  ASSERT_EQ(-E2BIG, ioctx.writesame("foo", bl, UINT_MAX, 0));
}
// C++ API: a simple write succeeds in the default namespace and again after
// switching to an explicit namespace.
TEST_F(LibRadosIoPP, SimpleWritePP) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));
  ioctx.set_namespace("nspace");
  ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));
}
// Exercises ObjectReadOperation::read through every combination of optional
// out-params (out bufferlist, per-op bufferlist, per-op rval), with and
// without an operate() out bufferlist, including a double read in one op and
// a read into a preallocated buffer whose crc cache must be invalidated.
TEST_F(LibRadosIoPP, ReadOpPP) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));

  // no per-op out-params; result only via operate()'s out bufferlist
  {
    bufferlist op_bl;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), NULL, NULL);
    ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
    ASSERT_EQ(sizeof(buf), op_bl.length());
    ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
  }

  {
    bufferlist op_bl;
    ObjectReadOperation op;
    op.read(0, 0, NULL, NULL); //len=0 mean read the whole object data.
    ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
    ASSERT_EQ(sizeof(buf), op_bl.length());
    ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
  }

  // per-op out bufferlist plus operate() out bufferlist
  {
    bufferlist read_bl, op_bl;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), &read_bl, NULL);
    ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
    ASSERT_EQ(sizeof(buf), read_bl.length());
    ASSERT_EQ(sizeof(buf), op_bl.length());
    ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
    ASSERT_EQ(0, memcmp(read_bl.c_str(), buf, sizeof(buf)));
  }

  // per-op rval out-param
  {
    bufferlist op_bl;
    int rval = 1000;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), NULL, &rval);
    ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
    ASSERT_EQ(sizeof(buf), op_bl.length());
    ASSERT_EQ(0, rval);
    ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
  }

  {
    bufferlist read_bl, op_bl;
    int rval = 1000;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), &read_bl, &rval);
    ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
    ASSERT_EQ(sizeof(buf), read_bl.length());
    ASSERT_EQ(sizeof(buf), op_bl.length());
    ASSERT_EQ(0, rval);
    ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
    ASSERT_EQ(0, memcmp(read_bl.c_str(), buf, sizeof(buf)));
  }

  // two reads in a single op: operate()'s out bufferlist concatenates both
  {
    bufferlist read_bl1, read_bl2, op_bl;
    int rval1 = 1000, rval2 = 1002;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), &read_bl1, &rval1);
    op.read(0, sizeof(buf), &read_bl2, &rval2);
    ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
    ASSERT_EQ(sizeof(buf), read_bl1.length());
    ASSERT_EQ(sizeof(buf), read_bl2.length());
    ASSERT_EQ(sizeof(buf) * 2, op_bl.length());
    ASSERT_EQ(0, rval1);
    ASSERT_EQ(0, rval2);
    ASSERT_EQ(0, memcmp(read_bl1.c_str(), buf, sizeof(buf)));
    ASSERT_EQ(0, memcmp(read_bl2.c_str(), buf, sizeof(buf)));
    ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
    ASSERT_EQ(0, memcmp(op_bl.c_str() + sizeof(buf), buf, sizeof(buf)));
  }

  {
    bufferlist op_bl;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), NULL, NULL);
    ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
    ASSERT_EQ(sizeof(buf), op_bl.length());
    ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
  }

  // same combinations without an operate() out bufferlist
  {
    bufferlist read_bl;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), &read_bl, NULL);
    ASSERT_EQ(0, ioctx.operate("foo", &op, NULL));
    ASSERT_EQ(sizeof(buf), read_bl.length());
    ASSERT_EQ(0, memcmp(read_bl.c_str(), buf, sizeof(buf)));
  }

  {
    int rval = 1000;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), NULL, &rval);
    ASSERT_EQ(0, ioctx.operate("foo", &op, NULL));
    ASSERT_EQ(0, rval);
  }

  {
    bufferlist read_bl;
    int rval = 1000;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), &read_bl, &rval);
    ASSERT_EQ(0, ioctx.operate("foo", &op, NULL));
    ASSERT_EQ(sizeof(buf), read_bl.length());
    ASSERT_EQ(0, rval);
    ASSERT_EQ(0, memcmp(read_bl.c_str(), buf, sizeof(buf)));
  }

  {
    bufferlist read_bl1, read_bl2;
    int rval1 = 1000, rval2 = 1002;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), &read_bl1, &rval1);
    op.read(0, sizeof(buf), &read_bl2, &rval2);
    ASSERT_EQ(0, ioctx.operate("foo", &op, NULL));
    ASSERT_EQ(sizeof(buf), read_bl1.length());
    ASSERT_EQ(sizeof(buf), read_bl2.length());
    ASSERT_EQ(0, rval1);
    ASSERT_EQ(0, rval2);
    ASSERT_EQ(0, memcmp(read_bl1.c_str(), buf, sizeof(buf)));
    ASSERT_EQ(0, memcmp(read_bl2.c_str(), buf, sizeof(buf)));
  }

  // read into a preallocated buffer with a cached crc
  {
    bufferlist op_bl;
    op_bl.append(std::string(sizeof(buf), 'x'));
    ASSERT_NE(op_bl.crc32c(0), bl.crc32c(0));  // cache 'x' crc

    ObjectReadOperation op;
    op.read(0, sizeof(buf), NULL, NULL);
    ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
    ASSERT_EQ(sizeof(buf), op_bl.length());
    ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
    ASSERT_EQ(op_bl.crc32c(0), bl.crc32c(0));
  }
}
// ObjectReadOperation::sparse_read must return extents+data matching the
// written object, both as a plain sparse read and as a truncated
// (truncate_size/truncate_seq) sparse read of the first half.
TEST_F(LibRadosIoPP, SparseReadOpPP) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));

  {
    std::map<uint64_t, uint64_t> extents;
    bufferlist read_bl;
    int rval = -1;
    ObjectReadOperation op;
    op.sparse_read(0, sizeof(buf), &extents, &read_bl, &rval);
    ASSERT_EQ(0, ioctx.operate("foo", &op, nullptr));
    ASSERT_EQ(0, rval);
    assert_eq_sparse(bl, extents, read_bl);
  }
  {
    bufferlist bl;
    bl.append(buf, sizeof(buf) / 2);

    std::map<uint64_t, uint64_t> extents;
    bufferlist read_bl;
    int rval = -1;
    ObjectReadOperation op;
    // truncate_size = sizeof(buf)/2, truncate_seq = 1
    op.sparse_read(0, sizeof(buf), &extents, &read_bl, &rval, sizeof(buf) / 2, 1);
    ASSERT_EQ(0, ioctx.operate("foo", &op, nullptr));
    ASSERT_EQ(0, rval);
    assert_eq_sparse(bl, extents, read_bl);
  }
}
// C++ API write/read round trip: data read back must match what was written.
// Removed an unused local 'Rados cluster;' that constructed (and tore down)
// a whole client handle for nothing.
TEST_F(LibRadosIoPP, RoundTripPP) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));
  bufferlist cl;
  ASSERT_EQ((int)sizeof(buf), ioctx.read("foo", cl, sizeof(buf), 0));
  ASSERT_EQ(0, memcmp(buf, cl.c_str(), sizeof(buf)));
}
// Round trip via Object{Write,Read}Operation with fadvise op flags; the
// flags are hints and must not affect the data.
TEST_F(LibRadosIoPP, RoundTripPP2)
{
  bufferlist bl;
  bl.append("ceph");
  ObjectWriteOperation write;
  write.write(0, bl);
  write.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
  ASSERT_EQ(0, ioctx.operate("foo", &write));

  ObjectReadOperation read;
  read.read(0, bl.length(), NULL, NULL);
  read.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_NOCACHE|LIBRADOS_OP_FLAG_FADVISE_RANDOM);
  ASSERT_EQ(0, ioctx.operate("foo", &read, &bl));
  ASSERT_EQ(0, memcmp(bl.c_str(), "ceph", 4));
}
// IoCtx::checksum with CRC32C must return a count of 1 followed by the same
// crc the client computes locally over the written bufferlist.
// Removed an unused local 'Rados cluster;'.
TEST_F(LibRadosIoPP, Checksum) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));

  // initial crc value (-1), encoded as the checksum op expects
  bufferlist init_value_bl;
  encode(static_cast<uint32_t>(-1), init_value_bl);
  bufferlist csum_bl;
  ASSERT_EQ(0, ioctx.checksum("foo", LIBRADOS_CHECKSUM_TYPE_CRC32C,
                              init_value_bl, sizeof(buf), 0, 0, &csum_bl));
  auto csum_bl_it = csum_bl.cbegin();
  uint32_t csum_count;
  decode(csum_count, csum_bl_it);
  ASSERT_EQ(1U, csum_count);
  uint32_t csum;
  decode(csum, csum_bl_it);
  ASSERT_EQ(bl.crc32c(-1), csum);
}
// Reading into a non-empty bufferlist that references an existing static
// buffer must fill that buffer in place with the object data.
// Removed an unused local 'Rados cluster;'.
TEST_F(LibRadosIoPP, ReadIntoBufferlist) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));

  // bl2 wraps buf2 without copying, so the read lands directly in buf2
  bufferlist bl2;
  char buf2[sizeof(buf)];
  memset(buf2, 0xbb, sizeof(buf2));
  bl2.append(buffer::create_static(sizeof(buf2), buf2));
  ASSERT_EQ((int)sizeof(buf), ioctx.read("foo", bl2, sizeof(buf), 0));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  ASSERT_EQ(0, memcmp(buf, bl2.c_str(), sizeof(buf)));
}
// A smaller write overlapping the start of a larger one must leave the head
// with the new data and the tail with the old data.
TEST_F(LibRadosIoPP, OverlappingWriteRoundTripPP) {
  char buf[128];
  char buf2[64];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  ASSERT_EQ(0, ioctx.write("foo", bl2, sizeof(buf2), 0));
  bufferlist bl3;
  ASSERT_EQ((int)sizeof(buf), ioctx.read("foo", bl3, sizeof(buf), 0));
  // head: second (overlapping) write; tail: remainder of the first write
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf2, sizeof(buf2)));
  ASSERT_EQ(0, memcmp(bl3.c_str() + sizeof(buf2), buf, sizeof(buf) - sizeof(buf2)));
}
// write_full replaces the whole object, so only the second (smaller) write's
// bytes remain.
TEST_F(LibRadosIoPP, WriteFullRoundTripPP) {
  char buf[128];
  char buf2[64];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  ASSERT_EQ(0, ioctx.write_full("foo", bl2));
  bufferlist bl3;
  ASSERT_EQ((int)sizeof(buf2), ioctx.read("foo", bl3, sizeof(buf), 0));
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf2, sizeof(buf2)));
}
// write_full via ObjectWriteOperation with fadvise flags; flags are hints
// only and must not affect the stored data.
TEST_F(LibRadosIoPP, WriteFullRoundTripPP2)
{
  bufferlist bl;
  bl.append("ceph");
  ObjectWriteOperation write;
  write.write_full(bl);
  write.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_NOCACHE);
  ASSERT_EQ(0, ioctx.operate("foo", &write));

  ObjectReadOperation read;
  read.read(0, bl.length(), NULL, NULL);
  read.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED|LIBRADOS_OP_FLAG_FADVISE_RANDOM);
  ASSERT_EQ(0, ioctx.operate("foo", &read, &bl));
  ASSERT_EQ(0, memcmp(bl.c_str(), "ceph", 4));
}
// Two appends concatenate; reading back must yield both payloads in order.
TEST_F(LibRadosIoPP, AppendRoundTripPP) {
  char buf[64];
  char buf2[64];
  memset(buf, 0xde, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.append("foo", bl1, sizeof(buf)));
  memset(buf2, 0xad, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  ASSERT_EQ(0, ioctx.append("foo", bl2, sizeof(buf2)));
  bufferlist bl3;
  ASSERT_EQ((int)(sizeof(buf) + sizeof(buf2)),
	    ioctx.read("foo", bl3, (sizeof(buf) + sizeof(buf2)), 0));
  const char *bl3_str = bl3.c_str();
  ASSERT_EQ(0, memcmp(bl3_str, buf, sizeof(buf)));
  ASSERT_EQ(0, memcmp(bl3_str + sizeof(buf), buf2, sizeof(buf2)));
}
// Truncating to half the size must leave exactly the first half readable.
TEST_F(LibRadosIoPP, TruncTestPP) {
  char buf[128];
  memset(buf, 0xaa, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.append("foo", bl, sizeof(buf)));
  ASSERT_EQ(0, ioctx.trunc("foo", sizeof(buf) / 2));
  bufferlist bl2;
  ASSERT_EQ((int)(sizeof(buf)/2), ioctx.read("foo", bl2, sizeof(buf), 0));
  ASSERT_EQ(0, memcmp(bl2.c_str(), buf, sizeof(buf)/2));
}
// Removing an object must make subsequent reads fail with -ENOENT.
TEST_F(LibRadosIoPP, RemoveTestPP) {
  char data[128];
  memset(data, 0xaa, sizeof(data));
  bufferlist payload;
  payload.append(data, sizeof(data));
  ASSERT_EQ(0, ioctx.append("foo", payload, sizeof(data)));
  ASSERT_EQ(0, ioctx.remove("foo"));
  bufferlist readback;
  ASSERT_EQ(-ENOENT, ioctx.read("foo", readback, sizeof(data), 0));
}
// Missing xattr yields -ENODATA; after setxattr the stored value must
// round-trip byte for byte.
TEST_F(LibRadosIoPP, XattrsRoundTripPP) {
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  memset(buf, 0xaa, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.append("foo", bl1, sizeof(buf)));
  bufferlist bl2;
  ASSERT_EQ(-ENODATA, ioctx.getxattr("foo", attr1, bl2));
  bufferlist bl3;
  bl3.append(attr1_buf, sizeof(attr1_buf));
  ASSERT_EQ(0, ioctx.setxattr("foo", attr1, bl3));
  bufferlist bl4;
  ASSERT_EQ((int)sizeof(attr1_buf),
      ioctx.getxattr("foo", attr1, bl4));
  ASSERT_EQ(0, memcmp(bl4.c_str(), attr1_buf, sizeof(attr1_buf)));
}
// rmxattr deletes an attribute (subsequent get yields -ENODATA), and
// rmxattr on a removed object fails with -ENOENT.
// Fix: the second half memsets buf2 (0xbb) for "foo_rmxattr" but then
// appended 'buf' — append buf2, matching the intent and the C-API RmXattr
// test, which writes buf2.
TEST_F(LibRadosIoPP, RmXattrPP) {
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  memset(buf, 0xaa, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.append("foo", bl1, sizeof(buf)));
  bufferlist bl2;
  bl2.append(attr1_buf, sizeof(attr1_buf));
  ASSERT_EQ(0, ioctx.setxattr("foo", attr1, bl2));
  ASSERT_EQ(0, ioctx.rmxattr("foo", attr1));
  bufferlist bl3;
  ASSERT_EQ(-ENODATA, ioctx.getxattr("foo", attr1, bl3));

  // Test rmxattr on a removed object
  char buf2[128];
  char attr2[] = "attr2";
  char attr2_buf[] = "foo bar baz";
  memset(buf2, 0xbb, sizeof(buf2));
  bufferlist bl21;
  bl21.append(buf2, sizeof(buf2));
  ASSERT_EQ(0, ioctx.write("foo_rmxattr", bl21, sizeof(buf2), 0));
  bufferlist bl22;
  bl22.append(attr2_buf, sizeof(attr2_buf));
  ASSERT_EQ(0, ioctx.setxattr("foo_rmxattr", attr2, bl22));
  ASSERT_EQ(0, ioctx.remove("foo_rmxattr"));
  ASSERT_EQ(-ENOENT, ioctx.rmxattr("foo_rmxattr", attr2));
}
// getxattrs must return exactly the two attributes that were set, each with
// the expected value.
TEST_F(LibRadosIoPP, XattrListPP) {
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  char attr2[] = "attr2";
  char attr2_buf[256];
  for (size_t j = 0; j < sizeof(attr2_buf); ++j) {
    attr2_buf[j] = j % 0xff;
  }
  memset(buf, 0xaa, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.append("foo", bl1, sizeof(buf)));
  bufferlist bl2;
  bl2.append(attr1_buf, sizeof(attr1_buf));
  ASSERT_EQ(0, ioctx.setxattr("foo", attr1, bl2));
  bufferlist bl3;
  bl3.append(attr2_buf, sizeof(attr2_buf));
  ASSERT_EQ(0, ioctx.setxattr("foo", attr2, bl3));
  std::map<std::string, bufferlist> attrset;
  ASSERT_EQ(0, ioctx.getxattrs("foo", attrset));
  for (std::map<std::string, bufferlist>::iterator i = attrset.begin();
       i != attrset.end(); ++i) {
    if (i->first == string(attr1)) {
      ASSERT_EQ(0, memcmp(i->second.c_str(), attr1_buf, sizeof(attr1_buf)));
    }
    else if (i->first == string(attr2)) {
      ASSERT_EQ(0, memcmp(i->second.c_str(), attr2_buf, sizeof(attr2_buf)));
    }
    else {
      // unexpected attribute: force a failure
      ASSERT_EQ(0, 1);
    }
  }
}
// EC-pool C++ variant of SimpleWrite: aligned full-object writes succeed in
// the default and an explicit namespace.
TEST_F(LibRadosIoECPP, SimpleWritePP) {
  SKIP_IF_CRIMSON();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));
  ioctx.set_namespace("nspace");
  ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));
}
// EC-pool twin of LibRadosIoPP.ReadOpPP: exercises ObjectReadOperation::read
// through every combination of optional out-params (out bufferlist, per-op
// bufferlist, per-op rval), with and without an operate() out bufferlist,
// including a double read in one op and a read into a preallocated buffer
// whose crc cache must be invalidated.
TEST_F(LibRadosIoECPP, ReadOpPP) {
  SKIP_IF_CRIMSON();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));

  // no per-op out-params; result only via operate()'s out bufferlist
  {
    bufferlist op_bl;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), NULL, NULL);
    ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
    ASSERT_EQ(sizeof(buf), op_bl.length());
    ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
  }

  {
    bufferlist op_bl;
    ObjectReadOperation op;
    op.read(0, 0, NULL, NULL); //len=0 mean read the whole object data
    ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
    ASSERT_EQ(sizeof(buf), op_bl.length());
    ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
  }

  // per-op out bufferlist plus operate() out bufferlist
  {
    bufferlist read_bl, op_bl;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), &read_bl, NULL);
    ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
    ASSERT_EQ(sizeof(buf), read_bl.length());
    ASSERT_EQ(sizeof(buf), op_bl.length());
    ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
    ASSERT_EQ(0, memcmp(read_bl.c_str(), buf, sizeof(buf)));
  }

  // per-op rval out-param
  {
    bufferlist op_bl;
    int rval = 1000;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), NULL, &rval);
    ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
    ASSERT_EQ(sizeof(buf), op_bl.length());
    ASSERT_EQ(0, rval);
    ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
  }

  {
    bufferlist read_bl, op_bl;
    int rval = 1000;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), &read_bl, &rval);
    ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
    ASSERT_EQ(sizeof(buf), read_bl.length());
    ASSERT_EQ(sizeof(buf), op_bl.length());
    ASSERT_EQ(0, rval);
    ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
    ASSERT_EQ(0, memcmp(read_bl.c_str(), buf, sizeof(buf)));
  }

  // two reads in a single op: operate()'s out bufferlist concatenates both
  {
    bufferlist read_bl1, read_bl2, op_bl;
    int rval1 = 1000, rval2 = 1002;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), &read_bl1, &rval1);
    op.read(0, sizeof(buf), &read_bl2, &rval2);
    ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
    ASSERT_EQ(sizeof(buf), read_bl1.length());
    ASSERT_EQ(sizeof(buf), read_bl2.length());
    ASSERT_EQ(sizeof(buf) * 2, op_bl.length());
    ASSERT_EQ(0, rval1);
    ASSERT_EQ(0, rval2);
    ASSERT_EQ(0, memcmp(read_bl1.c_str(), buf, sizeof(buf)));
    ASSERT_EQ(0, memcmp(read_bl2.c_str(), buf, sizeof(buf)));
    ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
    ASSERT_EQ(0, memcmp(op_bl.c_str() + sizeof(buf), buf, sizeof(buf)));
  }

  {
    bufferlist op_bl;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), NULL, NULL);
    ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
    ASSERT_EQ(sizeof(buf), op_bl.length());
    ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
  }

  // same combinations without an operate() out bufferlist
  {
    bufferlist read_bl;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), &read_bl, NULL);
    ASSERT_EQ(0, ioctx.operate("foo", &op, NULL));
    ASSERT_EQ(sizeof(buf), read_bl.length());
    ASSERT_EQ(0, memcmp(read_bl.c_str(), buf, sizeof(buf)));
  }

  {
    int rval = 1000;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), NULL, &rval);
    ASSERT_EQ(0, ioctx.operate("foo", &op, NULL));
    ASSERT_EQ(0, rval);
  }

  {
    bufferlist read_bl;
    int rval = 1000;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), &read_bl, &rval);
    ASSERT_EQ(0, ioctx.operate("foo", &op, NULL));
    ASSERT_EQ(sizeof(buf), read_bl.length());
    ASSERT_EQ(0, rval);
    ASSERT_EQ(0, memcmp(read_bl.c_str(), buf, sizeof(buf)));
  }

  {
    bufferlist read_bl1, read_bl2;
    int rval1 = 1000, rval2 = 1002;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), &read_bl1, &rval1);
    op.read(0, sizeof(buf), &read_bl2, &rval2);
    ASSERT_EQ(0, ioctx.operate("foo", &op, NULL));
    ASSERT_EQ(sizeof(buf), read_bl1.length());
    ASSERT_EQ(sizeof(buf), read_bl2.length());
    ASSERT_EQ(0, rval1);
    ASSERT_EQ(0, rval2);
    ASSERT_EQ(0, memcmp(read_bl1.c_str(), buf, sizeof(buf)));
    ASSERT_EQ(0, memcmp(read_bl2.c_str(), buf, sizeof(buf)));
  }

  // read into a preallocated buffer with a cached crc
  {
    bufferlist op_bl;
    op_bl.append(std::string(sizeof(buf), 'x'));
    ASSERT_NE(op_bl.crc32c(0), bl.crc32c(0));  // cache 'x' crc

    ObjectReadOperation op;
    op.read(0, sizeof(buf), NULL, NULL);
    ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
    ASSERT_EQ(sizeof(buf), op_bl.length());
    ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
    ASSERT_EQ(op_bl.crc32c(0), bl.crc32c(0));
  }
}
// Verify ObjectReadOperation::sparse_read() on an EC pool: after fully
// writing a 128-byte object, a sparse read over the same range must succeed
// (rval == 0) and the returned extent map/data must match the written data.
TEST_F(LibRadosIoECPP, SparseReadOpPP) {
  SKIP_IF_CRIMSON();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));
  {
    std::map<uint64_t, uint64_t> extents;
    bufferlist read_bl;
    // Start from a sentinel so we can tell the op actually set rval.
    int rval = -1;
    ObjectReadOperation op;
    op.sparse_read(0, sizeof(buf), &extents, &read_bl, &rval);
    ASSERT_EQ(0, ioctx.operate("foo", &op, nullptr));
    ASSERT_EQ(0, rval);
    // Helper checks that extents+data reassemble to the original buffer.
    assert_eq_sparse(bl, extents, read_bl);
  }
}
// Write 128 bytes to "foo" on an EC pool and read them back. The read asks
// for three times the object size to confirm read() reports the actual
// number of bytes present rather than the requested length.
TEST_F(LibRadosIoECPP, RoundTripPP) {
  SKIP_IF_CRIMSON();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));
  bufferlist cl;
  // Over-sized read: only sizeof(buf) bytes exist, so that is the result.
  ASSERT_EQ((int)sizeof(buf), ioctx.read("foo", cl, sizeof(buf) * 3, 0));
  ASSERT_EQ(0, memcmp(buf, cl.c_str(), sizeof(buf)));
}
// Same round trip as RoundTripPP but driven through Object{Write,Read}Operation
// with fadvise op flags set, to confirm the flags do not disturb the data path.
TEST_F(LibRadosIoECPP, RoundTripPP2)
{
  SKIP_IF_CRIMSON();
  bufferlist bl;
  bl.append("ceph");
  ObjectWriteOperation write;
  write.write(0, bl);
  write.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
  ASSERT_EQ(0, ioctx.operate("foo", &write));
  ObjectReadOperation read;
  read.read(0, bl.length(), NULL, NULL);
  read.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED|LIBRADOS_OP_FLAG_FADVISE_RANDOM);
  // Read result is delivered through the out-bl of operate(); reuses bl.
  ASSERT_EQ(0, ioctx.operate("foo", &read, &bl));
  ASSERT_EQ(0, memcmp(bl.c_str(), "ceph", 4));
}
// On an EC pool an overwrite of the front of an existing object must be
// rejected with -EOPNOTSUPP, and the originally written data must remain
// intact and readable.
TEST_F(LibRadosIoECPP, OverlappingWriteRoundTripPP) {
  SKIP_IF_CRIMSON();
  int bsize = alignment;
  int dbsize = bsize * 2;
  // new char[] already yields a char*; the former C-style casts were redundant.
  char *buf = new char[dbsize];
  char *buf2 = new char[bsize];
  auto cleanup = [&] {
    delete[] buf;
    delete[] buf2;
  };
  scope_guard<decltype(cleanup)> sg(std::move(cleanup));
  memset(buf, 0xcc, dbsize);
  bufferlist bl1;
  bl1.append(buf, dbsize);
  ASSERT_EQ(0, ioctx.write("foo", bl1, dbsize, 0));
  memset(buf2, 0xdd, bsize);
  bufferlist bl2;
  bl2.append(buf2, bsize);
  // Partial overwrite at offset 0 is not supported on EC pools.
  ASSERT_EQ(-EOPNOTSUPP, ioctx.write("foo", bl2, bsize, 0));
  bufferlist bl3;
  ASSERT_EQ(dbsize, ioctx.read("foo", bl3, dbsize, 0));
  // Read the same as first write
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf, dbsize));
}
// write_full() must replace the whole object on an EC pool: after writing
// 128 bytes and then write_full()-ing 64 different bytes, a read returns
// only the 64 new bytes.
TEST_F(LibRadosIoECPP, WriteFullRoundTripPP) {
  SKIP_IF_CRIMSON();
  char buf[128];
  char buf2[64];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  ASSERT_EQ(0, ioctx.write_full("foo", bl2));
  bufferlist bl3;
  // Ask for the old (larger) size; the object was truncated to 64 bytes.
  ASSERT_EQ((int)sizeof(buf2), ioctx.read("foo", bl3, sizeof(buf), 0));
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf2, sizeof(buf2)));
}
// write_full() round trip via Object{Write,Read}Operation with fadvise op
// flags set; the flags must not affect the data written or read back.
TEST_F(LibRadosIoECPP, WriteFullRoundTripPP2)
{
  SKIP_IF_CRIMSON();
  bufferlist bl;
  bl.append("ceph");
  ObjectWriteOperation write;
  write.write_full(bl);
  write.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
  ASSERT_EQ(0, ioctx.operate("foo", &write));
  ObjectReadOperation read;
  read.read(0, bl.length(), NULL, NULL);
  read.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED|LIBRADOS_OP_FLAG_FADVISE_RANDOM);
  ASSERT_EQ(0, ioctx.operate("foo", &read, &bl));
  ASSERT_EQ(0, memcmp(bl.c_str(), "ceph", 4));
}
// Two aligned appends to an EC-pool object must concatenate: the object ends
// up alignment*2 bytes long, with each half matching the respective append.
TEST_F(LibRadosIoECPP, AppendRoundTripPP) {
  SKIP_IF_CRIMSON();
  // new char[] already yields a char*; the former C-style casts were redundant.
  char *buf = new char[alignment];
  char *buf2 = new char[alignment];
  auto cleanup = [&] {
    delete[] buf;
    delete[] buf2;
  };
  scope_guard<decltype(cleanup)> sg(std::move(cleanup));
  memset(buf, 0xde, alignment);
  bufferlist bl1;
  bl1.append(buf, alignment);
  ASSERT_EQ(0, ioctx.append("foo", bl1, alignment));
  memset(buf2, 0xad, alignment);
  bufferlist bl2;
  bl2.append(buf2, alignment);
  ASSERT_EQ(0, ioctx.append("foo", bl2, alignment));
  bufferlist bl3;
  // Over-sized read returns the true object length: both appends.
  ASSERT_EQ((int)(alignment * 2),
            ioctx.read("foo", bl3, (alignment * 4), 0));
  const char *bl3_str = bl3.c_str();
  ASSERT_EQ(0, memcmp(bl3_str, buf, alignment));
  ASSERT_EQ(0, memcmp(bl3_str + alignment, buf2, alignment));
}
// trunc() is not supported on EC pools: it must fail with -EOPNOTSUPP and
// leave the object size and contents unchanged.
TEST_F(LibRadosIoECPP, TruncTestPP) {
  SKIP_IF_CRIMSON();
  char buf[128];
  memset(buf, 0xaa, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.append("foo", bl, sizeof(buf)));
  ASSERT_EQ(-EOPNOTSUPP, ioctx.trunc("foo", sizeof(buf) / 2));
  bufferlist bl2;
  // Same size
  ASSERT_EQ((int)sizeof(buf), ioctx.read("foo", bl2, sizeof(buf), 0));
  // No change
  ASSERT_EQ(0, memcmp(bl2.c_str(), buf, sizeof(buf)));
}
// Removing an object on an EC pool makes a subsequent read fail with -ENOENT.
TEST_F(LibRadosIoECPP, RemoveTestPP) {
  SKIP_IF_CRIMSON();
  char payload[128];
  memset(payload, 0xaa, sizeof(payload));
  bufferlist write_bl;
  write_bl.append(payload, sizeof(payload));
  ASSERT_EQ(0, ioctx.append("foo", write_bl, sizeof(payload)));
  // Delete the object, then confirm it is really gone.
  ASSERT_EQ(0, ioctx.remove("foo"));
  bufferlist read_bl;
  ASSERT_EQ(-ENOENT, ioctx.read("foo", read_bl, sizeof(payload), 0));
}
// xattr round trip on an EC pool: getxattr on a missing attr returns
// -ENODATA; after setxattr the same attr reads back byte-for-byte.
TEST_F(LibRadosIoECPP, XattrsRoundTripPP) {
  SKIP_IF_CRIMSON();
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  memset(buf, 0xaa, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.append("foo", bl1, sizeof(buf)));
  bufferlist bl2;
  // Attr does not exist yet.
  ASSERT_EQ(-ENODATA, ioctx.getxattr("foo", attr1, bl2));
  bufferlist bl3;
  // sizeof(attr1_buf) includes the trailing NUL, which is stored too.
  bl3.append(attr1_buf, sizeof(attr1_buf));
  ASSERT_EQ(0, ioctx.setxattr("foo", attr1, bl3));
  bufferlist bl4;
  ASSERT_EQ((int)sizeof(attr1_buf),
            ioctx.getxattr("foo", attr1, bl4));
  ASSERT_EQ(0, memcmp(bl4.c_str(), attr1_buf, sizeof(attr1_buf)));
}
// rmxattr behavior on an EC pool: removing a set attr makes getxattr return
// -ENODATA, and rmxattr on a removed object returns -ENOENT.
TEST_F(LibRadosIoECPP, RmXattrPP) {
  SKIP_IF_CRIMSON();
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  memset(buf, 0xaa, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.append("foo", bl1, sizeof(buf)));
  bufferlist bl2;
  bl2.append(attr1_buf, sizeof(attr1_buf));
  ASSERT_EQ(0, ioctx.setxattr("foo", attr1, bl2));
  ASSERT_EQ(0, ioctx.rmxattr("foo", attr1));
  bufferlist bl3;
  ASSERT_EQ(-ENODATA, ioctx.getxattr("foo", attr1, bl3));
  // Test rmxattr on a removed object
  char buf2[128];
  char attr2[] = "attr2";
  char attr2_buf[] = "foo bar baz";
  memset(buf2, 0xbb, sizeof(buf2));
  bufferlist bl21;
  // Fix: append the buffer prepared for this object (buf2, 0xbb), not the
  // unrelated buf (0xaa) used by the first half of the test.
  bl21.append(buf2, sizeof(buf2));
  ASSERT_EQ(0, ioctx.write("foo_rmxattr", bl21, sizeof(buf2), 0));
  bufferlist bl22;
  bl22.append(attr2_buf, sizeof(attr2_buf));
  ASSERT_EQ(0, ioctx.setxattr("foo_rmxattr", attr2, bl22));
  ASSERT_EQ(0, ioctx.remove("foo_rmxattr"));
  ASSERT_EQ(-ENOENT, ioctx.rmxattr("foo_rmxattr", attr2));
}
// getxattrs() must return exactly the attrs that were set, with matching
// values; any unexpected attr name fails the test.
TEST_F(LibRadosIoECPP, XattrListPP) {
  SKIP_IF_CRIMSON();
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  char attr2[] = "attr2";
  char attr2_buf[256];
  // Fill attr2 with a non-trivial byte pattern (0..254 repeating).
  for (size_t j = 0; j < sizeof(attr2_buf); ++j) {
    attr2_buf[j] = j % 0xff;
  }
  memset(buf, 0xaa, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.append("foo", bl1, sizeof(buf)));
  bufferlist bl2;
  bl2.append(attr1_buf, sizeof(attr1_buf));
  ASSERT_EQ(0, ioctx.setxattr("foo", attr1, bl2));
  bufferlist bl3;
  bl3.append(attr2_buf, sizeof(attr2_buf));
  ASSERT_EQ(0, ioctx.setxattr("foo", attr2, bl3));
  std::map<std::string, bufferlist> attrset;
  ASSERT_EQ(0, ioctx.getxattrs("foo", attrset));
  for (std::map<std::string, bufferlist>::iterator i = attrset.begin();
       i != attrset.end(); ++i) {
    if (i->first == string(attr1)) {
      ASSERT_EQ(0, memcmp(i->second.c_str(), attr1_buf, sizeof(attr1_buf)));
    }
    else if (i->first == string(attr2)) {
      ASSERT_EQ(0, memcmp(i->second.c_str(), attr2_buf, sizeof(attr2_buf)));
    }
    else {
      // Unknown attr name: force a failure.
      ASSERT_EQ(0, 1);
    }
  }
}
// cmpext guard success path (replicated pool): when the stored bytes match
// the comparison buffer, the compound op proceeds and the write lands.
TEST_F(LibRadosIoPP, CmpExtPP) {
  bufferlist bl;
  bl.append("ceph");
  ObjectWriteOperation write1;
  write1.write(0, bl);
  ASSERT_EQ(0, ioctx.operate("foo", &write1));
  bufferlist new_bl;
  new_bl.append("CEPH");
  ObjectWriteOperation write2;
  // Guard: compare stored data against "ceph" before writing "CEPH".
  write2.cmpext(0, bl, nullptr);
  write2.write(0, new_bl);
  ASSERT_EQ(0, ioctx.operate("foo", &write2));
  ObjectReadOperation read;
  read.read(0, bl.length(), NULL, NULL);
  ASSERT_EQ(0, ioctx.operate("foo", &read, &bl));
  ASSERT_EQ(0, memcmp(bl.c_str(), "CEPH", 4));
}
// cmpext against a non-existent object (replicated pool): comparing with a
// zero-filled buffer succeeds, so the guarded write creates the object.
TEST_F(LibRadosIoPP, CmpExtDNEPP) {
  bufferlist bl;
  // Four NUL bytes: a missing object compares equal to zeroes.
  bl.append(std::string(4, '\0'));
  bufferlist new_bl;
  new_bl.append("CEPH");
  ObjectWriteOperation write;
  write.cmpext(0, bl, nullptr);
  write.write(0, new_bl);
  ASSERT_EQ(0, ioctx.operate("foo", &write));
  ObjectReadOperation read;
  read.read(0, bl.length(), NULL, NULL);
  ASSERT_EQ(0, ioctx.operate("foo", &read, &bl));
  ASSERT_EQ(0, memcmp(bl.c_str(), "CEPH", 4));
}
// cmpext guard failure path (replicated pool): a mismatch at offset 0 makes
// the compound op fail with -MAX_ERRNO and the guarded write must not land.
TEST_F(LibRadosIoPP, CmpExtMismatchPP) {
  bufferlist bl;
  bl.append("ceph");
  ObjectWriteOperation write1;
  write1.write(0, bl);
  ASSERT_EQ(0, ioctx.operate("foo", &write1));
  bufferlist new_bl;
  new_bl.append("CEPH");
  ObjectWriteOperation write2;
  // Guard compares "CEPH" against stored "ceph" -> mismatch at offset 0.
  write2.cmpext(0, new_bl, nullptr);
  write2.write(0, new_bl);
  ASSERT_EQ(-MAX_ERRNO, ioctx.operate("foo", &write2));
  ObjectReadOperation read;
  read.read(0, bl.length(), NULL, NULL);
  ASSERT_EQ(0, ioctx.operate("foo", &read, &bl));
  // Original data is untouched.
  ASSERT_EQ(0, memcmp(bl.c_str(), "ceph", 4));
}
// cmpext guard success path on an EC pool; uses write_full because EC pools
// do not support partial overwrites.
TEST_F(LibRadosIoECPP, CmpExtPP) {
  SKIP_IF_CRIMSON();
  bufferlist bl;
  bl.append("ceph");
  ObjectWriteOperation write1;
  write1.write(0, bl);
  ASSERT_EQ(0, ioctx.operate("foo", &write1));
  bufferlist new_bl;
  new_bl.append("CEPH");
  ObjectWriteOperation write2;
  write2.cmpext(0, bl, nullptr);
  write2.write_full(new_bl);
  ASSERT_EQ(0, ioctx.operate("foo", &write2));
  ObjectReadOperation read;
  read.read(0, bl.length(), NULL, NULL);
  ASSERT_EQ(0, ioctx.operate("foo", &read, &bl));
  ASSERT_EQ(0, memcmp(bl.c_str(), "CEPH", 4));
}
// cmpext against a non-existent object on an EC pool: zero-filled comparison
// succeeds and the guarded write_full creates the object.
TEST_F(LibRadosIoECPP, CmpExtDNEPP) {
  SKIP_IF_CRIMSON();
  bufferlist bl;
  // Four NUL bytes: a missing object compares equal to zeroes.
  bl.append(std::string(4, '\0'));
  bufferlist new_bl;
  new_bl.append("CEPH");
  ObjectWriteOperation write;
  write.cmpext(0, bl, nullptr);
  write.write_full(new_bl);
  ASSERT_EQ(0, ioctx.operate("foo", &write));
  ObjectReadOperation read;
  read.read(0, bl.length(), NULL, NULL);
  ASSERT_EQ(0, ioctx.operate("foo", &read, &bl));
  ASSERT_EQ(0, memcmp(bl.c_str(), "CEPH", 4));
}
// cmpext guard failure on an EC pool: mismatch at offset 0 yields
// -MAX_ERRNO and the guarded write_full must not change the object.
TEST_F(LibRadosIoECPP, CmpExtMismatchPP) {
  SKIP_IF_CRIMSON();
  bufferlist bl;
  bl.append("ceph");
  ObjectWriteOperation write1;
  write1.write(0, bl);
  ASSERT_EQ(0, ioctx.operate("foo", &write1));
  bufferlist new_bl;
  new_bl.append("CEPH");
  ObjectWriteOperation write2;
  // Guard compares "CEPH" against stored "ceph" -> mismatch at offset 0.
  write2.cmpext(0, new_bl, nullptr);
  write2.write_full(new_bl);
  ASSERT_EQ(-MAX_ERRNO, ioctx.operate("foo", &write2));
  ObjectReadOperation read;
  read.read(0, bl.length(), NULL, NULL);
  ASSERT_EQ(0, ioctx.operate("foo", &read, &bl));
  // Original data is untouched.
  ASSERT_EQ(0, memcmp(bl.c_str(), "ceph", 4));
}
| 30,243 | 29.642351 | 88 | cc |
null | ceph-main/src/test/librados/librados.cc | //#include "common/config.h"
#include "include/rados/librados.h"
#include "gtest/gtest.h"
// Smoke test: a cluster handle can be created with a client id and shut
// down again without ever connecting.
TEST(Librados, CreateShutdown) {
  rados_t cluster;
  const int ret = rados_create(&cluster, "someid");
  EXPECT_EQ(0, ret);
  rados_shutdown(cluster);
}
| 248 | 16.785714 | 41 | cc |
null | ceph-main/src/test/librados/librados_config.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "gtest/gtest.h"
#include "include/rados/librados.h"
#include <sstream>
#include <string>
#include <string.h>
#include <errno.h>
using std::string;
// rados_conf_set followed by rados_conf_get must round-trip a simple
// config value on an unconnected cluster handle.
TEST(LibRadosConfig, SimpleSet) {
  rados_t cl;
  int ret = rados_create(&cl, NULL);
  ASSERT_EQ(ret, 0);
  ret = rados_conf_set(cl, "log_max_new", "21");
  ASSERT_EQ(ret, 0);
  char buf[128];
  memset(buf, 0, sizeof(buf));
  ret = rados_conf_get(cl, "log_max_new", buf, sizeof(buf));
  ASSERT_EQ(ret, 0);
  ASSERT_EQ(string("21"), string(buf));
  rados_shutdown(cl);
}
// rados_conf_parse_argv must apply --option value pairs from an argv-style
// array; both options read back with the parsed values.
TEST(LibRadosConfig, ArgV) {
  rados_t cl;
  int ret = rados_create(&cl, NULL);
  ASSERT_EQ(ret, 0);
  const char *argv[] = { "foo", "--log_max_new", "2",
                         "--key", "my-key", NULL };
  // Exclude the trailing NULL sentinel from argc.
  size_t argc = (sizeof(argv) / sizeof(argv[0])) - 1;
  rados_conf_parse_argv(cl, argc, argv);
  char buf[128];
  memset(buf, 0, sizeof(buf));
  ret = rados_conf_get(cl, "key", buf, sizeof(buf));
  ASSERT_EQ(ret, 0);
  ASSERT_EQ(string("my-key"), string(buf));
  memset(buf, 0, sizeof(buf));
  ret = rados_conf_get(cl, "log_max_new", buf, sizeof(buf));
  ASSERT_EQ(ret, 0);
  ASSERT_EQ(string("2"), string(buf));
  rados_shutdown(cl);
}
// Debug-level config handling: a single level "3" reads back as "3/3"
// (log/gather), "7/8" round-trips as-is, a non-numeric value is -EINVAL,
// and unknown option names are -ENOENT for both set and get.
TEST(LibRadosConfig, DebugLevels) {
  rados_t cl;
  int ret = rados_create(&cl, NULL);
  ASSERT_EQ(ret, 0);
  ret = rados_conf_set(cl, "debug_rados", "3");
  ASSERT_EQ(ret, 0);
  char buf[128];
  memset(buf, 0, sizeof(buf));
  ret = rados_conf_get(cl, "debug_rados", buf, sizeof(buf));
  ASSERT_EQ(ret, 0);
  // Single value expands to "level/gather-level".
  ASSERT_EQ(0, strcmp("3/3", buf));
  ret = rados_conf_set(cl, "debug_rados", "7/8");
  ASSERT_EQ(ret, 0);
  memset(buf, 0, sizeof(buf));
  ret = rados_conf_get(cl, "debug_rados", buf, sizeof(buf));
  ASSERT_EQ(ret, 0);
  ASSERT_EQ(0, strcmp("7/8", buf));
  // Invalid value for an existing debug option.
  ret = rados_conf_set(cl, "debug_rados", "foo");
  ASSERT_EQ(ret, -EINVAL);
  // Unknown option names.
  ret = rados_conf_set(cl, "debug_asdkfasdjfajksdf", "foo");
  ASSERT_EQ(ret, -ENOENT);
  ret = rados_conf_get(cl, "debug_radfjadfsdados", buf, sizeof(buf));
  ASSERT_EQ(ret, -ENOENT);
  rados_shutdown(cl);
}
| 2,427 | 23.525253 | 70 | cc |
null | ceph-main/src/test/librados/list.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/rados/librados.h"
#include "include/rados/librados.hpp"
#include "include/stringify.h"
#include "test/librados/test.h"
#include "test/librados/test_common.h"
#include "test/librados/TestCase.h"
#include "global/global_context.h"
#include "include/types.h"
#include "common/hobject.h"
#include "gtest/gtest.h"
#include <errno.h>
#include <string>
#include <stdexcept>
#include "crimson_utils.h"
using namespace std;
using namespace librados;
typedef RadosTestNSCleanup LibRadosList;
typedef RadosTestECNSCleanup LibRadosListEC;
typedef RadosTestNP LibRadosListNP;
// Basic C-API object listing: after writing one object, iterating the pool
// must yield exactly "foo" (and nothing else) before -ENOENT.
TEST_F(LibRadosList, ListObjects) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  rados_list_ctx_t ctx;
  ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
  const char *entry;
  bool foundit = false;
  while (rados_nobjects_list_next(ctx, &entry, NULL, NULL) != -ENOENT) {
    foundit = true;
    ASSERT_EQ(std::string(entry), "foo");
  }
  ASSERT_TRUE(foundit);
  rados_nobjects_list_close(ctx);
}
// Object names containing embedded NUL bytes must survive listing; uses
// rados_nobjects_list_next2, which reports explicit lengths instead of
// relying on NUL termination.
TEST_F(LibRadosList, ListObjectsZeroInName) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo\0bar", buf, sizeof(buf), 0));
  rados_list_ctx_t ctx;
  ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
  const char *entry;
  size_t entry_size;
  bool foundit = false;
  while (rados_nobjects_list_next2(ctx, &entry, NULL, NULL,
                                   &entry_size, NULL, NULL) != -ENOENT) {
    foundit = true;
    // Compare with an explicit length so the embedded NUL is included.
    ASSERT_EQ(std::string(entry, entry_size), "foo\0bar");
  }
  ASSERT_TRUE(foundit);
  rados_nobjects_list_close(ctx);
}
// Drain the listing context and check it yields exactly the objects in
// myset, each at least once (myset is consumed). When check_nspace is
// all_nspaces, entries are matched as "namespace:oid"; otherwise every
// entry's namespace must equal check_nspace and only the oid is matched.
static void check_list(
  std::set<std::string>& myset,
  rados_list_ctx_t& ctx,
  const std::string &check_nspace)
{
  const char *entry, *nspace;
  cout << "myset " << myset << std::endl;
  // we should see every item exactly once.
  int ret;
  while ((ret = rados_nobjects_list_next(ctx, &entry, NULL, &nspace)) == 0) {
    std::string test_name;
    if (check_nspace == all_nspaces) {
      test_name = std::string(nspace) + ":" + std::string(entry);
    } else {
      ASSERT_TRUE(std::string(nspace) == check_nspace);
      test_name = std::string(entry);
    }
    cout << test_name << std::endl;
    // Every listed object must have been expected.
    ASSERT_TRUE(myset.end() != myset.find(test_name));
    myset.erase(test_name);
  }
  // Iteration ends with -ENOENT, and nothing expected may be left over.
  ASSERT_EQ(-ENOENT, ret);
  ASSERT_TRUE(myset.empty());
}
// Namespace-aware listing (C API): objects written under "", "ns1" and
// "ns2" must only be visible when listing their own namespace, and all of
// them (prefixed "ns:oid") when listing LIBRADOS_ALL_NSPACES. Also checks
// rados_ioctx_get_namespace's -ERANGE behavior for short buffers.
TEST_F(LibRadosList, ListObjectsNS) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  // Create :foo1, :foo2, :foo3, n1:foo1, ns1:foo4, ns1:foo5, ns2:foo6, n2:foo7
  rados_ioctx_set_namespace(ioctx, "");
  ASSERT_EQ(0, rados_write(ioctx, "foo1", buf, sizeof(buf), 0));
  rados_ioctx_set_namespace(ioctx, "ns1");
  ASSERT_EQ(0, rados_write(ioctx, "foo1", buf, sizeof(buf), 0));
  rados_ioctx_set_namespace(ioctx, "");
  ASSERT_EQ(0, rados_write(ioctx, "foo2", buf, sizeof(buf), 0));
  ASSERT_EQ(0, rados_write(ioctx, "foo3", buf, sizeof(buf), 0));
  rados_ioctx_set_namespace(ioctx, "ns1");
  ASSERT_EQ(0, rados_write(ioctx, "foo4", buf, sizeof(buf), 0));
  ASSERT_EQ(0, rados_write(ioctx, "foo5", buf, sizeof(buf), 0));
  rados_ioctx_set_namespace(ioctx, "ns2");
  ASSERT_EQ(0, rados_write(ioctx, "foo6", buf, sizeof(buf), 0));
  ASSERT_EQ(0, rados_write(ioctx, "foo7", buf, sizeof(buf), 0));
  char nspace[4];
  // Buffer of 3 is too small for "ns2" + NUL.
  ASSERT_EQ(-ERANGE, rados_ioctx_get_namespace(ioctx, nspace, 3));
  ASSERT_EQ(static_cast<int>(strlen("ns2")),
            rados_ioctx_get_namespace(ioctx, nspace, sizeof(nspace)));
  ASSERT_EQ(0, strcmp("ns2", nspace));
  // Expected listing results per namespace.
  std::set<std::string> def, ns1, ns2, all;
  def.insert(std::string("foo1"));
  def.insert(std::string("foo2"));
  def.insert(std::string("foo3"));
  ns1.insert(std::string("foo1"));
  ns1.insert(std::string("foo4"));
  ns1.insert(std::string("foo5"));
  ns2.insert(std::string("foo6"));
  ns2.insert(std::string("foo7"));
  all.insert(std::string(":foo1"));
  all.insert(std::string(":foo2"));
  all.insert(std::string(":foo3"));
  all.insert(std::string("ns1:foo1"));
  all.insert(std::string("ns1:foo4"));
  all.insert(std::string("ns1:foo5"));
  all.insert(std::string("ns2:foo6"));
  all.insert(std::string("ns2:foo7"));
  rados_list_ctx_t ctx;
  // Check default namespace ""
  rados_ioctx_set_namespace(ioctx, "");
  ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
  check_list(def, ctx, "");
  rados_nobjects_list_close(ctx);
  // Check namespace "ns1"
  rados_ioctx_set_namespace(ioctx, "ns1");
  ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
  check_list(ns1, ctx, "ns1");
  rados_nobjects_list_close(ctx);
  // Check namespace "ns2"
  rados_ioctx_set_namespace(ioctx, "ns2");
  ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
  check_list(ns2, ctx, "ns2");
  rados_nobjects_list_close(ctx);
  // Check ALL namespaces
  rados_ioctx_set_namespace(ioctx, LIBRADOS_ALL_NSPACES);
  ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
  check_list(all, ctx, all_nspaces);
  rados_nobjects_list_close(ctx);
}
// PG-hash-position seeking: record each object's hash position during a
// full listing, then seek to each position (in reverse order) and verify
// the next entry returned belongs to that position's object set.
TEST_F(LibRadosList, ListObjectsStart) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  for (int i=0; i<16; ++i) {
    string n = stringify(i);
    ASSERT_EQ(0, rados_write(ioctx, n.c_str(), buf, sizeof(buf), 0));
  }
  rados_list_ctx_t ctx;
  ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
  // Map each PG hash position to the objects listed at that position.
  std::map<int, std::set<std::string> > pg_to_obj;
  const char *entry;
  while (rados_nobjects_list_next(ctx, &entry, NULL, NULL) == 0) {
    uint32_t pos = rados_nobjects_list_get_pg_hash_position(ctx);
    std::cout << entry << " " << pos << std::endl;
    pg_to_obj[pos].insert(entry);
  }
  rados_nobjects_list_close(ctx);
  std::map<int, std::set<std::string> >::reverse_iterator p =
    pg_to_obj.rbegin();
  ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
  while (p != pg_to_obj.rend()) {
    // seek() returns the position it landed on; must match what we asked.
    ASSERT_EQ((uint32_t)p->first, rados_nobjects_list_seek(ctx, p->first));
    ASSERT_EQ(0, rados_nobjects_list_next(ctx, &entry, NULL, NULL));
    std::cout << "have " << entry << " expect one of " << p->second << std::endl;
    ASSERT_TRUE(p->second.count(entry));
    ++p;
  }
  rados_nobjects_list_close(ctx);
}
// this function replicates
// librados::operator<<(std::ostream& os, const librados::ObjectCursor& oc)
// because we don't want to use librados in librados client.
std::ostream& operator<<(std::ostream&os, const rados_object_list_cursor& oc)
{
if (oc) {
os << *(hobject_t *)oc;
} else {
os << hobject_t{};
}
return os;
}
// Cursor round trips (C API): getting the cursor at each listing position
// and seeking back to it must re-yield the same object; then replay all
// recorded cursors in reverse order and verify each still maps to its oid.
TEST_F(LibRadosList, ListObjectsCursor) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  const int max_objs = 16;
  for (int i=0; i<max_objs; ++i) {
    string n = stringify(i);
    ASSERT_EQ(0, rados_write(ioctx, n.c_str(), buf, sizeof(buf), 0));
  }
  {
    // First pass: sanity-check that the initial cursor seeks back to the
    // first entry after a full iteration.
    rados_list_ctx_t ctx;
    const char *entry;
    rados_object_list_cursor cursor;
    ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
    ASSERT_EQ(rados_nobjects_list_get_cursor(ctx, &cursor), 0);
    rados_object_list_cursor first_cursor = cursor;
    cout << "x cursor=" << cursor << std::endl;
    while (rados_nobjects_list_next(ctx, &entry, NULL, NULL) == 0) {
      string oid = entry;
      ASSERT_EQ(rados_nobjects_list_get_cursor(ctx, &cursor), 0);
      cout << "> oid=" << oid << " cursor=" << cursor << std::endl;
    }
    rados_nobjects_list_seek_cursor(ctx, first_cursor);
    ASSERT_EQ(rados_nobjects_list_next(ctx, &entry, NULL, NULL), 0);
    cout << "FIRST> seek to " << first_cursor << " oid=" << string(entry) << std::endl;
  }
  rados_list_ctx_t ctx;
  ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
  std::map<rados_object_list_cursor, string> cursor_to_obj;
  int count = 0;
  const char *entry;
  // Second pass: after each entry, seek back to its cursor and confirm the
  // same oid comes out again.
  while (rados_nobjects_list_next(ctx, &entry, NULL, NULL) == 0) {
    rados_object_list_cursor cursor;
    ASSERT_EQ(rados_nobjects_list_get_cursor(ctx, &cursor), 0);
    string oid = entry;
    cout << ": oid=" << oid << " cursor=" << cursor << std::endl;
    cursor_to_obj[cursor] = oid;
    rados_nobjects_list_seek_cursor(ctx, cursor);
    cout << ": seek to " << cursor << std::endl;
    ASSERT_EQ(rados_nobjects_list_next(ctx, &entry, NULL, NULL), 0);
    cout << "> " << cursor << " -> " << entry << std::endl;
    ASSERT_EQ(string(entry), oid);
    ASSERT_LT(count, max_objs); /* avoid infinite loops due to bad seek */
    ++count;
  }
  ASSERT_EQ(count, max_objs);
  // Third pass: replay recorded cursors in reverse order.
  auto p = cursor_to_obj.rbegin();
  ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
  while (p != cursor_to_obj.rend()) {
    cout << ": seek to " << p->first << std::endl;
    rados_object_list_cursor cursor;
    rados_object_list_cursor oid(p->first);
    rados_nobjects_list_seek_cursor(ctx, oid);
    ASSERT_EQ(rados_nobjects_list_get_cursor(ctx, &cursor), 0);
    cout << ": cursor()=" << cursor << " expected=" << oid << std::endl;
    // ASSERT_EQ(ObjectCursor(oid), ObjectCursor(cursor));
    ASSERT_EQ(rados_nobjects_list_next(ctx, &entry, NULL, NULL), 0);
    cout << "> " << cursor << " -> " << entry << std::endl;
    cout << ": entry=" << entry << " expected=" << p->second << std::endl;
    ASSERT_EQ(p->second, string(entry));
    ++p;
    rados_object_list_cursor_free(ctx, cursor);
  }
}
// Basic C-API listing on an EC pool: iterating the pool after writing one
// object must yield exactly "foo" before -ENOENT.
TEST_F(LibRadosListEC, ListObjects) {
  SKIP_IF_CRIMSON();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  rados_list_ctx_t ctx;
  ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
  const char *entry;
  bool foundit = false;
  while (rados_nobjects_list_next(ctx, &entry, NULL, NULL) != -ENOENT) {
    foundit = true;
    ASSERT_EQ(std::string(entry), "foo");
  }
  ASSERT_TRUE(foundit);
  rados_nobjects_list_close(ctx);
}
// Namespace-aware listing on an EC pool: mirror of LibRadosList.ListObjectsNS
// (without the get_namespace buffer checks) — per-namespace listings see only
// their own objects, LIBRADOS_ALL_NSPACES sees all of them as "ns:oid".
TEST_F(LibRadosListEC, ListObjectsNS) {
  SKIP_IF_CRIMSON();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  // Create :foo1, :foo2, :foo3, n1:foo1, ns1:foo4, ns1:foo5, ns2:foo6, n2:foo7
  rados_ioctx_set_namespace(ioctx, "");
  ASSERT_EQ(0, rados_write(ioctx, "foo1", buf, sizeof(buf), 0));
  rados_ioctx_set_namespace(ioctx, "ns1");
  ASSERT_EQ(0, rados_write(ioctx, "foo1", buf, sizeof(buf), 0));
  rados_ioctx_set_namespace(ioctx, "");
  ASSERT_EQ(0, rados_write(ioctx, "foo2", buf, sizeof(buf), 0));
  ASSERT_EQ(0, rados_write(ioctx, "foo3", buf, sizeof(buf), 0));
  rados_ioctx_set_namespace(ioctx, "ns1");
  ASSERT_EQ(0, rados_write(ioctx, "foo4", buf, sizeof(buf), 0));
  ASSERT_EQ(0, rados_write(ioctx, "foo5", buf, sizeof(buf), 0));
  rados_ioctx_set_namespace(ioctx, "ns2");
  ASSERT_EQ(0, rados_write(ioctx, "foo6", buf, sizeof(buf), 0));
  ASSERT_EQ(0, rados_write(ioctx, "foo7", buf, sizeof(buf), 0));
  // Expected listing results per namespace.
  std::set<std::string> def, ns1, ns2, all;
  def.insert(std::string("foo1"));
  def.insert(std::string("foo2"));
  def.insert(std::string("foo3"));
  ns1.insert(std::string("foo1"));
  ns1.insert(std::string("foo4"));
  ns1.insert(std::string("foo5"));
  ns2.insert(std::string("foo6"));
  ns2.insert(std::string("foo7"));
  all.insert(std::string(":foo1"));
  all.insert(std::string(":foo2"));
  all.insert(std::string(":foo3"));
  all.insert(std::string("ns1:foo1"));
  all.insert(std::string("ns1:foo4"));
  all.insert(std::string("ns1:foo5"));
  all.insert(std::string("ns2:foo6"));
  all.insert(std::string("ns2:foo7"));
  rados_list_ctx_t ctx;
  // Check default namespace ""
  rados_ioctx_set_namespace(ioctx, "");
  ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
  check_list(def, ctx, "");
  rados_nobjects_list_close(ctx);
  // Check default namespace "ns1"
  rados_ioctx_set_namespace(ioctx, "ns1");
  ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
  check_list(ns1, ctx, "ns1");
  rados_nobjects_list_close(ctx);
  // Check default namespace "ns2"
  rados_ioctx_set_namespace(ioctx, "ns2");
  ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
  check_list(ns2, ctx, "ns2");
  rados_nobjects_list_close(ctx);
  // Check all namespaces
  rados_ioctx_set_namespace(ioctx, LIBRADOS_ALL_NSPACES);
  ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
  check_list(all, ctx, all_nspaces);
  rados_nobjects_list_close(ctx);
}
// PG-hash-position seeking on an EC pool: mirror of
// LibRadosList.ListObjectsStart — record positions while listing, then seek
// back (in reverse) and check the next entry matches the recorded set.
TEST_F(LibRadosListEC, ListObjectsStart) {
  SKIP_IF_CRIMSON();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  for (int i=0; i<16; ++i) {
    string n = stringify(i);
    ASSERT_EQ(0, rados_write(ioctx, n.c_str(), buf, sizeof(buf), 0));
  }
  rados_list_ctx_t ctx;
  ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
  // Map each PG hash position to the objects listed at that position.
  std::map<int, std::set<std::string> > pg_to_obj;
  const char *entry;
  while (rados_nobjects_list_next(ctx, &entry, NULL, NULL) == 0) {
    uint32_t pos = rados_nobjects_list_get_pg_hash_position(ctx);
    std::cout << entry << " " << pos << std::endl;
    pg_to_obj[pos].insert(entry);
  }
  rados_nobjects_list_close(ctx);
  std::map<int, std::set<std::string> >::reverse_iterator p =
    pg_to_obj.rbegin();
  ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
  while (p != pg_to_obj.rend()) {
    // seek() returns the position it landed on; must match what we asked.
    ASSERT_EQ((uint32_t)p->first, rados_nobjects_list_seek(ctx, p->first));
    ASSERT_EQ(0, rados_nobjects_list_next(ctx, &entry, NULL, NULL));
    std::cout << "have " << entry << " expect one of " << p->second << std::endl;
    ASSERT_TRUE(p->second.count(entry));
    ++p;
  }
  rados_nobjects_list_close(ctx);
}
// Listing after the pool has been deleted out from under the ioctx must
// fail with -ENOENT. The pool is removed via a mon command (not
// rados_pool_delete) so the ioctx keeps its (now stale) pool reference.
TEST_F(LibRadosListNP, ListObjectsError) {
  std::string pool_name;
  rados_t cluster;
  rados_ioctx_t ioctx;
  pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  ASSERT_EQ(0, rados_ioctx_create(cluster, pool_name.c_str(), &ioctx));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  rados_ioctx_set_namespace(ioctx, "");
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  //ASSERT_EQ(0, rados_pool_delete(cluster, pool_name.c_str()));
  {
    char *buf, *st;
    size_t buflen, stlen;
    // Delete the pool behind the ioctx's back via the mon.
    string c = "{\"prefix\":\"osd pool rm\",\"pool\": \"" + pool_name +
      "\",\"pool2\":\"" + pool_name +
      "\",\"yes_i_really_really_mean_it_not_faking\": true}";
    const char *cmd[2] = { c.c_str(), 0 };
    ASSERT_EQ(0, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
    // Make sure the client has seen the map with the pool gone.
    ASSERT_EQ(0, rados_wait_for_latest_osdmap(cluster));
  }
  rados_list_ctx_t ctx;
  ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
  const char *entry;
  ASSERT_EQ(-ENOENT, rados_nobjects_list_next(ctx, &entry, NULL, NULL));
  rados_nobjects_list_close(ctx);
  rados_ioctx_destroy(ioctx);
  rados_shutdown(cluster);
}
// ---------------------------------------------
// rados_object_list() enumeration: page through the whole pool in batches
// of up to 12 and verify every object is seen exactly once, on a pool with
// a non-power-of-two PG count to exercise the harder enumeration path.
TEST_F(LibRadosList, EnumerateObjects) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  const uint32_t n_objects = 16;
  for (unsigned i=0; i<n_objects; ++i) {
    ASSERT_EQ(0, rados_write(ioctx, stringify(i).c_str(), buf, sizeof(buf), 0));
  }
  // Ensure a non-power-of-two PG count to avoid only
  // touching the easy path.
  if (!is_crimson_cluster()) {
    ASSERT_TRUE(set_pg_num(&s_cluster, pool_name, 11).empty());
    ASSERT_TRUE(set_pgp_num(&s_cluster, pool_name, 11).empty());
  }
  std::set<std::string> saw_obj;
  rados_object_list_cursor c = rados_object_list_begin(ioctx);
  rados_object_list_cursor end = rados_object_list_end(ioctx);
  while(!rados_object_list_is_end(ioctx, c))
  {
    rados_object_list_item results[12];
    memset(results, 0, sizeof(rados_object_list_item) * 12);
    rados_object_list_cursor temp_end = rados_object_list_end(ioctx);
    // c is advanced in place to the continuation point for the next batch.
    int r = rados_object_list(ioctx, c, temp_end,
                              12, NULL, 0, results, &c);
    rados_object_list_cursor_free(ioctx, temp_end);
    ASSERT_GE(r, 0);
    for (int i = 0; i < r; ++i) {
      std::string oid(results[i].oid, results[i].oid_length);
      if (saw_obj.count(oid)) {
        std::cerr << "duplicate obj " << oid << std::endl;
      }
      // No object may be returned twice.
      ASSERT_FALSE(saw_obj.count(oid));
      saw_obj.insert(oid);
    }
    rados_object_list_free(12, results);
  }
  rados_object_list_cursor_free(ioctx, c);
  rados_object_list_cursor_free(ioctx, end);
  // Every written object must have been enumerated.
  for (unsigned i=0; i<n_objects; ++i) {
    if (!saw_obj.count(stringify(i))) {
      std::cerr << "missing object " << i << std::endl;
    }
    ASSERT_TRUE(saw_obj.count(stringify(i)));
  }
  ASSERT_EQ(n_objects, saw_obj.size());
}
// Sharded enumeration: split the full hash range into 5 slices with
// rados_object_list_slice() and enumerate each slice independently; the
// union of all slices must cover every object exactly once. Uses a
// non-power-of-two PG count to exercise the harder path.
TEST_F(LibRadosList, EnumerateObjectsSplit) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  const uint32_t n_objects = 16;
  for (unsigned i=0; i<n_objects; ++i) {
    ASSERT_EQ(0, rados_write(ioctx, stringify(i).c_str(), buf, sizeof(buf), 0));
  }
  // Ensure a non-power-of-two PG count to avoid only
  // touching the easy path.
  if (!is_crimson_cluster()) {
    if (auto error = set_pg_num(&s_cluster, pool_name, 11); !error.empty()) {
      GTEST_FAIL() << error;
    }
    if (auto error = set_pgp_num(&s_cluster, pool_name, 11); !error.empty()) {
      GTEST_FAIL() << error;
    }
  }
  rados_object_list_cursor begin = rados_object_list_begin(ioctx);
  rados_object_list_cursor end = rados_object_list_end(ioctx);
  // Step through an odd number of shards
  unsigned m = 5;
  std::set<std::string> saw_obj;
  for (unsigned n = 0; n < m; ++n) {
    // Allocate cursors for slice() to fill in (stray ';;' removed).
    rados_object_list_cursor shard_start = rados_object_list_begin(ioctx);
    rados_object_list_cursor shard_end = rados_object_list_end(ioctx);
    rados_object_list_slice(
      ioctx,
      begin,
      end,
      n,
      m,
      &shard_start,
      &shard_end);
    std::cout << "split " << n << "/" << m << " -> "
              << *(hobject_t*)shard_start << " "
              << *(hobject_t*)shard_end << std::endl;
    rados_object_list_cursor c = shard_start;
    //while(c < shard_end)
    while(rados_object_list_cursor_cmp(ioctx, c, shard_end) == -1)
    {
      rados_object_list_item results[12];
      memset(results, 0, sizeof(rados_object_list_item) * 12);
      int r = rados_object_list(ioctx,
                                c, shard_end,
                                12, NULL, 0, results, &c);
      ASSERT_GE(r, 0);
      for (int i = 0; i < r; ++i) {
        std::string oid(results[i].oid, results[i].oid_length);
        if (saw_obj.count(oid)) {
          std::cerr << "duplicate obj " << oid << std::endl;
        }
        // Slices must not overlap: no object appears twice.
        ASSERT_FALSE(saw_obj.count(oid));
        saw_obj.insert(oid);
      }
      rados_object_list_free(12, results);
    }
    rados_object_list_cursor_free(ioctx, shard_start);
    rados_object_list_cursor_free(ioctx, shard_end);
  }
  rados_object_list_cursor_free(ioctx, begin);
  rados_object_list_cursor_free(ioctx, end);
  // Union of all slices must cover every object.
  for (unsigned i=0; i<n_objects; ++i) {
    if (!saw_obj.count(stringify(i))) {
      std::cerr << "missing object " << i << std::endl;
    }
    ASSERT_TRUE(saw_obj.count(stringify(i)));
  }
  ASSERT_EQ(n_objects, saw_obj.size());
}
| 18,654 | 32.552158 | 103 | cc |
null | ceph-main/src/test/librados/list_cxx.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <errno.h>
#include <string>
#include <stdexcept>
#include "gtest/gtest.h"
#include "include/rados/librados.hpp"
#include "include/stringify.h"
#include "include/types.h"
#include "common/hobject.h"
#include "test/librados/test_cxx.h"
#include "test/librados/test_common.h"
#include "test/librados/testcase_cxx.h"
#include "global/global_context.h"
#include "crimson_utils.h"
using namespace librados;
typedef RadosTestPPNSCleanup LibRadosListPP;
typedef RadosTestECPPNSCleanup LibRadosListECPP;
// Basic C++-API listing: NObjectIterator over a pool containing one object
// must yield exactly "foo".
TEST_F(LibRadosListPP, ListObjectsPP) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  NObjectIterator iter(ioctx.nobjects_begin());
  bool foundit = false;
  while (iter != ioctx.nobjects_end()) {
    foundit = true;
    ASSERT_EQ((*iter).get_oid(), "foo");
    ++iter;
  }
  ASSERT_TRUE(foundit);
}
// An NObjectIterator can be rewound: after a full pass (and incrementing
// past the end, which must be safe), seek(0) restarts the listing and the
// object is found again.
TEST_F(LibRadosListPP, ListObjectsTwicePP) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  NObjectIterator iter(ioctx.nobjects_begin());
  bool foundit = false;
  while (iter != ioctx.nobjects_end()) {
    foundit = true;
    ASSERT_EQ((*iter).get_oid(), "foo");
    ++iter;
  }
  ASSERT_TRUE(foundit);
  // Incrementing an end iterator must leave it at end.
  ++iter;
  ASSERT_TRUE(iter == ioctx.nobjects_end());
  foundit = false;
  // Rewind to the start and list again.
  iter.seek(0);
  while (iter != ioctx.nobjects_end()) {
    foundit = true;
    ASSERT_EQ((*iter).get_oid(), "foo");
    ++iter;
  }
  ASSERT_TRUE(foundit);
}
// Copy semantics of NObjectIterator: copies advance independently, a copy
// remains valid after the originals are destroyed, and self-assignment
// leaves the iterator usable.
TEST_F(LibRadosListPP, ListObjectsCopyIterPP) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  // make sure this is still valid after the original iterators are gone
  NObjectIterator iter3;
  {
    NObjectIterator iter(ioctx.nobjects_begin());
    NObjectIterator iter2(iter);
    iter3 = iter2;
    ASSERT_EQ((*iter).get_oid(), "foo");
    ++iter;
    ASSERT_TRUE(iter == ioctx.nobjects_end());
    // Incrementing past end must be a no-op.
    ++iter;
    ASSERT_TRUE(iter == ioctx.nobjects_end());
    // Advancing iter must not have moved its copies.
    ASSERT_EQ(iter2->get_oid(), "foo");
    ASSERT_EQ(iter3->get_oid(), "foo");
    ++iter2;
    ASSERT_TRUE(iter2 == ioctx.nobjects_end());
  }
  ASSERT_EQ(iter3->get_oid(), "foo");
  // Deliberate self-assignment: iterator must stay valid afterwards.
  iter3 = iter3;
  ASSERT_EQ(iter3->get_oid(), "foo");
  ++iter3;
  ASSERT_TRUE(iter3 == ioctx.nobjects_end());
}
TEST_F(LibRadosListPP, ListObjectsEndIter) {
  // All end iterators compare equal, both to each other and to an iterator
  // that has been advanced past the last object.
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  NObjectIterator iter(ioctx.nobjects_begin());
  NObjectIterator iter_end(ioctx.nobjects_end());
  NObjectIterator iter_end2 = ioctx.nobjects_end();
  ASSERT_TRUE(iter_end == iter_end2);
  ASSERT_TRUE(iter_end == ioctx.nobjects_end());
  ASSERT_TRUE(iter_end2 == ioctx.nobjects_end());
  ASSERT_EQ(iter->get_oid(), "foo");
  ++iter;
  ASSERT_TRUE(iter == ioctx.nobjects_end());
  ASSERT_TRUE(iter == iter_end);
  ASSERT_TRUE(iter == iter_end2);
  // a copy of an exhausted iterator is also at the end
  NObjectIterator iter2 = iter;
  ASSERT_TRUE(iter2 == ioctx.nobjects_end());
  ASSERT_TRUE(iter2 == iter_end);
  ASSERT_TRUE(iter2 == iter_end2);
}
// List every object visible through 'ioctx' and check the result against
// 'myset' (expected names; "ns:oid" form when check_nspace == all_nspaces).
// Each returned object must be expected, and every expected object must be
// seen at least once; 'myset' is consumed in the process.
static void check_listpp(std::set<std::string>& myset, IoCtx& ioctx, const std::string &check_nspace)
{
  NObjectIterator iter(ioctx.nobjects_begin());
  std::set<std::string> orig_set(myset);
  /**
   * During splitting, we might see duplicate items.
   * We assert that every object returned is in myset and that
   * we don't hit ENOENT until we have hit every item in myset
   * at least once.
   */
  while (iter != ioctx.nobjects_end()) {
    std::string test_name;
    if (check_nspace == all_nspaces) {
      test_name = iter->get_nspace() + ":" + iter->get_oid();
    } else {
      ASSERT_TRUE(iter->get_nspace() == check_nspace);
      test_name = iter->get_oid();
    }
    ASSERT_TRUE(orig_set.end() != orig_set.find(test_name));
    myset.erase(test_name);
    ++iter;
  }
  ASSERT_TRUE(myset.empty());
}
TEST_F(LibRadosListPP, ListObjectsPPNS) {
  // Write objects into the default namespace, ns1 and ns2, then verify that
  // listing honours the ioctx namespace, including the all-namespaces mode.
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  // Create :foo1, :foo2, :foo3, n1:foo1, ns1:foo4, ns1:foo5, ns2:foo6, n2:foo7
  struct { const char* ns; const char* oid; } writes[] = {
    {"", "foo1"}, {"ns1", "foo1"}, {"", "foo2"}, {"", "foo3"},
    {"ns1", "foo4"}, {"ns1", "foo5"}, {"ns2", "foo6"}, {"ns2", "foo7"},
  };
  for (const auto& w : writes) {
    ioctx.set_namespace(w.ns);
    ASSERT_EQ(0, ioctx.write(w.oid, bl1, sizeof(buf), 0));
  }
  // the last write left the ioctx pointed at ns2
  ASSERT_EQ(std::string("ns2"), ioctx.get_namespace());
  std::set<std::string> def{"foo1", "foo2", "foo3"};
  std::set<std::string> ns1{"foo1", "foo4", "foo5"};
  std::set<std::string> ns2{"foo6", "foo7"};
  std::set<std::string> all{":foo1", ":foo2", ":foo3",
                            "ns1:foo1", "ns1:foo4", "ns1:foo5",
                            "ns2:foo6", "ns2:foo7"};
  ioctx.set_namespace("");
  check_listpp(def, ioctx, "");
  ioctx.set_namespace("ns1");
  check_listpp(ns1, ioctx, "ns1");
  ioctx.set_namespace("ns2");
  check_listpp(ns2, ioctx, "ns2");
  ioctx.set_namespace(all_nspaces);
  check_listpp(all, ioctx, all_nspaces);
}
TEST_F(LibRadosListPP, ListObjectsManyPP) {
  // Create 256 objects and check that listing visits every object and that
  // the PG hash positions reported form a contiguous range starting at 0.
  char data[128];
  memset(data, 0xcc, sizeof(data));
  bufferlist bl;
  bl.append(data, sizeof(data));
  for (int i = 0; i < 256; ++i) {
    ASSERT_EQ(0, ioctx.write(stringify(i), bl, bl.length(), 0));
  }
  std::set<std::string> seen_objects;
  std::set<int> seen_pgs;
  for (auto it = ioctx.nobjects_begin(); it != ioctx.nobjects_end(); ++it) {
    std::cout << it->get_oid()
	      << " " << it.get_pg_hash_position() << std::endl;
    seen_objects.insert(it->get_oid());
    seen_pgs.insert(it.get_pg_hash_position());
  }
  std::cout << "saw " << seen_pgs.size() << " pgs " << std::endl;
  // make sure they are 0..n
  for (unsigned i = 0; i < seen_pgs.size(); ++i)
    ASSERT_TRUE(seen_pgs.count(i));
}
TEST_F(LibRadosListPP, ListObjectsStartPP) {
  // Record which PG hash position each object enumerates at, then seek the
  // iterator to each position (highest first) and verify the object returned
  // belongs to that position.
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  for (int i=0; i<16; ++i) {
    ASSERT_EQ(0, ioctx.write(stringify(i), bl, bl.length(), 0));
  }
  librados::NObjectIterator it = ioctx.nobjects_begin();
  std::map<int, std::set<std::string> > pg_to_obj;
  for (; it != ioctx.nobjects_end(); ++it) {
    std::cout << it->get_oid() << " " << it.get_pg_hash_position() << std::endl;
    pg_to_obj[it.get_pg_hash_position()].insert(it->get_oid());
  }
  std::map<int, std::set<std::string> >::reverse_iterator p =
    pg_to_obj.rbegin();
  it = ioctx.nobjects_begin(p->first);
  while (p != pg_to_obj.rend()) {
    // seek() returns the hash position actually reached
    ASSERT_EQ((uint32_t)p->first, it.seek(p->first));
    std::cout << "have " << it->get_oid() << " expect one of " << p->second << std::endl;
    ASSERT_TRUE(p->second.count(it->get_oid()));
    ++p;
  }
}
TEST_F(LibRadosListPP, ListObjectsCursorNSPP) {
  // Exercise listing cursors across namespaces: seeking back to any recorded
  // cursor must return the same object (and namespace) first seen there.
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  const int max_objs = 16;
  map<string, string> oid_to_ns;
  // spread 16 objects over namespaces ns0..ns3, four objects apiece
  for (int i=0; i<max_objs; ++i) {
    stringstream ss;
    ss << "ns" << i / 4;
    ioctx.set_namespace(ss.str());
    string oid = stringify(i);
    ASSERT_EQ(0, ioctx.write(oid, bl, bl.length(), 0));
    oid_to_ns[oid] = ss.str();
  }
  ioctx.set_namespace(all_nspaces);
  librados::NObjectIterator it = ioctx.nobjects_begin();
  std::map<librados::ObjectCursor, string> cursor_to_obj;
  int count = 0;
  librados::ObjectCursor seek_cursor;
  map<string, list<librados::ObjectCursor> > ns_to_cursors;
  // first pass: only log the enumeration order and cursors
  for (it = ioctx.nobjects_begin(); it != ioctx.nobjects_end(); ++it) {
    cout << "> oid=" << it->get_oid() << " cursor=" << it.get_cursor() << std::endl;
  }
  vector<string> objs_order;
  // second pass: record each cursor and verify that an immediate seek back
  // to it lands on the same object
  for (it = ioctx.nobjects_begin(); it != ioctx.nobjects_end(); ++it, ++count) {
    librados::ObjectCursor cursor = it.get_cursor();
    string oid = it->get_oid();
    std::cout << oid << " " << it.get_pg_hash_position() << std::endl;
    cout << ": oid=" << oid << " cursor=" << it.get_cursor() << std::endl;
    cursor_to_obj[cursor] = oid;
    ASSERT_EQ(oid_to_ns[oid], it->get_nspace());
    it.seek(cursor);
    cout << ": seek to " << cursor << " it.cursor=" << it.get_cursor() << std::endl;
    ASSERT_EQ(oid, it->get_oid());
    ASSERT_LT(count, max_objs); /* avoid infinite loops due to bad seek */
    ns_to_cursors[it->get_nspace()].push_back(cursor);
    if (count == max_objs/2) {
      seek_cursor = cursor;
    }
    objs_order.push_back(it->get_oid());
  }
  ASSERT_EQ(count, max_objs);
  /* check that reading past seek also works */
  cout << "seek_cursor=" << seek_cursor << std::endl;
  it.seek(seek_cursor);
  for (count = max_objs/2; count < max_objs; ++count, ++it) {
    ASSERT_EQ(objs_order[count], it->get_oid());
  }
  /* seek to all cursors, check that we get expected obj */
  for (auto& niter : ns_to_cursors) {
    const string& ns = niter.first;
    list<librados::ObjectCursor>& cursors = niter.second;
    for (auto& cursor : cursors) {
      cout << ": seek to " << cursor << std::endl;
      it.seek(cursor);
      ASSERT_EQ(cursor, it.get_cursor());
      string& expected_oid = cursor_to_obj[cursor];
      cout << ": it->get_cursor()=" << it.get_cursor() << " expected=" << cursor << std::endl;
      cout << ": it->get_oid()=" << it->get_oid() << " expected=" << expected_oid << std::endl;
      // fixed: log the namespace here (this line used to print the oid)
      cout << ": it->get_nspace()=" << it->get_nspace() << " expected=" << ns << std::endl;
      ASSERT_EQ(expected_oid, it->get_oid());
      ASSERT_EQ(it->get_nspace(), ns);
    }
  }
}
TEST_F(LibRadosListPP, ListObjectsCursorPP) {
  // Record a cursor for every object during enumeration, then seek back to
  // each cursor (in reverse order) and verify the expected object comes up.
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  const int max_objs = 16;
  for (int i=0; i<max_objs; ++i) {
    stringstream ss;
    ss << "ns" << i / 4;
    ioctx.set_namespace(ss.str());
    ASSERT_EQ(0, ioctx.write(stringify(i), bl, bl.length(), 0));
  }
  ioctx.set_namespace(all_nspaces);
  librados::NObjectIterator it = ioctx.nobjects_begin();
  std::map<librados::ObjectCursor, string> cursor_to_obj;
  int count = 0;
  for (; it != ioctx.nobjects_end(); ++it, ++count) {
    librados::ObjectCursor cursor = it.get_cursor();
    string oid = it->get_oid();
    std::cout << oid << " " << it.get_pg_hash_position() << std::endl;
    cout << ": oid=" << oid << " cursor=" << it.get_cursor() << std::endl;
    cursor_to_obj[cursor] = oid;
    // an immediate seek back to the current cursor must be a no-op
    it.seek(cursor);
    cout << ": seek to " << cursor << std::endl;
    ASSERT_EQ(oid, it->get_oid());
    ASSERT_LT(count, max_objs); /* avoid infinite loops due to bad seek */
  }
  ASSERT_EQ(count, max_objs);
  auto p = cursor_to_obj.rbegin();
  it = ioctx.nobjects_begin();
  while (p != cursor_to_obj.rend()) {
    cout << ": seek to " << p->first << std::endl;
    it.seek(p->first);
    ASSERT_EQ(p->first, it.get_cursor());
    cout << ": it->get_cursor()=" << it.get_cursor() << " expected=" << p->first << std::endl;
    cout << ": it->get_oid()=" << it->get_oid() << " expected=" << p->second << std::endl;
    ASSERT_EQ(p->second, it->get_oid());
    // a fresh iterator constructed from the cursor must agree
    librados::NObjectIterator it2 = ioctx.nobjects_begin(it.get_cursor());
    ASSERT_EQ(it2->get_oid(), it->get_oid());
    ++p;
  }
}
TEST_F(LibRadosListECPP, ListObjectsPP) {
  SKIP_IF_CRIMSON();
  // Write a single object to an EC pool and verify listing returns "foo".
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  bool found = false;
  for (NObjectIterator it = ioctx.nobjects_begin();
       it != ioctx.nobjects_end(); ++it) {
    found = true;
    ASSERT_EQ(it->get_oid(), "foo");
  }
  ASSERT_TRUE(found);
}
TEST_F(LibRadosListECPP, ListObjectsTwicePP) {
  SKIP_IF_CRIMSON();
  // EC-pool variant: an exhausted listing iterator can be rewound with
  // seek(0) and must enumerate the pool contents again.
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  NObjectIterator iter(ioctx.nobjects_begin());
  bool foundit = false;
  while (iter != ioctx.nobjects_end()) {
    foundit = true;
    ASSERT_EQ((*iter).get_oid(), "foo");
    ++iter;
  }
  ASSERT_TRUE(foundit);
  // advancing past the end must leave the iterator at the end
  ++iter;
  ASSERT_TRUE(iter == ioctx.nobjects_end());
  foundit = false;
  // rewind to the first hash position and list again
  iter.seek(0);
  while (iter != ioctx.nobjects_end()) {
    foundit = true;
    ASSERT_EQ((*iter).get_oid(), "foo");
    ++iter;
  }
  ASSERT_TRUE(foundit);
}
TEST_F(LibRadosListECPP, ListObjectsCopyIterPP) {
  SKIP_IF_CRIMSON();
  // EC-pool variant: iterator copies advance independently and stay valid
  // after the iterator they were copied from is destroyed.
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  // make sure this is still valid after the original iterators are gone
  NObjectIterator iter3;
  {
    NObjectIterator iter(ioctx.nobjects_begin());
    NObjectIterator iter2(iter);
    iter3 = iter2;
    ASSERT_EQ((*iter).get_oid(), "foo");
    ++iter;
    ASSERT_TRUE(iter == ioctx.nobjects_end());
    ++iter;
    ASSERT_TRUE(iter == ioctx.nobjects_end());
    // advancing 'iter' must not have moved the copies
    ASSERT_EQ(iter2->get_oid(), "foo");
    ASSERT_EQ(iter3->get_oid(), "foo");
    ++iter2;
    ASSERT_TRUE(iter2 == ioctx.nobjects_end());
  }
  ASSERT_EQ(iter3->get_oid(), "foo");
  // deliberate self-assignment: must be a no-op
  iter3 = iter3;
  ASSERT_EQ(iter3->get_oid(), "foo");
  ++iter3;
  ASSERT_TRUE(iter3 == ioctx.nobjects_end());
}
TEST_F(LibRadosListECPP, ListObjectsEndIter) {
  SKIP_IF_CRIMSON();
  // EC-pool variant: all end iterators compare equal, both to each other and
  // to an iterator advanced past the last object.
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  NObjectIterator iter(ioctx.nobjects_begin());
  NObjectIterator iter_end(ioctx.nobjects_end());
  NObjectIterator iter_end2 = ioctx.nobjects_end();
  ASSERT_TRUE(iter_end == iter_end2);
  ASSERT_TRUE(iter_end == ioctx.nobjects_end());
  ASSERT_TRUE(iter_end2 == ioctx.nobjects_end());
  ASSERT_EQ(iter->get_oid(), "foo");
  ++iter;
  ASSERT_TRUE(iter == ioctx.nobjects_end());
  ASSERT_TRUE(iter == iter_end);
  ASSERT_TRUE(iter == iter_end2);
  // a copy of an exhausted iterator is also at the end
  NObjectIterator iter2 = iter;
  ASSERT_TRUE(iter2 == ioctx.nobjects_end());
  ASSERT_TRUE(iter2 == iter_end);
  ASSERT_TRUE(iter2 == iter_end2);
}
TEST_F(LibRadosListECPP, ListObjectsPPNS) {
  SKIP_IF_CRIMSON();
  // Write objects into the default namespace, ns1 and ns2 on an EC pool,
  // then verify that listing honours the ioctx namespace.
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  // Create :foo1, :foo2, :foo3, n1:foo1, ns1:foo4, ns1:foo5, ns2:foo6, n2:foo7
  struct { const char* ns; const char* oid; } writes[] = {
    {"", "foo1"}, {"ns1", "foo1"}, {"", "foo2"}, {"", "foo3"},
    {"ns1", "foo4"}, {"ns1", "foo5"}, {"ns2", "foo6"}, {"ns2", "foo7"},
  };
  for (const auto& w : writes) {
    ioctx.set_namespace(w.ns);
    ASSERT_EQ(0, ioctx.write(w.oid, bl1, sizeof(buf), 0));
  }
  std::set<std::string> def{"foo1", "foo2", "foo3"};
  std::set<std::string> ns1{"foo1", "foo4", "foo5"};
  std::set<std::string> ns2{"foo6", "foo7"};
  ioctx.set_namespace("");
  check_listpp(def, ioctx, "");
  ioctx.set_namespace("ns1");
  check_listpp(ns1, ioctx, "ns1");
  ioctx.set_namespace("ns2");
  check_listpp(ns2, ioctx, "ns2");
}
TEST_F(LibRadosListECPP, ListObjectsManyPP) {
  SKIP_IF_CRIMSON();
  // Create 256 objects on an EC pool and check that listing visits every
  // object and that the reported PG hash positions form a range 0..n.
  char data[128];
  memset(data, 0xcc, sizeof(data));
  bufferlist bl;
  bl.append(data, sizeof(data));
  for (int i = 0; i < 256; ++i) {
    ASSERT_EQ(0, ioctx.write(stringify(i), bl, bl.length(), 0));
  }
  std::set<std::string> seen_objects;
  std::set<int> seen_pgs;
  for (auto it = ioctx.nobjects_begin(); it != ioctx.nobjects_end(); ++it) {
    std::cout << it->get_oid()
	      << " " << it.get_pg_hash_position() << std::endl;
    seen_objects.insert(it->get_oid());
    seen_pgs.insert(it.get_pg_hash_position());
  }
  std::cout << "saw " << seen_pgs.size() << " pgs " << std::endl;
  // make sure they are 0..n
  for (unsigned i = 0; i < seen_pgs.size(); ++i)
    ASSERT_TRUE(seen_pgs.count(i));
}
TEST_F(LibRadosListECPP, ListObjectsStartPP) {
  SKIP_IF_CRIMSON();
  // EC-pool variant: record each object's PG hash position, then seek to
  // every position (highest first) and verify the returned object belongs.
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  for (int i=0; i<16; ++i) {
    ASSERT_EQ(0, ioctx.write(stringify(i), bl, bl.length(), 0));
  }
  librados::NObjectIterator it = ioctx.nobjects_begin();
  std::map<int, std::set<std::string> > pg_to_obj;
  for (; it != ioctx.nobjects_end(); ++it) {
    std::cout << it->get_oid() << " " << it.get_pg_hash_position() << std::endl;
    pg_to_obj[it.get_pg_hash_position()].insert(it->get_oid());
  }
  std::map<int, std::set<std::string> >::reverse_iterator p =
    pg_to_obj.rbegin();
  it = ioctx.nobjects_begin(p->first);
  while (p != pg_to_obj.rend()) {
    // seek() returns the hash position actually reached
    ASSERT_EQ((uint32_t)p->first, it.seek(p->first));
    std::cout << "have " << it->get_oid() << " expect one of " << p->second << std::endl;
    ASSERT_TRUE(p->second.count(it->get_oid()));
    ++p;
  }
}
TEST_F(LibRadosListPP, ListObjectsFilterPP) {
  // Listing with a "plain" xattr filter must return only the object whose
  // xattr value matches, exactly once.
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist obj_content;
  obj_content.append(buf, sizeof(buf));
  std::string target_str = "content";
  // Write xattr bare, no ::encod'ing
  bufferlist target_val;
  target_val.append(target_str);
  bufferlist nontarget_val;
  nontarget_val.append("rhubarb");
  ASSERT_EQ(0, ioctx.write("has_xattr", obj_content, obj_content.length(), 0));
  ASSERT_EQ(0, ioctx.write("has_wrong_xattr", obj_content, obj_content.length(), 0));
  ASSERT_EQ(0, ioctx.write("no_xattr", obj_content, obj_content.length(), 0));
  ASSERT_EQ(0, ioctx.setxattr("has_xattr", "theattr", target_val));
  ASSERT_EQ(0, ioctx.setxattr("has_wrong_xattr", "theattr", nontarget_val));
  // filter payload: filter name, xattr key (with leading '_'), expected value
  bufferlist filter_bl;
  std::string filter_name = "plain";
  encode(filter_name, filter_bl);
  encode("_theattr", filter_bl);
  encode(target_str, filter_bl);
  NObjectIterator iter(ioctx.nobjects_begin(filter_bl));
  bool foundit = false;
  int k = 0;
  while (iter != ioctx.nobjects_end()) {
    foundit = true;
    // We should only see the object that matches the filter
    ASSERT_EQ((*iter).get_oid(), "has_xattr");
    // We should only see it once
    ASSERT_EQ(k, 0);
    ++iter;
    ++k;
  }
  ASSERT_TRUE(foundit);
}
TEST_F(LibRadosListPP, EnumerateObjectsPP) {
  // Page through the pool with object_list() in chunks of up to 12 and
  // verify every object is returned exactly once.
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  const uint32_t n_objects = 16;
  for (unsigned i=0; i<n_objects; ++i) {
    ASSERT_EQ(0, ioctx.write(stringify(i), bl, sizeof(buf), 0));
  }
  std::set<std::string> saw_obj;
  ObjectCursor c = ioctx.object_list_begin();
  ObjectCursor end = ioctx.object_list_end();
  while(!ioctx.object_list_is_end(c))
  {
    std::vector<ObjectItem> result;
    // 'c' is advanced to the position after the last returned item
    int r = ioctx.object_list(c, end, 12, {}, &result, &c);
    ASSERT_GE(r, 0);
    ASSERT_EQ(r, (int)result.size());
    for (int i = 0; i < r; ++i) {
      auto oid = result[i].oid;
      if (saw_obj.count(oid)) {
          std::cerr << "duplicate obj " << oid << std::endl;
      }
      ASSERT_FALSE(saw_obj.count(oid));
      saw_obj.insert(oid);
    }
  }
  for (unsigned i=0; i<n_objects; ++i) {
    if (!saw_obj.count(stringify(i))) {
        std::cerr << "missing object " << i << std::endl;
    }
    ASSERT_TRUE(saw_obj.count(stringify(i)));
  }
  ASSERT_EQ(n_objects, saw_obj.size());
}
TEST_F(LibRadosListPP, EnumerateObjectsSplitPP) {
  // Slice the listing keyspace into an odd number of shards with
  // object_list_slice() and verify the shards together cover every object
  // exactly once.
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  const uint32_t n_objects = 16;
  for (unsigned i=0; i<n_objects; ++i) {
    ASSERT_EQ(0, ioctx.write(stringify(i), bl, sizeof(buf), 0));
  }
  ObjectCursor begin = ioctx.object_list_begin();
  ObjectCursor end = ioctx.object_list_end();
  // Step through an odd number of shards
  unsigned m = 5;
  std::set<std::string> saw_obj;
  for (unsigned n = 0; n < m; ++n) {
    ObjectCursor shard_start;
    ObjectCursor shard_end;
    // compute the cursor range for shard n of m
    ioctx.object_list_slice(
      begin,
      end,
      n,
      m,
      &shard_start,
      &shard_end);
    ObjectCursor c(shard_start);
    while(c < shard_end)
    {
      std::vector<ObjectItem> result;
      int r = ioctx.object_list(c, shard_end, 12, {}, &result, &c);
      ASSERT_GE(r, 0);
      for (const auto & i : result) {
        const auto &oid = i.oid;
        if (saw_obj.count(oid)) {
            std::cerr << "duplicate obj " << oid << std::endl;
        }
        ASSERT_FALSE(saw_obj.count(oid));
        saw_obj.insert(oid);
      }
    }
  }
  for (unsigned i=0; i<n_objects; ++i) {
    if (!saw_obj.count(stringify(i))) {
        std::cerr << "missing object " << i << std::endl;
    }
    ASSERT_TRUE(saw_obj.count(stringify(i)));
  }
  ASSERT_EQ(n_objects, saw_obj.size());
}
TEST_F(LibRadosListPP, EnumerateObjectsFilterPP) {
  // object_list() with a "plain" xattr filter must return only the matching
  // object, exactly once.
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist obj_content;
  obj_content.append(buf, sizeof(buf));
  std::string target_str = "content";
  // Write xattr bare, no ::encod'ing
  bufferlist target_val;
  target_val.append(target_str);
  bufferlist nontarget_val;
  nontarget_val.append("rhubarb");
  ASSERT_EQ(0, ioctx.write("has_xattr", obj_content, obj_content.length(), 0));
  ASSERT_EQ(0, ioctx.write("has_wrong_xattr", obj_content, obj_content.length(), 0));
  ASSERT_EQ(0, ioctx.write("no_xattr", obj_content, obj_content.length(), 0));
  ASSERT_EQ(0, ioctx.setxattr("has_xattr", "theattr", target_val));
  ASSERT_EQ(0, ioctx.setxattr("has_wrong_xattr", "theattr", nontarget_val));
  // filter payload: filter name, xattr key (with leading '_'), expected value
  bufferlist filter_bl;
  std::string filter_name = "plain";
  encode(filter_name, filter_bl);
  encode("_theattr", filter_bl);
  encode(target_str, filter_bl);
  ObjectCursor c = ioctx.object_list_begin();
  ObjectCursor end = ioctx.object_list_end();
  bool foundit = false;
  while(!ioctx.object_list_is_end(c))
  {
    std::vector<ObjectItem> result;
    int r = ioctx.object_list(c, end, 12, filter_bl, &result, &c);
    ASSERT_GE(r, 0);
    ASSERT_EQ(r, (int)result.size());
    for (int i = 0; i < r; ++i) {
      auto oid = result[i].oid;
      // We should only see the object that matches the filter
      ASSERT_EQ(oid, "has_xattr");
      // We should only see it once
      ASSERT_FALSE(foundit);
      foundit = true;
    }
  }
  ASSERT_TRUE(foundit);
}
| 23,354 | 28.827586 | 101 | cc |
null | ceph-main/src/test/librados/lock.cc | #include "include/rados/librados.h"
#include "test/librados/test.h"
#include "test/librados/TestCase.h"
#include "cls/lock/cls_lock_client.h"
#include <algorithm>
#include <chrono>
#include <thread>
#include <errno.h>
#include "gtest/gtest.h"
#include <sys/time.h>
#include "crimson_utils.h"
using namespace std::chrono_literals;
typedef RadosTest LibRadosLock;
typedef RadosTestEC LibRadosLockEC;
TEST_F(LibRadosLock, LockExclusive) {
  // A second exclusive lock attempt with the same cookie must fail EEXIST.
  auto take_lock = [this] {
    return rados_lock_exclusive(ioctx, "foo", "TestLock1", "Cookie", "", NULL, 0);
  };
  ASSERT_EQ(0, take_lock());
  ASSERT_EQ(-EEXIST, take_lock());
}
TEST_F(LibRadosLock, LockShared) {
  // The same client re-taking a shared lock with the same cookie fails EEXIST.
  auto take_lock = [this] {
    return rados_lock_shared(ioctx, "foo", "TestLock2", "Cookie", "Tag", "", NULL, 0);
  };
  ASSERT_EQ(0, take_lock());
  ASSERT_EQ(-EEXIST, take_lock());
}
TEST_F(LibRadosLock, LockExclusiveDur) {
  // An exclusive lock taken with a 1-second duration must become
  // re-acquirable once it expires; wait_until polls every 0.1s for that.
  struct timeval tv;
  tv.tv_sec = 1;
  tv.tv_usec = 0;
  auto lock_exclusive = [this](timeval* tv) {
    return rados_lock_exclusive(ioctx, "foo", "TestLock3", "Cookie", "", tv, 0);
  };
  constexpr int expected = 0;
  ASSERT_EQ(expected, lock_exclusive(&tv));
  // nullptr duration => lock forever; succeeds only after the first expires
  ASSERT_EQ(expected, wait_until(1.0s, 0.1s, expected, lock_exclusive, nullptr));
}
TEST_F(LibRadosLock, LockSharedDur) {
  // A shared lock taken with a 1-second duration must become re-acquirable
  // once it expires; wait_until polls every 0.1s for that.
  struct timeval tv;
  tv.tv_sec = 1;
  tv.tv_usec = 0;
  auto lock_shared = [this](timeval* tv) {
    return rados_lock_shared(ioctx, "foo", "TestLock4", "Cookie", "Tag", "", tv, 0);
  };
  constexpr int expected = 0;
  ASSERT_EQ(expected, lock_shared(&tv));
  // nullptr duration => lock forever; succeeds only after the first expires
  ASSERT_EQ(expected, wait_until(1.0s, 0.1s, expected, lock_shared, nullptr));
}
TEST_F(LibRadosLock, LockMayRenew) {
  // Re-locking is refused by default but allowed with LOCK_FLAG_MAY_RENEW.
  auto take_lock = [this](int flags) {
    return rados_lock_exclusive(ioctx, "foo", "TestLock5", "Cookie", "", NULL, flags);
  };
  ASSERT_EQ(0, take_lock(0));
  ASSERT_EQ(-EEXIST, take_lock(0));
  ASSERT_EQ(0, take_lock(LOCK_FLAG_MAY_RENEW));
}
TEST_F(LibRadosLock, Unlock) {
  // After unlocking, the same exclusive lock can be taken again.
  auto take_lock = [this] {
    return rados_lock_exclusive(ioctx, "foo", "TestLock6", "Cookie", "", NULL, 0);
  };
  ASSERT_EQ(0, take_lock());
  ASSERT_EQ(0, rados_unlock(ioctx, "foo", "TestLock6", "Cookie"));
  ASSERT_EQ(0, take_lock());
}
TEST_F(LibRadosLock, ListLockers) {
  // Verify rados_list_lockers(): no lockers after unlock, ERANGE when the
  // output buffers are reported too small, and correct locker details
  // (client id, cookie, tag) for a held shared lock.
  int exclusive;
  char tag[1024];
  char clients[1024];
  char cookies[1024];
  char addresses[1024];
  size_t tag_len = 1024;
  size_t clients_len = 1024;
  size_t cookies_len = 1024;
  size_t addresses_len = 1024;
  std::stringstream sstm;
  sstm << "client." << rados_get_instance_id(cluster);
  std::string me = sstm.str();
  ASSERT_EQ(0, rados_lock_shared(ioctx, "foo", "TestLock7", "Cookie", "Tag", "", NULL, 0));
  ASSERT_EQ(0, rados_unlock(ioctx, "foo", "TestLock7", "Cookie"));
  // no lockers are expected once the lock has been released
  ASSERT_EQ(0, rados_list_lockers(ioctx, "foo", "TestLock7", &exclusive, tag, &tag_len, clients, &clients_len, cookies, &cookies_len, addresses, &addresses_len ));
  ASSERT_EQ(0, rados_lock_shared(ioctx, "foo", "TestLock7", "Cookie", "Tag", "", NULL, 0));
  // the previous call updated the *_len outputs, leaving them too small for
  // the now-present locker: expect -ERANGE (was a magic -34 literal)
  ASSERT_EQ(-ERANGE, rados_list_lockers(ioctx, "foo", "TestLock7", &exclusive, tag, &tag_len, clients, &clients_len, cookies, &cookies_len, addresses, &addresses_len ));
  tag_len = 1024;
  clients_len = 1024;
  cookies_len = 1024;
  addresses_len = 1024;
  // with full-size buffers again, exactly one (shared) locker is reported
  ASSERT_EQ(1, rados_list_lockers(ioctx, "foo", "TestLock7", &exclusive, tag, &tag_len, clients, &clients_len, cookies, &cookies_len, addresses, &addresses_len ));
  ASSERT_EQ(0, exclusive);
  ASSERT_EQ(0, strcmp(tag, "Tag"));
  ASSERT_EQ(strlen("Tag") + 1, tag_len);
  ASSERT_EQ(0, strcmp(me.c_str(), clients));
  ASSERT_EQ(me.size() + 1, clients_len);
  ASSERT_EQ(0, strcmp(cookies, "Cookie"));
  ASSERT_EQ(strlen("Cookie") + 1, cookies_len);
}
TEST_F(LibRadosLock, BreakLock) {
  // Take an exclusive lock, confirm this client is reported as the sole
  // locker, then break the lock on its behalf with rados_break_lock().
  int exclusive;
  char tag[1024];
  char clients[1024];
  char cookies[1024];
  char addresses[1024];
  size_t tag_len = 1024;
  size_t clients_len = 1024;
  size_t cookies_len = 1024;
  size_t addresses_len = 1024;
  std::stringstream sstm;
  sstm << "client." << rados_get_instance_id(cluster);
  std::string me = sstm.str();
  ASSERT_EQ(0, rados_lock_exclusive(ioctx, "foo", "TestLock8", "Cookie", "", NULL, 0));
  ASSERT_EQ(1, rados_list_lockers(ioctx, "foo", "TestLock8", &exclusive, tag, &tag_len, clients, &clients_len, cookies, &cookies_len, addresses, &addresses_len ));
  ASSERT_EQ(1, exclusive);
  ASSERT_EQ(0, strcmp(tag, ""));
  ASSERT_EQ(1U, tag_len);
  ASSERT_EQ(0, strcmp(me.c_str(), clients));
  ASSERT_EQ(me.size() + 1, clients_len);
  ASSERT_EQ(0, strcmp(cookies, "Cookie"));
  ASSERT_EQ(strlen("Cookie") + 1, cookies_len);
  ASSERT_EQ(0, rados_break_lock(ioctx, "foo", "TestLock8", clients, "Cookie"));
}
// EC testing
TEST_F(LibRadosLockEC, LockExclusive) {
  SKIP_IF_CRIMSON();
  // A second exclusive lock attempt with the same cookie must fail EEXIST.
  auto take_lock = [this] {
    return rados_lock_exclusive(ioctx, "foo", "TestLockEC1", "Cookie", "", NULL, 0);
  };
  ASSERT_EQ(0, take_lock());
  ASSERT_EQ(-EEXIST, take_lock());
}
TEST_F(LibRadosLockEC, LockShared) {
  SKIP_IF_CRIMSON();
  // The same client re-taking a shared lock with the same cookie fails EEXIST.
  auto take_lock = [this] {
    return rados_lock_shared(ioctx, "foo", "TestLockEC2", "Cookie", "Tag", "", NULL, 0);
  };
  ASSERT_EQ(0, take_lock());
  ASSERT_EQ(-EEXIST, take_lock());
}
TEST_F(LibRadosLockEC, LockExclusiveDur) {
  SKIP_IF_CRIMSON();
  // EC-pool variant: an exclusive lock with a 1-second duration must become
  // re-acquirable once it expires; wait_until polls every 0.1s.
  struct timeval tv;
  tv.tv_sec = 1;
  tv.tv_usec = 0;
  auto lock_exclusive = [this](timeval* tv) {
    return rados_lock_exclusive(ioctx, "foo", "TestLockEC3", "Cookie", "", tv, 0);
  };
  constexpr int expected = 0;
  ASSERT_EQ(expected, lock_exclusive(&tv));
  // nullptr duration => lock forever; succeeds only after the first expires
  ASSERT_EQ(expected, wait_until(1.0s, 0.1s, expected, lock_exclusive, nullptr));
}
TEST_F(LibRadosLockEC, LockSharedDur) {
  SKIP_IF_CRIMSON();
  // EC-pool variant: a shared lock with a 1-second duration must become
  // re-acquirable once it expires; wait_until polls every 0.1s.
  struct timeval tv;
  tv.tv_sec = 1;
  tv.tv_usec = 0;
  auto lock_shared = [this](timeval* tv) {
    return rados_lock_shared(ioctx, "foo", "TestLockEC4", "Cookie", "Tag", "", tv, 0);
  };
  constexpr int expected = 0;
  ASSERT_EQ(expected, lock_shared(&tv));
  // nullptr duration => lock forever; succeeds only after the first expires
  ASSERT_EQ(expected, wait_until(1.0s, 0.1s, expected, lock_shared, nullptr));
}
TEST_F(LibRadosLockEC, LockMayRenew) {
  SKIP_IF_CRIMSON();
  // Re-locking is refused by default but allowed with LOCK_FLAG_MAY_RENEW.
  auto take_lock = [this](int flags) {
    return rados_lock_exclusive(ioctx, "foo", "TestLockEC5", "Cookie", "", NULL, flags);
  };
  ASSERT_EQ(0, take_lock(0));
  ASSERT_EQ(-EEXIST, take_lock(0));
  ASSERT_EQ(0, take_lock(LOCK_FLAG_MAY_RENEW));
}
TEST_F(LibRadosLockEC, Unlock) {
  SKIP_IF_CRIMSON();
  // After unlocking, the same exclusive lock can be taken again.
  auto take_lock = [this] {
    return rados_lock_exclusive(ioctx, "foo", "TestLockEC6", "Cookie", "", NULL, 0);
  };
  ASSERT_EQ(0, take_lock());
  ASSERT_EQ(0, rados_unlock(ioctx, "foo", "TestLockEC6", "Cookie"));
  ASSERT_EQ(0, take_lock());
}
TEST_F(LibRadosLockEC, ListLockers) {
  SKIP_IF_CRIMSON();
  // EC-pool variant of ListLockers: no lockers after unlock, ERANGE when the
  // output buffers are reported too small, and correct locker details.
  int exclusive;
  char tag[1024];
  char clients[1024];
  char cookies[1024];
  char addresses[1024];
  size_t tag_len = 1024;
  size_t clients_len = 1024;
  size_t cookies_len = 1024;
  size_t addresses_len = 1024;
  std::stringstream sstm;
  sstm << "client." << rados_get_instance_id(cluster);
  std::string me = sstm.str();
  ASSERT_EQ(0, rados_lock_shared(ioctx, "foo", "TestLockEC7", "Cookie", "Tag", "", NULL, 0));
  ASSERT_EQ(0, rados_unlock(ioctx, "foo", "TestLockEC7", "Cookie"));
  // no lockers are expected once the lock has been released
  ASSERT_EQ(0, rados_list_lockers(ioctx, "foo", "TestLockEC7", &exclusive, tag, &tag_len, clients, &clients_len, cookies, &cookies_len, addresses, &addresses_len ));
  ASSERT_EQ(0, rados_lock_shared(ioctx, "foo", "TestLockEC7", "Cookie", "Tag", "", NULL, 0));
  // the previous call updated the *_len outputs, leaving them too small for
  // the now-present locker: expect -ERANGE (was a magic -34 literal)
  ASSERT_EQ(-ERANGE, rados_list_lockers(ioctx, "foo", "TestLockEC7", &exclusive, tag, &tag_len, clients, &clients_len, cookies, &cookies_len, addresses, &addresses_len ));
  tag_len = 1024;
  clients_len = 1024;
  cookies_len = 1024;
  addresses_len = 1024;
  // with full-size buffers again, exactly one (shared) locker is reported
  ASSERT_EQ(1, rados_list_lockers(ioctx, "foo", "TestLockEC7", &exclusive, tag, &tag_len, clients, &clients_len, cookies, &cookies_len, addresses, &addresses_len ));
  ASSERT_EQ(0, exclusive);
  ASSERT_EQ(0, strcmp(tag, "Tag"));
  ASSERT_EQ(strlen("Tag") + 1, tag_len);
  ASSERT_EQ(0, strcmp(me.c_str(), clients));
  ASSERT_EQ(me.size() + 1, clients_len);
  ASSERT_EQ(0, strcmp(cookies, "Cookie"));
  ASSERT_EQ(strlen("Cookie") + 1, cookies_len);
}
TEST_F(LibRadosLockEC, BreakLock) {
  SKIP_IF_CRIMSON();
  // EC-pool variant: take an exclusive lock, confirm this client is the sole
  // locker, then break the lock on its behalf with rados_break_lock().
  int exclusive;
  char tag[1024];
  char clients[1024];
  char cookies[1024];
  char addresses[1024];
  size_t tag_len = 1024;
  size_t clients_len = 1024;
  size_t cookies_len = 1024;
  size_t addresses_len = 1024;
  std::stringstream sstm;
  sstm << "client." << rados_get_instance_id(cluster);
  std::string me = sstm.str();
  ASSERT_EQ(0, rados_lock_exclusive(ioctx, "foo", "TestLockEC8", "Cookie", "", NULL, 0));
  ASSERT_EQ(1, rados_list_lockers(ioctx, "foo", "TestLockEC8", &exclusive, tag, &tag_len, clients, &clients_len, cookies, &cookies_len, addresses, &addresses_len ));
  ASSERT_EQ(1, exclusive);
  ASSERT_EQ(0, strcmp(tag, ""));
  ASSERT_EQ(1U, tag_len);
  ASSERT_EQ(0, strcmp(me.c_str(), clients));
  ASSERT_EQ(me.size() + 1, clients_len);
  ASSERT_EQ(0, strcmp(cookies, "Cookie"));
  ASSERT_EQ(strlen("Cookie") + 1, cookies_len);
  ASSERT_EQ(0, rados_break_lock(ioctx, "foo", "TestLockEC8", clients, "Cookie"));
}
| 9,180 | 37.57563 | 167 | cc |
null | ceph-main/src/test/librados/lock_cxx.cc | #include <algorithm>
#include <chrono>
#include <thread>
#include <errno.h>
#include <sys/time.h>
#include "gtest/gtest.h"
#include "include/rados/librados.hpp"
#include "cls/lock/cls_lock_client.h"
#include "test/librados/test_cxx.h"
#include "test/librados/testcase_cxx.h"
#include "crimson_utils.h"
using namespace std::chrono_literals;
using namespace librados;
typedef RadosTestPP LibRadosLockPP;
typedef RadosTestECPP LibRadosLockECPP;
TEST_F(LibRadosLockPP, LockExclusivePP) {
  // A second exclusive lock attempt with the same cookie must fail EEXIST.
  auto take_lock = [this] {
    return ioctx.lock_exclusive("foo", "TestLockPP1", "Cookie", "", NULL, 0);
  };
  ASSERT_EQ(0, take_lock());
  ASSERT_EQ(-EEXIST, take_lock());
}
TEST_F(LibRadosLockPP, LockSharedPP) {
  // The same client re-taking a shared lock with the same cookie fails EEXIST.
  auto take_lock = [this] {
    return ioctx.lock_shared("foo", "TestLockPP2", "Cookie", "Tag", "", NULL, 0);
  };
  ASSERT_EQ(0, take_lock());
  ASSERT_EQ(-EEXIST, take_lock());
}
TEST_F(LibRadosLockPP, LockExclusiveDurPP) {
  // An exclusive lock taken with a 1-second duration must become
  // re-acquirable once it expires; wait_until polls every 0.1s for that.
  struct timeval tv;
  tv.tv_sec = 1;
  tv.tv_usec = 0;
  auto lock_exclusive = [this](timeval* tv) {
    return ioctx.lock_exclusive("foo", "TestLockPP3", "Cookie", "", tv, 0);
  };
  constexpr int expected = 0;
  ASSERT_EQ(expected, lock_exclusive(&tv));
  // NULL duration => lock forever; succeeds only after the first expires
  ASSERT_EQ(expected, wait_until(1.0s, 0.1s, expected, lock_exclusive, nullptr));
}
TEST_F(LibRadosLockPP, LockSharedDurPP) {
  // A shared lock taken with a 1-second duration must become re-acquirable
  // once it expires; wait_until polls every 0.1s for that.
  struct timeval tv;
  tv.tv_sec = 1;
  tv.tv_usec = 0;
  auto lock_shared = [this](timeval* tv) {
    return ioctx.lock_shared("foo", "TestLockPP4", "Cookie", "Tag", "", tv, 0);
  };
  constexpr int expected = 0;
  ASSERT_EQ(expected, lock_shared(&tv));
  // NULL duration => lock forever; succeeds only after the first expires
  ASSERT_EQ(expected, wait_until(1.0s, 0.1s, expected, lock_shared, nullptr));
}
TEST_F(LibRadosLockPP, LockMayRenewPP) {
  // Re-locking is refused by default but allowed with LOCK_FLAG_MAY_RENEW.
  auto take_lock = [this](int flags) {
    return ioctx.lock_exclusive("foo", "TestLockPP5", "Cookie", "", NULL, flags);
  };
  ASSERT_EQ(0, take_lock(0));
  ASSERT_EQ(-EEXIST, take_lock(0));
  ASSERT_EQ(0, take_lock(LOCK_FLAG_MAY_RENEW));
}
TEST_F(LibRadosLockPP, UnlockPP) {
  // After unlocking, the same exclusive lock can be taken again.
  auto take_lock = [this] {
    return ioctx.lock_exclusive("foo", "TestLockPP6", "Cookie", "", NULL, 0);
  };
  ASSERT_EQ(0, take_lock());
  ASSERT_EQ(0, ioctx.unlock("foo", "TestLockPP6", "Cookie"));
  ASSERT_EQ(0, take_lock());
}
TEST_F(LibRadosLockPP, ListLockersPP) {
  // list_lockers() must report no lockers after an unlock, and exactly one
  // locker (this client, with our cookie) while a shared lock is held.
  std::stringstream sstm;
  sstm << "client." << cluster.get_instance_id();
  std::string me = sstm.str();
  ASSERT_EQ(0, ioctx.lock_shared("foo", "TestLockPP7", "Cookie", "Tag", "", NULL, 0));
  ASSERT_EQ(0, ioctx.unlock("foo", "TestLockPP7", "Cookie"));
  {
    int exclusive;
    std::string tag;
    std::list<librados::locker_t> lockers;
    // return value is the number of lockers: zero after the unlock
    ASSERT_EQ(0, ioctx.list_lockers("foo", "TestLockPP7", &exclusive, &tag, &lockers));
  }
  ASSERT_EQ(0, ioctx.lock_shared("foo", "TestLockPP7", "Cookie", "Tag", "", NULL, 0));
  {
    int exclusive;
    std::string tag;
    std::list<librados::locker_t> lockers;
    ASSERT_EQ(1, ioctx.list_lockers("foo", "TestLockPP7", &exclusive, &tag, &lockers));
    std::list<librados::locker_t>::iterator it = lockers.begin();
    ASSERT_FALSE(lockers.end() == it);
    ASSERT_EQ(me, it->client);
    ASSERT_EQ("Cookie", it->cookie);
  }
}
TEST_F(LibRadosLockPP, BreakLockPP) {
  // Take an exclusive lock, confirm this client is reported as the sole
  // locker, then break the lock on its behalf with break_lock().
  int exclusive;
  std::string tag;
  std::list<librados::locker_t> lockers;
  std::stringstream sstm;
  sstm << "client." << cluster.get_instance_id();
  std::string me = sstm.str();
  ASSERT_EQ(0, ioctx.lock_exclusive("foo", "TestLockPP8", "Cookie", "", NULL, 0));
  ASSERT_EQ(1, ioctx.list_lockers("foo", "TestLockPP8", &exclusive, &tag, &lockers));
  std::list<librados::locker_t>::iterator it = lockers.begin();
  ASSERT_FALSE(lockers.end() == it);
  ASSERT_EQ(me, it->client);
  ASSERT_EQ("Cookie", it->cookie);
  ASSERT_EQ(0, ioctx.break_lock("foo", "TestLockPP8", it->client, "Cookie"));
}
// EC testing
// A second exclusive lock attempt on an EC pool fails with -EEXIST.
TEST_F(LibRadosLockECPP, LockExclusivePP) {
  SKIP_IF_CRIMSON();
  ASSERT_EQ(0, ioctx.lock_exclusive("foo", "TestLockECPP1", "Cookie", "", NULL, 0));
  ASSERT_EQ(-EEXIST, ioctx.lock_exclusive("foo", "TestLockECPP1", "Cookie", "", NULL, 0));
}
// Re-taking a shared lock with the same cookie/tag on an EC pool fails.
TEST_F(LibRadosLockECPP, LockSharedPP) {
  SKIP_IF_CRIMSON();
  ASSERT_EQ(0, ioctx.lock_shared("foo", "TestLockECPP2", "Cookie", "Tag", "", NULL, 0));
  ASSERT_EQ(-EEXIST, ioctx.lock_shared("foo", "TestLockECPP2", "Cookie", "Tag", "", NULL, 0));
}
// Exclusive lock with a 1-second duration on an EC pool is re-acquirable
// after expiry.
TEST_F(LibRadosLockECPP, LockExclusiveDurPP) {
  SKIP_IF_CRIMSON();
  struct timeval tv;
  tv.tv_sec = 1;
  tv.tv_usec = 0;
  auto lock_exclusive = [this](timeval* tv) {
    return ioctx.lock_exclusive("foo", "TestLockECPP3", "Cookie", "", tv, 0);
  };
  constexpr int expected = 0;
  ASSERT_EQ(expected, lock_exclusive(&tv));
  // Poll (up to 1s, every 0.1s) until the expired lock can be taken again.
  ASSERT_EQ(expected, wait_until(1.0s, 0.1s, expected, lock_exclusive, nullptr));
}
// Shared lock with a 1-second duration on an EC pool: it must be
// re-acquirable once the duration has elapsed.
TEST_F(LibRadosLockECPP, LockSharedDurPP) {
  SKIP_IF_CRIMSON();
  struct timeval tv;
  tv.tv_sec = 1;
  tv.tv_usec = 0;
  auto lock_shared = [this](timeval* tv) {
    return ioctx.lock_shared("foo", "TestLockECPP4", "Cookie", "Tag", "", tv, 0);
  };
  // constexpr for consistency with the sibling *DurPP tests.
  constexpr int expected = 0;
  ASSERT_EQ(expected, lock_shared(&tv));
  // Poll (up to 1s, every 0.1s) until the expired lock can be taken again.
  ASSERT_EQ(expected, wait_until(1.0s, 0.1s, expected, lock_shared, nullptr));
}
// Same renew semantics as the replicated-pool variant, on an EC pool.
TEST_F(LibRadosLockECPP, LockMayRenewPP) {
  SKIP_IF_CRIMSON();
  ASSERT_EQ(0, ioctx.lock_exclusive("foo", "TestLockECPP5", "Cookie", "", NULL, 0));
  ASSERT_EQ(-EEXIST, ioctx.lock_exclusive("foo", "TestLockECPP5", "Cookie", "", NULL, 0));
  ASSERT_EQ(0, ioctx.lock_exclusive("foo", "TestLockECPP5", "Cookie", "", NULL, LOCK_FLAG_MAY_RENEW));
}
// unlock() releases an exclusive lock on an EC pool.
TEST_F(LibRadosLockECPP, UnlockPP) {
  SKIP_IF_CRIMSON();
  ASSERT_EQ(0, ioctx.lock_exclusive("foo", "TestLockECPP6", "Cookie", "", NULL, 0));
  ASSERT_EQ(0, ioctx.unlock("foo", "TestLockECPP6", "Cookie"));
  ASSERT_EQ(0, ioctx.lock_exclusive("foo", "TestLockECPP6", "Cookie", "", NULL, 0));
}
// list_lockers() on an EC pool: empty after unlock, exactly this client
// (with its cookie) while the shared lock is held.
TEST_F(LibRadosLockECPP, ListLockersPP) {
  SKIP_IF_CRIMSON();
  std::stringstream sstm;
  sstm << "client." << cluster.get_instance_id();
  std::string me = sstm.str();
  ASSERT_EQ(0, ioctx.lock_shared("foo", "TestLockECPP7", "Cookie", "Tag", "", NULL, 0));
  ASSERT_EQ(0, ioctx.unlock("foo", "TestLockECPP7", "Cookie"));
  {
    int exclusive;
    std::string tag;
    std::list<librados::locker_t> lockers;
    ASSERT_EQ(0, ioctx.list_lockers("foo", "TestLockECPP7", &exclusive, &tag, &lockers));
  }
  ASSERT_EQ(0, ioctx.lock_shared("foo", "TestLockECPP7", "Cookie", "Tag", "", NULL, 0));
  {
    int exclusive;
    std::string tag;
    std::list<librados::locker_t> lockers;
    ASSERT_EQ(1, ioctx.list_lockers("foo", "TestLockECPP7", &exclusive, &tag, &lockers));
    std::list<librados::locker_t>::iterator it = lockers.begin();
    ASSERT_FALSE(lockers.end() == it);
    ASSERT_EQ(me, it->client);
    ASSERT_EQ("Cookie", it->cookie);
  }
}
// break_lock() forcibly releases a held lock on an EC pool.
TEST_F(LibRadosLockECPP, BreakLockPP) {
  SKIP_IF_CRIMSON();
  int exclusive;
  std::string tag;
  std::list<librados::locker_t> lockers;
  std::stringstream sstm;
  sstm << "client." << cluster.get_instance_id();
  std::string me = sstm.str();
  ASSERT_EQ(0, ioctx.lock_exclusive("foo", "TestLockECPP8", "Cookie", "", NULL, 0));
  ASSERT_EQ(1, ioctx.list_lockers("foo", "TestLockECPP8", &exclusive, &tag, &lockers));
  std::list<librados::locker_t>::iterator it = lockers.begin();
  ASSERT_FALSE(lockers.end() == it);
  ASSERT_EQ(me, it->client);
  ASSERT_EQ("Cookie", it->cookie);
  ASSERT_EQ(0, ioctx.break_lock("foo", "TestLockECPP8", it->client, "Cookie"));
}
| 7,369 | 35.127451 | 102 | cc |
null | ceph-main/src/test/librados/misc.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "gtest/gtest.h"
#include "mds/mdstypes.h"
#include "include/err.h"
#include "include/buffer.h"
#include "include/rbd_types.h"
#include "include/rados.h"
#include "include/rados/librados.h"
#include "include/rados/librados.hpp"
#include "include/scope_guard.h"
#include "include/stringify.h"
#include "common/Checksummer.h"
#include "global/global_context.h"
#include "test/librados/test.h"
#include "test/librados/TestCase.h"
#include "gtest/gtest.h"
#include <sys/time.h>
#ifndef _WIN32
#include <sys/resource.h>
#endif
#include <errno.h>
#include <map>
#include <sstream>
#include <string>
#include <regex>
using namespace std;
using namespace librados;
typedef RadosTest LibRadosMisc;
// rados_version() must be callable; the returned values are not checked.
TEST(LibRadosMiscVersion, Version) {
  int major, minor, extra;
  rados_version(&major, &minor, &extra);
}
// Monitor-log callback handed to rados_monitor_log() in ConnectFailure;
// it only records (on stderr) that it was invoked.
static void test_rados_log_cb(void *arg,
                              const char *line,
                              const char *who,
                              uint64_t sec, uint64_t nsec,
                              uint64_t seq, const char *level,
                              const char *msg)
{
    std::cerr << "monitor log callback invoked" << std::endl;
}
// With a 1-second mount timeout, rados_connect() should eventually fail;
// APIs that require a connection (rados_monitor_log) must return -ENOTCONN
// beforehand.
TEST(LibRadosMiscConnectFailure, ConnectFailure) {
  rados_t cluster;
  char *id = getenv("CEPH_CLIENT_ID");
  if (id)
    std::cerr << "Client id is: " << id << std::endl;
  ASSERT_EQ(0, rados_create(&cluster, NULL));
  ASSERT_EQ(0, rados_conf_read_file(cluster, NULL));
  ASSERT_EQ(0, rados_conf_parse_env(cluster, NULL));
  ASSERT_EQ(0, rados_conf_set(cluster, "client_mount_timeout", "1s"));
  ASSERT_EQ(0, rados_conf_set(cluster, "debug_monc", "20"));
  ASSERT_EQ(0, rados_conf_set(cluster, "debug_ms", "1"));
  ASSERT_EQ(0, rados_conf_set(cluster, "log_to_stderr", "true"));
  // Not connected yet, so registering a log watcher must fail.
  ASSERT_EQ(-ENOTCONN, rados_monitor_log(cluster, "error",
                                         test_rados_log_cb, NULL));
  // try this a few times; sometimes we don't schedule fast enough for the
  // cond to time out
  int r;
  for (unsigned i=0; i<16; ++i) {
    cout << i << std::endl;
    r = rados_connect(cluster);
    if (r < 0)
      break;  // yay, we timed out
    // try again
    rados_shutdown(cluster);
    ASSERT_EQ(0, rados_create(&cluster, NULL));
  }
  // At least one attempt must have failed with the short timeout.
  ASSERT_NE(0, r);
  rados_shutdown(cluster);
}
// Regression test: exercise the objecter relock-delay injection while a
// second client creates a pool, to race osdmap updates against in-flight
// aio reads. The reads just need to complete; no payload is checked.
TEST(LibRadosMiscPool, PoolCreationRace) {
  rados_t cluster_a, cluster_b;
  char *id = getenv("CEPH_CLIENT_ID");
  if (id)
    std::cerr << "Client id is: " << id << std::endl;
  ASSERT_EQ(0, rados_create(&cluster_a, NULL));
  ASSERT_EQ(0, rados_conf_read_file(cluster_a, NULL));
  // kludge: i want to --log-file foo and only get cluster b
  //ASSERT_EQ(0, rados_conf_parse_env(cluster_a, NULL));
  // Inject an artificial delay in the objecter's relock path on cluster_a.
  ASSERT_EQ(0, rados_conf_set(cluster_a,
                              "objecter_debug_inject_relock_delay", "true"));
  ASSERT_EQ(0, rados_connect(cluster_a));
  ASSERT_EQ(0, rados_create(&cluster_b, NULL));
  ASSERT_EQ(0, rados_conf_read_file(cluster_b, NULL));
  ASSERT_EQ(0, rados_conf_parse_env(cluster_b, NULL));
  ASSERT_EQ(0, rados_connect(cluster_b));
  char poolname[80];
  snprintf(poolname, sizeof(poolname), "poolrace.%d", rand());
  rados_pool_create(cluster_a, poolname);
  rados_ioctx_t a;
  rados_ioctx_create(cluster_a, poolname, &a);
  // Creating a second pool from cluster_b forces an osdmap change while
  // cluster_a's aio reads are in flight.
  char pool2name[80];
  snprintf(pool2name, sizeof(pool2name), "poolrace2.%d", rand());
  rados_pool_create(cluster_b, pool2name);
  list<rados_completion_t> cls;
  // this should normally trigger pretty easily, but we need to bound
  // the requests because if we get too many we'll get stuck by always
  // sending enough messages that we hit the socket failure injection.
  int max = 512;
  while (max--) {
    char buf[100];
    rados_completion_t c;
    rados_aio_create_completion2(nullptr, nullptr, &c);
    cls.push_back(c);
    rados_aio_read(a, "PoolCreationRaceObj", c, buf, 100, 0);
    cout << "started " << (void*)c << std::endl;
    if (rados_aio_is_complete(cls.front())) {
      break;
    }
  }
  // Wait for the first aio to finish, then drain and release all of them.
  while (!rados_aio_is_complete(cls.front())) {
    cout << "waiting 1 sec" << std::endl;
    sleep(1);
  }
  cout << " started " << cls.size() << " aios" << std::endl;
  for (auto c : cls) {
    cout << "waiting " << (void*)c << std::endl;
    rados_aio_wait_for_complete_and_cb(c);
    rados_aio_release(c);
  }
  cout << "done." << std::endl;
  rados_ioctx_destroy(a);
  rados_pool_delete(cluster_a, poolname);
  rados_pool_delete(cluster_a, pool2name);
  rados_shutdown(cluster_b);
  rados_shutdown(cluster_a);
}
// rados_cluster_fsid() rejects an undersized buffer with -ERANGE and
// reports the uuid length when the buffer is large enough.
TEST_F(LibRadosMisc, ClusterFSID) {
  char buf[37];  // 36-char uuid plus NUL terminator
  ASSERT_EQ(-ERANGE, rados_cluster_fsid(cluster, buf, sizeof(buf) - 1));
  ASSERT_EQ(sizeof(buf) - 1,
            (size_t)rados_cluster_fsid(cluster, buf, sizeof(buf)));
}
// rados_exec() runs a class method (rbd.get_all_features) on an object and
// returns its encoded output.
TEST_F(LibRadosMisc, Exec) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  char buf2[512];
  int res = rados_exec(ioctx, "foo", "rbd", "get_all_features",
                       NULL, 0, buf2, sizeof(buf2));
  ASSERT_GT(res, 0);
  // Decode the ceph-encoded uint64_t feature mask from the output buffer.
  bufferlist bl;
  bl.append(buf2, res);
  auto iter = bl.cbegin();
  uint64_t all_features;
  decode(all_features, iter);
  // make sure *some* features are specified; don't care which ones
  ASSERT_NE(all_features, (unsigned)0);
}
// rados_writesame() replicates a small pattern across a larger range and
// rejects invalid data/write length combinations with -EINVAL.
TEST_F(LibRadosMisc, WriteSame) {
  char pattern[128];
  char readback[128 * 4];

  /* zero the full range before using writesame */
  memset(readback, 0, sizeof(readback));
  ASSERT_EQ(0, rados_write(ioctx, "ws", readback, sizeof(readback), 0));

  memset(pattern, 0xcc, sizeof(pattern));
  /* replicate the 128-byte pattern four times across the object */
  ASSERT_EQ(0, rados_writesame(ioctx, "ws", pattern, sizeof(pattern),
                               sizeof(readback), 0));
  /* read back the full range; every chunk must equal the pattern */
  ASSERT_EQ((int)sizeof(readback),
            rados_read(ioctx, "ws", readback, sizeof(readback), 0));
  for (size_t off = 0; off < sizeof(readback); off += sizeof(pattern)) {
    ASSERT_EQ(0, memcmp(readback + off, pattern, sizeof(pattern)));
  }
  /* write_len not a multiple of data_len should throw error */
  ASSERT_EQ(-EINVAL, rados_writesame(ioctx, "ws", pattern, sizeof(pattern),
                                     (sizeof(pattern) * 4) - 1, 0));
  ASSERT_EQ(-EINVAL,
            rados_writesame(ioctx, "ws", pattern, sizeof(pattern),
                            sizeof(pattern) / 2, 0));
  /* zero-length data buffer is also invalid */
  ASSERT_EQ(-EINVAL,
            rados_writesame(ioctx, "ws", pattern, 0, sizeof(pattern), 0));
  /* write_len = data_len, i.e. same as rados_write() */
  ASSERT_EQ(0, rados_writesame(ioctx, "ws", pattern, sizeof(pattern),
                               sizeof(pattern), 0));
}
// rados_cmpext() returns 0 on a byte-for-byte match and
// -MAX_ERRNO - <offset> identifying the first mismatching byte.
TEST_F(LibRadosMisc, CmpExt) {
  char stored_str[] = "1234567891";
  char mismatch_str[] = "1234577777";  // first difference at offset 5
  ASSERT_EQ(0,
            rados_write(ioctx, "cmpextpp", stored_str, sizeof(stored_str), 0));
  ASSERT_EQ(0,
            rados_cmpext(ioctx, "cmpextpp", stored_str, sizeof(stored_str), 0));
  ASSERT_EQ(-MAX_ERRNO - 5,
            rados_cmpext(ioctx, "cmpextpp", mismatch_str, sizeof(mismatch_str), 0));
}
// Exercise the pool-application API: enable applications, list them, and
// set/list/remove per-application metadata. Lists are returned as
// NUL-separated, sorted strings.
TEST_F(LibRadosMisc, Applications) {
  // The feature needs require_osd_release >= luminous; skip otherwise.
  const char *cmd[] = {"{\"prefix\":\"osd dump\"}", nullptr};
  char *buf, *st;
  size_t buflen, stlen;
  ASSERT_EQ(0, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf,
                                 &buflen, &st, &stlen));
  ASSERT_LT(0u, buflen);
  string result(buf);
  rados_buffer_free(buf);
  rados_buffer_free(st);
  if (!std::regex_search(result, std::regex("require_osd_release [l-z]"))) {
    std::cout << "SKIPPING";
    return;
  }
  // The test fixture enables "rados" on the pool, so it is listed already.
  char apps[128];
  size_t app_len;
  app_len = sizeof(apps);
  ASSERT_EQ(0, rados_application_list(ioctx, apps, &app_len));
  ASSERT_EQ(6U, app_len);
  ASSERT_EQ(0, memcmp("rados\0", apps, app_len));
  // Enabling a second application requires force once one is enabled.
  ASSERT_EQ(0, rados_application_enable(ioctx, "app1", 1));
  ASSERT_EQ(-EPERM, rados_application_enable(ioctx, "app2", 0));
  ASSERT_EQ(0, rados_application_enable(ioctx, "app2", 1));
  // A too-small app_len yields -ERANGE plus the required length.
  ASSERT_EQ(-ERANGE, rados_application_list(ioctx, apps, &app_len));
  ASSERT_EQ(16U, app_len);
  ASSERT_EQ(0, rados_application_list(ioctx, apps, &app_len));
  ASSERT_EQ(16U, app_len);
  ASSERT_EQ(0, memcmp("app1\0app2\0rados\0", apps, app_len));
  char keys[128];
  char vals[128];
  size_t key_len;
  size_t val_len;
  key_len = sizeof(keys);
  val_len = sizeof(vals);
  // Metadata calls on an unknown application fail with -ENOENT.
  ASSERT_EQ(-ENOENT, rados_application_metadata_list(ioctx, "dne", keys,
                                                     &key_len, vals, &val_len));
  ASSERT_EQ(0, rados_application_metadata_list(ioctx, "app1", keys, &key_len,
                                               vals, &val_len));
  ASSERT_EQ(0U, key_len);
  ASSERT_EQ(0U, val_len);
  ASSERT_EQ(-ENOENT, rados_application_metadata_set(ioctx, "dne", "key",
                                                    "value"));
  ASSERT_EQ(0, rados_application_metadata_set(ioctx, "app1", "key1", "value1"));
  ASSERT_EQ(0, rados_application_metadata_set(ioctx, "app1", "key2", "value2"));
  // Undersized buffers again yield -ERANGE plus the required lengths.
  ASSERT_EQ(-ERANGE, rados_application_metadata_list(ioctx, "app1", keys,
                                                     &key_len, vals, &val_len));
  ASSERT_EQ(10U, key_len);
  ASSERT_EQ(14U, val_len);
  ASSERT_EQ(0, rados_application_metadata_list(ioctx, "app1", keys, &key_len,
                                               vals, &val_len));
  ASSERT_EQ(10U, key_len);
  ASSERT_EQ(14U, val_len);
  ASSERT_EQ(0, memcmp("key1\0key2\0", keys, key_len));
  ASSERT_EQ(0, memcmp("value1\0value2\0", vals, val_len));
  // Removal shrinks the lists accordingly.
  ASSERT_EQ(0, rados_application_metadata_remove(ioctx, "app1", "key1"));
  ASSERT_EQ(0, rados_application_metadata_list(ioctx, "app1", keys, &key_len,
                                               vals, &val_len));
  ASSERT_EQ(5U, key_len);
  ASSERT_EQ(7U, val_len);
  ASSERT_EQ(0, memcmp("key2\0", keys, key_len));
  ASSERT_EQ(0, memcmp("value2\0", vals, val_len));
}
// The reported required OSD release must be a sane release ordinal
// (-1 meaning unset, always below CEPH_RELEASE_MAX).
TEST_F(LibRadosMisc, MinCompatOSD) {
  int8_t require_osd_release;
  ASSERT_EQ(0, rados_get_min_compatible_osd(cluster, &require_osd_release));
  ASSERT_LE(-1, require_osd_release);
  ASSERT_GT(CEPH_RELEASE_MAX, require_osd_release);
}
// Same sanity check for both client compatibility values.
TEST_F(LibRadosMisc, MinCompatClient) {
  int8_t min_compat_client;
  int8_t require_min_compat_client;
  ASSERT_EQ(0, rados_get_min_compatible_client(cluster,
                                               &min_compat_client,
                                               &require_min_compat_client));
  ASSERT_LE(-1, min_compat_client);
  ASSERT_GT(CEPH_RELEASE_MAX, min_compat_client);
  ASSERT_LE(-1, require_min_compat_client);
  ASSERT_GT(CEPH_RELEASE_MAX, require_min_compat_client);
}
// Worker for ShutdownRace: repeatedly connect and shut down a client.
// With ALLOW_TIMEOUTS set, a -110 (ETIMEDOUT) connect is tolerated.
static void shutdown_racer_func()
{
  const int niter = 32;
  rados_t rad;
  int i;
  for (i = 0; i < niter; ++i) {
    auto r = connect_cluster(&rad);
    if (getenv("ALLOW_TIMEOUTS")) {
      ASSERT_TRUE(r == "" || r == "rados_connect failed with error -110");
    } else {
      ASSERT_EQ("", r);
    }
    rados_shutdown(rad);
  }
}
#ifndef _WIN32
// See trackers #20988 and #42026
// Stress concurrent connect/shutdown from many threads; raises the fd
// limit first since each client consumes several descriptors.
TEST_F(LibRadosMisc, ShutdownRace)
{
  const int nthreads = 128;
  std::thread threads[nthreads];
  // Need a bunch of fd's for this test
  struct rlimit rold, rnew;
  ASSERT_EQ(getrlimit(RLIMIT_NOFILE, &rold), 0);
  rnew = rold;
  rnew.rlim_cur = rnew.rlim_max;
  ASSERT_EQ(setrlimit(RLIMIT_NOFILE, &rnew), 0);
  for (int i = 0; i < nthreads; ++i)
    threads[i] = std::thread(shutdown_racer_func);
  for (int i = 0; i < nthreads; ++i)
    threads[i].join();
  // Restore the original fd limit.
  ASSERT_EQ(setrlimit(RLIMIT_NOFILE, &rold), 0);
}
#endif /* _WIN32 */
| 11,343 | 31.135977 | 80 | cc |
null | ceph-main/src/test/librados/misc_cxx.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <errno.h>
#include <map>
#include <sstream>
#include <string>
#include <regex>
#include "gtest/gtest.h"
#include "include/err.h"
#include "include/buffer.h"
#include "include/rbd_types.h"
#include "include/rados.h"
#include "include/rados/librados.hpp"
#include "include/scope_guard.h"
#include "include/stringify.h"
#include "common/Checksummer.h"
#include "mds/mdstypes.h"
#include "global/global_context.h"
#include "test/librados/testcase_cxx.h"
#include "test/librados/test_cxx.h"
#include "crimson_utils.h"
using namespace std;
using namespace librados;
typedef RadosTestPP LibRadosMiscPP;
typedef RadosTestECPP LibRadosMiscECPP;
// Rados::version() must be callable; values are not checked.
TEST(LibRadosMiscVersion, VersionPP) {
  int major, minor, extra;
  Rados::version(&major, &minor, &extra);
}
// wait_for_latest_osdmap() succeeds on a connected cluster.
TEST_F(LibRadosMiscPP, WaitOSDMapPP) {
  ASSERT_EQ(0, cluster.wait_for_latest_osdmap());
}
// Object names up to osd_max_object_name_len are accepted; longer names
// are rejected with -ENAMETOOLONG.
TEST_F(LibRadosMiscPP, LongNamePP) {
  bufferlist bl;
  bl.append("content");
  int maxlen = g_conf()->osd_max_object_name_len;
  ASSERT_EQ(0, ioctx.write(string(maxlen/2, 'a').c_str(), bl, bl.length(), 0));
  ASSERT_EQ(0, ioctx.write(string(maxlen-1, 'a').c_str(), bl, bl.length(), 0));
  ASSERT_EQ(0, ioctx.write(string(maxlen, 'a').c_str(), bl, bl.length(), 0));
  ASSERT_EQ(-ENAMETOOLONG, ioctx.write(string(maxlen+1, 'a').c_str(), bl, bl.length(), 0));
  ASSERT_EQ(-ENAMETOOLONG, ioctx.write(string(maxlen*2, 'a').c_str(), bl, bl.length(), 0));
}
// Locator keys up to osd_max_object_name_len are accepted; longer keys
// make the write fail with -ENAMETOOLONG.
TEST_F(LibRadosMiscPP, LongLocatorPP) {
  bufferlist bl;
  bl.append("content");
  const int maxlen = g_conf()->osd_max_object_name_len;
  // Set a locator key of the given length, then attempt a small write.
  auto write_with_locator = [&](int keylen) {
    ioctx.locator_set_key(string(keylen, 'a'));
    return ioctx.write(string("a").c_str(), bl, bl.length(), 0);
  };
  ASSERT_EQ(0, write_with_locator(maxlen / 2));
  ASSERT_EQ(0, write_with_locator(maxlen - 1));
  ASSERT_EQ(0, write_with_locator(maxlen));
  ASSERT_EQ(-ENAMETOOLONG, write_with_locator(maxlen + 1));
  ASSERT_EQ(-ENAMETOOLONG, write_with_locator(maxlen * 2));
}
// Namespaces up to osd_max_object_namespace_len are accepted; longer
// ones make the write fail with -ENAMETOOLONG.
TEST_F(LibRadosMiscPP, LongNSpacePP) {
  bufferlist bl;
  bl.append("content");
  const int maxlen = g_conf()->osd_max_object_namespace_len;
  // Switch to a namespace of the given length, then attempt a small write.
  auto write_in_nspace = [&](int nslen) {
    ioctx.set_namespace(string(nslen, 'a'));
    return ioctx.write(string("a").c_str(), bl, bl.length(), 0);
  };
  ASSERT_EQ(0, write_in_nspace(maxlen / 2));
  ASSERT_EQ(0, write_in_nspace(maxlen - 1));
  ASSERT_EQ(0, write_in_nspace(maxlen));
  ASSERT_EQ(-ENAMETOOLONG, write_in_nspace(maxlen + 1));
  ASSERT_EQ(-ENAMETOOLONG, write_in_nspace(maxlen * 2));
}
// Xattr names up to osd_max_attr_name_len are accepted; longer names are
// rejected with -ENAMETOOLONG.
TEST_F(LibRadosMiscPP, LongAttrNamePP) {
  bufferlist bl;
  bl.append("content");
  int maxlen = g_conf()->osd_max_attr_name_len;
  ASSERT_EQ(0, ioctx.setxattr("bigattrobj", string(maxlen/2, 'a').c_str(), bl));
  ASSERT_EQ(0, ioctx.setxattr("bigattrobj", string(maxlen-1, 'a').c_str(), bl));
  ASSERT_EQ(0, ioctx.setxattr("bigattrobj", string(maxlen, 'a').c_str(), bl));
  ASSERT_EQ(-ENAMETOOLONG, ioctx.setxattr("bigattrobj", string(maxlen+1, 'a').c_str(), bl));
  ASSERT_EQ(-ENAMETOOLONG, ioctx.setxattr("bigattrobj", string(maxlen*2, 'a').c_str(), bl));
}
// IoCtx::exec() runs a class method (rbd.get_all_features) and returns
// its encoded output.
TEST_F(LibRadosMiscPP, ExecPP) {
  bufferlist bl;
  ASSERT_EQ(0, ioctx.write("foo", bl, 0, 0));
  bufferlist bl2, out;
  int r = ioctx.exec("foo", "rbd", "get_all_features", bl2, out);
  ASSERT_EQ(0, r);
  // Decode the ceph-encoded uint64_t feature mask.
  auto iter = out.cbegin();
  uint64_t all_features;
  decode(all_features, iter);
  // make sure *some* features are specified; don't care which ones
  ASSERT_NE(all_features, (unsigned)0);
}
// Completion callback used by AioOperatePP: flips the flag passed via arg.
void set_completion_complete(rados_completion_t cb, void *arg)
{
  bool *my_aio_complete = (bool*)arg;
  *my_aio_complete = true;
}
// Operations carrying unsupported flags are rejected with -EINVAL.
TEST_F(LibRadosMiscPP, BadFlagsPP) {
  unsigned badflags = CEPH_OSD_FLAG_PARALLELEXEC;
  {
    bufferlist bl;
    bl.append("data");
    ASSERT_EQ(0, ioctx.write("badfoo", bl, bl.length(), 0));
  }
  {
    ASSERT_EQ(-EINVAL, ioctx.remove("badfoo", badflags));
  }
}
// Compound write operations: xattr sub-ops apply atomically with the rest
// of the op, and a failing cmpxattr cancels the whole compound op.
TEST_F(LibRadosMiscPP, Operate1PP) {
  ObjectWriteOperation o;
  {
    bufferlist bl;
    o.write(0, bl);
  }
  std::string val1("val1");
  {
    bufferlist bl;
    bl.append(val1.c_str(), val1.size() + 1);
    o.setxattr("key1", bl);
    o.omap_clear(); // shouldn't affect attrs!
  }
  ASSERT_EQ(0, ioctx.operate("foo", &o));
  // An empty compound op is a valid no-op.
  ObjectWriteOperation empty;
  ASSERT_EQ(0, ioctx.operate("foo", &empty));
  {
    bufferlist bl;
    ASSERT_GT(ioctx.getxattr("foo", "key1", bl), 0);
    ASSERT_EQ(0, strcmp(bl.c_str(), val1.c_str()));
  }
  // cmpxattr compares against the NUL-terminated stored value, so the
  // comparison below (no trailing NUL) fails and cancels the rmxattr.
  ObjectWriteOperation o2;
  {
    bufferlist bl;
    bl.append(val1);
    o2.cmpxattr("key1", CEPH_OSD_CMPXATTR_OP_EQ, bl);
    o2.rmxattr("key1");
  }
  ASSERT_EQ(-ECANCELED, ioctx.operate("foo", &o2));
  ObjectWriteOperation o3;
  {
    bufferlist bl;
    bl.append(val1);
    o3.cmpxattr("key1", CEPH_OSD_CMPXATTR_OP_EQ, bl);
  }
  ASSERT_EQ(-ECANCELED, ioctx.operate("foo", &o3));
}
// A truncate(0) in the same compound op as a write leaves the object empty.
TEST_F(LibRadosMiscPP, Operate2PP) {
  ObjectWriteOperation o;
  {
    bufferlist bl;
    bl.append("abcdefg");
    o.write(0, bl);
  }
  std::string val1("val1");
  {
    bufferlist bl;
    bl.append(val1.c_str(), val1.size() + 1);
    o.setxattr("key1", bl);
    o.truncate(0);
  }
  ASSERT_EQ(0, ioctx.operate("foo", &o));
  uint64_t size;
  time_t mtime;
  ASSERT_EQ(0, ioctx.stat("foo", &size, &mtime));
  ASSERT_EQ(0U, size);
}
// Operations that would push an object past the maximum object size are
// rejected with -EFBIG.
TEST_F(LibRadosMiscPP, BigObjectPP) {
  bufferlist bl;
  bl.append("abcdefg");
  ASSERT_EQ(0, ioctx.write("foo", bl, bl.length(), 0));
  {
    ObjectWriteOperation o;
    o.truncate(500000000000ull);
    ASSERT_EQ(-EFBIG, ioctx.operate("foo", &o));
  }
  {
    ObjectWriteOperation o;
    o.zero(500000000000ull, 1);
    ASSERT_EQ(-EFBIG, ioctx.operate("foo", &o));
  }
  {
    ObjectWriteOperation o;
    o.zero(1, 500000000000ull);
    ASSERT_EQ(-EFBIG, ioctx.operate("foo", &o));
  }
  {
    ObjectWriteOperation o;
    o.zero(500000000000ull, 500000000000ull);
    ASSERT_EQ(-EFBIG, ioctx.operate("foo", &o));
  }
#ifdef __LP64__
  // this test only works on 64-bit platforms
  ASSERT_EQ(-EFBIG, ioctx.write("foo", bl, bl.length(), 500000000000ull));
#endif
}
// aio_operate() runs a compound write asynchronously and invokes the
// completion callback (set_completion_complete) when done.
TEST_F(LibRadosMiscPP, AioOperatePP) {
  bool my_aio_complete = false;
  AioCompletion *my_completion = cluster.aio_create_completion(
	  (void*)&my_aio_complete, set_completion_complete);
  AioCompletion *my_completion_null = NULL;
  ASSERT_NE(my_completion, my_completion_null);
  ObjectWriteOperation o;
  {
    bufferlist bl;
    o.write(0, bl);
  }
  std::string val1("val1");
  {
    bufferlist bl;
    bl.append(val1.c_str(), val1.size() + 1);
    o.setxattr("key1", bl);
    bufferlist bl2;
    char buf2[1024];
    memset(buf2, 0xdd, sizeof(buf2));
    bl2.append(buf2, sizeof(buf2));
    o.append(bl2);
  }
  ASSERT_EQ(0, ioctx.aio_operate("foo", my_completion, &o));
  ASSERT_EQ(0, my_completion->wait_for_complete_and_cb());
  // The callback must have fired and set the flag.
  ASSERT_EQ(my_aio_complete, true);
  my_completion->release();
  // Object size is the 1024-byte append; the initial write was empty.
  uint64_t size;
  time_t mtime;
  ASSERT_EQ(0, ioctx.stat("foo", &size, &mtime));
  ASSERT_EQ(1024U, size);
}
// assert_exists() makes a compound op fail with -ENOENT on a missing
// object and succeed once the object exists.
TEST_F(LibRadosMiscPP, AssertExistsPP) {
  char buf[64];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  ObjectWriteOperation op;
  op.assert_exists();
  op.write(0, bl);
  ASSERT_EQ(-ENOENT, ioctx.operate("asdffoo", &op));
  ASSERT_EQ(0, ioctx.create("asdffoo", true));
  ASSERT_EQ(0, ioctx.operate("asdffoo", &op));
  ASSERT_EQ(-EEXIST, ioctx.create("asdffoo", true));
}
// assert_version() fails with -EOVERFLOW when the asserted version is too
// new, -ERANGE when too old, and succeeds on an exact match.
TEST_F(LibRadosMiscPP, AssertVersionPP) {
  char buf[64];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  // Create test object...
  ASSERT_EQ(0, ioctx.create("asdfbar", true));
  // ...then write it again to guarantee that the
  // (unsigned) version must be at least 1 (not 0)
  // since we want to decrement it by 1 later.
  ASSERT_EQ(0, ioctx.write_full("asdfbar", bl));
  uint64_t v = ioctx.get_last_version();
  ObjectWriteOperation op1;
  op1.assert_version(v+1);
  op1.write(0, bl);
  ASSERT_EQ(-EOVERFLOW, ioctx.operate("asdfbar", &op1));
  ObjectWriteOperation op2;
  op2.assert_version(v-1);
  op2.write(0, bl);
  ASSERT_EQ(-ERANGE, ioctx.operate("asdfbar", &op2));
  ObjectWriteOperation op3;
  op3.assert_version(v);
  op3.write(0, bl);
  ASSERT_EQ(0, ioctx.operate("asdfbar", &op3));
}
// Xattr values up to osd_max_attr_size round-trip intact; one byte more
// is rejected with -EFBIG. Many medium-sized attrs also round-trip.
TEST_F(LibRadosMiscPP, BigAttrPP) {
  char buf[64];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.create("foo", true));
  bufferlist got;
  cout << "osd_max_attr_size = " << g_conf()->osd_max_attr_size << std::endl;
  if (g_conf()->osd_max_attr_size) {
    bl.clear();
    got.clear();
    bl.append(buffer::create(g_conf()->osd_max_attr_size));
    ASSERT_EQ(0, ioctx.setxattr("foo", "one", bl));
    ASSERT_EQ((int)bl.length(), ioctx.getxattr("foo", "one", got));
    ASSERT_TRUE(bl.contents_equal(got));
    // One byte over the limit must be rejected.
    bl.clear();
    bl.append(buffer::create(g_conf()->osd_max_attr_size+1));
    ASSERT_EQ(-EFBIG, ioctx.setxattr("foo", "one", bl));
  } else {
    cout << "osd_max_attr_size == 0; skipping test" << std::endl;
  }
  // Many attrs of up to 1 KiB each must all round-trip.
  for (int i=0; i<1000; i++) {
    bl.clear();
    got.clear();
    bl.append(buffer::create(std::min<uint64_t>(g_conf()->osd_max_attr_size,
						1024)));
    char n[10];
    snprintf(n, sizeof(n), "a%d", i);
    ASSERT_EQ(0, ioctx.setxattr("foo", n, bl));
    ASSERT_EQ((int)bl.length(), ioctx.getxattr("foo", n, got));
    ASSERT_TRUE(bl.contents_equal(got));
  }
}
// copy_from() duplicates object data and xattrs; a version argument makes
// the copy conditional on the source version (-EOVERFLOW if the asserted
// version is newer than the source's, -ERANGE if older), while version 0
// copies unconditionally. Exercised for small and multi-chunk objects.
TEST_F(LibRadosMiscPP, CopyPP) {
  SKIP_IF_CRIMSON();
  bufferlist bl, x;
  bl.append("hi there");
  x.append("bar");

  // small object
  bufferlist blc = bl;
  bufferlist xc = x;
  ASSERT_EQ(0, ioctx.write_full("foo", blc));
  ASSERT_EQ(0, ioctx.setxattr("foo", "myattr", xc));

  version_t uv = ioctx.get_last_version();
  {
    // pass future version
    ObjectWriteOperation op;
    op.copy_from("foo", ioctx, uv + 1, LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
    ASSERT_EQ(-EOVERFLOW, ioctx.operate("foo.copy", &op));
  }
  {
    // pass old version
    ObjectWriteOperation op;
    op.copy_from("foo", ioctx, uv - 1, LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
    ASSERT_EQ(-ERANGE, ioctx.operate("foo.copy", &op));
  }
  {
    ObjectWriteOperation op;
    op.copy_from("foo", ioctx, uv, LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
    ASSERT_EQ(0, ioctx.operate("foo.copy", &op));

    bufferlist bl2, x2;
    ASSERT_EQ((int)bl.length(), ioctx.read("foo.copy", bl2, 10000, 0));
    ASSERT_TRUE(bl.contents_equal(bl2));
    ASSERT_EQ((int)x.length(), ioctx.getxattr("foo.copy", "myattr", x2));
    ASSERT_TRUE(x.contents_equal(x2));
  }

  // small object without a version
  {
    ObjectWriteOperation op;
    op.copy_from("foo", ioctx, 0, LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
    ASSERT_EQ(0, ioctx.operate("foo.copy2", &op));

    bufferlist bl2, x2;
    ASSERT_EQ((int)bl.length(), ioctx.read("foo.copy2", bl2, 10000, 0));
    ASSERT_TRUE(bl.contents_equal(bl2));
    ASSERT_EQ((int)x.length(), ioctx.getxattr("foo.copy2", "myattr", x2));
    ASSERT_TRUE(x.contents_equal(x2));
  }

  // do a big object (spans several copy-from chunks)
  bl.append(buffer::create(g_conf()->osd_copyfrom_max_chunk * 3));
  bl.zero();
  bl.append("tail");
  blc = bl;
  xc = x;
  ASSERT_EQ(0, ioctx.write_full("big", blc));
  ASSERT_EQ(0, ioctx.setxattr("big", "myattr", xc));

  {
    ObjectWriteOperation op;
    op.copy_from("big", ioctx, ioctx.get_last_version(), LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
    ASSERT_EQ(0, ioctx.operate("big.copy", &op));

    bufferlist bl2, x2;
    ASSERT_EQ((int)bl.length(), ioctx.read("big.copy", bl2, bl.length(), 0));
    ASSERT_TRUE(bl.contents_equal(bl2));
    // verify the xattr on the copy target (was mistakenly "foo.copy")
    ASSERT_EQ((int)x.length(), ioctx.getxattr("big.copy", "myattr", x2));
    ASSERT_TRUE(x.contents_equal(x2));
  }
  {
    ObjectWriteOperation op;
    op.copy_from("big", ioctx, 0, LIBRADOS_OP_FLAG_FADVISE_SEQUENTIAL);
    ASSERT_EQ(0, ioctx.operate("big.copy2", &op));

    bufferlist bl2, x2;
    ASSERT_EQ((int)bl.length(), ioctx.read("big.copy2", bl2, bl.length(), 0));
    ASSERT_TRUE(bl.contents_equal(bl2));
    // verify the xattr on the copy target (was mistakenly "foo.copy2")
    ASSERT_EQ((int)x.length(), ioctx.getxattr("big.copy2", "myattr", x2));
    ASSERT_TRUE(x.contents_equal(x2));
  }
}
// Fixture providing two pools: the inherited EC pool (ioctx) plus a
// replicated source pool (src_ioctx), for cross-pool copy_from tests.
class LibRadosTwoPoolsECPP : public RadosTestECPP
{
public:
  LibRadosTwoPoolsECPP() {};
  ~LibRadosTwoPoolsECPP() override {};
protected:
  // Create both pools once per suite and enable the "rados" application.
  static void SetUpTestCase() {
    SKIP_IF_CRIMSON();
    pool_name = get_temp_pool_name();
    ASSERT_EQ("", create_one_ec_pool_pp(pool_name, s_cluster));
    src_pool_name = get_temp_pool_name();
    ASSERT_EQ(0, s_cluster.pool_create(src_pool_name.c_str()));

    librados::IoCtx ioctx;
    ASSERT_EQ(0, s_cluster.ioctx_create(pool_name.c_str(), ioctx));
    ioctx.application_enable("rados", true);

    librados::IoCtx src_ioctx;
    ASSERT_EQ(0, s_cluster.ioctx_create(src_pool_name.c_str(), src_ioctx));
    src_ioctx.application_enable("rados", true);
  }
  static void TearDownTestCase() {
    SKIP_IF_CRIMSON();
    ASSERT_EQ(0, s_cluster.pool_delete(src_pool_name.c_str()));
    ASSERT_EQ(0, destroy_one_ec_pool_pp(pool_name, s_cluster));
  }
  static std::string src_pool_name;

  // Open the source pool per-test, mirroring the base fixture's namespace.
  void SetUp() override {
    SKIP_IF_CRIMSON();
    RadosTestECPP::SetUp();
    ASSERT_EQ(0, cluster.ioctx_create(src_pool_name.c_str(), src_ioctx));
    src_ioctx.set_namespace(nspace);
  }
  void TearDown() override {
    SKIP_IF_CRIMSON();
    // wait for maps to settle before next test
    cluster.wait_for_latest_osdmap();
    RadosTestECPP::TearDown();
    cleanup_default_namespace(src_ioctx);
    cleanup_namespace(src_ioctx, nspace);
    src_ioctx.close();
  }
  librados::IoCtx src_ioctx;
};
std::string LibRadosTwoPoolsECPP::src_pool_name;
// copy_from between an EC pool and a replicated pool: copying an object
// that carries omap data into an EC pool is unsupported (-EOPNOTSUPP),
// for both large and small objects.
TEST_F(LibRadosTwoPoolsECPP, CopyFrom) {
  SKIP_IF_CRIMSON();
  bufferlist z;
  z.append_zero(4194304*2);
  bufferlist b;
  b.append("copyfrom");

  // create big object w/ omapheader
  {
    ASSERT_EQ(0, src_ioctx.write_full("foo", z));
    ASSERT_EQ(0, src_ioctx.omap_set_header("foo", b));
    version_t uv = src_ioctx.get_last_version();
    ObjectWriteOperation op;
    op.copy_from("foo", src_ioctx, uv, 0);
    ASSERT_EQ(-EOPNOTSUPP, ioctx.operate("foo.copy", &op));
  }

  // same with small object
  {
    ASSERT_EQ(0, src_ioctx.omap_set_header("bar", b));
    version_t uv = src_ioctx.get_last_version();
    ObjectWriteOperation op;
    op.copy_from("bar", src_ioctx, uv, 0);
    ASSERT_EQ(-EOPNOTSUPP, ioctx.operate("bar.copy", &op));
  }
}
// copy_from on objects of various shapes (small, big, with/without omap
// header) must keep digests consistent: deep-scrub before to establish
// digests, copy, then deep-scrub again to ensure the copies check out.
// The fixed sleeps are sloppy but usually give the scrubs time to finish.
TEST_F(LibRadosMiscPP, CopyScrubPP) {
  SKIP_IF_CRIMSON();
  bufferlist inbl, bl, x;
  for (int i=0; i<100; ++i)
    x.append("barrrrrrrrrrrrrrrrrrrrrrrrrr");
  bl.append(buffer::create(g_conf()->osd_copyfrom_max_chunk * 3));
  bl.zero();
  bl.append("tail");
  bufferlist cbl;

  map<string, bufferlist> to_set;
  for (int i=0; i<1000; ++i)
    to_set[string("foo") + stringify(i)] = x;

  // small
  cbl = x;
  ASSERT_EQ(0, ioctx.write_full("small", cbl));
  ASSERT_EQ(0, ioctx.setxattr("small", "myattr", x));

  // big
  cbl = bl;
  ASSERT_EQ(0, ioctx.write_full("big", cbl));

  // without header
  cbl = bl;
  ASSERT_EQ(0, ioctx.write_full("big2", cbl));
  ASSERT_EQ(0, ioctx.setxattr("big2", "myattr", x));
  ASSERT_EQ(0, ioctx.setxattr("big2", "myattr2", x));
  ASSERT_EQ(0, ioctx.omap_set("big2", to_set));

  // with header
  cbl = bl;
  ASSERT_EQ(0, ioctx.write_full("big3", cbl));
  ASSERT_EQ(0, ioctx.omap_set_header("big3", x));
  ASSERT_EQ(0, ioctx.omap_set("big3", to_set));

  // deep scrub to ensure digests are in place
  {
    for (int i=0; i<10; ++i) {
      ostringstream ss;
      ss << "{\"prefix\": \"pg deep-scrub\", \"pgid\": \""
	 << ioctx.get_id() << "." << i
	 << "\"}";
      cluster.mon_command(ss.str(), inbl, NULL, NULL);
    }

    // give it a few seconds to go.  this is sloppy but is usually enough time
    cout << "waiting for initial deep scrubs..." << std::endl;
    sleep(30);
    cout << "done waiting, doing copies" << std::endl;
  }

  {
    ObjectWriteOperation op;
    op.copy_from("small", ioctx, 0, 0);
    ASSERT_EQ(0, ioctx.operate("small.copy", &op));
  }

  {
    ObjectWriteOperation op;
    op.copy_from("big", ioctx, 0, 0);
    ASSERT_EQ(0, ioctx.operate("big.copy", &op));
  }

  {
    ObjectWriteOperation op;
    op.copy_from("big2", ioctx, 0, 0);
    ASSERT_EQ(0, ioctx.operate("big2.copy", &op));
  }

  {
    ObjectWriteOperation op;
    op.copy_from("big3", ioctx, 0, 0);
    ASSERT_EQ(0, ioctx.operate("big3.copy", &op));
  }

  // deep scrub to ensure digests are correct
  {
    for (int i=0; i<10; ++i) {
      ostringstream ss;
      ss << "{\"prefix\": \"pg deep-scrub\", \"pgid\": \""
	 << ioctx.get_id() << "." << i
	 << "\"}";
      cluster.mon_command(ss.str(), inbl, NULL, NULL);
    }

    // give it a few seconds to go.  this is sloppy but is usually enough time
    cout << "waiting for final deep scrubs..." << std::endl;
    sleep(30);
    cout << "done waiting" << std::endl;
  }
}
// C++ writesame(): replicate a small pattern across a larger range and
// reject invalid data/write length combinations with -EINVAL.
TEST_F(LibRadosMiscPP, WriteSamePP) {
  bufferlist bl;
  char buf[128];
  bufferlist fl;
  char full[128 * 4];
  char *cmp;

  /* zero the full range before using writesame */
  memset(full, 0, sizeof(full));
  fl.append(full, sizeof(full));
  ASSERT_EQ(0, ioctx.write("ws", fl, fl.length(), 0));

  memset(buf, 0xcc, sizeof(buf));
  bl.clear();
  bl.append(buf, sizeof(buf));
  /* write the same buf four times */
  ASSERT_EQ(0, ioctx.writesame("ws", bl, sizeof(full), 0));

  /* read back the full buffer and confirm that it matches */
  fl.clear();
  fl.append(full, sizeof(full));
  ASSERT_EQ((int)fl.length(), ioctx.read("ws", fl, fl.length(), 0));

  for (cmp = fl.c_str(); cmp < fl.c_str() + fl.length(); cmp += sizeof(buf)) {
    ASSERT_EQ(0, memcmp(cmp, buf, sizeof(buf)));
  }

  /* write_len not a multiple of data_len should throw error */
  bl.clear();
  bl.append(buf, sizeof(buf));
  ASSERT_EQ(-EINVAL, ioctx.writesame("ws", bl, (sizeof(buf) * 4) - 1, 0));
  ASSERT_EQ(-EINVAL,
	    ioctx.writesame("ws", bl, bl.length() / 2, 0));

  /* write_len = data_len, i.e. same as write() */
  ASSERT_EQ(0, ioctx.writesame("ws", bl, sizeof(buf), 0));
  /* an empty data bufferlist is invalid */
  bl.clear();
  ASSERT_EQ(-EINVAL,
	    ioctx.writesame("ws", bl, sizeof(buf), 0));
}
template <typename T>
class LibRadosChecksum : public LibRadosMiscPP {
public:
typedef typename T::alg_t alg_t;
typedef typename T::value_t value_t;
typedef typename alg_t::init_value_t init_value_t;
static const rados_checksum_type_t type = T::type;
bufferlist content_bl;
using LibRadosMiscPP::SetUpTestCase;
using LibRadosMiscPP::TearDownTestCase;
void SetUp() override {
LibRadosMiscPP::SetUp();
std::string content(4096, '\0');
for (size_t i = 0; i < content.length(); ++i) {
content[i] = static_cast<char>(rand() % (126 - 33) + 33);
}
content_bl.append(content);
ASSERT_EQ(0, ioctx.write("foo", content_bl, content_bl.length(), 0));
}
};
// Compile-time binding of a rados checksum type constant to its
// Checksummer algorithm and on-wire (little-endian) value type.
template <rados_checksum_type_t _type, typename AlgT, typename ValueT>
class LibRadosChecksumParams {
public:
  typedef AlgT alg_t;
  typedef ValueT value_t;
  static const rados_checksum_type_t type = _type;
};
// Instantiate the typed checksum tests for xxhash32, xxhash64 and crc32c.
typedef ::testing::Types<
    LibRadosChecksumParams<LIBRADOS_CHECKSUM_TYPE_XXHASH32,
			   Checksummer::xxhash32, ceph_le32>,
    LibRadosChecksumParams<LIBRADOS_CHECKSUM_TYPE_XXHASH64,
			   Checksummer::xxhash64, ceph_le64>,
    LibRadosChecksumParams<LIBRADOS_CHECKSUM_TYPE_CRC32C,
			   Checksummer::crc32c, ceph_le32>
  > LibRadosChecksumTypes;
TYPED_TEST_SUITE(LibRadosChecksum, LibRadosChecksumTypes);
// Issue one checksum sub-op per 1 KiB chunk of the object and verify each
// reply against a checksum of the same chunk computed locally with the
// matching Checksummer algorithm.
TYPED_TEST(LibRadosChecksum, Subset) {
  uint32_t chunk_size = 1024;
  uint32_t csum_count = this->content_bl.length() / chunk_size;
  typename TestFixture::init_value_t init_value = -1;
  bufferlist init_value_bl;
  encode(init_value, init_value_bl);
  std::vector<bufferlist> checksum_bls(csum_count);
  std::vector<int> checksum_rvals(csum_count);
  // individual checksum ops for each chunk
  ObjectReadOperation op;
  for (uint32_t i = 0; i < csum_count; ++i) {
    op.checksum(TestFixture::type, init_value_bl, i * chunk_size, chunk_size,
                0, &checksum_bls[i], &checksum_rvals[i]);
  }
  ASSERT_EQ(0, this->ioctx.operate("foo", &op, NULL));
  for (uint32_t i = 0; i < csum_count; ++i) {
    ASSERT_EQ(0, checksum_rvals[i]);
    // each reply encodes a count followed by that many checksum values;
    // with no sub-chunking requested the count must be 1
    auto bl_it = checksum_bls[i].cbegin();
    uint32_t count;
    decode(count, bl_it);
    ASSERT_EQ(1U, count);
    typename TestFixture::value_t value;
    decode(value, bl_it);
    // recompute the chunk's checksum locally and compare
    bufferlist content_sub_bl;
    content_sub_bl.substr_of(this->content_bl, i * chunk_size, chunk_size);
    typename TestFixture::value_t expected_value;
    bufferptr expected_value_bp = buffer::create_static(
      sizeof(expected_value), reinterpret_cast<char*>(&expected_value));
    Checksummer::template calculate<typename TestFixture::alg_t>(
      init_value, chunk_size, 0, chunk_size, content_sub_bl,
      &expected_value_bp);
    ASSERT_EQ(expected_value, value);
  }
}
// Issue a single checksum op with a 1 KiB chunk size so one reply carries
// one checksum per chunk, and verify all of them against locally computed
// values for the full object.
TYPED_TEST(LibRadosChecksum, Chunked) {
  uint32_t chunk_size = 1024;
  uint32_t csum_count = this->content_bl.length() / chunk_size;
  typename TestFixture::init_value_t init_value = -1;
  bufferlist init_value_bl;
  encode(init_value, init_value_bl);
  bufferlist checksum_bl;
  int checksum_rval;
  // single op with chunked checksum results
  ObjectReadOperation op;
  op.checksum(TestFixture::type, init_value_bl, 0, this->content_bl.length(),
              chunk_size, &checksum_bl, &checksum_rval);
  ASSERT_EQ(0, this->ioctx.operate("foo", &op, NULL));
  ASSERT_EQ(0, checksum_rval);
  // reply layout: count, then `count` checksum values (one per chunk)
  auto bl_it = checksum_bl.cbegin();
  uint32_t count;
  decode(count, bl_it);
  ASSERT_EQ(csum_count, count);
  // compute all expected per-chunk values in one local pass
  std::vector<typename TestFixture::value_t> expected_values(csum_count);
  bufferptr expected_values_bp = buffer::create_static(
    csum_count * sizeof(typename TestFixture::value_t),
    reinterpret_cast<char*>(&expected_values[0]));
  Checksummer::template calculate<typename TestFixture::alg_t>(
    init_value, chunk_size, 0, this->content_bl.length(), this->content_bl,
    &expected_values_bp);
  for (uint32_t i = 0; i < csum_count; ++i) {
    typename TestFixture::value_t value;
    decode(value, bl_it);
    ASSERT_EQ(expected_values[i], value);
  }
}
// Exercise cmpext: a matching compare succeeds; a mismatch returns
// -MAX_ERRNO minus the offset of the first differing byte (index 5 here,
// where "67891" diverges from "77777").
TEST_F(LibRadosMiscPP, CmpExtPP) {
  bufferlist cmp_bl, bad_cmp_bl, write_bl;
  char stored_str[] = "1234567891";
  char mismatch_str[] = "1234577777";

  // Seed the object and assert success (the return value was previously
  // ignored, unlike every other write in this file).
  write_bl.append(stored_str);
  ASSERT_EQ(0, ioctx.write("cmpextpp", write_bl, write_bl.length(), 0));

  cmp_bl.append(stored_str);
  ASSERT_EQ(0, ioctx.cmpext("cmpextpp", 0, cmp_bl));

  bad_cmp_bl.append(mismatch_str);
  ASSERT_EQ(-MAX_ERRNO - 5, ioctx.cmpext("cmpextpp", 0, bad_cmp_bl));
}
// Exercise per-pool application tagging: enable applications, list them,
// and set/list/remove per-application metadata.  Skipped unless the osdmap
// reports require_osd_release >= luminous (first letter in [l-z]).
TEST_F(LibRadosMiscPP, Applications) {
  bufferlist inbl, outbl;
  string outs;
  ASSERT_EQ(0, cluster.mon_command("{\"prefix\": \"osd dump\"}",
                                   inbl, &outbl, &outs));
  ASSERT_LT(0u, outbl.length());
  ASSERT_LE(0u, outs.length());
  if (!std::regex_search(outbl.to_str(),
                         std::regex("require_osd_release [l-z]"))) {
    std::cout << "SKIPPING";
    return;
  }

  // the fixture's pool starts with only the "rados" application enabled
  std::set<std::string> expected_apps = {"rados"};
  std::set<std::string> apps;
  ASSERT_EQ(0, ioctx.application_list(&apps));
  ASSERT_EQ(expected_apps, apps);

  // enabling a second app requires force=true once one is already enabled
  ASSERT_EQ(0, ioctx.application_enable("app1", true));
  ASSERT_EQ(-EPERM, ioctx.application_enable("app2", false));
  ASSERT_EQ(0, ioctx.application_enable("app2", true));

  expected_apps = {"app1", "app2", "rados"};
  ASSERT_EQ(0, ioctx.application_list(&apps));
  ASSERT_EQ(expected_apps, apps);

  // metadata ops on an unknown application fail with ENOENT
  std::map<std::string, std::string> expected_meta;
  std::map<std::string, std::string> meta;
  ASSERT_EQ(-ENOENT, ioctx.application_metadata_list("dne", &meta));
  ASSERT_EQ(0, ioctx.application_metadata_list("app1", &meta));
  ASSERT_EQ(expected_meta, meta);

  ASSERT_EQ(-ENOENT, ioctx.application_metadata_set("dne", "key1", "value1"));
  ASSERT_EQ(0, ioctx.application_metadata_set("app1", "key1", "value1"));
  ASSERT_EQ(0, ioctx.application_metadata_set("app1", "key2", "value2"));

  expected_meta = {{"key1", "value1"}, {"key2", "value2"}};
  ASSERT_EQ(0, ioctx.application_metadata_list("app1", &meta));
  ASSERT_EQ(expected_meta, meta);

  ASSERT_EQ(0, ioctx.application_metadata_remove("app1", "key1"));

  expected_meta = {{"key2", "value2"}};
  ASSERT_EQ(0, ioctx.application_metadata_list("app1", &meta));
  ASSERT_EQ(expected_meta, meta);
}
// cmpext on an EC pool: compare at a mid-object offset (reads past EOF
// compare as zeros) and at an offset far beyond the object's size.
TEST_F(LibRadosMiscECPP, CompareExtentRange) {
  SKIP_IF_CRIMSON();
  bufferlist bl1;
  bl1.append("ceph");
  ObjectWriteOperation write;
  write.write(0, bl1);
  ASSERT_EQ(0, ioctx.operate("foo", &write));

  // at offset 2 the object holds "ph"; the two trailing NULs compare
  // against the zero-fill past end-of-object
  bufferlist bl2;
  bl2.append("ph");
  bl2.append(std::string(2, '\0'));
  ObjectReadOperation read1;
  read1.cmpext(2, bl2, nullptr);
  ASSERT_EQ(0, ioctx.operate("foo", &read1, nullptr));

  // entirely past EOF: everything reads back as zeros, so this matches too
  bufferlist bl3;
  bl3.append(std::string(4, '\0'));
  ObjectReadOperation read2;
  read2.cmpext(2097152, bl3, nullptr);
  ASSERT_EQ(0, ioctx.operate("foo", &read2, nullptr));
}
// get_min_compatible_osd returns a release ordinal; only sanity-check the
// range since the actual value depends on the cluster configuration.
TEST_F(LibRadosMiscPP, MinCompatOSD) {
  int8_t require_osd_release;
  ASSERT_EQ(0, cluster.get_min_compatible_osd(&require_osd_release));
  ASSERT_LE(-1, require_osd_release);
  ASSERT_GT(CEPH_RELEASE_MAX, require_osd_release);
}
// Same range sanity check for both client compatibility ordinals
// reported by get_min_compatible_client.
TEST_F(LibRadosMiscPP, MinCompatClient) {
  int8_t min_compat_client;
  int8_t require_min_compat_client;
  ASSERT_EQ(0, cluster.get_min_compatible_client(&min_compat_client,
                                                 &require_min_compat_client));
  ASSERT_LE(-1, min_compat_client);
  ASSERT_GT(CEPH_RELEASE_MAX, min_compat_client);
  ASSERT_LE(-1, require_min_compat_client);
  ASSERT_GT(CEPH_RELEASE_MAX, require_min_compat_client);
}
// Round-trip a config option through the cluster handle: read the current
// value, set a new one (1 MiB), and verify conf_get reports it.  A scope
// guard restores the original setting even if an assertion fails.
TEST_F(LibRadosMiscPP, Conf) {
  const char* const option = "bluestore_throttle_bytes";

  std::string saved;
  ASSERT_EQ(0, cluster.conf_get(option, saved));
  auto restore_setting = make_scope_guard([&] {
    cluster.conf_set(option, saved.c_str());
  });

  const std::string wanted = std::to_string(1 << 20);
  ASSERT_EQ(0, cluster.conf_set(option, wanted.c_str()));

  std::string reported;
  ASSERT_EQ(0, cluster.conf_get(option, reported));
  ASSERT_EQ(wanted, reported);
}
| 26,649 | 27.841991 | 92 | cc |
null | ceph-main/src/test/librados/op_speed.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*
// vim: ts=8 sw=2 smarttab
#include <cstdint>
#include "include/rados/librados.hpp"
constexpr int to_create = 10'000'000;
int main() {
for (int i = 0; i < to_create; ++i) {
librados::ObjectReadOperation op;
bufferlist bl;
std::uint64_t sz;
struct timespec tm;
std::map<std::string, ceph::buffer::list> xattrs;
std::map<std::string, ceph::buffer::list> omap;
bool more;
op.read(0, 0, &bl, nullptr);
op.stat2(&sz, &tm, nullptr);
op.getxattrs(&xattrs, nullptr);
op.omap_get_vals2({}, 1000, &omap, &more, nullptr);
}
}
| 637 | 24.52 | 69 | cc |
null | ceph-main/src/test/librados/pool.cc | #include <errno.h>
#include <vector>
#include "crimson_utils.h"
#include "gtest/gtest.h"
#include "include/rados/librados.h"
#include "test/librados/test.h"
#define POOL_LIST_BUF_SZ 32768
// List pools into a flat NUL-separated buffer, check the temp pool shows
// up, and verify the call honors a buffer-size limit without overrunning.
TEST(LibRadosPools, PoolList) {
  char pool_list_buf[POOL_LIST_BUF_SZ];
  char *buf = pool_list_buf;
  rados_t cluster;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  ASSERT_LT(rados_pool_list(cluster, buf, POOL_LIST_BUF_SZ), POOL_LIST_BUF_SZ);

  // we can pass a null buffer too.
  ASSERT_LT(rados_pool_list(cluster, NULL, POOL_LIST_BUF_SZ), POOL_LIST_BUF_SZ);

  // scan the NUL-separated names; remember the length of the first entry
  // (plus its terminator) for the truncation check below
  bool found_pool = false;
  int firstlen = 0;
  while (buf[0] != '\0') {
    if ((found_pool == false) && (strcmp(buf, pool_name.c_str()) == 0)) {
      found_pool = true;
    }
    if (!firstlen)
      firstlen = strlen(buf) + 1;
    buf += strlen(buf) + 1;
  }
  ASSERT_EQ(found_pool, true);

  // make sure we honor the buffer size limit
  buf = pool_list_buf;
  memset(buf, 0, POOL_LIST_BUF_SZ);
  ASSERT_LT(rados_pool_list(cluster, buf, firstlen), POOL_LIST_BUF_SZ);
  ASSERT_NE(0, buf[0]); // include at least one pool name
  ASSERT_EQ(0, buf[firstlen]); // but don't touch the stopping point
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// NOTE(review): this re-declares rados_pool_lookup(), which the librados.h
// include above already provides — presumably a leftover; harmless.
int64_t rados_pool_lookup(rados_t cluster, const char *pool_name);
// A freshly created pool resolves to a positive pool id by name.
TEST(LibRadosPools, PoolLookup) {
  rados_t cluster;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  ASSERT_LT(0, rados_pool_lookup(cluster, pool_name.c_str()));
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// The id returned by name lookup matches the id reported by an ioctx
// opened on the same pool.
TEST(LibRadosPools, PoolLookup2) {
  rados_t cluster;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  int64_t pool_id = rados_pool_lookup(cluster, pool_name.c_str());
  ASSERT_GT(pool_id, 0);
  rados_ioctx_t ioctx;
  ASSERT_EQ(0, rados_ioctx_create(cluster, pool_name.c_str(), &ioctx));
  int64_t pool_id2 = rados_ioctx_get_id(ioctx);
  ASSERT_EQ(pool_id, pool_id2);
  rados_ioctx_destroy(ioctx);
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// A pool created through one cluster handle is visible (same id) through
// a second, independently connected handle.
TEST(LibRadosPools, PoolLookupOtherInstance) {
  rados_t cluster1;
  ASSERT_EQ("", connect_cluster(&cluster1));
  rados_t cluster2;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster2));
  int64_t pool_id = rados_pool_lookup(cluster2, pool_name.c_str());
  ASSERT_GT(pool_id, 0);
  ASSERT_EQ(pool_id, rados_pool_lookup(cluster1, pool_name.c_str()));
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster2));
  rados_shutdown(cluster1);
}
// Reverse lookup (id -> name) through a second cluster handle returns the
// name of a pool created through the first.
TEST(LibRadosPools, PoolReverseLookupOtherInstance) {
  rados_t cluster1;
  ASSERT_EQ("", connect_cluster(&cluster1));
  rados_t cluster2;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster2));
  int64_t pool_id = rados_pool_lookup(cluster2, pool_name.c_str());
  ASSERT_GT(pool_id, 0);
  char buf[100];
  ASSERT_LT(0, rados_pool_reverse_lookup(cluster1, pool_id, buf, 100));
  ASSERT_EQ(0, strcmp(buf, pool_name.c_str()));
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster2));
  rados_shutdown(cluster1);
}
// Deleting a pool makes its name unresolvable; the name can then be
// recreated as a new pool.
TEST(LibRadosPools, PoolDelete) {
  rados_t cluster;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  ASSERT_EQ(0, rados_pool_delete(cluster, pool_name.c_str()));
  ASSERT_GT(0, rados_pool_lookup(cluster, pool_name.c_str()));
  ASSERT_EQ(0, rados_pool_create(cluster, pool_name.c_str()));
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// Duplicate create fails with EEXIST; deleting twice fails with ENOENT.
TEST(LibRadosPools, PoolCreateDelete) {
  rados_t cluster;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  std::string n = pool_name + "abc123";
  ASSERT_EQ(0, rados_pool_create(cluster, n.c_str()));
  ASSERT_EQ(-EEXIST, rados_pool_create(cluster, n.c_str()));
  ASSERT_EQ(0, rados_pool_delete(cluster, n.c_str()));
  ASSERT_EQ(-ENOENT, rados_pool_delete(cluster, n.c_str()));
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// Create a pool pinned to an explicit crush rule (rule id 0), then delete it.
TEST(LibRadosPools, PoolCreateWithCrushRule) {
  rados_t cluster;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  std::string pool2_name = get_temp_pool_name();
  ASSERT_EQ(0, rados_pool_create_with_crush_rule(cluster,
			  pool2_name.c_str(), 0));
  ASSERT_EQ(0, rados_pool_delete(cluster, pool2_name.c_str()));
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// Set up a cache-tier relationship via mon commands and verify that
// rados_pool_get_base_tier maps both the base and the tier pool back to
// the base pool's id, and fails with ENOENT for an unknown pool id.
TEST(LibRadosPools, PoolGetBaseTier) {
  SKIP_IF_CRIMSON();
  rados_t cluster;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  std::string tier_pool_name = pool_name + "-cache";
  ASSERT_EQ(0, rados_pool_create(cluster, tier_pool_name.c_str()));

  int64_t pool_id = rados_pool_lookup(cluster, pool_name.c_str());
  ASSERT_GE(pool_id, 0);

  int64_t tier_pool_id = rados_pool_lookup(cluster, tier_pool_name.c_str());
  ASSERT_GE(tier_pool_id, 0);

  // before tiering is configured, a pool is its own base tier
  int64_t base_tier = 0;
  EXPECT_EQ(0, rados_pool_get_base_tier(cluster, pool_id, &base_tier));
  EXPECT_EQ(pool_id, base_tier);

  // attach the cache pool as a tier of the base pool
  std::string cmdstr = "{\"prefix\": \"osd tier add\", \"pool\": \"" +
     pool_name + "\", \"tierpool\":\"" + tier_pool_name + "\", \"force_nonempty\":\"\"}";
  char *cmd[1];
  cmd[0] = (char *)cmdstr.c_str();
  ASSERT_EQ(0, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0));

  cmdstr = "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" +
     tier_pool_name + "\", \"mode\":\"readonly\"," +
     " \"yes_i_really_mean_it\": true}";
  cmd[0] = (char *)cmdstr.c_str();
  ASSERT_EQ(0, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0));

  // pick up the new osdmap before querying tier relationships
  EXPECT_EQ(0, rados_wait_for_latest_osdmap(cluster));

  // both pools now resolve to the base pool's id
  EXPECT_EQ(0, rados_pool_get_base_tier(cluster, pool_id, &base_tier));
  EXPECT_EQ(pool_id, base_tier);

  EXPECT_EQ(0, rados_pool_get_base_tier(cluster, tier_pool_id, &base_tier));
  EXPECT_EQ(pool_id, base_tier);

  int64_t nonexistent_pool_id = (int64_t)((-1ULL) >> 1);
  EXPECT_EQ(-ENOENT, rados_pool_get_base_tier(cluster, nonexistent_pool_id, &base_tier));

  // tear down the tier relationship before deleting the pools
  cmdstr = "{\"prefix\": \"osd tier remove\", \"pool\": \"" +
     pool_name + "\", \"tierpool\":\"" + tier_pool_name + "\"}";
  cmd[0] = (char *)cmdstr.c_str();
  ASSERT_EQ(0, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0));
  ASSERT_EQ(0, rados_pool_delete(cluster, tier_pool_name.c_str()));
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
| 6,540 | 33.97861 | 91 | cc |
null | ceph-main/src/test/librados/service.cc | #include "include/rados/librados.h"
#include "include/rados/librados.hpp"
#include "include/stringify.h"
#include "common/config_proxy.h"
#include "test/librados/test.h"
#include "test/librados/TestCase.h"
#ifndef _WIN32
#include <sys/resource.h>
#endif
#include <mutex>
#include <condition_variable>
#include <algorithm>
#include <thread>
#include <errno.h>
#include "gtest/gtest.h"
#include "test/unit.cc"
using namespace std;
using namespace librados;
// Service daemon registration before connect(): the registration is queued
// and takes effect once connected; a duplicate registration is rejected.
// Metadata is passed as a NUL-separated key/value blob.
TEST(LibRadosService, RegisterEarly) {
  rados_t cluster;
  ASSERT_EQ(0, rados_create(&cluster, "admin"));
  ASSERT_EQ(0, rados_conf_read_file(cluster, NULL));
  ASSERT_EQ(0, rados_conf_parse_env(cluster, NULL));
  string name = string("pid") + stringify(getpid());
  ASSERT_EQ(0, rados_service_register(cluster, "laundry", name.c_str(),
                                      "foo\0bar\0this\0that\0"));
  ASSERT_EQ(-EEXIST, rados_service_register(cluster, "laundry", name.c_str(),
                                            "foo\0bar\0this\0that\0"));
  ASSERT_EQ(0, rados_connect(cluster));
  // give the mgr a moment to observe the registration before shutdown
  sleep(5);
  rados_shutdown(cluster);
}
// Service daemon registration after connect() succeeds; a duplicate
// registration on the same handle is rejected with EEXIST.
TEST(LibRadosService, RegisterLate) {
  rados_t cluster;
  ASSERT_EQ(0, rados_create(&cluster, "admin"));
  ASSERT_EQ(0, rados_conf_read_file(cluster, NULL));
  ASSERT_EQ(0, rados_conf_parse_env(cluster, NULL));
  ASSERT_EQ(0, rados_connect(cluster));
  string name = string("pid") + stringify(getpid());
  ASSERT_EQ(0, rados_service_register(cluster, "laundry", name.c_str(),
                                      "foo\0bar\0this\0that\0"));
  ASSERT_EQ(-EEXIST, rados_service_register(cluster, "laundry", name.c_str(),
                                            "foo\0bar\0this\0that\0"));
  rados_shutdown(cluster);
}
// Worker for StatusFormat: connects its own cluster handle, registers a
// service daemon named "rbd/image<i>" whose metadata varies by thread index
// (threads 0-2 exercise non-portal / partial metadata; the rest register as
// "portal" daemons with prefix gw0..gw3 and zone z0..z2 so the mgr can
// aggregate them in `ceph status`), then signals readiness and blocks until
// the test flips `stopped`.
static void status_format_func(const int i, std::mutex &lock,
                               std::condition_variable &cond,
                               int &threads_started, bool &stopped)
{
  rados_t cluster;
  char metadata_buf[4096];

  ASSERT_EQ(0, rados_create(&cluster, "admin"));
  ASSERT_EQ(0, rados_conf_read_file(cluster, NULL));
  ASSERT_EQ(0, rados_conf_parse_env(cluster, NULL));
  ASSERT_EQ(0, rados_connect(cluster));

  // metadata is a NUL-separated key/value blob
  if (i == 0) {
    ASSERT_LT(0, sprintf(metadata_buf, "%s%c%s%c",
                         "foo", '\0', "bar", '\0'));
  } else if (i == 1) {
    ASSERT_LT(0, sprintf(metadata_buf, "%s%c%s%c",
                         "daemon_type", '\0', "portal", '\0'));
  } else if (i == 2) {
    ASSERT_LT(0, sprintf(metadata_buf, "%s%c%s%c",
                         "daemon_prefix", '\0', "gateway", '\0'));
  } else {
    string prefix = string("gw") + stringify(i % 4);
    string zone = string("z") + stringify(i % 3);
    ASSERT_LT(0, sprintf(metadata_buf, "%s%c%s%c%s%c%s%c%s%c%s%c%s%c%s%c",
                         "daemon_type", '\0', "portal", '\0',
                         "daemon_prefix", '\0', prefix.c_str(), '\0',
                         "hostname", '\0', prefix.c_str(), '\0',
                         "zone_id", '\0', zone.c_str(), '\0'));
  }
  string name = string("rbd/image") + stringify(i);
  ASSERT_EQ(0, rados_service_register(cluster, "foo", name.c_str(),
                                      metadata_buf));

  // report readiness, then park until the test tells us to shut down
  std::unique_lock<std::mutex> l(lock);
  threads_started++;
  cond.notify_all();
  cond.wait(l, [&stopped] {
      return stopped;
    });

  rados_shutdown(cluster);
}
// Spawn 16 clients that each register a service daemon (see
// status_format_func), then poll `ceph status` (via mon_command) up to 60
// times, two seconds apart, until the mgr reports the expected aggregated
// service summary line.
TEST(LibRadosService, StatusFormat) {
  const int nthreads = 16;
  std::thread threads[nthreads];
  std::mutex lock;
  std::condition_variable cond;
  bool stopped = false;
  int threads_started = 0;

  // no rlimits on Windows
#ifndef _WIN32
  // Need a bunch of fd's for this test
  struct rlimit rold, rnew;
  ASSERT_EQ(getrlimit(RLIMIT_NOFILE, &rold), 0);
  rnew = rold;
  rnew.rlim_cur = rnew.rlim_max;
  ASSERT_EQ(setrlimit(RLIMIT_NOFILE, &rnew), 0);
#endif

  for (int i = 0; i < nthreads; ++i)
    threads[i] = std::thread(status_format_func, i, std::ref(lock),
                             std::ref(cond), std::ref(threads_started),
                             std::ref(stopped));

  // wait until every worker has registered its service daemon
  {
    std::unique_lock<std::mutex> l(lock);
    cond.wait(l, [&threads_started] {
        return nthreads == threads_started;
      });
  }

  int retry = 60; // mon thrashing may make this take a long time
  while (retry) {
    // fresh handle each attempt so we see the latest mgr status
    rados_t cluster;

    ASSERT_EQ(0, rados_create(&cluster, "admin"));
    ASSERT_EQ(0, rados_conf_read_file(cluster, NULL));
    ASSERT_EQ(0, rados_conf_parse_env(cluster, NULL));
    ASSERT_EQ(0, rados_connect(cluster));

    // build {"prefix": "status"} as JSON for mon_command
    JSONFormatter cmd_f;
    cmd_f.open_object_section("command");
    cmd_f.dump_string("prefix", "status");
    cmd_f.close_section();
    std::ostringstream cmd_stream;
    cmd_f.flush(cmd_stream);
    const std::string serialized_cmd = cmd_stream.str();

    const char *cmd[2];
    cmd[1] = NULL;
    cmd[0] = serialized_cmd.c_str();
    char *outbuf = NULL;
    size_t outlen = 0;
    ASSERT_EQ(0, rados_mon_command(cluster, (const char **)cmd, 1, "", 0,
                                   &outbuf, &outlen, NULL, NULL));
    std::string out(outbuf, outlen);
    cout << out << std::endl;
    bool success = false;
    auto r1 = out.find("16 portals active (1 hosts, 3 zones)");
    if (std::string::npos != r1) {
      success = true;
    }
    rados_buffer_free(outbuf);
    rados_shutdown(cluster);

    if (success || !retry) {
      break;
    }

    // wait for 2 seconds to make sure all the
    // services have been successfully updated
    // to ceph mon, then retry it.
    sleep(2);
    retry--;
  }

  // release the workers and join them
  {
    std::scoped_lock<std::mutex> l(lock);
    stopped = true;
    cond.notify_all();
  }
  for (int i = 0; i < nthreads; ++i)
    threads[i].join();

  // retry reaches 0 only if the expected summary never appeared
  ASSERT_NE(0, retry);

#ifndef _WIN32
  ASSERT_EQ(setrlimit(RLIMIT_NOFILE, &rold), 0);
#endif
}
// Status updates fail with ENOTCONN before connect(); after registering,
// repeated updates with changing metadata are accepted.
TEST(LibRadosService, Status) {
  rados_t cluster;
  ASSERT_EQ(0, rados_create(&cluster, "admin"));
  ASSERT_EQ(0, rados_conf_read_file(cluster, NULL));
  ASSERT_EQ(0, rados_conf_parse_env(cluster, NULL));

  ASSERT_EQ(-ENOTCONN, rados_service_update_status(cluster,
                                                   "testing\0testing\0"));

  ASSERT_EQ(0, rados_connect(cluster));
  string name = string("pid") + stringify(getpid());
  ASSERT_EQ(0, rados_service_register(cluster, "laundry", name.c_str(),
                                      "foo\0bar\0this\0that\0"));

  for (int i=0; i<20; ++i) {
    // NUL-separated key/value status blob, updated once a second
    char buffer[1024];
    snprintf(buffer, sizeof(buffer), "%s%c%s%c%s%c%d%c",
             "testing", '\0', "testing", '\0',
             "count", '\0', i, '\0');
    ASSERT_EQ(0, rados_service_update_status(cluster, buffer));
    sleep(1);
  }
  rados_shutdown(cluster);
}
| 6,615 | 30.504762 | 77 | cc |
null | ceph-main/src/test/librados/service_cxx.cc | #include <algorithm>
#include <thread>
#include <errno.h>
#include "gtest/gtest.h"
#include "include/rados/librados.hpp"
#include "include/stringify.h"
#include "common/config_proxy.h"
#include "test/librados/test_cxx.h"
#include "test/librados/testcase_cxx.h"
#include "test/unit.cc"
using namespace std;
using namespace librados;
// C++ API flavour of early registration: register before connect(),
// reject the duplicate, then connect and let the mgr observe the daemon.
TEST(LibRadosServicePP, RegisterEarly) {
  Rados cluster;
  cluster.init("admin");
  ASSERT_EQ(0, cluster.conf_read_file(NULL));
  cluster.conf_parse_env(NULL);
  string name = string("pid") + stringify(getpid());
  ASSERT_EQ(0, cluster.service_daemon_register(
	      "laundry", name, {{"foo", "bar"}, {"this", "that"}}));
  ASSERT_EQ(-EEXIST, cluster.service_daemon_register(
	      "laundry", name, {{"foo", "bar"}, {"this", "that"}}));
  ASSERT_EQ(0, cluster.connect());
  // give the mgr a moment to observe the registration before shutdown
  sleep(5);
  cluster.shutdown();
}
// C++ API flavour of late registration: register after connecting.
// Rados's destructor handles shutdown when the test scope ends.
TEST(LibRadosServicePP, RegisterLate) {
  Rados cluster;
  cluster.init("admin");
  ASSERT_EQ(0, cluster.conf_read_file(NULL));
  cluster.conf_parse_env(NULL);
  ASSERT_EQ("", connect_cluster_pp(cluster));
  string name = string("pid") + stringify(getpid());
  ASSERT_EQ(0, cluster.service_daemon_register(
	      "laundry", name, {{"foo", "bar"}, {"this", "that"}}));
}
// C++ API flavour of the status test: updates fail with ENOTCONN before
// connect(); after registering, repeated map-based updates are accepted.
TEST(LibRadosServicePP, Status) {
  Rados cluster;
  cluster.init("admin");
  ASSERT_EQ(0, cluster.conf_read_file(NULL));
  cluster.conf_parse_env(NULL);
  string name = string("pid") + stringify(getpid());
  ASSERT_EQ(-ENOTCONN, cluster.service_daemon_update_status(
	      {{"testing", "starting"}}));
  ASSERT_EQ(0, cluster.connect());
  ASSERT_EQ(0, cluster.service_daemon_register(
	      "laundry", name, {{"foo", "bar"}, {"this", "that"}}));
  for (int i=0; i<20; ++i) {
    ASSERT_EQ(0, cluster.service_daemon_update_status({
	  {"testing", "running"},
	  {"count", stringify(i)}
	}));
    sleep(1);
  }
  cluster.shutdown();
}
// Verify that shutting down a client deregisters its service daemon from
// the mgr service map.  Each attempt registers, shuts down, waits one
// mgr_tick_period, and checks `service dump` no longer contains the name;
// up to 20 attempts are allowed before the test fails.
TEST(LibRadosServicePP, Close) {
  int tries = 20;
  string name = string("close-test-pid") + stringify(getpid());
  int i;
  for (i = 0; i < tries; ++i) {
    cout << "attempt " << i << " of " << tries << std::endl;
    {
      // register, give the mgr time to see it, then shut down
      Rados cluster;
      cluster.init("admin");
      ASSERT_EQ(0, cluster.conf_read_file(NULL));
      cluster.conf_parse_env(NULL);
      ASSERT_EQ(0, cluster.connect());
      ASSERT_EQ(0, cluster.service_daemon_register(
		  "laundry", name, {{"foo", "bar"}, {"this", "that"}}));
      sleep(3); // let it register
      cluster.shutdown();
    }
    // mgr updates servicemap every tick
    //sleep(g_conf().get_val<int64_t>("mgr_tick_period"));
    std::this_thread::sleep_for(g_conf().get_val<std::chrono::seconds>(
				  "mgr_tick_period"));
    // make sure we are deregistered
    {
      Rados cluster;
      cluster.init("admin");
      ASSERT_EQ(0, cluster.conf_read_file(NULL));
      cluster.conf_parse_env(NULL);
      ASSERT_EQ(0, cluster.connect());
      bufferlist inbl, outbl;
      ASSERT_EQ(0, cluster.mon_command("{\"prefix\": \"service dump\"}",
				       inbl, &outbl, NULL));
      string s = outbl.to_str();
      cluster.shutdown();
      if (s.find(name) != string::npos) {
	cout << " failed to deregister:\n" << s << std::endl;
      } else {
	break;
      }
    }
  }
  // i == tries means the daemon never disappeared from the service map
  ASSERT_LT(i, tries);
}
null | ceph-main/src/test/librados/snapshots.cc | #include "include/rados.h"
#include "test/librados/test.h"
#include "test/librados/TestCase.h"
#include "crimson_utils.h"
#include <algorithm>
#include <errno.h>
#include "gtest/gtest.h"
#include <string>
using std::string;

// Fixture aliases: replicated-pool and EC-pool variants, each with plain
// (pool-wide) and self-managed snapshot flavours.
typedef RadosTest LibRadosSnapshots;
typedef RadosTest LibRadosSnapshotsSelfManaged;
typedef RadosTestEC LibRadosSnapshotsEC;
typedef RadosTestEC LibRadosSnapshotsSelfManagedEC;

// size of the payload written to test objects
const int bufsize = 128;
// A created pool snapshot appears in snap_list and its id matches the
// name-based lookup.
TEST_F(LibRadosSnapshots, SnapList) {
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  ASSERT_EQ(0, rados_ioctx_snap_create(ioctx, "snap1"));
  rados_snap_t snaps[10];
  EXPECT_EQ(1, rados_ioctx_snap_list(ioctx, snaps,
				     sizeof(snaps) / sizeof(snaps[0])));
  rados_snap_t rid;
  EXPECT_EQ(0, rados_ioctx_snap_lookup(ioctx, "snap1", &rid));
  EXPECT_EQ(rid, snaps[0]);
  EXPECT_EQ(0, rados_ioctx_snap_remove(ioctx, "snap1"));
}
// Duplicate snap creation fails with EEXIST; after removal the name no
// longer resolves.
TEST_F(LibRadosSnapshots, SnapRemove) {
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  ASSERT_EQ(0, rados_ioctx_snap_create(ioctx, "snap1"));
  rados_snap_t rid;
  ASSERT_EQ(0, rados_ioctx_snap_lookup(ioctx, "snap1", &rid));
  ASSERT_EQ(-EEXIST, rados_ioctx_snap_create(ioctx, "snap1"));
  ASSERT_EQ(0, rados_ioctx_snap_remove(ioctx, "snap1"));
  ASSERT_EQ(-ENOENT, rados_ioctx_snap_lookup(ioctx, "snap1", &rid));
}
// Rolling back to a pool snapshot restores the object content that was
// present when the snapshot was taken.
TEST_F(LibRadosSnapshots, Rollback) {
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  ASSERT_EQ(0, rados_ioctx_snap_create(ioctx, "snap1"));
  // overwrite after the snapshot...
  char buf2[sizeof(buf)];
  memset(buf2, 0xdd, sizeof(buf2));
  EXPECT_EQ(0, rados_write_full(ioctx, "foo", buf2, sizeof(buf2)));
  // ...and roll back: the original 0xcc payload must come back
  EXPECT_EQ(0, rados_ioctx_snap_rollback(ioctx, "foo", "snap1"));
  char buf3[sizeof(buf)];
  EXPECT_EQ((int)sizeof(buf3), rados_read(ioctx, "foo", buf3, sizeof(buf3), 0));
  EXPECT_EQ(0, memcmp(buf, buf3, sizeof(buf)));
  EXPECT_EQ(0, rados_ioctx_snap_remove(ioctx, "snap1"));
}
// Round-trip a snapshot id through snap_get_name and fetch its creation
// timestamp; lookup of an unknown snapshot fails with ENOENT.
TEST_F(LibRadosSnapshots, SnapGetName) {
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  ASSERT_EQ(0, rados_ioctx_snap_create(ioctx, "snapfoo"));
  rados_snap_t rid;
  EXPECT_EQ(0, rados_ioctx_snap_lookup(ioctx, "snapfoo", &rid));
  EXPECT_EQ(-ENOENT, rados_ioctx_snap_lookup(ioctx, "snapbar", &rid));
  char name[128];
  memset(name, 0, sizeof(name));
  EXPECT_EQ(0, rados_ioctx_snap_get_name(ioctx, rid, name, sizeof(name)));
  time_t snaptime;
  EXPECT_EQ(0, rados_ioctx_snap_get_stamp(ioctx, rid, &snaptime));
  EXPECT_EQ(0, strcmp(name, "snapfoo"));
  EXPECT_EQ(0, rados_ioctx_snap_remove(ioctx, "snapfoo"));
}
// Self-managed snapshots: take a snapshot (partly via the async variant),
// overwrite the object, then read at specific snap ids to verify clone
// content.  -2 is a placeholder value; snap_create fills in the real id.
// The reverse() calls put snap ids in the descending order the write
// context requires.
TEST_F(LibRadosSnapshotsSelfManaged, Snap) {
  std::vector<uint64_t> my_snaps;
  my_snaps.push_back(-2);
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_create(ioctx, &my_snaps.back()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_set_write_ctx(ioctx, my_snaps[0],
					&my_snaps[0], my_snaps.size()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));

  // second snapshot via the async API
  my_snaps.push_back(-2);
  rados_completion_t completion;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr,
                                            &completion));
  rados_aio_ioctx_selfmanaged_snap_create(ioctx, &my_snaps.back(), completion);
  ASSERT_EQ(0, rados_aio_wait_for_complete(completion));
  rados_aio_release(completion);
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_set_write_ctx(ioctx, my_snaps[0],
					&my_snaps[0], my_snaps.size()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf2[sizeof(buf)];
  memset(buf2, 0xdd, sizeof(buf2));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf2, sizeof(buf2), 0));

  // just before the second snap id the object did not exist yet as a clone
  rados_ioctx_snap_set_read(ioctx, my_snaps[1]-1);
  char buf3[sizeof(buf)];
  ASSERT_EQ(-ENOENT, rados_read(ioctx, "foo", buf3, sizeof(buf3), 0));
  // at the second snap id we must see the first (0xcc) write
  rados_ioctx_snap_set_read(ioctx, my_snaps[1]);
  ASSERT_EQ((int)sizeof(buf3), rados_read(ioctx, "foo", buf3, sizeof(buf3), 0));
  ASSERT_EQ(0, memcmp(buf3, buf, sizeof(buf)));

  // clean up: async remove of the newest snap, sync remove of the other
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr,
                                            &completion));
  rados_aio_ioctx_selfmanaged_snap_remove(ioctx, my_snaps.back(), completion);
  ASSERT_EQ(0, rados_aio_wait_for_complete(completion));
  rados_aio_release(completion);
  my_snaps.pop_back();
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_remove(ioctx, my_snaps.back()));
  my_snaps.pop_back();
  rados_ioctx_snap_set_read(ioctx, LIBRADOS_SNAP_HEAD);
  ASSERT_EQ(0, rados_remove(ioctx, "foo"));
}
// Self-managed snapshot rollback: snapshot, write, snapshot, overwrite,
// then roll back to the first post-write snapshot and verify the original
// content returns.  -2 is a placeholder; snap_create fills in the real id,
// and the reverse() calls keep snap ids in descending order for the write
// context.
TEST_F(LibRadosSnapshotsSelfManaged, Rollback) {
  std::vector<uint64_t> my_snaps;
  my_snaps.push_back(-2);
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_create(ioctx, &my_snaps.back()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_set_write_ctx(ioctx, my_snaps[0],
					&my_snaps[0], my_snaps.size()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  // First write
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));

  my_snaps.push_back(-2);
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_create(ioctx, &my_snaps.back()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_set_write_ctx(ioctx, my_snaps[0],
					&my_snaps[0], my_snaps.size()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf2[sizeof(buf)];
  memset(buf2, 0xdd, sizeof(buf2));
  // Second write
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf2, sizeof(buf2), 0));

  // Rollback to my_snaps[1] - object is expected to contain the first
  // write.  Assert the rollback result (it was previously ignored).
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_rollback(ioctx, "foo",
                                                     my_snaps[1]));
  char buf3[sizeof(buf)];
  ASSERT_EQ((int)sizeof(buf3), rados_read(ioctx, "foo", buf3, sizeof(buf3), 0));
  ASSERT_EQ(0, memcmp(buf3, buf, sizeof(buf)));

  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_remove(ioctx, my_snaps.back()));
  my_snaps.pop_back();
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_remove(ioctx, my_snaps.back()));
  my_snaps.pop_back();
  ASSERT_EQ(0, rados_remove(ioctx, "foo"));
}
// Rolling back to a snapshot taken *after* the most recent write must be a
// no-op: the object keeps its head content (the second write).  -2 is a
// placeholder filled in by snap_create; reverse() keeps snap ids in
// descending order for the write context.
TEST_F(LibRadosSnapshotsSelfManaged, FutureSnapRollback) {
  std::vector<uint64_t> my_snaps;
  // Snapshot 1
  my_snaps.push_back(-2);
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_create(ioctx, &my_snaps.back()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_set_write_ctx(ioctx, my_snaps[0],
					&my_snaps[0], my_snaps.size()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  // First write
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));

  // Snapshot 2
  my_snaps.push_back(-2);
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_create(ioctx, &my_snaps.back()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_set_write_ctx(ioctx, my_snaps[0],
					&my_snaps[0], my_snaps.size()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf2[sizeof(buf)];
  memset(buf2, 0xdd, sizeof(buf2));
  // Second write
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf2, sizeof(buf2), 0));

  // Snapshot 3 (taken after the last write; never added to the write ctx)
  my_snaps.push_back(-2);
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_create(ioctx, &my_snaps.back()));

  // Rollback to the newest snap id - object is expected to contain the
  // latest write (head object).  Assert the rollback result (it was
  // previously ignored).
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_rollback(ioctx, "foo",
                                                     my_snaps[2]));
  char buf3[sizeof(buf)];
  ASSERT_EQ((int)sizeof(buf3), rados_read(ioctx, "foo", buf3, sizeof(buf3), 0));
  ASSERT_EQ(0, memcmp(buf3, buf2, sizeof(buf)));

  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_remove(ioctx, my_snaps.back()));
  my_snaps.pop_back();
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_remove(ioctx, my_snaps.back()));
  my_snaps.pop_back();
  ASSERT_EQ(0, rados_remove(ioctx, "foo"));
}
// EC testing
// EC-pool variant of SnapList; skipped on crimson, which lacks EC support
// for this path.
TEST_F(LibRadosSnapshotsEC, SnapList) {
  SKIP_IF_CRIMSON();
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  ASSERT_EQ(0, rados_ioctx_snap_create(ioctx, "snap1"));
  rados_snap_t snaps[10];
  EXPECT_EQ(1, rados_ioctx_snap_list(ioctx, snaps,
				     sizeof(snaps) / sizeof(snaps[0])));
  rados_snap_t rid;
  EXPECT_EQ(0, rados_ioctx_snap_lookup(ioctx, "snap1", &rid));
  EXPECT_EQ(rid, snaps[0]);
  EXPECT_EQ(0, rados_ioctx_snap_remove(ioctx, "snap1"));
}
// EC-pool variant of SnapRemove: EEXIST on duplicate create, ENOENT after
// removal.
TEST_F(LibRadosSnapshotsEC, SnapRemove) {
  SKIP_IF_CRIMSON();
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  ASSERT_EQ(0, rados_ioctx_snap_create(ioctx, "snap1"));
  rados_snap_t rid;
  ASSERT_EQ(0, rados_ioctx_snap_lookup(ioctx, "snap1", &rid));
  ASSERT_EQ(-EEXIST, rados_ioctx_snap_create(ioctx, "snap1"));
  ASSERT_EQ(0, rados_ioctx_snap_remove(ioctx, "snap1"));
  ASSERT_EQ(-ENOENT, rados_ioctx_snap_lookup(ioctx, "snap1", &rid));
}
// EC-pool variant of Rollback: overwrite after a snapshot, roll back, and
// verify the pre-snapshot content is restored.
TEST_F(LibRadosSnapshotsEC, Rollback) {
  SKIP_IF_CRIMSON();
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  ASSERT_EQ(0, rados_ioctx_snap_create(ioctx, "snap1"));
  char buf2[sizeof(buf)];
  memset(buf2, 0xdd, sizeof(buf2));
  EXPECT_EQ(0, rados_write_full(ioctx, "foo", buf2, sizeof(buf2)));
  EXPECT_EQ(0, rados_ioctx_snap_rollback(ioctx, "foo", "snap1"));
  char buf3[sizeof(buf)];
  EXPECT_EQ((int)sizeof(buf3), rados_read(ioctx, "foo", buf3, sizeof(buf3), 0));
  EXPECT_EQ(0, memcmp(buf, buf3, sizeof(buf)));
  EXPECT_EQ(0, rados_ioctx_snap_remove(ioctx, "snap1"));
}
// EC-pool variant of SnapGetName: id -> name round trip plus timestamp
// retrieval; unknown snapshots fail with ENOENT.
TEST_F(LibRadosSnapshotsEC, SnapGetName) {
  SKIP_IF_CRIMSON();
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  ASSERT_EQ(0, rados_ioctx_snap_create(ioctx, "snapfoo"));
  rados_snap_t rid;
  EXPECT_EQ(0, rados_ioctx_snap_lookup(ioctx, "snapfoo", &rid));
  EXPECT_EQ(-ENOENT, rados_ioctx_snap_lookup(ioctx, "snapbar", &rid));
  char name[128];
  memset(name, 0, sizeof(name));
  EXPECT_EQ(0, rados_ioctx_snap_get_name(ioctx, rid, name, sizeof(name)));
  time_t snaptime;
  EXPECT_EQ(0, rados_ioctx_snap_get_stamp(ioctx, rid, &snaptime));
  EXPECT_EQ(0, strcmp(name, "snapfoo"));
  EXPECT_EQ(0, rados_ioctx_snap_remove(ioctx, "snapfoo"));
}
// Self-managed snapshots on an EC pool (C API): take a snap, write an
// aligned block, take a second snap via the async API, append a second
// block, then read the first snapshot's view through snap_set_read().
TEST_F(LibRadosSnapshotsSelfManagedEC, Snap) {
  SKIP_IF_CRIMSON();
  std::vector<uint64_t> my_snaps;
  my_snaps.push_back(-2);  // placeholder; filled in by snap_create below
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_create(ioctx, &my_snaps.back()));
  // set_write_ctx expects snap ids newest-first; the vector is kept
  // oldest-first, so reverse around each call.
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_set_write_ctx(ioctx, my_snaps[0],
                                                          &my_snaps[0], my_snaps.size()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  int bsize = alignment;  // EC pools require aligned writes
  char *buf = (char *)new char[bsize];
  memset(buf, 0xcc, bsize);
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, bsize, 0));
  my_snaps.push_back(-2);  // placeholder for the second snap
  rados_completion_t completion;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr,
                                            &completion));
  // second snapshot, this time through the async API
  rados_aio_ioctx_selfmanaged_snap_create(ioctx, &my_snaps.back(), completion);
  ASSERT_EQ(0, rados_aio_wait_for_complete(completion));
  rados_aio_release(completion);
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_set_write_ctx(ioctx, my_snaps[0],
                                                          &my_snaps[0], my_snaps.size()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char *buf2 = (char *)new char[bsize];
  memset(buf2, 0xdd, bsize);
  // append a second aligned block after the second snap was taken
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf2, bsize, bsize));
  // no clone covers snap id my_snaps[1]-1, so the object is absent there
  rados_ioctx_snap_set_read(ioctx, my_snaps[1]-1);
  char *buf3 = (char *)new char[bsize*2];
  ASSERT_EQ(-ENOENT, rados_read(ioctx, "foo", buf3, bsize*2, 0));
  // at the second snap only the first (0xcc) block is visible
  rados_ioctx_snap_set_read(ioctx, my_snaps[1]);
  ASSERT_EQ(bsize, rados_read(ioctx, "foo", buf3, bsize*2, 0));
  ASSERT_EQ(0, memcmp(buf3, buf, bsize));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr,
                                            &completion));
  // remove the newest snap asynchronously, the older one synchronously
  rados_aio_ioctx_selfmanaged_snap_remove(ioctx, my_snaps.back(), completion);
  ASSERT_EQ(0, rados_aio_wait_for_complete(completion));
  rados_aio_release(completion);
  my_snaps.pop_back();
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_remove(ioctx, my_snaps.back()));
  my_snaps.pop_back();
  rados_ioctx_snap_set_read(ioctx, LIBRADOS_SNAP_HEAD);
  ASSERT_EQ(0, rados_remove(ioctx, "foo"));
  delete[] buf;
  delete[] buf2;
  delete[] buf3;
}
// Self-managed snapshot rollback on an EC pool (C API): snapshot a
// one-block object, append a second block under a newer snap context,
// roll back to the snapshot and verify the appended block is gone.
TEST_F(LibRadosSnapshotsSelfManagedEC, Rollback) {
  SKIP_IF_CRIMSON();
  std::vector<uint64_t> my_snaps;
  my_snaps.push_back(-2);  // placeholder; filled in by snap_create
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_create(ioctx, &my_snaps.back()));
  // set_write_ctx expects snap ids newest-first; reverse around each call
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_set_write_ctx(ioctx, my_snaps[0],
                                                          &my_snaps[0], my_snaps.size()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  int bsize = alignment;  // EC pools require aligned writes
  char *buf = (char *)new char[bsize];
  memset(buf, 0xcc, bsize);
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, bsize, 0));
  my_snaps.push_back(-2);  // placeholder for the second snap
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_create(ioctx, &my_snaps.back()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_set_write_ctx(ioctx, my_snaps[0],
                                                          &my_snaps[0], my_snaps.size()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char *buf2 = (char *)new char[bsize];
  memset(buf2, 0xdd, bsize);
  // append a second block after the second snapshot
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf2, bsize, bsize));
  // roll the object back to the second snapshot's contents
  rados_ioctx_selfmanaged_snap_rollback(ioctx, "foo", my_snaps[1]);
  char *buf3 = (char *)new char[bsize*2];
  // only the first (0xcc) block should remain after rollback
  ASSERT_EQ(bsize, rados_read(ioctx, "foo", buf3, bsize*2, 0));
  ASSERT_EQ(0, memcmp(buf3, buf, bsize));
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_remove(ioctx, my_snaps.back()));
  my_snaps.pop_back();
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_remove(ioctx, my_snaps.back()));
  my_snaps.pop_back();
  ASSERT_EQ(0, rados_remove(ioctx, "foo"));
  delete[] buf;
  delete[] buf2;
  delete[] buf3;
}
| 14,249 | 38.915966 | 80 | cc |
null | ceph-main/src/test/librados/snapshots_cxx.cc | #include <algorithm>
#include <errno.h>
#include <string>
#include "gtest/gtest.h"
#include "include/rados.h"
#include "include/rados/librados.hpp"
#include "test/librados/test_cxx.h"
#include "test/librados/testcase_cxx.h"
#include "crimson_utils.h"
using namespace librados;
typedef RadosTestPP LibRadosSnapshotsPP;
typedef RadosTestPP LibRadosSnapshotsSelfManagedPP;
typedef RadosTestECPP LibRadosSnapshotsECPP;
typedef RadosTestECPP LibRadosSnapshotsSelfManagedECPP;
const int bufsize = 128;
// A pool snapshot must show up in snap_list() and must NOT flip the pool
// into selfmanaged-snaps mode at any point.
TEST_F(LibRadosSnapshotsPP, SnapListPP) {
  char payload[bufsize];
  memset(payload, 0xcc, sizeof(payload));
  bufferlist wbl;
  wbl.append(payload, sizeof(payload));
  ASSERT_EQ(0, ioctx.write("foo", wbl, sizeof(payload), 0));
  ASSERT_FALSE(cluster.get_pool_is_selfmanaged_snaps_mode(pool_name));
  ASSERT_EQ(0, ioctx.snap_create("snap1"));
  ASSERT_FALSE(cluster.get_pool_is_selfmanaged_snaps_mode(pool_name));
  std::vector<snap_t> listed;
  EXPECT_EQ(0, ioctx.snap_list(&listed));
  EXPECT_EQ(1U, listed.size());
  snap_t snap_id;
  EXPECT_EQ(0, ioctx.snap_lookup("snap1", &snap_id));
  EXPECT_EQ(snap_id, listed[0]);
  EXPECT_EQ(0, ioctx.snap_remove("snap1"));
  ASSERT_FALSE(cluster.get_pool_is_selfmanaged_snaps_mode(pool_name));
}
// After snap_remove(), the snapshot name must no longer resolve.
TEST_F(LibRadosSnapshotsPP, SnapRemovePP) {
  char payload[bufsize];
  memset(payload, 0xcc, sizeof(payload));
  bufferlist wbl;
  wbl.append(payload, sizeof(payload));
  ASSERT_EQ(0, ioctx.write("foo", wbl, sizeof(payload), 0));
  ASSERT_EQ(0, ioctx.snap_create("snap1"));
  rados_snap_t snap_id;
  ASSERT_EQ(0, ioctx.snap_lookup("snap1", &snap_id));
  ASSERT_EQ(0, ioctx.snap_remove("snap1"));
  ASSERT_EQ(-ENOENT, ioctx.snap_lookup("snap1", &snap_id));
}
// Pool-snapshot rollback: write_full() new data after the snap, roll back,
// and verify the pre-snapshot bytes come back.
TEST_F(LibRadosSnapshotsPP, RollbackPP) {
  char before[bufsize];
  memset(before, 0xcc, sizeof(before));
  bufferlist wbl;
  wbl.append(before, sizeof(before));
  ASSERT_EQ(0, ioctx.write("foo", wbl, sizeof(before), 0));
  ASSERT_EQ(0, ioctx.snap_create("snap1"));
  // clobber the object with different data
  char after[sizeof(before)];
  memset(after, 0xdd, sizeof(after));
  bufferlist clobber;
  clobber.append(after, sizeof(after));
  EXPECT_EQ(0, ioctx.write_full("foo", clobber));
  // rollback must restore the pre-snapshot contents
  EXPECT_EQ(0, ioctx.snap_rollback("foo", "snap1"));
  bufferlist readback;
  EXPECT_EQ((int)sizeof(before),
            ioctx.read("foo", readback, sizeof(before), 0));
  EXPECT_EQ(0, memcmp(before, readback.c_str(), sizeof(before)));
  EXPECT_EQ(0, ioctx.snap_remove("snap1"));
}
// snap id -> name and id -> timestamp lookups; the resolved name must
// round-trip back to the creation name.
TEST_F(LibRadosSnapshotsPP, SnapGetNamePP) {
  char payload[bufsize];
  memset(payload, 0xcc, sizeof(payload));
  bufferlist wbl;
  wbl.append(payload, sizeof(payload));
  ASSERT_EQ(0, ioctx.write("foo", wbl, sizeof(payload), 0));
  ASSERT_EQ(0, ioctx.snap_create("snapfoo"));
  rados_snap_t snap_id;
  EXPECT_EQ(0, ioctx.snap_lookup("snapfoo", &snap_id));
  EXPECT_EQ(-ENOENT, ioctx.snap_lookup("snapbar", &snap_id));
  std::string resolved;
  EXPECT_EQ(0, ioctx.snap_get_name(snap_id, &resolved));
  time_t stamp;
  EXPECT_EQ(0, ioctx.snap_get_stamp(snap_id, &stamp));
  EXPECT_EQ(0, strcmp(resolved.c_str(), "snapfoo"));
  EXPECT_EQ(0, ioctx.snap_remove("snapfoo"));
}
// reproduces http://tracker.ceph.com/issues/10262: removing snapshots of
// an object that was deleted (and create(false)+remove'd) between snaps.
TEST_F(LibRadosSnapshotsPP, SnapCreateRemovePP) {
  bufferlist bl;
  bl.append("foo");
  ASSERT_EQ(0, ioctx.write("foo", bl, bl.length(), 0));
  ASSERT_EQ(0, ioctx.snap_create("snapfoo"));
  ASSERT_EQ(0, ioctx.remove("foo"));
  ASSERT_EQ(0, ioctx.snap_create("snapbar"));
  // non-exclusive create followed by remove in one compound op
  librados::ObjectWriteOperation op;
  op.create(false);
  op.remove();
  ASSERT_EQ(0, ioctx.operate("foo", &op));
  EXPECT_EQ(0, ioctx.snap_remove("snapfoo"));
  EXPECT_EQ(0, ioctx.snap_remove("snapbar"));
}
// Self-managed snapshots (C++ API): creating one flips the pool into
// selfmanaged-snaps mode; data written before a snap stays readable
// through snap_set_read() after later overwrites.
TEST_F(LibRadosSnapshotsSelfManagedPP, SnapPP) {
  std::vector<uint64_t> my_snaps;
  my_snaps.push_back(-2);  // placeholder; filled in by snap_create
  ASSERT_FALSE(cluster.get_pool_is_selfmanaged_snaps_mode(pool_name));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  // the first self-managed snap switches the pool's snap mode
  ASSERT_TRUE(cluster.get_pool_is_selfmanaged_snaps_mode(pool_name));
  // selfmanaged_snap_set_write_ctx expects snaps newest-first; the vector
  // is kept oldest-first, so reverse around each call
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  my_snaps.push_back(-2);
  // second snapshot via the async API
  librados::AioCompletion *completion = cluster.aio_create_completion();
  ioctx.aio_selfmanaged_snap_create(&my_snaps.back(), completion);
  ASSERT_EQ(0, completion->wait_for_complete());
  completion->release();
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf2[sizeof(buf)];
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  // overwrite the head with 0xdd after the second snap
  ASSERT_EQ(0, ioctx.write("foo", bl2, sizeof(buf2), 0));
  // reading at the second snap must still see the original 0xcc data
  ioctx.snap_set_read(my_snaps[1]);
  bufferlist bl3;
  ASSERT_EQ((int)sizeof(buf), ioctx.read("foo", bl3, sizeof(buf), 0));
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf, sizeof(buf)));
  // remove the newest snap asynchronously, the older one synchronously
  completion = cluster.aio_create_completion();
  ioctx.aio_selfmanaged_snap_remove(my_snaps.back(), completion);
  ASSERT_EQ(0, completion->wait_for_complete());
  completion->release();
  my_snaps.pop_back();
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  ioctx.snap_set_read(LIBRADOS_SNAP_HEAD);
  // removing all snaps does not clear selfmanaged-snaps mode
  ASSERT_TRUE(cluster.get_pool_is_selfmanaged_snaps_mode(pool_name));
  ASSERT_EQ(0, ioctx.remove("foo"));
}
// Self-managed snapshot rollback (C++ API), checking clone boundaries and
// overlap extents via list_snaps() through a LIBRADOS_SNAP_DIR handle.
TEST_F(LibRadosSnapshotsSelfManagedPP, RollbackPP) {
  SKIP_IF_CRIMSON();
  std::vector<uint64_t> my_snaps;
  IoCtx readioctx;
  ASSERT_EQ(0, cluster.ioctx_create(pool_name.c_str(), readioctx));
  readioctx.set_namespace(nspace);
  // list_snaps() only works on a SNAP_DIR read context (see the EINVAL
  // assertions on the plain ioctx below)
  readioctx.snap_set_read(LIBRADOS_SNAP_DIR);
  my_snaps.push_back(-2);  // placeholder; filled in by snap_create
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  // set_write_ctx expects snaps newest-first; reverse around each call
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  //Write 3 consecutive buffers
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), bufsize));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), bufsize*2));
  snap_set_t ss;
  snap_t head = SNAP_HEAD;
  // so far only the head exists: one clone entry, no snaps, no overlap
  ASSERT_EQ(0, readioctx.list_snaps("foo", &ss));
  ASSERT_EQ(1u, ss.clones.size());
  ASSERT_EQ(head, ss.clones[0].cloneid);
  ASSERT_EQ(0u, ss.clones[0].snaps.size());
  ASSERT_EQ(0u, ss.clones[0].overlap.size());
  ASSERT_EQ(384u, ss.clones[0].size);  // 3 * bufsize
  my_snaps.push_back(-2);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf2[sizeof(buf)];
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  //Change the middle buffer
  ASSERT_EQ(0, ioctx.write("foo", bl2, sizeof(buf2), bufsize));
  //Add another after
  ASSERT_EQ(0, ioctx.write("foo", bl2, sizeof(buf2), bufsize*3));
  // list_snaps on a non-SNAP_DIR context must fail
  ASSERT_EQ(-EINVAL, ioctx.list_snaps("foo", &ss));
  ObjectReadOperation o;
  o.list_snaps(&ss, NULL);
  ASSERT_EQ(-EINVAL, ioctx.operate("foo", &o, NULL));
  // the clone keeps the untouched extents [0,128) and [256,128) as overlap
  // with the head; the head grew to 4 blocks
  ASSERT_EQ(0, readioctx.list_snaps("foo", &ss));
  ASSERT_EQ(2u, ss.clones.size());
  ASSERT_EQ(my_snaps[1], ss.clones[0].cloneid);
  ASSERT_EQ(1u, ss.clones[0].snaps.size());
  ASSERT_EQ(my_snaps[1], ss.clones[0].snaps[0]);
  ASSERT_EQ(2u, ss.clones[0].overlap.size());
  ASSERT_EQ(0u, ss.clones[0].overlap[0].first);
  ASSERT_EQ(128u, ss.clones[0].overlap[0].second);
  ASSERT_EQ(256u, ss.clones[0].overlap[1].first);
  ASSERT_EQ(128u, ss.clones[0].overlap[1].second);
  ASSERT_EQ(384u, ss.clones[0].size);
  ASSERT_EQ(head, ss.clones[1].cloneid);
  ASSERT_EQ(0u, ss.clones[1].snaps.size());
  ASSERT_EQ(0u, ss.clones[1].overlap.size());
  ASSERT_EQ(512u, ss.clones[1].size);
  // roll back: the three 0xcc blocks return and the fourth block vanishes
  ioctx.selfmanaged_snap_rollback("foo", my_snaps[1]);
  bufferlist bl3;
  ASSERT_EQ((int)sizeof(buf), ioctx.read("foo", bl3, sizeof(buf), 0));
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf, sizeof(buf)));
  ASSERT_EQ((int)sizeof(buf), ioctx.read("foo", bl3, sizeof(buf), bufsize));
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf, sizeof(buf)));
  ASSERT_EQ((int)sizeof(buf), ioctx.read("foo", bl3, sizeof(buf), bufsize*2));
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf, sizeof(buf)));
  ASSERT_EQ((int)0, ioctx.read("foo", bl3, sizeof(buf), bufsize*3));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  readioctx.close();
}
// Interleaved-extent overlap tracking across two self-managed snapshots:
// each generation writes a different fill pattern to alternating/overlapping
// blocks and the clone overlap maps are checked via list_snaps().
TEST_F(LibRadosSnapshotsSelfManagedPP, SnapOverlapPP) {
  // WIP https://tracker.ceph.com/issues/58263
  SKIP_IF_CRIMSON();
  std::vector<uint64_t> my_snaps;
  IoCtx readioctx;
  ASSERT_EQ(0, cluster.ioctx_create(pool_name.c_str(), readioctx));
  readioctx.set_namespace(nspace);
  // list_snaps() requires a SNAP_DIR read context
  readioctx.snap_set_read(LIBRADOS_SNAP_DIR);
  my_snaps.push_back(-2);  // placeholder; filled in by snap_create
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  // set_write_ctx expects snaps newest-first; reverse around each call
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  // generation 1: 0xcc on even blocks 0,2,4,6,8
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), bufsize*2));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), bufsize*4));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), bufsize*6));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), bufsize*8));
  snap_set_t ss;
  snap_t head = SNAP_HEAD;
  // only the head so far
  ASSERT_EQ(0, readioctx.list_snaps("foo", &ss));
  ASSERT_EQ(1u, ss.clones.size());
  ASSERT_EQ(head, ss.clones[0].cloneid);
  ASSERT_EQ(0u, ss.clones[0].snaps.size());
  ASSERT_EQ(0u, ss.clones[0].overlap.size());
  ASSERT_EQ(1152u, ss.clones[0].size);  // 9 * bufsize
  my_snaps.push_back(-2);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf2[sizeof(buf)];
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  // generation 2: 0xdd on odd blocks 1,3,5,7,9
  ASSERT_EQ(0, ioctx.write("foo", bl2, sizeof(buf2), bufsize*1));
  ASSERT_EQ(0, ioctx.write("foo", bl2, sizeof(buf2), bufsize*3));
  ASSERT_EQ(0, ioctx.write("foo", bl2, sizeof(buf2), bufsize*5));
  ASSERT_EQ(0, ioctx.write("foo", bl2, sizeof(buf2), bufsize*7));
  ASSERT_EQ(0, ioctx.write("foo", bl2, sizeof(buf2), bufsize*9));
  // the first clone's untouched extents are exactly the even blocks
  ASSERT_EQ(0, readioctx.list_snaps("foo", &ss));
  ASSERT_EQ(2u, ss.clones.size());
  ASSERT_EQ(my_snaps[1], ss.clones[0].cloneid);
  ASSERT_EQ(1u, ss.clones[0].snaps.size());
  ASSERT_EQ(my_snaps[1], ss.clones[0].snaps[0]);
  ASSERT_EQ(5u, ss.clones[0].overlap.size());
  ASSERT_EQ(0u, ss.clones[0].overlap[0].first);
  ASSERT_EQ(128u, ss.clones[0].overlap[0].second);
  ASSERT_EQ(256u, ss.clones[0].overlap[1].first);
  ASSERT_EQ(128u, ss.clones[0].overlap[1].second);
  ASSERT_EQ(512u, ss.clones[0].overlap[2].first);
  ASSERT_EQ(128u, ss.clones[0].overlap[2].second);
  ASSERT_EQ(768u, ss.clones[0].overlap[3].first);
  ASSERT_EQ(128u, ss.clones[0].overlap[3].second);
  ASSERT_EQ(1024u, ss.clones[0].overlap[4].first);
  ASSERT_EQ(128u, ss.clones[0].overlap[4].second);
  ASSERT_EQ(1152u, ss.clones[0].size);
  ASSERT_EQ(head, ss.clones[1].cloneid);
  ASSERT_EQ(0u, ss.clones[1].snaps.size());
  ASSERT_EQ(0u, ss.clones[1].overlap.size());
  ASSERT_EQ(1280u, ss.clones[1].size);
  my_snaps.push_back(-2);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf3[sizeof(buf)];
  memset(buf3, 0xee, sizeof(buf3));
  bufferlist bl4;
  bl4.append(buf3, sizeof(buf3));
  // generation 3: 0xee on blocks 1,4,5,8.
  // Fixed: these writes previously passed bl2 (0xdd) and left bl4 unused;
  // the extents are unchanged, so the overlap assertions below still hold.
  ASSERT_EQ(0, ioctx.write("foo", bl4, sizeof(buf3), bufsize*1));
  ASSERT_EQ(0, ioctx.write("foo", bl4, sizeof(buf3), bufsize*4));
  ASSERT_EQ(0, ioctx.write("foo", bl4, sizeof(buf3), bufsize*5));
  ASSERT_EQ(0, ioctx.write("foo", bl4, sizeof(buf3), bufsize*8));
  ASSERT_EQ(0, readioctx.list_snaps("foo", &ss));
  ASSERT_EQ(3u, ss.clones.size());
  ASSERT_EQ(my_snaps[1], ss.clones[0].cloneid);
  ASSERT_EQ(1u, ss.clones[0].snaps.size());
  ASSERT_EQ(my_snaps[1], ss.clones[0].snaps[0]);
  ASSERT_EQ(5u, ss.clones[0].overlap.size());
  ASSERT_EQ(0u, ss.clones[0].overlap[0].first);
  ASSERT_EQ(128u, ss.clones[0].overlap[0].second);
  ASSERT_EQ(256u, ss.clones[0].overlap[1].first);
  ASSERT_EQ(128u, ss.clones[0].overlap[1].second);
  ASSERT_EQ(512u, ss.clones[0].overlap[2].first);
  ASSERT_EQ(128u, ss.clones[0].overlap[2].second);
  ASSERT_EQ(768u, ss.clones[0].overlap[3].first);
  ASSERT_EQ(128u, ss.clones[0].overlap[3].second);
  ASSERT_EQ(1024u, ss.clones[0].overlap[4].first);
  ASSERT_EQ(128u, ss.clones[0].overlap[4].second);
  ASSERT_EQ(1152u, ss.clones[0].size);
  // second clone: untouched extents are everything except blocks 1,4,5,8
  ASSERT_EQ(my_snaps[2], ss.clones[1].cloneid);
  ASSERT_EQ(1u, ss.clones[1].snaps.size());
  ASSERT_EQ(my_snaps[2], ss.clones[1].snaps[0]);
  ASSERT_EQ(4u, ss.clones[1].overlap.size());
  ASSERT_EQ(0u, ss.clones[1].overlap[0].first);
  ASSERT_EQ(128u, ss.clones[1].overlap[0].second);
  ASSERT_EQ(256u, ss.clones[1].overlap[1].first);
  ASSERT_EQ(256u, ss.clones[1].overlap[1].second);
  ASSERT_EQ(768u, ss.clones[1].overlap[2].first);
  ASSERT_EQ(256u, ss.clones[1].overlap[2].second);
  ASSERT_EQ(1152u, ss.clones[1].overlap[3].first);
  ASSERT_EQ(128u, ss.clones[1].overlap[3].second);
  ASSERT_EQ(1280u, ss.clones[1].size);
  ASSERT_EQ(head, ss.clones[2].cloneid);
  ASSERT_EQ(0u, ss.clones[2].snaps.size());
  ASSERT_EQ(0u, ss.clones[2].overlap.size());
  ASSERT_EQ(1280u, ss.clones[2].size);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  readioctx.close();
}
// reproduces http://tracker.ceph.com/issues/11677: assert_exists+remove
// of a snapshotted object under a newer self-managed snap context.
TEST_F(LibRadosSnapshotsSelfManagedPP, Bug11677) {
  std::vector<uint64_t> my_snaps;
  my_snaps.push_back(-2);  // placeholder; filled in by snap_create
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  // set_write_ctx expects snaps newest-first; reverse around each call
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  const int bsize = 1<<20;
  // std::vector instead of raw new[]/delete[]: the buffer is released even
  // if an ASSERT_* aborts the test before the end of the body.
  std::vector<char> buf(bsize, (char)0xcc);
  bufferlist bl1;
  bl1.append(buf.data(), bsize);
  ASSERT_EQ(0, ioctx.write("foo", bl1, bsize, 0));
  my_snaps.push_back(-2);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  // compound op: the remove must only apply if the object exists
  librados::ObjectWriteOperation op;
  op.assert_exists();
  op.remove();
  ASSERT_EQ(0, ioctx.operate("foo", &op));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  ioctx.snap_set_read(LIBRADOS_SNAP_HEAD);
}
// OPERATION_ORDERSNAP: a write carrying an older snap context than one the
// object has already been written with must fail with -EOLDSNAPC; the same
// write without the flag succeeds.
TEST_F(LibRadosSnapshotsSelfManagedPP, OrderSnap) {
  std::vector<uint64_t> my_snaps;
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  int flags = librados::OPERATION_ORDERSNAP;
  my_snaps.push_back(-2);  // placeholder; filled in by snap_create
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  // set_write_ctx expects snaps newest-first; reverse around each call
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  // write with snap context seq = snap 1
  ObjectWriteOperation op1;
  op1.write(0, bl);
  librados::AioCompletion *comp1 = cluster.aio_create_completion();
  ASSERT_EQ(0, ioctx.aio_operate("foo", comp1, &op1, flags));
  ASSERT_EQ(0, comp1->wait_for_complete());
  ASSERT_EQ(0, comp1->get_return_value());
  comp1->release();
  my_snaps.push_back(-2);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  // write with the newer snap context seq = snap 2
  ObjectWriteOperation op2;
  op2.write(0, bl);
  librados::AioCompletion *comp2 = cluster.aio_create_completion();
  ASSERT_EQ(0, ioctx.aio_operate("foo", comp2, &op2, flags));
  ASSERT_EQ(0, comp2->wait_for_complete());
  ASSERT_EQ(0, comp2->get_return_value());
  comp2->release();
  // revert to the older snap context: with ORDERSNAP this must be rejected
  my_snaps.pop_back();
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ObjectWriteOperation op3;
  op3.write(0, bl);
  librados::AioCompletion *comp3 = cluster.aio_create_completion();
  ASSERT_EQ(0, ioctx.aio_operate("foo", comp3, &op3, flags));
  ASSERT_EQ(0, comp3->wait_for_complete());
  ASSERT_EQ(-EOLDSNAPC, comp3->get_return_value());
  comp3->release();
  // without ORDERSNAP the same stale snap context is accepted
  ObjectWriteOperation op4;
  op4.write(0, bl);
  librados::AioCompletion *comp4 = cluster.aio_create_completion();
  ASSERT_EQ(0, ioctx.aio_operate("foo", comp4, &op4, 0));
  ASSERT_EQ(0, comp4->wait_for_complete());
  ASSERT_EQ(0, comp4->get_return_value());
  comp4->release();
}
// Rollback combined with a write in a single compound op: the rollback is
// expected to win, resolving the object to the pre-write contents.
// Currently skipped pending https://tracker.ceph.com/issues/59114.
TEST_F(LibRadosSnapshotsSelfManagedPP, WriteRollback) {
  // https://tracker.ceph.com/issues/59114
  GTEST_SKIP();
  uint64_t snapid = 5;
  // buf1
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  // buf2
  char buf2[sizeof(buf)];
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  // First write
  ObjectWriteOperation op_write1;
  op_write1.write(0, bl);
  // Operate
  librados::AioCompletion *comp_write = cluster.aio_create_completion();
  ASSERT_EQ(0, ioctx.aio_operate("foo", comp_write, &op_write1, 0));
  ASSERT_EQ(0, comp_write->wait_for_complete());
  ASSERT_EQ(0, comp_write->get_return_value());
  comp_write->release();
  // Take Snapshot
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&snapid));
  // Rollback + Second write in the same op
  ObjectWriteOperation op_write2_snap_rollback;
  op_write2_snap_rollback.write(0, bl2);
  op_write2_snap_rollback.selfmanaged_snap_rollback(snapid);
  // Operate
  librados::AioCompletion *comp_write2 = cluster.aio_create_completion();
  ASSERT_EQ(0, ioctx.aio_operate("foo", comp_write2, &op_write2_snap_rollback, 0));
  ASSERT_EQ(0, comp_write2->wait_for_complete());
  ASSERT_EQ(0, comp_write2->get_return_value());
  comp_write2->release();
  // Resolved should be first write
  bufferlist bl3;
  EXPECT_EQ((int)sizeof(buf), ioctx.read("foo", bl3, sizeof(buf), 0));
  EXPECT_EQ(0, memcmp(buf, bl3.c_str(), sizeof(buf)));
}
// Write to an object after one of its snaps has been removed and (after a
// wait) trimmed, exercising the write path once the snap id is purged.
TEST_F(LibRadosSnapshotsSelfManagedPP, ReusePurgedSnap) {
  std::vector<uint64_t> my_snaps;
  my_snaps.push_back(-2);  // placeholder; filled in by snap_create
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  ASSERT_TRUE(cluster.get_pool_is_selfmanaged_snaps_mode(pool_name));
  // set_write_ctx expects snaps newest-first; reverse around each call
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  my_snaps.push_back(-2);
  // create the second snap asynchronously...
  librados::AioCompletion *completion = cluster.aio_create_completion();
  ioctx.aio_selfmanaged_snap_create(&my_snaps.back(), completion);
  ASSERT_EQ(0, completion->wait_for_complete());
  completion->release();
  std::cout << "deleting snap " << my_snaps.back() << " in pool "
	    << ioctx.get_pool_name() << std::endl;
  // ...and immediately remove it again
  completion = cluster.aio_create_completion();
  ioctx.aio_selfmanaged_snap_remove(my_snaps.back(), completion);
  ASSERT_EQ(0, completion->wait_for_complete());
  completion->release();
  std::cout << "waiting for snaps to purge" << std::endl;
  sleep(15);  // give the OSDs time to trim the removed snap
  // write again with the snap context that still names the purged snap
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf2[sizeof(buf)];
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  ASSERT_EQ(0, ioctx.write("foo", bl2, sizeof(buf2), 0));
  // scrub it out?
  //sleep(600);
}
// EC testing
// EC-pool variant: a pool snapshot must appear in snap_list() with the id
// snap_lookup() resolves for the same name.
TEST_F(LibRadosSnapshotsECPP, SnapListPP) {
  SKIP_IF_CRIMSON();
  char payload[bufsize];
  memset(payload, 0xcc, sizeof(payload));
  bufferlist wbl;
  wbl.append(payload, sizeof(payload));
  ASSERT_EQ(0, ioctx.write("foo", wbl, sizeof(payload), 0));
  ASSERT_EQ(0, ioctx.snap_create("snap1"));
  std::vector<snap_t> listed;
  EXPECT_EQ(0, ioctx.snap_list(&listed));
  EXPECT_EQ(1U, listed.size());
  snap_t snap_id;
  EXPECT_EQ(0, ioctx.snap_lookup("snap1", &snap_id));
  EXPECT_EQ(snap_id, listed[0]);
  EXPECT_EQ(0, ioctx.snap_remove("snap1"));
}
// EC-pool variant: after snap_remove(), the name must no longer resolve.
TEST_F(LibRadosSnapshotsECPP, SnapRemovePP) {
  SKIP_IF_CRIMSON();
  char payload[bufsize];
  memset(payload, 0xcc, sizeof(payload));
  bufferlist wbl;
  wbl.append(payload, sizeof(payload));
  ASSERT_EQ(0, ioctx.write("foo", wbl, sizeof(payload), 0));
  ASSERT_EQ(0, ioctx.snap_create("snap1"));
  rados_snap_t snap_id;
  ASSERT_EQ(0, ioctx.snap_lookup("snap1", &snap_id));
  ASSERT_EQ(0, ioctx.snap_remove("snap1"));
  ASSERT_EQ(-ENOENT, ioctx.snap_lookup("snap1", &snap_id));
}
// EC-pool variant of pool-snapshot rollback: overwrite after a snap, roll
// back, and verify the pre-snapshot bytes return.
TEST_F(LibRadosSnapshotsECPP, RollbackPP) {
  SKIP_IF_CRIMSON();
  char before[bufsize];
  memset(before, 0xcc, sizeof(before));
  bufferlist wbl;
  wbl.append(before, sizeof(before));
  ASSERT_EQ(0, ioctx.write("foo", wbl, sizeof(before), 0));
  ASSERT_EQ(0, ioctx.snap_create("snap1"));
  // clobber the object with different data
  char after[sizeof(before)];
  memset(after, 0xdd, sizeof(after));
  bufferlist clobber;
  clobber.append(after, sizeof(after));
  EXPECT_EQ(0, ioctx.write_full("foo", clobber));
  // rollback must restore the pre-snapshot contents
  EXPECT_EQ(0, ioctx.snap_rollback("foo", "snap1"));
  bufferlist readback;
  EXPECT_EQ((int)sizeof(before),
            ioctx.read("foo", readback, sizeof(before), 0));
  EXPECT_EQ(0, memcmp(before, readback.c_str(), sizeof(before)));
  EXPECT_EQ(0, ioctx.snap_remove("snap1"));
}
// EC-pool variant: snap id -> name / id -> timestamp lookups must
// round-trip back to the creation name.
TEST_F(LibRadosSnapshotsECPP, SnapGetNamePP) {
  SKIP_IF_CRIMSON();
  char payload[bufsize];
  memset(payload, 0xcc, sizeof(payload));
  bufferlist wbl;
  wbl.append(payload, sizeof(payload));
  ASSERT_EQ(0, ioctx.write("foo", wbl, sizeof(payload), 0));
  ASSERT_EQ(0, ioctx.snap_create("snapfoo"));
  rados_snap_t snap_id;
  EXPECT_EQ(0, ioctx.snap_lookup("snapfoo", &snap_id));
  EXPECT_EQ(-ENOENT, ioctx.snap_lookup("snapbar", &snap_id));
  std::string resolved;
  EXPECT_EQ(0, ioctx.snap_get_name(snap_id, &resolved));
  time_t stamp;
  EXPECT_EQ(0, ioctx.snap_get_stamp(snap_id, &stamp));
  EXPECT_EQ(0, strcmp(resolved.c_str(), "snapfoo"));
  EXPECT_EQ(0, ioctx.snap_remove("snapfoo"));
}
// Self-managed snapshots on an EC pool (C++ API): a block appended after
// the second snap must be invisible when reading at that snap.
TEST_F(LibRadosSnapshotsSelfManagedECPP, SnapPP) {
  SKIP_IF_CRIMSON();
  std::vector<uint64_t> my_snaps;
  my_snaps.push_back(-2);  // placeholder; filled in by snap_create
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  // set_write_ctx expects snaps newest-first; reverse around each call
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  int bsize = alignment;  // EC pools require aligned writes
  char *buf = (char *)new char[bsize];
  memset(buf, 0xcc, bsize);
  bufferlist bl1;
  bl1.append(buf, bsize);
  ASSERT_EQ(0, ioctx.write("foo", bl1, bsize, 0));
  my_snaps.push_back(-2);
  // second snapshot via the async API
  librados::AioCompletion *completion = cluster.aio_create_completion();
  ioctx.aio_selfmanaged_snap_create(&my_snaps.back(), completion);
  ASSERT_EQ(0, completion->wait_for_complete());
  completion->release();
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char *buf2 = (char *)new char[bsize];
  memset(buf2, 0xdd, bsize);
  bufferlist bl2;
  bl2.append(buf2, bsize);
  // Add another aligned buffer
  ASSERT_EQ(0, ioctx.write("foo", bl2, bsize, bsize));
  // reading at the second snap sees only the first (0xcc) block
  ioctx.snap_set_read(my_snaps[1]);
  bufferlist bl3;
  ASSERT_EQ(bsize, ioctx.read("foo", bl3, bsize*3, 0));
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf, bsize));
  // remove the newest snap asynchronously, the older one synchronously
  completion = cluster.aio_create_completion();
  ioctx.aio_selfmanaged_snap_remove(my_snaps.back(), completion);
  ASSERT_EQ(0, completion->wait_for_complete());
  completion->release();
  my_snaps.pop_back();
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  ioctx.snap_set_read(LIBRADOS_SNAP_HEAD);
  ASSERT_EQ(0, ioctx.remove("foo"));
  delete[] buf;
  delete[] buf2;
}
// Self-managed snapshot rollback on an EC pool (C++ API), with clone and
// overlap inspection through a LIBRADOS_SNAP_DIR read handle.
TEST_F(LibRadosSnapshotsSelfManagedECPP, RollbackPP) {
  SKIP_IF_CRIMSON();
  std::vector<uint64_t> my_snaps;
  IoCtx readioctx;
  ASSERT_EQ(0, cluster.ioctx_create(pool_name.c_str(), readioctx));
  readioctx.set_namespace(nspace);
  // list_snaps() only works on a SNAP_DIR read context
  readioctx.snap_set_read(LIBRADOS_SNAP_DIR);
  my_snaps.push_back(-2);  // placeholder; filled in by snap_create
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  // set_write_ctx expects snaps newest-first; reverse around each call
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  int bsize = alignment;  // EC pools require aligned writes
  char *buf = (char *)new char[bsize];
  memset(buf, 0xcc, bsize);
  bufferlist bl1;
  bl1.append(buf, bsize);
  //Write 3 consecutive buffers
  ASSERT_EQ(0, ioctx.write("foo", bl1, bsize, 0));
  ASSERT_EQ(0, ioctx.write("foo", bl1, bsize, bsize));
  ASSERT_EQ(0, ioctx.write("foo", bl1, bsize, bsize*2));
  snap_set_t ss;
  snap_t head = SNAP_HEAD;
  // so far only the head exists: one clone entry, no snaps, no overlap
  ASSERT_EQ(0, readioctx.list_snaps("foo", &ss));
  ASSERT_EQ(1u, ss.clones.size());
  ASSERT_EQ(head, ss.clones[0].cloneid);
  ASSERT_EQ(0u, ss.clones[0].snaps.size());
  ASSERT_EQ(0u, ss.clones[0].overlap.size());
  ASSERT_EQ((unsigned)(bsize*3), ss.clones[0].size);
  my_snaps.push_back(-2);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char *buf2 = (char *)new char[bsize];
  memset(buf2, 0xdd, bsize);
  bufferlist bl2;
  bl2.append(buf2, bsize);
  //Change the middle buffer
  //ASSERT_EQ(0, ioctx.write("foo", bl2, sizeof(buf2), bufsize));
  //Add another after
  ASSERT_EQ(0, ioctx.write("foo", bl2, bsize, bsize*3));
  // list_snaps on a non-SNAP_DIR context must fail
  ASSERT_EQ(-EINVAL, ioctx.list_snaps("foo", &ss));
  ObjectReadOperation o;
  o.list_snaps(&ss, NULL);
  ASSERT_EQ(-EINVAL, ioctx.operate("foo", &o, NULL));
  // the clone's untouched extent is all three original blocks; the head
  // grew by the appended fourth block
  ASSERT_EQ(0, readioctx.list_snaps("foo", &ss));
  ASSERT_EQ(2u, ss.clones.size());
  ASSERT_EQ(my_snaps[1], ss.clones[0].cloneid);
  ASSERT_EQ(1u, ss.clones[0].snaps.size());
  ASSERT_EQ(my_snaps[1], ss.clones[0].snaps[0]);
  ASSERT_EQ(1u, ss.clones[0].overlap.size());
  ASSERT_EQ(0u, ss.clones[0].overlap[0].first);
  ASSERT_EQ((unsigned)bsize*3, ss.clones[0].overlap[0].second);
  ASSERT_EQ((unsigned)bsize*3, ss.clones[0].size);
  ASSERT_EQ(head, ss.clones[1].cloneid);
  ASSERT_EQ(0u, ss.clones[1].snaps.size());
  ASSERT_EQ(0u, ss.clones[1].overlap.size());
  ASSERT_EQ((unsigned)bsize*4, ss.clones[1].size);
  // roll back: the three 0xcc blocks return and the fourth block vanishes
  ioctx.selfmanaged_snap_rollback("foo", my_snaps[1]);
  bufferlist bl3;
  ASSERT_EQ(bsize, ioctx.read("foo", bl3, bsize, 0));
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf, bsize));
  ASSERT_EQ(bsize, ioctx.read("foo", bl3, bsize, bsize));
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf, bsize));
  ASSERT_EQ(bsize, ioctx.read("foo", bl3, bsize, bsize*2));
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf, bsize));
  ASSERT_EQ(0, ioctx.read("foo", bl3, bsize, bsize*3));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  readioctx.close();
  delete[] buf;
  delete[] buf2;
}
// reproduces http://tracker.ceph.com/issues/11677 on an EC pool:
// assert_exists+remove of a snapshotted object under a newer snap context.
TEST_F(LibRadosSnapshotsSelfManagedECPP, Bug11677) {
  SKIP_IF_CRIMSON();
  std::vector<uint64_t> my_snaps;
  my_snaps.push_back(-2);  // placeholder; filled in by snap_create
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  // set_write_ctx expects snaps newest-first; reverse around each call
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  const int bsize = alignment;  // EC pools require aligned writes
  // std::vector instead of raw new[]/delete[]: the buffer is released even
  // if an ASSERT_* aborts the test before the end of the body.
  std::vector<char> buf(bsize, (char)0xcc);
  bufferlist bl1;
  bl1.append(buf.data(), bsize);
  ASSERT_EQ(0, ioctx.write("foo", bl1, bsize, 0));
  my_snaps.push_back(-2);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  // compound op: the remove must only apply if the object exists
  librados::ObjectWriteOperation op;
  op.assert_exists();
  op.remove();
  ASSERT_EQ(0, ioctx.operate("foo", &op));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  ioctx.snap_set_read(LIBRADOS_SNAP_HEAD);
}
| 29,252 | 35.982301 | 91 | cc |
null | ceph-main/src/test/librados/snapshots_stats.cc | #include "include/rados.h"
#include "json_spirit/json_spirit.h"
#include "test/librados/test.h"
#include "test/librados/TestCase.h"
#include <algorithm>
#include <errno.h>
#include "gtest/gtest.h"
#include <string>
#include <vector>
using std::string;
class LibRadosSnapshotStatsSelfManaged : public RadosTest {
public:
LibRadosSnapshotStatsSelfManaged() {};
~LibRadosSnapshotStatsSelfManaged() override {};
protected:
void SetUp() override {
// disable pg autoscaler for the tests
string c =
"{"
"\"prefix\": \"config set\", "
"\"who\": \"global\", "
"\"name\": \"osd_pool_default_pg_autoscale_mode\", "
"\"value\": \"off\""
"}";
char *cmd[1];
cmd[0] = (char *)c.c_str();
std::cout << "Setting pg_autoscaler to 'off'" << std::endl;
ASSERT_EQ(0, rados_mon_command(s_cluster, (const char **)cmd, 1, "", 0, NULL,
0, NULL, 0));
// disable scrubs for the test
c = string("{\"prefix\": \"osd set\",\"key\":\"noscrub\"}");
cmd[0] = (char *)c.c_str();
ASSERT_EQ(0, rados_mon_command(s_cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0));
c = string("{\"prefix\": \"osd set\",\"key\":\"nodeep-scrub\"}");
cmd[0] = (char *)c.c_str();
ASSERT_EQ(0, rados_mon_command(s_cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0));
RadosTest::SetUp();
}
void TearDown() override {
// re-enable pg autoscaler
string c =
"{"
"\"prefix\": \"config set\", "
"\"who\": \"global\", "
"\"name\": \"osd_pool_default_pg_autoscale_mode\", "
"\"value\": \"on\""
"}";
char *cmd[1];
cmd[0] = (char *)c.c_str();
std::cout << "Setting pg_autoscaler to 'on'" << std::endl;
ASSERT_EQ(0, rados_mon_command(s_cluster, (const char **)cmd, 1, "", 0, NULL,
0, NULL, 0));
// re-enable scrubs
c = string("{\"prefix\": \"osd unset\",\"key\":\"noscrub\"}");
cmd[0] = (char *)c.c_str();
ASSERT_EQ(0, rados_mon_command(s_cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0));
c = string("{\"prefix\": \"osd unset\",\"key\":\"nodeep-scrub\"}");
cmd[0] = (char *)c.c_str();
ASSERT_EQ(0, rados_mon_command(s_cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0));
RadosTest::TearDown();
}
};
class LibRadosSnapshotStatsSelfManagedEC : public RadosTestEC {
public:
LibRadosSnapshotStatsSelfManagedEC() {};
~LibRadosSnapshotStatsSelfManagedEC() override {};
protected:
void SetUp() override {
// disable pg autoscaler for the tests
string c =
"{"
"\"prefix\": \"config set\", "
"\"who\": \"global\", "
"\"name\": \"osd_pool_default_pg_autoscale_mode\", "
"\"value\": \"off\""
"}";
char *cmd[1];
cmd[0] = (char *)c.c_str();
std::cout << "Setting pg_autoscaler to 'off'" << std::endl;
ASSERT_EQ(0, rados_mon_command(s_cluster, (const char **)cmd, 1, "", 0, NULL,
0, NULL, 0));
// disable scrubs for the test
c = string("{\"prefix\": \"osd set\",\"key\":\"noscrub\"}");
cmd[0] = (char *)c.c_str();
ASSERT_EQ(0, rados_mon_command(s_cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0));
c = string("{\"prefix\": \"osd set\",\"key\":\"nodeep-scrub\"}");
cmd[0] = (char *)c.c_str();
ASSERT_EQ(0, rados_mon_command(s_cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0));
RadosTestEC::SetUp();
}
void TearDown() override {
// re-enable pg autoscaler
string c =
"{"
"\"prefix\": \"config set\", "
"\"who\": \"global\", "
"\"name\": \"osd_pool_default_pg_autoscale_mode\", "
"\"value\": \"on\""
"}";
char *cmd[1];
cmd[0] = (char *)c.c_str();
std::cout << "Setting pg_autoscaler to 'on'" << std::endl;
ASSERT_EQ(0, rados_mon_command(s_cluster, (const char **)cmd, 1, "", 0, NULL,
0, NULL, 0));
// re-enable scrubs
c = string("{\"prefix\": \"osd unset\",\"key\":\"noscrub\"}");
cmd[0] = (char *)c.c_str();
ASSERT_EQ(0, rados_mon_command(s_cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0));
c = string("{\"prefix\": \"osd unset\",\"key\":\"nodeep-scrub\"}");
cmd[0] = (char *)c.c_str();
ASSERT_EQ(0, rados_mon_command(s_cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0));
RadosTestEC::TearDown();
}
};
// Accumulate snaptrim counters from the JSON produced by "pg dump".
//
// Drills down pg_dump -> "pg_map" -> "pg_stats" and, for every PG entry,
// adds its "objects_trimmed" to *objs_trimmed and its "snaptrim_duration"
// to *trim_duration.  If any level is absent the outputs are left alone.
void get_snaptrim_stats(json_spirit::Object& pg_dump,
                        int *objs_trimmed,
                        double *trim_duration) {
  // Locate "pg_map".  Keep a pointer instead of copying: a full "pg dump"
  // can be a very large JSON tree and the previous deep copies were pure
  // overhead.
  const json_spirit::Object* pgmap = nullptr;
  for (json_spirit::Pair& p : pg_dump) {
    if (p.name_ == "pg_map") {
      pgmap = &p.value_.get_obj();
      break;
    }
  }
  if (!pgmap) {
    return; // no "pg_map" -> nothing to accumulate
  }

  // Locate the "pg_stats" array inside pg_map.
  const json_spirit::Array* pgs = nullptr;
  for (const json_spirit::Pair& p : *pgmap) {
    if (p.name_ == "pg_stats") {
      pgs = &p.value_.get_array();
      break;
    }
  }
  if (!pgs) {
    return; // no "pg_stats" -> nothing to accumulate
  }

  // Sum the per-PG snaptrim stats.
  for (const json_spirit::Value& pg : *pgs) {
    const json_spirit::Object& pg_stat = pg.get_obj();
    for (const json_spirit::Pair& stats : pg_stat) {
      if (stats.name_ == "objects_trimmed") {
        *objs_trimmed += stats.value_.get_int();
      }
      if (stats.name_ == "snaptrim_duration") {
        *trim_duration += stats.value_.get_real();
      }
    }
  }
}
// Size of the per-object write buffer used by the replicated-pool test.
constexpr int bufsize = 128;
// Verify that removing self-managed snapshots populates the per-PG
// "objects_trimmed" and "snaptrim_duration" counters (replicated pool).
TEST_F(LibRadosSnapshotStatsSelfManaged, SnaptrimStats) {
  const int num_objs = 10;

  // Write the base objects.
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  for (int i = 0; i < num_objs; ++i) {
    string obj = string("foo") + std::to_string(i);
    ASSERT_EQ(0, rados_write(ioctx, obj.c_str(), buf, sizeof(buf), 0));
  }

  // Take one self-managed snapshot and overwrite every object so each one
  // gets a clone for snaptrim to remove later.
  std::vector<uint64_t> my_snaps;
  for (int snap = 0; snap < 1; ++snap) {
    std::vector<uint64_t> ns(1);
    ns.insert(ns.end(), my_snaps.begin(), my_snaps.end());
    my_snaps.swap(ns);
    ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_create(ioctx, &my_snaps[0]));
    ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_set_write_ctx(ioctx, my_snaps[0],
                                        &my_snaps[0], my_snaps.size()));
    char buf2[sizeof(buf)];
    memset(buf2, 0xdd, sizeof(buf2));
    for (int i = 0; i < num_objs; ++i) {
      string obj = string("foo") + std::to_string(i);
      ASSERT_EQ(0, rados_write(ioctx, obj.c_str(), buf2, sizeof(buf2), 0));
    }
  }

  // wait for maps to settle
  ASSERT_EQ(0, rados_wait_for_latest_osdmap(cluster));

  // remove snaps - should trigger snaptrim
  rados_ioctx_snap_set_read(ioctx, LIBRADOS_SNAP_HEAD);
  for (unsigned snap = 0; snap < my_snaps.size(); ++snap) {
    ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_remove(ioctx, my_snaps[snap]));
  }

  // sleep for few secs for the trim stats to populate
  std::cout << "Waiting for snaptrim stats to be generated" << std::endl;
  sleep(30);

  // Poll "pg dump" until every clone is reported trimmed (or we give up).
  int objects_trimmed = 0;
  double snaptrim_duration = 0.0;
  int tries = 0;
  do {
    char *outbuf = NULL;
    char *outs = NULL;
    size_t outbuflen = 0, outslen = 0;
    string c = string("{\"prefix\": \"pg dump\",\"format\":\"json\"}");
    const char *cmd = c.c_str();
    ASSERT_EQ(0, rados_mon_command(cluster, (const char **)&cmd, 1, "", 0,
                                   &outbuf, &outbuflen, &outs, &outslen));
    string outstr(outbuf, outbuflen);
    // The C API hands ownership of the output buffers to the caller; the
    // previous code leaked both on every iteration.
    rados_buffer_free(outbuf);
    rados_buffer_free(outs);

    json_spirit::Value v;
    ASSERT_NE(0, json_spirit::read(outstr, v)) << "unable to parse json."
                                               << '\n' << outstr;
    // pg dump object
    json_spirit::Object& obj = v.get_obj();
    get_snaptrim_stats(obj, &objects_trimmed, &snaptrim_duration);
    if (objects_trimmed < num_objs) {
      tries++;
      objects_trimmed = 0;
      std::cout << "Still waiting for all objects to be trimmed... " <<std::endl;
      sleep(30);
    }
  } while(objects_trimmed < num_objs && tries < 5);

  // final check for objects trimmed
  ASSERT_EQ(objects_trimmed, num_objs);
  std::cout << "Snaptrim duration: " << snaptrim_duration << std::endl;
  ASSERT_GT(snaptrim_duration, 0.0);

  // clean-up remaining objects
  for (int i = 0; i < num_objs; ++i) {
    string obj = string("foo") + std::to_string(i);
    ASSERT_EQ(0, rados_remove(ioctx, obj.c_str()));
  }
}
// EC testing
// Verify snaptrim stats on an erasure-coded pool.  Same flow as the
// replicated test, but writes are aligned to the pool's stripe alignment.
TEST_F(LibRadosSnapshotStatsSelfManagedEC, SnaptrimStats) {
  const int num_objs = 10;
  const int bsize = alignment;

  // std::vector releases the write buffer even when an ASSERT_* macro
  // returns early; the raw new[]/delete[] this replaces leaked in that case.
  std::vector<char> buf(bsize, (char)0xcc);

  // create objects
  for (int i = 0; i < num_objs; ++i) {
    string obj = string("foo") + std::to_string(i);
    ASSERT_EQ(0, rados_write(ioctx, obj.c_str(), buf.data(), bsize, 0));
  }

  std::vector<uint64_t> my_snaps;
  for (int snap = 0; snap < 1; ++snap) {
    // create a snapshot, clone
    std::vector<uint64_t> ns(1);
    ns.insert(ns.end(), my_snaps.begin(), my_snaps.end());
    my_snaps.swap(ns);
    ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_create(ioctx, &my_snaps[0]));
    ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_set_write_ctx(ioctx, my_snaps[0],
                                        &my_snaps[0], my_snaps.size()));
    std::vector<char> buf2(bsize, (char)0xdd);
    for (int i = 0; i < num_objs; ++i) {
      string obj = string("foo") + std::to_string(i);
      ASSERT_EQ(0, rados_write(ioctx, obj.c_str(), buf2.data(), bsize, bsize));
    }
  }

  // wait for maps to settle
  ASSERT_EQ(0, rados_wait_for_latest_osdmap(cluster));

  // remove snaps - should trigger snaptrim
  rados_ioctx_snap_set_read(ioctx, LIBRADOS_SNAP_HEAD);
  for (unsigned snap = 0; snap < my_snaps.size(); ++snap) {
    ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_remove(ioctx, my_snaps[snap]));
  }

  // sleep for few secs for the trim stats to populate
  std::cout << "Waiting for snaptrim stats to be generated" << std::endl;
  sleep(30);

  // Poll "pg dump" until every clone is reported trimmed (or we give up).
  int objects_trimmed = 0;
  double snaptrim_duration = 0.0;
  int tries = 0;
  do {
    char *outbuf = NULL;
    char *outs = NULL;
    size_t outbuflen = 0, outslen = 0;
    string c = string("{\"prefix\": \"pg dump\",\"format\":\"json\"}");
    const char *cmd = c.c_str();
    // Pass an explicit empty inbuf ("", 0) like the replicated-pool test
    // does, rather than the raw 0s used before.
    ASSERT_EQ(0, rados_mon_command(cluster, (const char **)&cmd, 1, "", 0,
                                   &outbuf, &outbuflen, &outs, &outslen));
    string outstr(outbuf, outbuflen);
    // rados_mon_command output buffers are owned by the caller; free them
    // (they used to leak on every iteration).
    rados_buffer_free(outbuf);
    rados_buffer_free(outs);

    json_spirit::Value v;
    ASSERT_NE(0, json_spirit::read(outstr, v)) << "unable to parse json."
                                               << '\n' << outstr;
    // pg dump object
    json_spirit::Object& obj = v.get_obj();
    get_snaptrim_stats(obj, &objects_trimmed, &snaptrim_duration);
    if (objects_trimmed < num_objs) {
      tries++;
      objects_trimmed = 0;
      std::cout << "Still waiting for all objects to be trimmed... " <<std::endl;
      sleep(30);
    }
  } while (objects_trimmed < num_objs && tries < 5);

  // final check for objects trimmed
  ASSERT_EQ(objects_trimmed, num_objs);
  std::cout << "Snaptrim duration: " << snaptrim_duration << std::endl;
  ASSERT_GT(snaptrim_duration, 0.0);

  // clean-up remaining objects
  for (int i = 0; i < num_objs; ++i) {
    string obj = string("foo") + std::to_string(i);
    ASSERT_EQ(0, rados_remove(ioctx, obj.c_str()));
  }
}
| 11,177 | 32.567568 | 95 | cc |
null | ceph-main/src/test/librados/snapshots_stats_cxx.cc | #include <algorithm>
#include <errno.h>
#include <string>
#include <vector>
#include "gtest/gtest.h"
#include "include/rados.h"
#include "include/rados/librados.hpp"
#include "json_spirit/json_spirit.h"
#include "test/librados/test_cxx.h"
#include "test/librados/testcase_cxx.h"
using namespace librados;
using std::string;
class LibRadosSnapshotStatsSelfManagedPP : public RadosTestPP {
public:
LibRadosSnapshotStatsSelfManagedPP() {};
~LibRadosSnapshotStatsSelfManagedPP() override {};
protected:
void SetUp() override {
// disable pg autoscaler for the tests
string cmd =
"{"
"\"prefix\": \"config set\", "
"\"who\": \"global\", "
"\"name\": \"osd_pool_default_pg_autoscale_mode\", "
"\"value\": \"off\""
"}";
std::cout << "Setting pg_autoscaler to 'off'" << std::endl;
bufferlist inbl;
bufferlist outbl;
ASSERT_EQ(0, s_cluster.mon_command(cmd, inbl, &outbl, NULL));
// disable scrubs for the test
cmd = "{\"prefix\": \"osd set\",\"key\":\"noscrub\"}";
ASSERT_EQ(0, s_cluster.mon_command(cmd, inbl, &outbl, NULL));
cmd = "{\"prefix\": \"osd set\",\"key\":\"nodeep-scrub\"}";
ASSERT_EQ(0, s_cluster.mon_command(cmd, inbl, &outbl, NULL));
RadosTestPP::SetUp();
}
void TearDown() override {
// re-enable pg autoscaler
string cmd =
"{"
"\"prefix\": \"config set\", "
"\"who\": \"global\", "
"\"name\": \"osd_pool_default_pg_autoscale_mode\", "
"\"value\": \"on\""
"}";
std::cout << "Setting pg_autoscaler to 'on'" << std::endl;
bufferlist inbl;
bufferlist outbl;
ASSERT_EQ(0, s_cluster.mon_command(cmd, inbl, &outbl, NULL));
// re-enable scrubs
cmd = "{\"prefix\": \"osd unset\",\"key\":\"noscrub\"}";
ASSERT_EQ(0, s_cluster.mon_command(cmd, inbl, &outbl, NULL));
cmd = string("{\"prefix\": \"osd unset\",\"key\":\"nodeep-scrub\"}");
ASSERT_EQ(0, s_cluster.mon_command(cmd, inbl, &outbl, NULL));
RadosTestPP::TearDown();
}
};
class LibRadosSnapshotStatsSelfManagedECPP : public RadosTestECPP {
public:
LibRadosSnapshotStatsSelfManagedECPP() {};
~LibRadosSnapshotStatsSelfManagedECPP() override {};
protected:
void SetUp() override {
// disable pg autoscaler for the tests
string cmd =
"{"
"\"prefix\": \"config set\", "
"\"who\": \"global\", "
"\"name\": \"osd_pool_default_pg_autoscale_mode\", "
"\"value\": \"off\""
"}";
std::cout << "Setting pg_autoscaler to 'off'" << std::endl;
bufferlist inbl;
bufferlist outbl;
ASSERT_EQ(0, s_cluster.mon_command(cmd, inbl, &outbl, NULL));
// disable scrubs for the test
cmd = string("{\"prefix\": \"osd set\",\"key\":\"noscrub\"}");
ASSERT_EQ(0, s_cluster.mon_command(cmd, inbl, &outbl, NULL));
cmd = string("{\"prefix\": \"osd set\",\"key\":\"nodeep-scrub\"}");
ASSERT_EQ(0, s_cluster.mon_command(cmd, inbl, &outbl, NULL));
RadosTestECPP::SetUp();
}
void TearDown() override {
// re-enable pg autoscaler
string cmd =
"{"
"\"prefix\": \"config set\", "
"\"who\": \"global\", "
"\"name\": \"osd_pool_default_pg_autoscale_mode\", "
"\"value\": \"on\""
"}";
std::cout << "Setting pg_autoscaler to 'on'" << std::endl;
bufferlist inbl;
bufferlist outbl;
ASSERT_EQ(0, s_cluster.mon_command(cmd, inbl, &outbl, NULL));
// re-enable scrubs
cmd = string("{\"prefix\": \"osd unset\",\"key\":\"noscrub\"}");
ASSERT_EQ(0, s_cluster.mon_command(cmd, inbl, &outbl, NULL));
cmd = string("{\"prefix\": \"osd unset\",\"key\":\"nodeep-scrub\"}");
ASSERT_EQ(0, s_cluster.mon_command(cmd, inbl, &outbl, NULL));
RadosTestECPP::TearDown();
}
};
// Accumulate snaptrim counters from the JSON produced by "pg dump".
//
// Drills down pg_dump -> "pg_map" -> "pg_stats" and, for every PG entry,
// adds its "objects_trimmed" to *objs_trimmed and its "snaptrim_duration"
// to *trim_duration.  If any level is absent the outputs are left alone.
void get_snaptrim_stats(json_spirit::Object& pg_dump,
                        int *objs_trimmed,
                        double *trim_duration) {
  // Locate "pg_map".  Keep a pointer instead of copying: a full "pg dump"
  // can be a very large JSON tree and the previous deep copies were pure
  // overhead.
  const json_spirit::Object* pgmap = nullptr;
  for (json_spirit::Pair& p : pg_dump) {
    if (p.name_ == "pg_map") {
      pgmap = &p.value_.get_obj();
      break;
    }
  }
  if (!pgmap) {
    return; // no "pg_map" -> nothing to accumulate
  }

  // Locate the "pg_stats" array inside pg_map.
  const json_spirit::Array* pgs = nullptr;
  for (const json_spirit::Pair& p : *pgmap) {
    if (p.name_ == "pg_stats") {
      pgs = &p.value_.get_array();
      break;
    }
  }
  if (!pgs) {
    return; // no "pg_stats" -> nothing to accumulate
  }

  // Sum the per-PG snaptrim stats.
  for (const json_spirit::Value& pg : *pgs) {
    const json_spirit::Object& pg_stat = pg.get_obj();
    for (const json_spirit::Pair& stats : pg_stat) {
      if (stats.name_ == "objects_trimmed") {
        *objs_trimmed += stats.value_.get_int();
      }
      if (stats.name_ == "snaptrim_duration") {
        *trim_duration += stats.value_.get_real();
      }
    }
  }
}
// Size of the per-object write buffer used by the replicated-pool test.
constexpr int bufsize = 128;
// Verify that removing self-managed snapshots populates the per-PG
// "objects_trimmed" and "snaptrim_duration" counters (replicated pool,
// C++ API).
TEST_F(LibRadosSnapshotStatsSelfManagedPP, SnaptrimStatsPP) {
  const int num_objs = 10;

  // create objects
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  for (int i = 0; i < num_objs; ++i) {
    string obj = string("foo") + std::to_string(i);
    ASSERT_EQ(0, ioctx.write(obj, bl, sizeof(buf), 0));
  }

  // Take one self-managed snapshot and overwrite every object so each one
  // gets a clone for snaptrim to remove later.
  std::vector<uint64_t> my_snaps;
  char buf2[sizeof(buf)];
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  for (int snap = 0; snap < 1; ++snap) {
    std::vector<uint64_t> ns(1);
    ns.insert(ns.end(), my_snaps.begin(), my_snaps.end());
    my_snaps.swap(ns);
    ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
    ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
    for (int i = 0; i < num_objs; ++i) {
      string obj = string("foo") + std::to_string(i);
      ASSERT_EQ(0, ioctx.write(obj, bl2, sizeof(buf2), 0));
    }
  }

  // wait for maps to settle; assert the return code like the C version of
  // this test does instead of silently ignoring it
  ASSERT_EQ(0, cluster.wait_for_latest_osdmap());

  // remove snaps - should trigger snaptrim
  for (unsigned snap = 0; snap < my_snaps.size(); ++snap) {
    ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps[snap]));
  }

  // sleep for few secs for the trim stats to populate
  std::cout << "Waiting for snaptrim stats to be generated" << std::endl;
  sleep(30);

  // Poll "pg dump" until every clone is reported trimmed (or we give up).
  int objects_trimmed = 0;
  double snaptrim_duration = 0.0;
  int tries = 0;
  do {
    string cmd = string("{\"prefix\": \"pg dump\",\"format\":\"json\"}");
    bufferlist inbl;
    bufferlist outbl;
    ASSERT_EQ(0, cluster.mon_command(cmd, inbl, &outbl, NULL));
    string outstr(outbl.c_str(), outbl.length());
    json_spirit::Value v;
    ASSERT_NE(0, json_spirit::read(outstr, v)) << "unable to parse json." << '\n' << outstr;
    // pg_map
    json_spirit::Object& obj = v.get_obj();
    get_snaptrim_stats(obj, &objects_trimmed, &snaptrim_duration);
    if (objects_trimmed < num_objs) {
      tries++;
      objects_trimmed = 0;
      std::cout << "Still waiting for all objects to be trimmed... " <<std::endl;
      sleep(30);
    }
  } while(objects_trimmed < num_objs && tries < 5);

  // final check for objects trimmed
  ASSERT_EQ(objects_trimmed, num_objs);
  std::cout << "Snaptrim duration: " << snaptrim_duration << std::endl;
  ASSERT_GT(snaptrim_duration, 0.0);

  // clean-up remaining objects
  ioctx.snap_set_read(librados::SNAP_HEAD);
  for (int i = 0; i < num_objs; ++i) {
    string obj = string("foo") + std::to_string(i);
    ASSERT_EQ(0, ioctx.remove(obj));
  }
}
// EC testing
// Verify snaptrim stats on an erasure-coded pool (C++ API).  Same flow as
// the replicated test, but writes are aligned to the pool's stripe
// alignment.
TEST_F(LibRadosSnapshotStatsSelfManagedECPP, SnaptrimStatsECPP) {
  const int num_objs = 10;
  const int bsize = alignment;

  // std::vector releases the buffers even when an ASSERT_* macro returns
  // early; the raw new[]/delete[] pairs this replaces leaked in that case.
  std::vector<char> buf(bsize, (char)0xcc);
  bufferlist bl;
  bl.append(buf.data(), bsize);
  for (int i = 0; i < num_objs; ++i) {
    string obj = string("foo") + std::to_string(i);
    ASSERT_EQ(0, ioctx.write(obj, bl, bsize, 0));
  }

  // Take one self-managed snapshot and overwrite every object (at an
  // aligned offset) so each one gets a clone for snaptrim to remove.
  std::vector<uint64_t> my_snaps;
  std::vector<char> buf2(bsize, (char)0xdd);
  bufferlist bl2;
  bl2.append(buf2.data(), bsize);
  for (int snap = 0; snap < 1; ++snap) {
    std::vector<uint64_t> ns(1);
    ns.insert(ns.end(), my_snaps.begin(), my_snaps.end());
    my_snaps.swap(ns);
    ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
    ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
    for (int i = 0; i < num_objs; ++i) {
      string obj = string("foo") + std::to_string(i);
      ASSERT_EQ(0, ioctx.write(obj, bl2, bsize, bsize));
    }
  }

  // wait for maps to settle; assert the return code like the C version of
  // this test does instead of silently ignoring it
  ASSERT_EQ(0, cluster.wait_for_latest_osdmap());

  // remove snaps - should trigger snaptrim
  for (unsigned snap = 0; snap < my_snaps.size(); ++snap) {
    ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps[snap]));
  }

  // sleep for few secs for the trim stats to populate
  std::cout << "Waiting for snaptrim stats to be generated" << std::endl;
  sleep(30);

  // Poll "pg dump" until every clone is reported trimmed (or we give up).
  int objects_trimmed = 0;
  double snaptrim_duration = 0.0;
  int tries = 0;
  do {
    string cmd = string("{\"prefix\": \"pg dump\",\"format\":\"json\"}");
    bufferlist inbl;
    bufferlist outbl;
    ASSERT_EQ(0, cluster.mon_command(cmd, inbl, &outbl, NULL));
    string outstr(outbl.c_str(), outbl.length());
    json_spirit::Value v;
    ASSERT_NE(0, json_spirit::read(outstr, v)) << "unable to parse json." << '\n' << outstr;
    // pg_map
    json_spirit::Object& obj = v.get_obj();
    get_snaptrim_stats(obj, &objects_trimmed, &snaptrim_duration);
    if (objects_trimmed < num_objs) {
      tries++;
      objects_trimmed = 0;
      std::cout << "Still waiting for all objects to be trimmed... " <<std::endl;
      sleep(30);
    }
  } while(objects_trimmed < num_objs && tries < 5);

  // final check for objects trimmed
  ASSERT_EQ(objects_trimmed, num_objs);
  std::cout << "Snaptrim duration: " << snaptrim_duration << std::endl;
  ASSERT_GT(snaptrim_duration, 0.0);

  // clean-up remaining objects
  ioctx.snap_set_read(LIBRADOS_SNAP_HEAD);
  for (int i = 0; i < num_objs; ++i) {
    string obj = string("foo") + std::to_string(i);
    ASSERT_EQ(0, ioctx.remove(obj));
  }
}
| 10,251 | 30.544615 | 92 | cc |
null | ceph-main/src/test/librados/stat.cc | #include "include/rados/librados.h"
#include "test/librados/test.h"
#include "test/librados/TestCase.h"
#include "common/ceph_time.h"
#include <algorithm>
#include <errno.h>
#include "gtest/gtest.h"
#include "crimson_utils.h"
typedef RadosTest LibRadosStat;
typedef RadosTestEC LibRadosStatEC;
TEST_F(LibRadosStat, Stat) {
char buf[128];
memset(buf, 0xcc, sizeof(buf));
ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
uint64_t size = 0;
time_t mtime = 0;
ASSERT_EQ(0, rados_stat(ioctx, "foo", &size, &mtime));
ASSERT_EQ(sizeof(buf), size);
ASSERT_EQ(-ENOENT, rados_stat(ioctx, "nonexistent", &size, &mtime));
}
TEST_F(LibRadosStat, Stat2) {
char buf[128];
memset(buf, 0xcc, sizeof(buf));
rados_write_op_t op = rados_create_write_op();
rados_write_op_write(op, buf, sizeof(buf), 0);
struct timespec ts;
ts.tv_sec = 1457129052;
ts.tv_nsec = 123456789;
ASSERT_EQ(0, rados_write_op_operate2(op, ioctx, "foo", &ts, 0));
rados_release_write_op(op);
uint64_t size = 0;
time_t mtime = 0;
ASSERT_EQ(0, rados_stat(ioctx, "foo", &size, &mtime));
ASSERT_EQ(sizeof(buf), size);
ASSERT_EQ(mtime, ts.tv_sec);
struct timespec ts2 = {};
ASSERT_EQ(0, rados_stat2(ioctx, "foo", &size, &ts2));
ASSERT_EQ(sizeof(buf), size);
ASSERT_EQ(ts2.tv_sec, ts.tv_sec);
ASSERT_EQ(ts2.tv_nsec, ts.tv_nsec);
ASSERT_EQ(-ENOENT, rados_stat2(ioctx, "nonexistent", &size, &ts2));
}
TEST_F(LibRadosStat, StatNS) {
char buf[128];
memset(buf, 0xcc, sizeof(buf));
rados_ioctx_set_namespace(ioctx, "");
ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
ASSERT_EQ(0, rados_write(ioctx, "foo2", buf, sizeof(buf), 0));
char buf2[64];
memset(buf2, 0xcc, sizeof(buf2));
rados_ioctx_set_namespace(ioctx, "nspace");
ASSERT_EQ(0, rados_write(ioctx, "foo", buf2, sizeof(buf2), 0));
uint64_t size = 0;
time_t mtime = 0;
rados_ioctx_set_namespace(ioctx, "");
ASSERT_EQ(0, rados_stat(ioctx, "foo", &size, &mtime));
ASSERT_EQ(sizeof(buf), size);
ASSERT_EQ(-ENOENT, rados_stat(ioctx, "nonexistent", &size, &mtime));
rados_ioctx_set_namespace(ioctx, "nspace");
ASSERT_EQ(0, rados_stat(ioctx, "foo", &size, &mtime));
ASSERT_EQ(sizeof(buf2), size);
ASSERT_EQ(-ENOENT, rados_stat(ioctx, "nonexistent", &size, &mtime));
ASSERT_EQ(-ENOENT, rados_stat(ioctx, "foo2", &size, &mtime));
}
TEST_F(LibRadosStat, ClusterStat) {
struct rados_cluster_stat_t result;
ASSERT_EQ(0, rados_cluster_stat(cluster, &result));
}
TEST_F(LibRadosStat, PoolStat) {
char buf[128];
char actual_pool_name[80];
unsigned l = rados_ioctx_get_pool_name(ioctx, actual_pool_name, sizeof(actual_pool_name));
ASSERT_EQ(strlen(actual_pool_name), l);
ASSERT_EQ(0, strcmp(actual_pool_name, pool_name.c_str()));
memset(buf, 0xff, sizeof(buf));
ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
struct rados_pool_stat_t stats;
memset(&stats, 0, sizeof(stats));
ASSERT_EQ(0, rados_ioctx_pool_stat(ioctx, &stats));
}
TEST_F(LibRadosStatEC, Stat) {
SKIP_IF_CRIMSON();
char buf[128];
memset(buf, 0xcc, sizeof(buf));
ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
uint64_t size = 0;
time_t mtime = 0;
ASSERT_EQ(0, rados_stat(ioctx, "foo", &size, &mtime));
ASSERT_EQ(sizeof(buf), size);
ASSERT_EQ(-ENOENT, rados_stat(ioctx, "nonexistent", &size, &mtime));
}
TEST_F(LibRadosStatEC, StatNS) {
SKIP_IF_CRIMSON();
char buf[128];
memset(buf, 0xcc, sizeof(buf));
rados_ioctx_set_namespace(ioctx, "");
ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
ASSERT_EQ(0, rados_write(ioctx, "foo2", buf, sizeof(buf), 0));
char buf2[64];
memset(buf2, 0xcc, sizeof(buf2));
rados_ioctx_set_namespace(ioctx, "nspace");
ASSERT_EQ(0, rados_write(ioctx, "foo", buf2, sizeof(buf2), 0));
uint64_t size = 0;
time_t mtime = 0;
rados_ioctx_set_namespace(ioctx, "");
ASSERT_EQ(0, rados_stat(ioctx, "foo", &size, &mtime));
ASSERT_EQ(sizeof(buf), size);
ASSERT_EQ(-ENOENT, rados_stat(ioctx, "nonexistent", &size, &mtime));
rados_ioctx_set_namespace(ioctx, "nspace");
ASSERT_EQ(0, rados_stat(ioctx, "foo", &size, &mtime));
ASSERT_EQ(sizeof(buf2), size);
ASSERT_EQ(-ENOENT, rados_stat(ioctx, "nonexistent", &size, &mtime));
ASSERT_EQ(-ENOENT, rados_stat(ioctx, "foo2", &size, &mtime));
}
TEST_F(LibRadosStatEC, ClusterStat) {
SKIP_IF_CRIMSON();
struct rados_cluster_stat_t result;
ASSERT_EQ(0, rados_cluster_stat(cluster, &result));
}
TEST_F(LibRadosStatEC, PoolStat) {
SKIP_IF_CRIMSON();
char buf[128];
char actual_pool_name[80];
unsigned l = rados_ioctx_get_pool_name(ioctx, actual_pool_name, sizeof(actual_pool_name));
ASSERT_EQ(strlen(actual_pool_name), l);
ASSERT_EQ(0, strcmp(actual_pool_name, pool_name.c_str()));
memset(buf, 0xff, sizeof(buf));
ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
struct rados_pool_stat_t stats;
memset(&stats, 0, sizeof(stats));
ASSERT_EQ(0, rados_ioctx_pool_stat(ioctx, &stats));
}
| 4,992 | 31.422078 | 92 | cc |
null | ceph-main/src/test/librados/stat_cxx.cc | #include "gtest/gtest.h"
#include "include/rados/librados.hpp"
#include "test/librados/test_cxx.h"
#include "test/librados/testcase_cxx.h"
#include "crimson_utils.h"
using namespace librados;
typedef RadosTestPP LibRadosStatPP;
typedef RadosTestECPP LibRadosStatECPP;
TEST_F(LibRadosStatPP, StatPP) {
char buf[128];
memset(buf, 0xcc, sizeof(buf));
bufferlist bl;
bl.append(buf, sizeof(buf));
ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));
uint64_t size;
time_t mtime;
ASSERT_EQ(0, ioctx.stat("foo", &size, &mtime));
ASSERT_EQ(sizeof(buf), size);
ASSERT_EQ(-ENOENT, ioctx.stat("nonexistent", &size, &mtime));
}
TEST_F(LibRadosStatPP, Stat2Mtime2PP) {
char buf[128];
memset(buf, 0xcc, sizeof(buf));
bufferlist bl;
bl.append(buf, sizeof(buf));
librados::ObjectWriteOperation op;
struct timespec ts;
ts.tv_sec = 1457129052;
ts.tv_nsec = 123456789;
op.mtime2(&ts);
op.write(0, bl);
ASSERT_EQ(0, ioctx.operate("foo", &op));
/* XXX time comparison asserts could spuriously fail */
uint64_t size;
time_t mtime;
ASSERT_EQ(0, ioctx.stat("foo", &size, &mtime));
ASSERT_EQ(sizeof(buf), size);
ASSERT_EQ(mtime, ts.tv_sec);
struct timespec ts2;
ASSERT_EQ(0, ioctx.stat2("foo", &size, &ts2));
ASSERT_EQ(sizeof(buf), size);
ASSERT_EQ(ts2.tv_sec, ts.tv_sec);
ASSERT_EQ(ts2.tv_nsec, ts.tv_nsec);
ASSERT_EQ(-ENOENT, ioctx.stat2("nonexistent", &size, &ts2));
}
TEST_F(LibRadosStatPP, ClusterStatPP) {
cluster_stat_t cstat;
ASSERT_EQ(0, cluster.cluster_stat(cstat));
}
TEST_F(LibRadosStatPP, PoolStatPP) {
std::string n = ioctx.get_pool_name();
ASSERT_EQ(n, pool_name);
char buf[128];
memset(buf, 0xff, sizeof(buf));
bufferlist bl1;
bl1.append(buf, sizeof(buf));
ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
std::list<std::string> v;
std::map<std::string,stats_map> stats;
ASSERT_EQ(0, cluster.get_pool_stats(v, stats));
}
TEST_F(LibRadosStatECPP, StatPP) {
SKIP_IF_CRIMSON();
char buf[128];
memset(buf, 0xcc, sizeof(buf));
bufferlist bl;
bl.append(buf, sizeof(buf));
ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));
uint64_t size;
time_t mtime;
ASSERT_EQ(0, ioctx.stat("foo", &size, &mtime));
ASSERT_EQ(sizeof(buf), size);
ASSERT_EQ(-ENOENT, ioctx.stat("nonexistent", &size, &mtime));
}
TEST_F(LibRadosStatECPP, ClusterStatPP) {
SKIP_IF_CRIMSON();
cluster_stat_t cstat;
ASSERT_EQ(0, cluster.cluster_stat(cstat));
}
TEST_F(LibRadosStatECPP, PoolStatPP) {
SKIP_IF_CRIMSON();
std::string n = ioctx.get_pool_name();
ASSERT_EQ(n, pool_name);
char buf[128];
memset(buf, 0xff, sizeof(buf));
bufferlist bl1;
bl1.append(buf, sizeof(buf));
ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
std::list<std::string> v;
std::map<std::string,stats_map> stats;
ASSERT_EQ(0, cluster.get_pool_stats(v, stats));
}
TEST_F(LibRadosStatPP, StatPPNS) {
char buf[128];
memset(buf, 0xcc, sizeof(buf));
bufferlist bl;
bl.append(buf, sizeof(buf));
ioctx.set_namespace("");
ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));
ASSERT_EQ(0, ioctx.write("foo2", bl, sizeof(buf), 0));
char buf2[64];
memset(buf2, 0xbb, sizeof(buf2));
bufferlist bl2;
bl2.append(buf2, sizeof(buf2));
ioctx.set_namespace("nspace");
ASSERT_EQ(0, ioctx.write("foo", bl2, sizeof(buf2), 0));
uint64_t size;
time_t mtime;
ioctx.set_namespace("");
ASSERT_EQ(0, ioctx.stat("foo", &size, &mtime));
ASSERT_EQ(sizeof(buf), size);
ASSERT_EQ(-ENOENT, ioctx.stat("nonexistent", &size, &mtime));
ioctx.set_namespace("nspace");
ASSERT_EQ(0, ioctx.stat("foo", &size, &mtime));
ASSERT_EQ(sizeof(buf2), size);
ASSERT_EQ(-ENOENT, ioctx.stat("nonexistent", &size, &mtime));
ASSERT_EQ(-ENOENT, ioctx.stat("foo2", &size, &mtime));
}
TEST_F(LibRadosStatECPP, StatPPNS) {
SKIP_IF_CRIMSON();
char buf[128];
memset(buf, 0xcc, sizeof(buf));
bufferlist bl;
bl.append(buf, sizeof(buf));
ioctx.set_namespace("");
ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));
ASSERT_EQ(0, ioctx.write("foo2", bl, sizeof(buf), 0));
char buf2[64];
memset(buf2, 0xbb, sizeof(buf2));
bufferlist bl2;
bl2.append(buf2, sizeof(buf2));
ioctx.set_namespace("nspace");
ASSERT_EQ(0, ioctx.write("foo", bl2, sizeof(buf2), 0));
uint64_t size;
time_t mtime;
ioctx.set_namespace("");
ASSERT_EQ(0, ioctx.stat("foo", &size, &mtime));
ASSERT_EQ(sizeof(buf), size);
ASSERT_EQ(-ENOENT, ioctx.stat("nonexistent", &size, &mtime));
ioctx.set_namespace("nspace");
ASSERT_EQ(0, ioctx.stat("foo", &size, &mtime));
ASSERT_EQ(sizeof(buf2), size);
ASSERT_EQ(-ENOENT, ioctx.stat("nonexistent", &size, &mtime));
ASSERT_EQ(-ENOENT, ioctx.stat("foo2", &size, &mtime));
}
| 4,718 | 26.923077 | 63 | cc |
null | ceph-main/src/test/librados/test.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*
// vim: ts=8 sw=2 smarttab
#include "include/rados/librados.h"
#include "include/rados/librados.hpp"
#include "test/librados/test.h"
#include "include/stringify.h"
#include "common/ceph_context.h"
#include "common/config.h"
#include <errno.h>
#include <sstream>
#include <stdlib.h>
#include <string>
#include <time.h>
#include <unistd.h>
#include <iostream>
#include "gtest/gtest.h"
std::string create_one_pool(
const std::string &pool_name, rados_t *cluster, uint32_t pg_num)
{
std::string err_str = connect_cluster(cluster);
if (err_str.length())
return err_str;
int ret = rados_pool_create(*cluster, pool_name.c_str());
if (ret) {
rados_shutdown(*cluster);
std::ostringstream oss;
oss << "create_one_pool(" << pool_name << ") failed with error " << ret;
return oss.str();
}
rados_ioctx_t ioctx;
ret = rados_ioctx_create(*cluster, pool_name.c_str(), &ioctx);
if (ret < 0) {
rados_shutdown(*cluster);
std::ostringstream oss;
oss << "rados_ioctx_create(" << pool_name << ") failed with error " << ret;
return oss.str();
}
rados_application_enable(ioctx, "rados", 1);
rados_ioctx_destroy(ioctx);
return "";
}
int destroy_ec_profile(rados_t *cluster,
const std::string& pool_name,
std::ostream &oss)
{
char buf[1000];
snprintf(buf, sizeof(buf),
"{\"prefix\": \"osd erasure-code-profile rm\", \"name\": \"testprofile-%s\"}",
pool_name.c_str());
char *cmd[2];
cmd[0] = buf;
cmd[1] = NULL;
int ret = rados_mon_command(*cluster, (const char **)cmd, 1, "", 0, NULL,
0, NULL, 0);
if (ret)
oss << "rados_mon_command: erasure-code-profile rm testprofile-"
<< pool_name << " failed with error " << ret;
return ret;
}
int destroy_rule(rados_t *cluster,
const std::string &rule,
std::ostream &oss)
{
char *cmd[2];
std::string tmp = ("{\"prefix\": \"osd crush rule rm\", \"name\":\"" +
rule + "\"}");
cmd[0] = (char*)tmp.c_str();
cmd[1] = NULL;
int ret = rados_mon_command(*cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0);
if (ret)
oss << "rados_mon_command: osd crush rule rm " + rule + " failed with error " << ret;
return ret;
}
int destroy_ec_profile_and_rule(rados_t *cluster,
const std::string &rule,
std::ostream &oss)
{
int ret;
ret = destroy_ec_profile(cluster, rule, oss);
if (ret)
return ret;
return destroy_rule(cluster, rule, oss);
}
std::string create_one_ec_pool(const std::string &pool_name, rados_t *cluster)
{
std::string err = connect_cluster(cluster);
if (err.length())
return err;
std::ostringstream oss;
int ret = destroy_ec_profile_and_rule(cluster, pool_name, oss);
if (ret) {
rados_shutdown(*cluster);
return oss.str();
}
char *cmd[2];
cmd[1] = NULL;
std::string profile_create = "{\"prefix\": \"osd erasure-code-profile set\", \"name\": \"testprofile-" + pool_name + "\", \"profile\": [ \"k=2\", \"m=1\", \"crush-failure-domain=osd\"]}";
cmd[0] = (char *)profile_create.c_str();
ret = rados_mon_command(*cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0);
if (ret) {
rados_shutdown(*cluster);
oss << "rados_mon_command erasure-code-profile set name:testprofile-" << pool_name << " failed with error " << ret;
return oss.str();
}
std::string cmdstr = "{\"prefix\": \"osd pool create\", \"pool\": \"" +
pool_name + "\", \"pool_type\":\"erasure\", \"pg_num\":8, \"pgp_num\":8, \"erasure_code_profile\":\"testprofile-" + pool_name + "\"}";
cmd[0] = (char *)cmdstr.c_str();
ret = rados_mon_command(*cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0);
if (ret) {
destroy_ec_profile(cluster, pool_name, oss);
rados_shutdown(*cluster);
oss << "rados_mon_command osd pool create failed with error " << ret;
return oss.str();
}
rados_wait_for_latest_osdmap(*cluster);
return "";
}
// Create and connect a rados_t handle using the default config file plus
// environment overrides; the client id may be overridden via
// CEPH_CLIENT_ID.  Returns "" on success or an error string (the handle is
// shut down on any failure after creation).
std::string connect_cluster(rados_t *cluster)
{
  char *id = getenv("CEPH_CLIENT_ID");
  if (id) std::cerr << "Client id is: " << id << std::endl;
  int ret;
  ret = rados_create(cluster, NULL);
  if (ret) {
    std::ostringstream oss;
    oss << "rados_create failed with error " << ret;
    return oss.str();
  }
  ret = rados_conf_read_file(*cluster, NULL);
  if (ret) {
    rados_shutdown(*cluster);
    std::ostringstream oss;
    oss << "rados_conf_read_file failed with error " << ret;
    return oss.str();
  }
  // env settings win over the config file; failures here are ignored
  rados_conf_parse_env(*cluster, NULL);
  ret = rados_connect(*cluster);
  if (ret) {
    rados_shutdown(*cluster);
    std::ostringstream oss;
    oss << "rados_connect failed with error " << ret;
    return oss.str();
  }
  return "";
}
// Delete "pool_name" and shut the cluster handle down unconditionally.
// Returns the rados_pool_delete() result (0 on success).
int destroy_one_pool(const std::string &pool_name, rados_t *cluster)
{
  const int ret = rados_pool_delete(*cluster, pool_name.c_str());
  rados_shutdown(*cluster);
  return ret;
}
// Delete the EC pool "pool_name" plus its test profile and CRUSH rule
// (unless mon_fake_pool_delete is set, in which case the profile/rule must
// stay because the pool is only renamed).  Shuts the handle down on every
// path and returns the first error encountered.
int destroy_one_ec_pool(const std::string &pool_name, rados_t *cluster)
{
  int ret = rados_pool_delete(*cluster, pool_name.c_str());
  if (ret) {
    rados_shutdown(*cluster);
    return ret;
  }
  CephContext *cct = static_cast<CephContext*>(rados_cct(*cluster));
  if (!cct->_conf->mon_fake_pool_delete) { // hope this is in [global]
    std::ostringstream oss;
    ret = destroy_ec_profile_and_rule(cluster, pool_name, oss);
    if (ret) {
      rados_shutdown(*cluster);
      return ret;
    }
  }
  // let the deletion settle before the next test reuses the name
  rados_wait_for_latest_osdmap(*cluster);
  rados_shutdown(*cluster);
  return ret;
}
| 5,555 | 26.919598 | 189 | cc |
null | ceph-main/src/test/librados/test.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_TEST_RADOS_API_TEST_H
#define CEPH_TEST_RADOS_API_TEST_H
#include "include/rados/librados.h"
#include "test/librados/test_shared.h"
#include <map>
#include <string>
#include <unistd.h>
std::string create_one_pool(const std::string &pool_name, rados_t *cluster,
uint32_t pg_num=0);
std::string create_one_ec_pool(const std::string &pool_name, rados_t *cluster);
std::string connect_cluster(rados_t *cluster);
int destroy_one_pool(const std::string &pool_name, rados_t *cluster);
int destroy_one_ec_pool(const std::string &pool_name, rados_t *cluster);
#endif
| 988 | 28.969697 | 79 | h |
null | ceph-main/src/test/librados/test_common.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/Formatter.h"
#include "include/stringify.h"
#include "json_spirit/json_spirit.h"
#include "test_common.h"
using namespace std;
namespace {
using namespace ceph;
// Poll the mon "status" command (JSON) once a second until every PG is in
// a single "active+clean" state.  Returns 0 when healthy, -ETIMEDOUT after
// one hour, or the rados_mon_command() error.
int wait_for_healthy(rados_t *cluster)
{
  bool healthy = false;
  // This timeout is very long because the tests are sometimes
  // run on a thrashing cluster
  int timeout = 3600;
  int slept = 0;
  while(!healthy) {
    // build {"prefix":"status","format":"json"}
    JSONFormatter cmd_f;
    cmd_f.open_object_section("command");
    cmd_f.dump_string("prefix", "status");
    cmd_f.dump_string("format", "json");
    cmd_f.close_section();
    std::ostringstream cmd_stream;
    cmd_f.flush(cmd_stream);
    const std::string serialized_cmd = cmd_stream.str();
    const char *cmd[2];
    cmd[1] = NULL;
    cmd[0] = serialized_cmd.c_str();
    char *outbuf = NULL;
    size_t outlen = 0;
    int ret = rados_mon_command(*cluster, (const char **)cmd, 1, "", 0,
                                &outbuf, &outlen, NULL, NULL);
    if (ret) {
      return ret;
    }
    std::string out(outbuf, outlen);
    rados_buffer_free(outbuf);
    json_spirit::mValue root;
    [[maybe_unused]] bool json_parse_success = json_spirit::read(out, root);
    ceph_assert(json_parse_success);
    // healthy == exactly one pgs_by_state bucket, and it is "active+clean"
    json_spirit::mObject root_obj = root.get_obj();
    json_spirit::mObject pgmap = root_obj["pgmap"].get_obj();
    json_spirit::mArray pgs_by_state = pgmap["pgs_by_state"].get_array();
    if (pgs_by_state.size() == 1) {
      json_spirit::mObject state = pgs_by_state[0].get_obj();
      std::string state_name = state["state_name"].get_str();
      if (state_name != std::string("active+clean")) {
        healthy = false;
      } else {
        healthy = true;
      }
    } else {
      healthy = false;
    }
    if (slept >= timeout) {
      return -ETIMEDOUT;
    };
    if (!healthy) {
      sleep(1);
      slept += 1;
    }
  }
  return 0;
}
// Issue "osd pool set <pool> <var> <val>" to the monitors.  Returns the
// rados_mon_command() result; no output is captured.
int rados_pool_set(
  rados_t *cluster,
  const std::string &pool_name,
  const std::string &var,
  const std::string &val)
{
  // serialize the command as JSON
  JSONFormatter fmt;
  fmt.open_object_section("command");
  fmt.dump_string("prefix", "osd pool set");
  fmt.dump_string("pool", pool_name);
  fmt.dump_string("var", var);
  fmt.dump_string("val", val);
  fmt.close_section();
  std::ostringstream ss;
  fmt.flush(ss);
  const std::string serialized = ss.str();
  const char *argv[2] = { serialized.c_str(), NULL };
  return rados_mon_command(*cluster, argv, 1, "", 0, NULL,
                           NULL, NULL, NULL);
}
// Exception describing a failed pool maintenance operation; what() yields
// "<func_name>(<pool_name>) failed with error <err>".
struct pool_op_error : std::exception {
  std::string msg;
  pool_op_error(const std::string& pool_name,
		const std::string& func_name,
		int err) {
    msg = func_name + "(" + pool_name + ") failed with error " +
      std::to_string(err);
  }
  const char* what() const noexcept override {
    return msg.c_str();
  }
};
// Run "func" bracketed by two health checks: wait for the cluster to be
// fully active+clean, invoke func (which reports failure by throwing
// pool_op_error), then wait for health again.  Returns "" on success or
// the thrown error's message.
template<typename Func>
std::string with_healthy_cluster(rados_t* cluster,
                                 const std::string& pool_name,
                                 Func&& func)
{
  try {
    // Wait for 'creating/backfilling' to clear
    if (int r = wait_for_healthy(cluster); r != 0) {
      throw pool_op_error{pool_name, "wait_for_healthy", r};
    }
    func();
    // Wait for 'creating/backfilling' to clear
    if (int r = wait_for_healthy(cluster); r != 0) {
      throw pool_op_error{pool_name, "wait_for_healthy", r};
    }
  } catch (const pool_op_error& e) {
    return e.what();
  }
  return "";
}
}
// Set pg_num on "pool_name", waiting for the cluster to be healthy before
// and after.  Returns "" on success or an error message.
std::string set_pg_num(
    rados_t *cluster, const std::string &pool_name, uint32_t pg_num)
{
  return with_healthy_cluster(cluster, pool_name, [&] {
    // Adjust pg_num
    const int r = rados_pool_set(cluster, pool_name, "pg_num",
                                 stringify(pg_num));
    if (r != 0)
      throw pool_op_error{pool_name, "set_pg_num", r};
  });
}
// Set pgp_num on "pool_name", waiting for the cluster to be healthy before
// and after.  Returns "" on success or an error message.
std::string set_pgp_num(
    rados_t *cluster, const std::string &pool_name, uint32_t pgp_num)
{
  return with_healthy_cluster(cluster, pool_name, [&] {
    // Adjust pgp_num
    const int r = rados_pool_set(cluster, pool_name, "pgp_num",
                                 stringify(pgp_num));
    if (r != 0)
      throw pool_op_error{pool_name, "set_pgp_num", r};
  });
}
| 4,246 | 24.279762 | 76 | cc |
null | ceph-main/src/test/librados/test_common.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*
// vim: ts=8 sw=2 smarttab
#include "include/rados/librados.h"
std::string set_pg_num(
rados_t *cluster, const std::string &pool_name, uint32_t pg_num);
std::string set_pgp_num(
rados_t *cluster, const std::string &pool_name, uint32_t pgp_num);
| 325 | 31.6 | 70 | h |
null | ceph-main/src/test/librados/test_cxx.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*
// vim: ts=8 sw=2 smarttab
#include "test_cxx.h"
#include "include/stringify.h"
#include "common/ceph_context.h"
#include "common/config.h"
#include <errno.h>
#include <sstream>
#include <stdlib.h>
#include <string>
#include <time.h>
#include <unistd.h>
#include <iostream>
#include "gtest/gtest.h"
using namespace librados;
// Convenience overload: create a pool with no extra config overrides.
std::string create_one_pool_pp(const std::string &pool_name, Rados &cluster)
{
  const std::map<std::string, std::string> no_config;
  return create_one_pool_pp(pool_name, cluster, no_config);
}
// Connect (applying "config" overrides), create a replicated pool
// "pool_name" and enable the "rados" application on it.  Returns "" on
// success or an error string; the cluster handle is shut down on failure.
std::string create_one_pool_pp(const std::string &pool_name, Rados &cluster,
			       const std::map<std::string, std::string> &config)
{
  std::string err = connect_cluster_pp(cluster, config);
  if (err.length())
    return err;
  int ret = cluster.pool_create(pool_name.c_str());
  if (ret) {
    cluster.shutdown();
    std::ostringstream oss;
    oss << "cluster.pool_create(" << pool_name << ") failed with error " << ret;
    return oss.str();
  }
  IoCtx ioctx;
  ret = cluster.ioctx_create(pool_name.c_str(), ioctx);
  if (ret < 0) {
    cluster.shutdown();
    std::ostringstream oss;
    oss << "cluster.ioctx_create(" << pool_name << ") failed with error "
	<< ret;
    return oss.str();
  }
  // tag the pool so the mons don't warn about an application-less pool
  ioctx.application_enable("rados", true);
  return "";
}
// Remove the CRUSH rule named "rule"; on failure an explanatory line is
// written to "oss".  Returns the mon_command() result.
int destroy_rule_pp(Rados &cluster,
		    const std::string &rule,
		    std::ostream &oss)
{
  bufferlist inbl;
  const int ret = cluster.mon_command(
    "{\"prefix\": \"osd crush rule rm\", \"name\":\"" + rule + "\"}",
    inbl, NULL, NULL);
  if (ret != 0)
    oss << "mon_command: osd crush rule rm " + rule + " failed with error " << ret << std::endl;
  return ret;
}
// Remove the per-pool EC profile "testprofile-<pool_name>"; on failure an
// explanatory line is written to "oss".  Returns the mon_command() result.
int destroy_ec_profile_pp(Rados &cluster, const std::string& pool_name,
                          std::ostream &oss)
{
  bufferlist inbl;
  int ret = cluster.mon_command("{\"prefix\": \"osd erasure-code-profile rm\", \"name\": \"testprofile-" + pool_name + "\"}",
                                inbl, NULL, NULL);
  if (ret)
    oss << "mon_command: osd erasure-code-profile rm testprofile-" << pool_name << " failed with error " << ret << std::endl;
  return ret;
}
// Remove the EC profile tied to "rule", then the CRUSH rule itself.
// Stops at the first failure and returns that error code.
int destroy_ec_profile_and_rule_pp(Rados &cluster,
				   const std::string &rule,
				   std::ostream &oss)
{
  if (int r = destroy_ec_profile_pp(cluster, rule, oss); r != 0)
    return r;
  return destroy_rule_pp(cluster, rule, oss);
}
// C++-API twin of create_one_ec_pool(): connect, clean up stale
// profile/rule, create profile "testprofile-<pool_name>" (k=2, m=1,
// failure domain = osd) and an EC pool on top of it.  Returns "" on
// success or an error string; the handle is shut down on failure.
std::string create_one_ec_pool_pp(const std::string &pool_name, Rados &cluster)
{
  std::string err = connect_cluster_pp(cluster);
  if (err.length())
    return err;
  std::ostringstream oss;
  // clean up leftovers from an earlier (possibly crashed) test run
  int ret = destroy_ec_profile_and_rule_pp(cluster, pool_name, oss);
  if (ret) {
    cluster.shutdown();
    return oss.str();
  }
  bufferlist inbl;
  ret = cluster.mon_command(
    "{\"prefix\": \"osd erasure-code-profile set\", \"name\": \"testprofile-" + pool_name + "\", \"profile\": [ \"k=2\", \"m=1\", \"crush-failure-domain=osd\"]}",
    inbl, NULL, NULL);
  if (ret) {
    cluster.shutdown();
    oss << "mon_command erasure-code-profile set name:testprofile-" << pool_name << " failed with error " << ret;
    return oss.str();
  }
  ret = cluster.mon_command(
    "{\"prefix\": \"osd pool create\", \"pool\": \"" + pool_name + "\", \"pool_type\":\"erasure\", \"pg_num\":8, \"pgp_num\":8, \"erasure_code_profile\":\"testprofile-" + pool_name + "\"}",
    inbl, NULL, NULL);
  if (ret) {
    // (a redundant shadowing "bufferlist inbl;" declared here was unused
    // and has been removed)
    // roll back the profile created above before reporting the failure
    destroy_ec_profile_pp(cluster, pool_name, oss);
    cluster.shutdown();
    oss << "mon_command osd pool create pool:" << pool_name << " pool_type:erasure failed with error " << ret;
    return oss.str();
  }
  // make sure subsequent I/O sees the new pool
  cluster.wait_for_latest_osdmap();
  return "";
}
// Convenience overload: connect with no extra config overrides.
std::string connect_cluster_pp(librados::Rados &cluster)
{
  const std::map<std::string, std::string> no_config;
  return connect_cluster_pp(cluster, no_config);
}
// Initialize and connect a librados::Rados handle: read the default config
// file, apply environment overrides, then apply each entry of "config" via
// conf_set.  CEPH_CLIENT_ID overrides the client id.  Returns "" on
// success or an error string; the handle is shut down on most failures.
std::string connect_cluster_pp(librados::Rados &cluster,
                               const std::map<std::string, std::string> &config)
{
  char *id = getenv("CEPH_CLIENT_ID");
  if (id) std::cerr << "Client id is: " << id << std::endl;
  int ret;
  ret = cluster.init(id);
  if (ret) {
    std::ostringstream oss;
    oss << "cluster.init failed with error " << ret;
    return oss.str();
  }
  ret = cluster.conf_read_file(NULL);
  if (ret) {
    cluster.shutdown();
    std::ostringstream oss;
    oss << "cluster.conf_read_file failed with error " << ret;
    return oss.str();
  }
  cluster.conf_parse_env(NULL);
  // explicit overrides win over file and environment
  for (auto &setting : config) {
    ret = cluster.conf_set(setting.first.c_str(), setting.second.c_str());
    if (ret) {
      std::ostringstream oss;
      oss << "failed to set config value " << setting.first << " to '"
          << setting.second << "': " << strerror(-ret);
      return oss.str();
    }
  }
  ret = cluster.connect();
  if (ret) {
    cluster.shutdown();
    std::ostringstream oss;
    oss << "cluster.connect failed with error " << ret;
    return oss.str();
  }
  return "";
}
// Delete "pool_name" and shut the handle down unconditionally.  Returns
// the pool_delete() result (0 on success).
int destroy_one_pool_pp(const std::string &pool_name, Rados &cluster)
{
  const int ret = cluster.pool_delete(pool_name.c_str());
  cluster.shutdown();
  return ret;
}
// Delete the EC pool "pool_name" plus its test profile and CRUSH rule
// (skipped when mon_fake_pool_delete is set, since the pool is then only
// renamed).  Shuts the handle down on every path and returns the first
// error encountered.
int destroy_one_ec_pool_pp(const std::string &pool_name, Rados &cluster)
{
  int ret = cluster.pool_delete(pool_name.c_str());
  if (ret) {
    cluster.shutdown();
    return ret;
  }
  CephContext *cct = static_cast<CephContext*>(cluster.cct());
  if (!cct->_conf->mon_fake_pool_delete) { // hope this is in [global]
    std::ostringstream oss;
    ret = destroy_ec_profile_and_rule_pp(cluster, pool_name, oss);
    if (ret) {
      cluster.shutdown();
      return ret;
    }
  }
  // let the deletion settle before the next test reuses the name
  cluster.wait_for_latest_osdmap();
  cluster.shutdown();
  return ret;
}
| 5,801 | 27.441176 | 189 | cc |
null | ceph-main/src/test/librados/test_cxx.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "include/rados/librados.hpp"
#include "test/librados/test_shared.h"
std::string create_one_pool_pp(const std::string &pool_name,
librados::Rados &cluster);
std::string create_one_pool_pp(const std::string &pool_name,
librados::Rados &cluster,
const std::map<std::string, std::string> &config);
std::string create_one_ec_pool_pp(const std::string &pool_name,
librados::Rados &cluster);
std::string connect_cluster_pp(librados::Rados &cluster);
std::string connect_cluster_pp(librados::Rados &cluster,
const std::map<std::string, std::string> &config);
int destroy_one_pool_pp(const std::string &pool_name, librados::Rados &cluster);
int destroy_one_ec_pool_pp(const std::string &pool_name, librados::Rados &cluster);
| 882 | 43.15 | 83 | h |
null | ceph-main/src/test/librados/test_shared.cc | #include "test_shared.h"
#include <cstring>
#include "gtest/gtest.h"
#include "include/buffer.h"
using namespace ceph;
// Build a pool name that is unique per host, process and call:
// "<prefix><hostname>-<pid>-<counter>".  The counter is a function-local
// static, so successive calls in one process never collide.
std::string get_temp_pool_name(const std::string &prefix)
{
  char host[80] = {0};
  gethostname(host, sizeof(host) - 1);
  static int counter = 1;
  char suffix[160] = {0};
  snprintf(suffix, sizeof(suffix), "%s-%d-%d", host, getpid(), counter);
  ++counter;
  return prefix + suffix;
}
// Compare a full "expected" buffer against the result of a sparse read:
// "extents" maps offset -> length of the data returned in "actual".
// Bytes of "expected" that fall in a hole (before the next extent) must be
// zero; bytes inside an extent must match "actual" byte-for-byte.  Also
// asserts that "expected" ends exactly at the end of the last extent.
// Uses gtest ASSERT_* macros, so it must run inside a TEST body.
void assert_eq_sparse(bufferlist& expected,
                      const std::map<uint64_t, uint64_t>& extents,
                      bufferlist& actual) {
  auto i = expected.begin();
  auto p = actual.begin();
  uint64_t pos = 0;
  for (auto extent : extents) {
    const uint64_t start = extent.first;
    const uint64_t end = start + extent.second;
    for (; pos < end; ++i, ++pos) {
      ASSERT_FALSE(i.end());
      if (pos < start) {
	// check the hole
	ASSERT_EQ('\0', *i);
      } else {
	// then the extent
	ASSERT_EQ(*i, *p);
	++p;
      }
    }
  }
  ASSERT_EQ(expected.length(), pos);
}
| 1,097 | 23.4 | 66 | cc |
null | ceph-main/src/test/librados/test_shared.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*
// vim: ts=8 sw=2 smarttab
#pragma once
#include <unistd.h>
#include <chrono>
#include <map>
#include <string>
#include <thread>
#include "include/buffer_fwd.h"
// helpers shared by librados tests
std::string get_temp_pool_name(const std::string &prefix = "test-rados-api-");
void assert_eq_sparse(ceph::bufferlist& expected,
const std::map<uint64_t, uint64_t>& extents,
ceph::bufferlist& actual);
// RAII watchdog for tests: arms a 2400-second SIGALRM on construction and
// cancels it on destruction, so a hung test aborts instead of stalling CI.
// On Windows there is no alarm(); the class is a no-op there.
class TestAlarm
{
public:
#ifndef _WIN32
  TestAlarm() {
    alarm(2400);
  }
  ~TestAlarm() {
    alarm(0);
  }
#else
  // TODO: add a timeout mechanism for Windows as well, possibly by using
  // CreateTimerQueueTimer.
  TestAlarm() {
  }
  ~TestAlarm() {
  }
#endif
};
// Poll "func" until it returns "expected" or the grace period runs out.
// Sleeps for (rel_time - step) up front, then makes up to two extra
// attempts "step" apart (skipped entirely when step is zero), and finally
// returns the result of one last call regardless of its value.
template<class Rep, class Period, typename Func, typename... Args,
         typename Return = std::result_of_t<Func&&(Args&&...)>>
Return wait_until(const std::chrono::duration<Rep, Period>& rel_time,
                  const std::chrono::duration<Rep, Period>& step,
                  const Return& expected,
                  Func&& func, Args&&... args)
{
  std::this_thread::sleep_for(rel_time - step);
  for (int attempt = 0; attempt < 2; ++attempt) {
    if (step.count() == 0) {
      break;                       // no polling requested
    }
    auto ret = func(std::forward<Args>(args)...);
    if (ret == expected) {
      return ret;                  // matched early
    }
    std::this_thread::sleep_for(step);
  }
  return func(std::forward<Args>(args)...);
}
| 1,456 | 23.694915 | 78 | h |
null | ceph-main/src/test/librados/testcase_cxx.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "testcase_cxx.h"
#include <errno.h>
#include <fmt/format.h>
#include "test_cxx.h"
#include "test_shared.h"
#include "crimson_utils.h"
#include "include/scope_guard.h"
using namespace librados;
namespace {
// Seed rand() with the pid exactly once per process, logging the seed so
// a failing run can be reproduced.
void init_rand() {
  static bool done = false;
  if (done)
    return;
  done = true;
  const int seed = getpid();
  std::cout << "seed " << seed << std::endl;
  srand(seed);
}
} // anonymous namespace
std::string RadosTestPPNS::pool_name;
Rados RadosTestPPNS::s_cluster;
// Once per test case: create a uniquely named replicated pool, prefixed
// with the test-case name for easier triage of leftovers.
void RadosTestPPNS::SetUpTestCase()
{
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_case()->name());
  pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_pool_pp(pool_name, s_cluster));
}
// Once per test case: delete the shared pool created in SetUpTestCase().
void RadosTestPPNS::TearDownTestCase()
{
  ASSERT_EQ(0, destroy_one_pool_pp(pool_name, s_cluster));
}
// Per test: open an IoCtx on the shared pool and sanity-check that a
// replicated pool needs no write alignment.
void RadosTestPPNS::SetUp()
{
  ASSERT_EQ(0, cluster.ioctx_create(pool_name.c_str(), ioctx));
  bool req;
  ASSERT_EQ(0, ioctx.pool_requires_alignment2(&req));
  ASSERT_FALSE(req);
}
// Per test: optionally purge every object (all namespaces), then close
// the IoCtx.
void RadosTestPPNS::TearDown()
{
  if (cleanup)
    cleanup_all_objects(ioctx);
  ioctx.close();
}
// Remove every object in every namespace of the pool behind "ioctx".
// Note: ioctx is taken by value, so the caller's locator/namespace
// settings are not disturbed — presumably intentional; verify if changing.
void RadosTestPPNS::cleanup_all_objects(librados::IoCtx ioctx)
{
  // remove all objects to avoid polluting other tests
  ioctx.snap_set_read(librados::SNAP_HEAD);
  ioctx.set_namespace(all_nspaces);
  for (NObjectIterator it = ioctx.nobjects_begin();
       it != ioctx.nobjects_end(); ++it) {
    ioctx.locator_set_key(it->get_locator());
    ioctx.set_namespace(it->get_nspace());
    ASSERT_EQ(0, ioctx.remove(it->get_oid()));
  }
}
std::string RadosTestParamPPNS::pool_name;
std::string RadosTestParamPPNS::cache_pool_name;
Rados RadosTestParamPPNS::s_cluster;
// Once per test case: create the base replicated pool (the optional cache
// tier is added lazily in SetUp()).
void RadosTestParamPPNS::SetUpTestCase()
{
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_case()->name());
  pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_pool_pp(pool_name, s_cluster));
}
// Once per test case: if a cache tier was created, detach and delete it
// (overlay removal must precede tier removal), then delete the base pool.
void RadosTestParamPPNS::TearDownTestCase()
{
  if (cache_pool_name.length()) {
    // tear down tiers
    bufferlist inbl;
    ASSERT_EQ(0, s_cluster.mon_command(
      "{\"prefix\": \"osd tier remove-overlay\", \"pool\": \"" + pool_name +
      "\"}",
      inbl, NULL, NULL));
    ASSERT_EQ(0, s_cluster.mon_command(
      "{\"prefix\": \"osd tier remove\", \"pool\": \"" + pool_name +
      "\", \"tierpool\": \"" + cache_pool_name + "\"}",
      inbl, NULL, NULL));
    ASSERT_EQ(0, s_cluster.mon_command(
      "{\"prefix\": \"osd pool delete\", \"pool\": \"" + cache_pool_name +
      "\", \"pool2\": \"" + cache_pool_name + "\", \"yes_i_really_really_mean_it\": true}",
      inbl, NULL, NULL));
    cache_pool_name = "";
  }
  ASSERT_EQ(0, destroy_one_pool_pp(pool_name, s_cluster));
}
// Per test: when running the "cache" parameterization (and not on
// crimson), lazily build a writeback cache tier in front of the base pool
// the first time through; the tier is torn down in TearDownTestCase().
// Always opens an IoCtx on the base pool and verifies it needs no
// alignment.
void RadosTestParamPPNS::SetUp()
{
  if (!is_crimson_cluster() && strcmp(GetParam(), "cache") == 0 &&
      cache_pool_name.empty()) {
    // NOTE(review): a test-case-derived prefix used to be computed here but
    // was never passed to get_temp_pool_name(); the dead code was removed.
    cache_pool_name = get_temp_pool_name();
    bufferlist inbl;
    ASSERT_EQ(0, cluster.mon_command(
      "{\"prefix\": \"osd pool create\", \"pool\": \"" + cache_pool_name +
      "\", \"pg_num\": 4}",
      inbl, NULL, NULL));
    ASSERT_EQ(0, cluster.mon_command(
      "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
      "\", \"tierpool\": \"" + cache_pool_name +
      "\", \"force_nonempty\": \"--force-nonempty\" }",
      inbl, NULL, NULL));
    ASSERT_EQ(0, cluster.mon_command(
      "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
      "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
      inbl, NULL, NULL));
    ASSERT_EQ(0, cluster.mon_command(
      "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
      "\", \"mode\": \"writeback\"}",
      inbl, NULL, NULL));
    // make sure subsequent I/O sees the tiering change
    cluster.wait_for_latest_osdmap();
  }
  ASSERT_EQ(0, cluster.ioctx_create(pool_name.c_str(), ioctx));
  bool req;
  ASSERT_EQ(0, ioctx.pool_requires_alignment2(&req));
  ASSERT_FALSE(req);
}
// Per test: optionally purge every object (all namespaces), then close
// the IoCtx.
void RadosTestParamPPNS::TearDown()
{
  if (cleanup)
    cleanup_all_objects(ioctx);
  ioctx.close();
}
// Remove every object in every namespace of the pool behind "ioctx"
// (ioctx taken by value so the caller's settings are untouched).
void RadosTestParamPPNS::cleanup_all_objects(librados::IoCtx ioctx)
{
  // remove all objects to avoid polluting other tests
  ioctx.snap_set_read(librados::SNAP_HEAD);
  ioctx.set_namespace(all_nspaces);
  for (NObjectIterator it = ioctx.nobjects_begin();
       it != ioctx.nobjects_end(); ++it) {
    ioctx.locator_set_key(it->get_locator());
    ioctx.set_namespace(it->get_nspace());
    ASSERT_EQ(0, ioctx.remove(it->get_oid()));
  }
}
std::string RadosTestECPPNS::pool_name;
Rados RadosTestECPPNS::s_cluster;
// Once per test case: create a uniquely named erasure-coded pool.
void RadosTestECPPNS::SetUpTestCase()
{
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_case()->name());
  pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_ec_pool_pp(pool_name, s_cluster));
}
// Once per test case: delete the shared EC pool and its profile/rule.
void RadosTestECPPNS::TearDownTestCase()
{
  ASSERT_EQ(0, destroy_one_ec_pool_pp(pool_name, s_cluster));
}
// Per test: open an IoCtx on the EC pool and record its required write
// alignment (EC pools must report a non-zero alignment).
void RadosTestECPPNS::SetUp()
{
  ASSERT_EQ(0, cluster.ioctx_create(pool_name.c_str(), ioctx));
  bool req;
  ASSERT_EQ(0, ioctx.pool_requires_alignment2(&req));
  ASSERT_TRUE(req);
  ASSERT_EQ(0, ioctx.pool_required_alignment2(&alignment));
  ASSERT_NE(0U, alignment);
}
// Per test: optionally purge every object (all namespaces), then close
// the IoCtx.
void RadosTestECPPNS::TearDown()
{
  if (cleanup)
    cleanup_all_objects(ioctx);
  ioctx.close();
}
std::string RadosTestPP::pool_name;
Rados RadosTestPP::s_cluster;
// Once per test case: seed rand() (logged for reproducibility) and create
// a uniquely named replicated pool.
void RadosTestPP::SetUpTestCase()
{
  init_rand();
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_case()->name());
  pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_pool_pp(pool_name, s_cluster));
}
// Once per test case: delete the shared pool.
void RadosTestPP::TearDownTestCase()
{
  ASSERT_EQ(0, destroy_one_pool_pp(pool_name, s_cluster));
}
// Per test: open an IoCtx on the shared pool, switch to a per-test
// namespace so tests cannot see each other's objects, and verify no write
// alignment is required.
void RadosTestPP::SetUp()
{
  ASSERT_EQ(0, cluster.ioctx_create(pool_name.c_str(), ioctx));
  nspace = get_temp_pool_name();
  ioctx.set_namespace(nspace);
  bool req;
  ASSERT_EQ(0, ioctx.pool_requires_alignment2(&req));
  ASSERT_FALSE(req);
}
// Per test: optionally purge the default namespace and this test's
// namespace, then close the IoCtx.
void RadosTestPP::TearDown()
{
  if (cleanup) {
    cleanup_default_namespace(ioctx);
    cleanup_namespace(ioctx, nspace);
  }
  ioctx.close();
}
// Purge the default ("") namespace of the pool behind "ioctx".
void RadosTestPP::cleanup_default_namespace(librados::IoCtx ioctx)
{
  // remove all objects from the default namespace to avoid polluting
  // other tests
  cleanup_namespace(ioctx, "");
}
// Remove every object in namespace "ns", retrying up to 20 times: a
// remove can race with snap trimming and return ENOENT, in which case we
// wait a second and re-scan until a pass completes with no ENOENTs.
// Removals bypass any cache tier (OPERATION_IGNORE_CACHE).
void RadosTestPP::cleanup_namespace(librados::IoCtx ioctx, std::string ns)
{
  ioctx.snap_set_read(librados::SNAP_HEAD);
  ioctx.set_namespace(ns);
  int tries = 20;
  while (--tries) {
    int got_enoent = 0;
    for (NObjectIterator it = ioctx.nobjects_begin();
	 it != ioctx.nobjects_end(); ++it) {
      ioctx.locator_set_key(it->get_locator());
      ObjectWriteOperation op;
      op.remove();
      librados::AioCompletion *completion = s_cluster.aio_create_completion();
      auto sg = make_scope_guard([&] { completion->release(); });
      ASSERT_EQ(0, ioctx.aio_operate(it->get_oid(), completion, &op,
				     librados::OPERATION_IGNORE_CACHE));
      completion->wait_for_complete();
      if (completion->get_return_value() == -ENOENT) {
	++got_enoent;
	std::cout << " got ENOENT removing " << it->get_oid()
		  << " in ns " << ns << std::endl;
      } else {
	ASSERT_EQ(0, completion->get_return_value());
      }
    }
    if (!got_enoent) {
      break;
    }
    std::cout << " got ENOENT on " << got_enoent
	      << " objects, waiting a bit for snap"
	      << " trimming before retrying " << tries << " more times..."
	      << std::endl;
    sleep(1);
  }
  if (tries == 0) {
    std::cout << "failed to clean up; probably need to scrub purged_snaps."
	      << std::endl;
  }
}
std::string RadosTestParamPP::pool_name;
std::string RadosTestParamPP::cache_pool_name;
Rados RadosTestParamPP::s_cluster;
// Once per test case: create the base replicated pool (cache tier is
// added lazily in SetUp()).
void RadosTestParamPP::SetUpTestCase()
{
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_case()->name());
  pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_pool_pp(pool_name, s_cluster));
}
// Once per test case: if a cache tier was created, detach and delete it
// (overlay removal must precede tier removal), then delete the base pool.
void RadosTestParamPP::TearDownTestCase()
{
  if (cache_pool_name.length()) {
    // tear down tiers
    bufferlist inbl;
    ASSERT_EQ(0, s_cluster.mon_command(
      "{\"prefix\": \"osd tier remove-overlay\", \"pool\": \"" + pool_name +
      "\"}",
      inbl, NULL, NULL));
    ASSERT_EQ(0, s_cluster.mon_command(
      "{\"prefix\": \"osd tier remove\", \"pool\": \"" + pool_name +
      "\", \"tierpool\": \"" + cache_pool_name + "\"}",
      inbl, NULL, NULL));
    ASSERT_EQ(0, s_cluster.mon_command(
      "{\"prefix\": \"osd pool delete\", \"pool\": \"" + cache_pool_name +
      "\", \"pool2\": \"" + cache_pool_name + "\", \"yes_i_really_really_mean_it\": true}",
      inbl, NULL, NULL));
    cache_pool_name = "";
  }
  ASSERT_EQ(0, destroy_one_pool_pp(pool_name, s_cluster));
}
// Per test: when running the "cache" parameterization (and not on
// crimson), lazily build a writeback cache tier in front of the base pool
// the first time through.  Always opens an IoCtx on the base pool,
// switches to a per-test namespace and verifies no alignment is required.
void RadosTestParamPP::SetUp()
{
  if (!is_crimson_cluster() && strcmp(GetParam(), "cache") == 0 &&
      cache_pool_name.empty()) {
    // NOTE(review): a test-case-derived prefix used to be computed here but
    // was never passed to get_temp_pool_name(); the dead code was removed.
    cache_pool_name = get_temp_pool_name();
    bufferlist inbl;
    ASSERT_EQ(0, cluster.mon_command(
      "{\"prefix\": \"osd pool create\", \"pool\": \"" + cache_pool_name +
      "\", \"pg_num\": 4}",
      inbl, NULL, NULL));
    ASSERT_EQ(0, cluster.mon_command(
      "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
      "\", \"tierpool\": \"" + cache_pool_name +
      "\", \"force_nonempty\": \"--force-nonempty\" }",
      inbl, NULL, NULL));
    ASSERT_EQ(0, cluster.mon_command(
      "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
      "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
      inbl, NULL, NULL));
    ASSERT_EQ(0, cluster.mon_command(
      "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
      "\", \"mode\": \"writeback\"}",
      inbl, NULL, NULL));
    // make sure subsequent I/O sees the tiering change
    cluster.wait_for_latest_osdmap();
  }
  ASSERT_EQ(0, cluster.ioctx_create(pool_name.c_str(), ioctx));
  nspace = get_temp_pool_name();
  ioctx.set_namespace(nspace);
  bool req;
  ASSERT_EQ(0, ioctx.pool_requires_alignment2(&req));
  ASSERT_FALSE(req);
}
// Per test: optionally purge the default namespace and this test's
// namespace, then close the IoCtx.
void RadosTestParamPP::TearDown()
{
  if (cleanup) {
    cleanup_default_namespace(ioctx);
    cleanup_namespace(ioctx, nspace);
  }
  ioctx.close();
}
// Purge the default ("") namespace of the pool behind "ioctx".
void RadosTestParamPP::cleanup_default_namespace(librados::IoCtx ioctx)
{
  // remove all objects from the default namespace to avoid polluting
  // other tests
  cleanup_namespace(ioctx, "");
}
// Remove every object in namespace "ns" (no ENOENT retry loop here,
// unlike RadosTestPP::cleanup_namespace).
void RadosTestParamPP::cleanup_namespace(librados::IoCtx ioctx, std::string ns)
{
  ioctx.snap_set_read(librados::SNAP_HEAD);
  ioctx.set_namespace(ns);
  for (NObjectIterator it = ioctx.nobjects_begin();
       it != ioctx.nobjects_end(); ++it) {
    ioctx.locator_set_key(it->get_locator());
    ASSERT_EQ(0, ioctx.remove(it->get_oid()));
  }
}
std::string RadosTestECPP::pool_name;
Rados RadosTestECPP::s_cluster;
// Once per test case: create a uniquely named EC pool (skipped entirely on
// crimson, which does not support EC pools in these tests).
void RadosTestECPP::SetUpTestCase()
{
  SKIP_IF_CRIMSON();
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_case()->name());
  pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_ec_pool_pp(pool_name, s_cluster));
}
// Once per test case: delete the shared EC pool and its profile/rule.
void RadosTestECPP::TearDownTestCase()
{
  SKIP_IF_CRIMSON();
  ASSERT_EQ(0, destroy_one_ec_pool_pp(pool_name, s_cluster));
}
// Per test: open an IoCtx on the EC pool, switch to a per-test namespace
// and record the pool's required (non-zero) write alignment.
void RadosTestECPP::SetUp()
{
  SKIP_IF_CRIMSON();
  ASSERT_EQ(0, cluster.ioctx_create(pool_name.c_str(), ioctx));
  nspace = get_temp_pool_name();
  ioctx.set_namespace(nspace);
  bool req;
  ASSERT_EQ(0, ioctx.pool_requires_alignment2(&req));
  ASSERT_TRUE(req);
  ASSERT_EQ(0, ioctx.pool_required_alignment2(&alignment));
  ASSERT_NE(0U, alignment);
}
// Per test: optionally purge the default and per-test namespaces, then
// close the IoCtx.
void RadosTestECPP::TearDown()
{
  SKIP_IF_CRIMSON();
  if (cleanup) {
    cleanup_default_namespace(ioctx);
    cleanup_namespace(ioctx, nspace);
  }
  ioctx.close();
}
| 12,049 | 28.534314 | 107 | cc |
null | ceph-main/src/test/librados/testcase_cxx.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "gtest/gtest.h"
#include "include/rados/librados.hpp"
// gtest fixture: one shared replicated pool per test case; tests operate
// across all namespaces.  Leftover objects are removed in TearDown() only
// when constructed with cleanup == true.
class RadosTestPPNS : public ::testing::Test {
public:
  RadosTestPPNS(bool c=false) : cluster(s_cluster), cleanup(c) {}
  ~RadosTestPPNS() override {}
protected:
  static void SetUpTestCase();
  static void TearDownTestCase();
  // removes every object in every namespace of the shared pool
  static void cleanup_all_objects(librados::IoCtx ioctx);
  static librados::Rados s_cluster;  // shared by the whole test case
  static std::string pool_name;
  void SetUp() override;
  void TearDown() override;
  librados::Rados &cluster;  // alias of s_cluster
  librados::IoCtx ioctx;
  bool cleanup;  // purge objects in TearDown()?
};
// Variant of RadosTestPPNS that always purges leftover objects.
struct RadosTestPPNSCleanup : public RadosTestPPNS {
  RadosTestPPNSCleanup() : RadosTestPPNS(true) {}
};
// Parameterized fixture (param "cache" adds a writeback cache tier in
// front of the base pool); operates across all namespaces.
class RadosTestParamPPNS : public ::testing::TestWithParam<const char*> {
public:
  RadosTestParamPPNS(bool c=false) : cluster(s_cluster), cleanup(c) {}
  ~RadosTestParamPPNS() override {}
  static void SetUpTestCase();
  static void TearDownTestCase();
protected:
  static void cleanup_all_objects(librados::IoCtx ioctx);
  static librados::Rados s_cluster;
  static std::string pool_name;
  static std::string cache_pool_name;  // non-empty once the tier exists
  void SetUp() override;
  void TearDown() override;
  librados::Rados &cluster;  // alias of s_cluster
  librados::IoCtx ioctx;
  bool cleanup;  // purge objects in TearDown()?
};
// EC-pool variant of RadosTestPPNS; "alignment" caches the pool's required
// write alignment.  NOTE(review): re-declares s_cluster, pool_name,
// cluster, ioctx and cleanup, shadowing the base-class members — appears
// intentional (each fixture gets its own statics) but worth confirming.
class RadosTestECPPNS : public RadosTestPPNS {
public:
  RadosTestECPPNS(bool c=false) : cluster(s_cluster), cleanup(c) {}
  ~RadosTestECPPNS() override {}
protected:
  static void SetUpTestCase();
  static void TearDownTestCase();
  static librados::Rados s_cluster;
  static std::string pool_name;
  void SetUp() override;
  void TearDown() override;
  librados::Rados &cluster;
  librados::IoCtx ioctx;
  uint64_t alignment = 0;
  bool cleanup;
};
// Variant of RadosTestECPPNS that always purges leftover objects.
struct RadosTestECPPNSCleanup : public RadosTestECPPNS {
  RadosTestECPPNSCleanup() : RadosTestECPPNS(true) {}
};
// gtest fixture: one shared replicated pool per test case; each test runs
// in its own namespace ("nspace") so tests are isolated from each other.
class RadosTestPP : public ::testing::Test {
public:
  RadosTestPP(bool c=false) : cluster(s_cluster), cleanup(c) {}
  ~RadosTestPP() override {}
protected:
  static void SetUpTestCase();
  static void TearDownTestCase();
  static void cleanup_default_namespace(librados::IoCtx ioctx);
  static void cleanup_namespace(librados::IoCtx ioctx, std::string ns);
  static librados::Rados s_cluster;
  static std::string pool_name;
  void SetUp() override;
  void TearDown() override;
  librados::Rados &cluster;  // alias of s_cluster
  librados::IoCtx ioctx;
  bool cleanup;  // purge objects in TearDown()?
  std::string nspace;  // per-test namespace
};
// Parameterized per-namespace fixture (param "cache" adds a writeback
// cache tier in front of the base pool).
class RadosTestParamPP : public ::testing::TestWithParam<const char*> {
public:
  RadosTestParamPP(bool c=false) : cluster(s_cluster), cleanup(c) {}
  ~RadosTestParamPP() override {}
  static void SetUpTestCase();
  static void TearDownTestCase();
protected:
  static void cleanup_default_namespace(librados::IoCtx ioctx);
  static void cleanup_namespace(librados::IoCtx ioctx, std::string ns);
  static librados::Rados s_cluster;
  static std::string pool_name;
  static std::string cache_pool_name;  // non-empty once the tier exists
  void SetUp() override;
  void TearDown() override;
  librados::Rados &cluster;  // alias of s_cluster
  librados::IoCtx ioctx;
  bool cleanup;  // purge objects in TearDown()?
  std::string nspace;  // per-test namespace
};
// EC-pool variant of RadosTestPP; "alignment" caches the pool's required
// write alignment.  NOTE(review): re-declares s_cluster, pool_name,
// cluster, ioctx, cleanup and nspace, shadowing the base-class members —
// appears intentional but worth confirming.
class RadosTestECPP : public RadosTestPP {
public:
  RadosTestECPP(bool c=false) : cluster(s_cluster), cleanup(c) {}
  ~RadosTestECPP() override {}
protected:
  static void SetUpTestCase();
  static void TearDownTestCase();
  static librados::Rados s_cluster;
  static std::string pool_name;
  void SetUp() override;
  void TearDown() override;
  librados::Rados &cluster;
  librados::IoCtx ioctx;
  bool cleanup;
  std::string nspace;
  uint64_t alignment = 0;
};
| 3,580 | 26.335878 | 73 | h |
null | ceph-main/src/test/librados/tier_cxx.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "gtest/gtest.h"
#include "mds/mdstypes.h"
#include "include/buffer.h"
#include "include/rbd_types.h"
#include "include/rados/librados.hpp"
#include "include/stringify.h"
#include "include/types.h"
#include "global/global_context.h"
#include "common/Cond.h"
#include "common/ceph_crypto.h"
#include "test/librados/test_cxx.h"
#include "test/librados/testcase_cxx.h"
#include "json_spirit/json_spirit.h"
#include "cls/cas/cls_cas_ops.h"
#include "cls/cas/cls_cas_internal.h"
#include "osd/HitSet.h"
#include <errno.h>
#include <map>
#include <sstream>
#include <string>
#include "cls/cas/cls_cas_client.h"
#include "cls/cas/cls_cas_internal.h"
#include "crimson_utils.h"
using namespace std;
using namespace librados;
using ceph::crypto::SHA1;
typedef RadosTestPP LibRadosTierPP;
typedef RadosTestECPP LibRadosTierECPP;
// Best-effort flush then evict of every object in every namespace of the
// cache tier behind "cache_ioctx".  Return values of the flush/evict ops
// are fetched but deliberately not checked.
void flush_evict_all(librados::Rados& cluster, librados::IoCtx& cache_ioctx)
{
  bufferlist inbl;
  cache_ioctx.set_namespace(all_nspaces);
  for (NObjectIterator it = cache_ioctx.nobjects_begin();
       it != cache_ioctx.nobjects_end(); ++it) {
    cache_ioctx.locator_set_key(it->get_locator());
    cache_ioctx.set_namespace(it->get_nspace());
    {
      // flush dirty data down to the base tier
      ObjectReadOperation op;
      op.cache_flush();
      librados::AioCompletion *completion = cluster.aio_create_completion();
      cache_ioctx.aio_operate(
        it->get_oid(), completion, &op,
	librados::OPERATION_IGNORE_OVERLAY, NULL);
      completion->wait_for_complete();
      completion->get_return_value();
      completion->release();
    }
    {
      // then drop the clean copy from the cache
      ObjectReadOperation op;
      op.cache_evict();
      librados::AioCompletion *completion = cluster.aio_create_completion();
      cache_ioctx.aio_operate(
        it->get_oid(), completion, &op,
	librados::OPERATION_IGNORE_OVERLAY, NULL);
      completion->wait_for_complete();
      completion->get_return_value();
      completion->release();
    }
  }
}
// Fetch "require_osd_release" from "osd dump --format json".  Returns the
// release name, or "" if the JSON cannot be parsed or lacks the field.
static string _get_required_osd_release(Rados& cluster)
{
  bufferlist inbl;
  string cmd = string("{\"prefix\": \"osd dump\",\"format\":\"json\"}");
  bufferlist outbl;
  int r = cluster.mon_command(cmd, inbl, &outbl, NULL);
  ceph_assert(r >= 0);
  string outstr(outbl.c_str(), outbl.length());
  json_spirit::Value v;
  if (!json_spirit::read(outstr, v)) {
    cerr <<" unable to parse json " << outstr << std::endl;
    return "";
  }
  // linear scan of the top-level object for the field we want
  json_spirit::Object& o = v.get_obj();
  for (json_spirit::Object::size_type i=0; i<o.size(); i++) {
    json_spirit::Pair& p = o[i];
    if (p.name_ == "require_osd_release") {
      cout << "require_osd_release = " << p.value_.get_str() << std::endl;
      return p.value_.get_str();
    }
  }
  cerr << "didn't find require_osd_release in " << outstr << std::endl;
  return "";
}
// Register [src_offset, src_offset+length) of src_oid (in src_ioctx) as a
// manifest chunk backing tgt_oid (in tgt_ioctx), taking a reference on the
// source chunk.  Asserts that submission and completion both succeed.
void manifest_set_chunk(Rados& cluster, librados::IoCtx& src_ioctx,
			librados::IoCtx& tgt_ioctx,
			uint64_t src_offset, uint64_t length,
			std::string src_oid, std::string tgt_oid)
{
  ObjectReadOperation chunk_op;
  chunk_op.set_chunk(src_offset, length, src_ioctx, src_oid, 0,
		     CEPH_OSD_OP_FLAG_WITH_REFERENCE);
  librados::AioCompletion *comp = cluster.aio_create_completion();
  ASSERT_EQ(0, tgt_ioctx.aio_operate(tgt_oid, comp, &chunk_op,
	    librados::OPERATION_IGNORE_CACHE, NULL));
  comp->wait_for_complete();
  ASSERT_EQ(0, comp->get_return_value());
  comp->release();
}
// Render `len` bytes of `buf` as a lowercase hex string into `str`.
// `str` must have room for 2*len + 1 characters (including the NUL).
static inline void buf_to_hex(const unsigned char *buf, int len, char *str)
{
  str[0] = '\0';
  for (int i = 0; i < len; i++) {
    // Use snprintf rather than sprintf: each byte writes exactly two hex
    // digits plus a terminating NUL (overwritten by the next iteration).
    snprintf(&str[i*2], 3, "%02x", static_cast<int>(buf[i]));
  }
}
// Assert that the chunk-refcount object for `foid` records at least `count`
// references.  With fp_algo == "sha1" the object name is the hex SHA-1 of
// foid; with an empty fp_algo, foid itself is the object name.
//
// The default argument was previously `std::string fp_algo = NULL`, which
// constructs a std::string from a null char* — undefined behavior.  An
// empty string now expresses "no fingerprint algorithm" safely.
void check_fp_oid_refcount(librados::IoCtx& ioctx, std::string foid, uint64_t count,
			   std::string fp_algo = "")
{
  bufferlist t;
  int size = foid.length();
  if (fp_algo == "sha1") {
    unsigned char fingerprint[CEPH_CRYPTO_SHA1_DIGESTSIZE + 1];
    char p_str[CEPH_CRYPTO_SHA1_DIGESTSIZE*2+1] = {0};
    SHA1 sha1_gen;
    sha1_gen.Update((const unsigned char *)foid.c_str(), size);
    sha1_gen.Final(fingerprint);
    buf_to_hex(fingerprint, CEPH_CRYPTO_SHA1_DIGESTSIZE, p_str);
    ioctx.getxattr(p_str, CHUNK_REFCOUNT_ATTR, t);
  } else if (fp_algo.empty()) {
    ioctx.getxattr(foid, CHUNK_REFCOUNT_ATTR, t);
  } else {
    // Any other fingerprint algorithm is unsupported by this helper.
    ceph_assert(0 == "unrecognized fingerprint algorithm");
  }
  chunk_refs_t refs;
  try {
    auto iter = t.cbegin();
    decode(refs, iter);
  } catch (buffer::error& err) {
    ASSERT_TRUE(0);
  }
  ASSERT_LE(count, refs.count());
}
// Return the fingerprint object name for `oid` under the given algorithm:
// the hex SHA-1 digest for "sha1", or an empty string for any other value.
//
// The default argument was previously `std::string fp_algo = NULL`, which
// constructs a std::string from a null char* — undefined behavior.  An
// empty string is the safe spelling of "no fingerprint algorithm".
string get_fp_oid(string oid, std::string fp_algo = "")
{
  if (fp_algo == "sha1") {
    unsigned char fingerprint[CEPH_CRYPTO_SHA1_DIGESTSIZE + 1];
    char p_str[CEPH_CRYPTO_SHA1_DIGESTSIZE*2+1] = {0};
    SHA1 sha1_gen;
    int size = oid.length();
    sha1_gen.Update((const unsigned char *)oid.c_str(), size);
    sha1_gen.Final(fingerprint);
    buf_to_hex(fingerprint, CEPH_CRYPTO_SHA1_DIGESTSIZE, p_str);
    return string(p_str);
  }
  return string();
}
// Verify the CAS refcount relationship between a source (manifest) object
// and its destination chunk object:
//  - the source must hold exactly `expected_refcount` references to the
//    chunk (as reported by cls_cas_references_chunk), and
//  - the chunk's own recorded refcount (its chunk_refs_t xattr) must be at
//    least that many, since other sources may also reference it.
void is_intended_refcount_state(librados::IoCtx& src_ioctx,
				std::string src_oid,
				librados::IoCtx& dst_ioctx,
				std::string dst_oid,
				int expected_refcount)
{
  int src_refcount = 0, dst_refcount = 0;
  bufferlist t;
  // Read the chunk object's refcount xattr; a missing object means zero.
  int r = dst_ioctx.getxattr(dst_oid, CHUNK_REFCOUNT_ATTR, t);
  if (r == -ENOENT) {
    dst_refcount = 0;
  } else {
    chunk_refs_t refs;
    try {
      auto iter = t.cbegin();
      decode(refs, iter);
    } catch (buffer::error& err) {
      ceph_assert(0);
    }
    dst_refcount = refs.count();
  }
  // Count the source's references to the chunk.  -EBUSY means the OSD is
  // still settling (e.g. flush in progress), so retry with a long sleep;
  // -ENOENT/-ENOLINK mean the source holds no reference.
  int tries = 0;
  for (; tries < 30; ++tries) {
    r = cls_cas_references_chunk(src_ioctx, src_oid, dst_oid);
    if (r == -ENOENT || r == -ENOLINK) {
      src_refcount = 0;
    } else if (r == -EBUSY) {
      sleep(20);
      continue;
    } else {
      src_refcount = r;
    }
    break;
  }
  // tries == 30 means every attempt returned -EBUSY.
  ASSERT_TRUE(tries < 30);
  ASSERT_TRUE(src_refcount >= 0);
  ASSERT_TRUE(src_refcount == expected_refcount);
  ASSERT_TRUE(src_refcount <= dst_refcount);
}
// Test fixture providing two pools: the base pool from RadosTestPP
// (pool_name/ioctx) plus a second pool (cache_pool_name/cache_ioctx) that
// individual tests configure as a cache tier in front of the base pool.
// The base pool is shared across the whole test case; the cache pool is
// created fresh for every test and destroyed afterwards.
class LibRadosTwoPoolsPP : public RadosTestPP
{
public:
  LibRadosTwoPoolsPP() {};
  ~LibRadosTwoPoolsPP() override {};
protected:
  static void SetUpTestCase() {
    pool_name = get_temp_pool_name();
    ASSERT_EQ("", create_one_pool_pp(pool_name, s_cluster));
  }
  static void TearDownTestCase() {
    ASSERT_EQ(0, destroy_one_pool_pp(pool_name, s_cluster));
  }
  static std::string cache_pool_name;
  void SetUp() override {
    SKIP_IF_CRIMSON();
    // Create the per-test cache pool before the base fixture's SetUp, then
    // open an IoCtx on it in the same namespace the base fixture uses.
    cache_pool_name = get_temp_pool_name();
    ASSERT_EQ(0, s_cluster.pool_create(cache_pool_name.c_str()));
    RadosTestPP::SetUp();
    ASSERT_EQ(0, cluster.ioctx_create(cache_pool_name.c_str(), cache_ioctx));
    cache_ioctx.application_enable("rados", true);
    cache_ioctx.set_namespace(nspace);
  }
  void TearDown() override {
    SKIP_IF_CRIMSON();
    // flush + evict cache
    flush_evict_all(cluster, cache_ioctx);
    bufferlist inbl;
    // tear down tiers
    ASSERT_EQ(0, cluster.mon_command(
      "{\"prefix\": \"osd tier remove-overlay\", \"pool\": \"" + pool_name +
      "\"}",
      inbl, NULL, NULL));
    ASSERT_EQ(0, cluster.mon_command(
      "{\"prefix\": \"osd tier remove\", \"pool\": \"" + pool_name +
      "\", \"tierpool\": \"" + cache_pool_name + "\"}",
      inbl, NULL, NULL));
    // wait for maps to settle before next test
    cluster.wait_for_latest_osdmap();
    RadosTestPP::TearDown();
    // Clean out and close the cache pool last, then delete it entirely.
    cleanup_default_namespace(cache_ioctx);
    cleanup_namespace(cache_ioctx, nspace);
    cache_ioctx.close();
    ASSERT_EQ(0, s_cluster.pool_delete(cache_pool_name.c_str()));
  }
  librados::IoCtx cache_ioctx;
};
class Completions
{
public:
Completions() = default;
librados::AioCompletion* getCompletion() {
librados::AioCompletion* comp = librados::Rados::aio_create_completion();
m_completions.push_back(comp);
return comp;
}
~Completions() {
for (auto& comp : m_completions) {
comp->release();
}
}
private:
vector<librados::AioCompletion *> m_completions;
};
// File-scope completion pool shared by the tests below.
Completions completions;
// Storage for the fixture's static cache pool name.
std::string LibRadosTwoPoolsPP::cache_pool_name;
// Exercise the dirty/undirty/is_dirty object flags on a single pool:
// undirty on a nonexistent object succeeds, a freshly created object is
// dirty, undirty clears the flag (idempotently), and any write — even a
// no-op truncate — marks the object dirty again.
TEST_F(LibRadosTierPP, Dirty) {
  SKIP_IF_CRIMSON();
  {
    ObjectWriteOperation op;
    op.undirty();
    ASSERT_EQ(0, ioctx.operate("foo", &op)); // still get 0 if it dne
  }
  {
    ObjectWriteOperation op;
    op.create(true);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    // newly created object starts out dirty
    bool dirty = false;
    int r = -1;
    ObjectReadOperation op;
    op.is_dirty(&dirty, &r);
    ASSERT_EQ(0, ioctx.operate("foo", &op, NULL));
    ASSERT_TRUE(dirty);
    ASSERT_EQ(0, r);
  }
  {
    ObjectWriteOperation op;
    op.undirty();
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    ObjectWriteOperation op;
    op.undirty();
    ASSERT_EQ(0, ioctx.operate("foo", &op)); // still 0 if already clean
  }
  {
    bool dirty = false;
    int r = -1;
    ObjectReadOperation op;
    op.is_dirty(&dirty, &r);
    ASSERT_EQ(0, ioctx.operate("foo", &op, NULL));
    ASSERT_FALSE(dirty);
    ASSERT_EQ(0, r);
  }
  {
    ObjectWriteOperation op;
    op.truncate(0); // still a write even tho it is a no-op
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    // the no-op write above re-dirtied the object
    bool dirty = false;
    int r = -1;
    ObjectReadOperation op;
    op.is_dirty(&dirty, &r);
    ASSERT_EQ(0, ioctx.operate("foo", &op, NULL));
    ASSERT_TRUE(dirty);
    ASSERT_EQ(0, r);
  }
}
// Verify tier overlay redirection: with distinct contents in the base pool
// ("base") and the cache pool ("cache"), reads through the base ioctx are
// redirected to the cache copy unless OPERATION_IGNORE_OVERLAY is passed.
TEST_F(LibRadosTwoPoolsPP, Overlay) {
  SKIP_IF_CRIMSON();
  // create objects
  {
    bufferlist bl;
    bl.append("base");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("cache");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate("foo", &op));
  }
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // by default, the overlay sends us to cache pool
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('c', bl[0]);
  }
  {
    bufferlist bl;
    ASSERT_EQ(1, cache_ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('c', bl[0]);
  }
  // unless we say otherwise
  {
    bufferlist bl;
    ObjectReadOperation op;
    op.read(0, 1, &bl, NULL);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_OVERLAY, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
    // ignoring the overlay reads the base pool's copy
    ASSERT_EQ('b', bl[0]);
  }
}
// In writeback mode, a read through the base pool promotes the object into
// the cache tier, and a failed read creates a whiteout there; both entries
// must then be listable in the cache pool.
TEST_F(LibRadosTwoPoolsPP, Promote) {
  SKIP_IF_CRIMSON();
  // create object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // read, trigger a promote
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
  }
  // read, trigger a whiteout
  {
    bufferlist bl;
    ASSERT_EQ(-ENOENT, ioctx.read("bar", bl, 1, 0));
    ASSERT_EQ(-ENOENT, ioctx.read("bar", bl, 1, 0));
  }
  // verify the object is present in the cache tier
  {
    // exactly two entries ("foo" promoted, "bar" whiteout), in either order
    NObjectIterator it = cache_ioctx.nobjects_begin();
    ASSERT_TRUE(it != cache_ioctx.nobjects_end());
    ASSERT_TRUE(it->get_oid() == string("foo") || it->get_oid() == string("bar"));
    ++it;
    ASSERT_TRUE(it->get_oid() == string("foo") || it->get_oid() == string("bar"));
    ++it;
    ASSERT_TRUE(it == cache_ioctx.nobjects_end());
  }
}
// Promotion must carry snapshot state correctly: after snapshotting and
// then overwriting (or deleting) objects, reads of head promote the new
// data while snap reads still return the pre-snapshot contents, including
// for the deleted object "baz".
TEST_F(LibRadosTwoPoolsPP, PromoteSnap) {
  SKIP_IF_CRIMSON();
  // create object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bar", &op));
  }
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("baz", &op));
  }
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bam", &op));
  }
  // create a snapshot, clone
  vector<uint64_t> my_snaps(1);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
						    my_snaps));
  // overwrite foo/bar/bam with "ciao!" and delete baz, creating clones
  {
    bufferlist bl;
    bl.append("ciao!");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("ciao!");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bar", &op));
  }
  {
    ObjectWriteOperation op;
    op.remove();
    ASSERT_EQ(0, ioctx.operate("baz", &op));
  }
  {
    bufferlist bl;
    bl.append("ciao!");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bam", &op));
  }
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // read, trigger a promote on the head
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('c', bl[0]);
  }
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("bam", bl, 1, 0));
    ASSERT_EQ('c', bl[0]);
  }
  // snap reads must still see the pre-snapshot "hi there" contents
  ioctx.snap_set_read(my_snaps[0]);
  // read foo snap
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('h', bl[0]);
  }
  // read bar snap
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("bar", bl, 1, 0));
    ASSERT_EQ('h', bl[0]);
  }
  // read baz snap
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("baz", bl, 1, 0));
    ASSERT_EQ('h', bl[0]);
  }
  // back on head: new data, and baz is gone
  ioctx.snap_set_read(librados::SNAP_HEAD);
  // read foo
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('c', bl[0]);
  }
  // read bar
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("bar", bl, 1, 0));
    ASSERT_EQ('c', bl[0]);
  }
  // read baz
  {
    bufferlist bl;
    ASSERT_EQ(-ENOENT, ioctx.read("baz", bl, 1, 0));
  }
  // cleanup
  ioctx.selfmanaged_snap_remove(my_snaps[0]);
}
// Promote a subset of heads and clones of many objects across several
// snapshots, then scrub every PG of the cache pool to verify scrub copes
// with clones that are missing from the cache tier.
TEST_F(LibRadosTwoPoolsPP, PromoteSnapScrub) {
  SKIP_IF_CRIMSON();
  int num = 100;
  // create objects
  for (int i=0; i<num; ++i) {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate(string("foo") + stringify(i), &op));
  }
  vector<uint64_t> my_snaps;
  for (int snap=0; snap<4; ++snap) {
    // create a snapshot, clone; newest snap id goes to the front of the list
    vector<uint64_t> ns(1);
    ns.insert(ns.end(), my_snaps.begin(), my_snaps.end());
    my_snaps.swap(ns);
    ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
    cout << "my_snaps " << my_snaps << std::endl;
    ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
						      my_snaps));
    // overwrite everything so each snapshot has distinct clone data
    for (int i=0; i<num; ++i) {
      bufferlist bl;
      bl.append(string("ciao! snap") + stringify(snap));
      ObjectWriteOperation op;
      op.write_full(bl);
      ASSERT_EQ(0, ioctx.operate(string("foo") + stringify(i), &op));
    }
  }
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // read, trigger a promote on _some_ heads to make sure we handle cases
  // where snaps are present and where they are not.
  cout << "promoting some heads" << std::endl;
  for (int i=0; i<num; ++i) {
    if (i % 5 == 0 || i > num - 3) {
      bufferlist bl;
      ASSERT_EQ(1, ioctx.read(string("foo") + stringify(i), bl, 1, 0));
      ASSERT_EQ('c', bl[0]);
    }
  }
  for (unsigned snap = 0; snap < my_snaps.size(); ++snap) {
    cout << "promoting from clones for snap " << my_snaps[snap] << std::endl;
    ioctx.snap_set_read(my_snaps[snap]);
    // read some snaps, semi-randomly
    for (int i=0; i<50; ++i) {
      bufferlist bl;
      string o = string("foo") + stringify((snap * i * 137) % 80);
      //cout << o << std::endl;
      ASSERT_EQ(1, ioctx.read(o, bl, 1, 0));
    }
  }
  // ok, stop and scrub this pool (to make sure scrub can handle
  // missing clones in the cache tier).
  {
    IoCtx cache_ioctx;
    ASSERT_EQ(0, cluster.ioctx_create(cache_pool_name.c_str(), cache_ioctx));
    for (int i=0; i<10; ++i) {
      // Issue the scrub command with a bounded retry when the mgr's osdmap
      // is stale (-ENOENT) or the OSD isn't ready yet (-EAGAIN).  The
      // previous `do { ... continue; } while (false)` form never retried:
      // `continue` re-evaluates the (false) condition and exits the loop.
      for (int tries = 0; tries < 5; ++tries) {
	ostringstream ss;
	ss << "{\"prefix\": \"pg scrub\", \"pgid\": \""
	   << cache_ioctx.get_id() << "." << i
	   << "\"}";
	int r = cluster.mon_command(ss.str(), inbl, NULL, NULL);
	if (r == -ENOENT ||  // in case mgr osdmap is stale
	    r == -EAGAIN) {
	  sleep(5);
	  continue;
	}
	break;
      }
    }
    // give it a few seconds to go. this is sloppy but is usually enough time
    cout << "waiting for scrubs..." << std::endl;
    sleep(30);
    cout << "done waiting" << std::endl;
  }
  ioctx.snap_set_read(librados::SNAP_HEAD);
  //cleanup
  for (unsigned snap = 0; snap < my_snaps.size(); ++snap) {
    ioctx.selfmanaged_snap_remove(my_snaps[snap]);
  }
}
// Race a snap read against snap trimming: after removing the snapshot,
// reading the (logically deleted) snap may legitimately succeed or return
// -ENOENT depending on whether the OSD has trimmed it yet.
TEST_F(LibRadosTwoPoolsPP, PromoteSnapTrimRace) {
  SKIP_IF_CRIMSON();
  // create object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // create a snapshot, clone
  vector<uint64_t> my_snaps(1);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
						    my_snaps));
  {
    bufferlist bl;
    bl.append("ciao!");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // delete the snap
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps[0]));
  ioctx.snap_set_read(my_snaps[0]);
  // read foo snap. the OSD may or may not realize that this snap has
  // been logically deleted; either response is valid.
  {
    bufferlist bl;
    int r = ioctx.read("foo", bl, 1, 0);
    ASSERT_TRUE(r == 1 || r == -ENOENT);
  }
  // cleanup
  ioctx.selfmanaged_snap_remove(my_snaps[0]);
}
// Exercise whiteout objects in the cache tier: deleting through the tier
// creates whiteouts that list in the cache pool, deleting a whiteout
// returns -ENOENT through the tier, and a whiteout can be recreated as a
// real object.
TEST_F(LibRadosTwoPoolsPP, Whiteout) {
  SKIP_IF_CRIMSON();
  // create object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // create some whiteouts, verify they behave
  {
    ObjectWriteOperation op;
    op.assert_exists();
    op.remove();
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    ObjectWriteOperation op;
    op.assert_exists();
    op.remove();
    ASSERT_EQ(-ENOENT, ioctx.operate("bar", &op));
  }
  {
    ObjectWriteOperation op;
    op.assert_exists();
    op.remove();
    ASSERT_EQ(-ENOENT, ioctx.operate("bar", &op));
  }
  // verify the whiteouts are there in the cache tier
  {
    NObjectIterator it = cache_ioctx.nobjects_begin();
    ASSERT_TRUE(it != cache_ioctx.nobjects_end());
    ASSERT_TRUE(it->get_oid() == string("foo") || it->get_oid() == string("bar"));
    ++it;
    ASSERT_TRUE(it->get_oid() == string("foo") || it->get_oid() == string("bar"));
    ++it;
    ASSERT_TRUE(it == cache_ioctx.nobjects_end());
  }
  // delete a whiteout and verify it goes away
  ASSERT_EQ(-ENOENT, ioctx.remove("foo"));
  {
    // remove "bar" directly in the cache (IGNORE_CACHE), leaving only "foo"
    ObjectWriteOperation op;
    op.remove();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate("bar", completion, &op,
				   librados::OPERATION_IGNORE_CACHE));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
    NObjectIterator it = cache_ioctx.nobjects_begin();
    ASSERT_TRUE(it != cache_ioctx.nobjects_end());
    ASSERT_TRUE(it->get_oid() == string("foo"));
    ++it;
    ASSERT_TRUE(it == cache_ioctx.nobjects_end());
  }
  // recreate an object and verify we can read it
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('h', bl[0]);
  }
}
// A single transaction that deletes and then recreates an object through
// the cache tier must leave the object existing with the new content.
TEST_F(LibRadosTwoPoolsPP, WhiteoutDeleteCreate) {
  SKIP_IF_CRIMSON();
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // create an object
  {
    bufferlist bl;
    bl.append("foo");
    ASSERT_EQ(0, ioctx.write_full("foo", bl));
  }
  // do delete + create operation
  {
    ObjectWriteOperation op;
    op.remove();
    bufferlist bl;
    bl.append("bar");
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // verify it still "exists" (w/ new content)
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('b', bl[0]);
  }
}
// Exercise cache eviction rules: a pinned object cannot be evicted
// (-EPERM), a clean object can be evicted (idempotently), and a dirty
// object cannot (-EBUSY).
TEST_F(LibRadosTwoPoolsPP, Evict) {
  SKIP_IF_CRIMSON();
  // create object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // read, trigger a promote
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
  }
  // read, trigger a whiteout, and a dirty object
  {
    bufferlist bl;
    ASSERT_EQ(-ENOENT, ioctx.read("bar", bl, 1, 0));
    ASSERT_EQ(-ENOENT, ioctx.read("bar", bl, 1, 0));
    // zero-length write creates "bar" and leaves it dirty in the cache
    ASSERT_EQ(0, ioctx.write("bar", bl, bl.length(), 0));
  }
  // verify the object is present in the cache tier
  {
    NObjectIterator it = cache_ioctx.nobjects_begin();
    ASSERT_TRUE(it != cache_ioctx.nobjects_end());
    ASSERT_TRUE(it->get_oid() == string("foo") || it->get_oid() == string("bar"));
    ++it;
    ASSERT_TRUE(it->get_oid() == string("foo") || it->get_oid() == string("bar"));
    ++it;
    ASSERT_TRUE(it == cache_ioctx.nobjects_end());
  }
  // pin
  {
    ObjectWriteOperation op;
    op.cache_pin();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // evict the pinned object with -EPERM
  {
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion, &op,
					 librados::OPERATION_IGNORE_CACHE,
					 NULL));
    completion->wait_for_complete();
    ASSERT_EQ(-EPERM, completion->get_return_value());
    completion->release();
  }
  // unpin
  {
    ObjectWriteOperation op;
    op.cache_unpin();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // flush
  {
    ObjectReadOperation op;
    op.cache_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_OVERLAY, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // verify clean
  {
    bool dirty = false;
    int r = -1;
    ObjectReadOperation op;
    op.is_dirty(&dirty, &r);
    ASSERT_EQ(0, cache_ioctx.operate("foo", &op, NULL));
    ASSERT_FALSE(dirty);
    ASSERT_EQ(0, r);
  }
  // evict
  {
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion, &op,
					 librados::OPERATION_IGNORE_CACHE,
					 NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  {
    // evicting an already-evicted object succeeds
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  {
    // "bar" is still dirty, so evicting it fails
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "bar", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(-EBUSY, completion->get_return_value());
    completion->release();
  }
}
// Exercise eviction with snapshots: evicting a clone is allowed, evicting
// the head while clones remain in the cache fails with -EBUSY, and after
// the clone is evicted the head can be evicted too.
TEST_F(LibRadosTwoPoolsPP, EvictSnap) {
  SKIP_IF_CRIMSON();
  // create object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bar", &op));
  }
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("baz", &op));
  }
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bam", &op));
  }
  // create a snapshot, clone
  vector<uint64_t> my_snaps(1);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
						    my_snaps));
  {
    bufferlist bl;
    bl.append("ciao!");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("ciao!");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bar", &op));
  }
  {
    ObjectWriteOperation op;
    op.remove();
    ASSERT_EQ(0, ioctx.operate("baz", &op));
  }
  {
    bufferlist bl;
    bl.append("ciao!");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bam", &op));
  }
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // read, trigger a promote on the head
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('c', bl[0]);
  }
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("bam", bl, 1, 0));
    ASSERT_EQ('c', bl[0]);
  }
  // evict bam
  {
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "bam", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  {
    // NOTE(review): ObjectReadOperation::read takes (off, len); read(1, 0)
    // is a zero-length read at offset 1, used here purely as an existence
    // probe against the cache pool — possibly intended as read(0, 1).
    bufferlist bl;
    ObjectReadOperation op;
    op.read(1, 0, &bl, NULL);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "bam", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(-ENOENT, completion->get_return_value());
    completion->release();
  }
  // read foo snap
  ioctx.snap_set_read(my_snaps[0]);
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('h', bl[0]);
  }
  // evict foo snap
  {
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // snap is gone...
  {
    bufferlist bl;
    ObjectReadOperation op;
    op.read(1, 0, &bl, NULL);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(-ENOENT, completion->get_return_value());
    completion->release();
  }
  // head is still there...
  ioctx.snap_set_read(librados::SNAP_HEAD);
  {
    bufferlist bl;
    ObjectReadOperation op;
    op.read(1, 0, &bl, NULL);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // promote head + snap of bar
  ioctx.snap_set_read(librados::SNAP_HEAD);
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("bar", bl, 1, 0));
    ASSERT_EQ('c', bl[0]);
  }
  ioctx.snap_set_read(my_snaps[0]);
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("bar", bl, 1, 0));
    ASSERT_EQ('h', bl[0]);
  }
  // evict bar head (fail)
  ioctx.snap_set_read(librados::SNAP_HEAD);
  {
    // the clone is still cached, so evicting the head is refused
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "bar", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(-EBUSY, completion->get_return_value());
    completion->release();
  }
  // evict bar snap
  ioctx.snap_set_read(my_snaps[0]);
  {
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "bar", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // ...and then head
  ioctx.snap_set_read(librados::SNAP_HEAD);
  {
    bufferlist bl;
    ObjectReadOperation op;
    op.read(1, 0, &bl, NULL);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "bar", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  {
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "bar", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // cleanup
  ioctx.selfmanaged_snap_remove(my_snaps[0]);
}
// this test case reproduces http://tracker.ceph.com/issues/8629
// (after evicting the head from the cache, the snapdir must not linger in
// the cache pool: a SNAP_DIR list_snaps through the tier returns -ENOENT)
TEST_F(LibRadosTwoPoolsPP, EvictSnap2) {
  SKIP_IF_CRIMSON();
  // create object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // create a snapshot, clone
  vector<uint64_t> my_snaps(1);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
						    my_snaps));
  {
    bufferlist bl;
    bl.append("ciao!");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // read, trigger a promote on the head
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('c', bl[0]);
  }
  // evict
  {
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // verify the snapdir is not present in the cache pool
  {
    ObjectReadOperation op;
    librados::snap_set_t snapset;
    op.list_snaps(&snapset, NULL);
    ioctx.snap_set_read(librados::SNAP_DIR);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate("foo", completion, &op,
				   librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(-ENOENT, completion->get_return_value());
    completion->release();
  }
}
// This test case reproduces http://tracker.ceph.com/issues/17445:
// list-snaps against CEPH_SNAPDIR must still report clone/snap information
// for an object whose snapshot was evicted from the writeback cache tier.
TEST_F(LibRadosTwoPoolsPP, ListSnap){
  SKIP_IF_CRIMSON();
  // Create object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bar", &op));
  }
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("baz", &op));
  }
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bam", &op));
  }
  // Create a snapshot, clone
  vector<uint64_t> my_snaps(1);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
						    my_snaps));
  // Overwrite/remove the objects post-snap so each gets a clone.
  {
    bufferlist bl;
    bl.append("ciao!");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("ciao!");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bar", &op));
  }
  {
    ObjectWriteOperation op;
    op.remove();
    ASSERT_EQ(0, ioctx.operate("baz", &op));
  }
  {
    bufferlist bl;
    bl.append("ciao!");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bam", &op));
  }
  // Configure cache: make cache_pool a writeback tier overlaying the base pool.
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // Wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // Read, trigger a promote on the head
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('c', bl[0]);
  }
  // Read foo snap (promotes the snap clone into the cache tier)
  ioctx.snap_set_read(my_snaps[0]);
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('h', bl[0]);
  }
  // Evict foo snap
  {
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // Snap is gone...
  {
    bufferlist bl;
    ObjectReadOperation op;
    op.read(1, 0, &bl, NULL);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(-ENOENT, completion->get_return_value());
    completion->release();
  }
  // Do list-snaps: every non-head clone must still report its snaps,
  // even though the clone itself was evicted (the bug in #17445).
  ioctx.snap_set_read(CEPH_SNAPDIR);
  {
    snap_set_t snap_set;
    int snap_ret;
    ObjectReadOperation op;
    op.list_snaps(&snap_set, &snap_ret);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "foo", completion, &op,
      0, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, snap_ret);
    ASSERT_LT(0u, snap_set.clones.size());
    for (vector<librados::clone_info_t>::const_iterator r = snap_set.clones.begin();
	 r != snap_set.clones.end();
	 ++r) {
      if (r->cloneid != librados::SNAP_HEAD) {
	ASSERT_LT(0u, r->snaps.size());
      }
    }
  }
  // Cleanup (best effort; return value intentionally ignored)
  ioctx.selfmanaged_snap_remove(my_snaps[0]);
}
// This test case reproduces https://tracker.ceph.com/issues/49409:
// a snap rollback and a snap read issued in parallel both trigger a
// promote (copy) of the same snap clone; the read must not deadlock or
// fail while the first promote is still in flight.
TEST_F(LibRadosTwoPoolsPP, EvictSnapRollbackReadRace) {
  SKIP_IF_CRIMSON();
  // create object
  {
    bufferlist bl;
    int len = string("hi there").length() * 2;
    // append more chunk data to make the object big enough that the second
    // promote op arrives before the first promote op finishes
    for (int i=0; i<4*1024*1024/len; ++i)
      bl.append("hi therehi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // create two snapshots, a clone
  vector<uint64_t> my_snaps(2);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[1]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
						    my_snaps));
  {
    bufferlist bl;
    bl.append("ciao!");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // configure cache: writeback tier over the base pool
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // read, trigger a promote on the head
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('c', bl[0]);
  }
  // try more times to make the race window more likely to be hit
  int retries = 50;
  for (int i=0; i<retries; ++i)
  {
    {
      librados::AioCompletion * completion = cluster.aio_create_completion();
      librados::AioCompletion * completion1 = cluster.aio_create_completion();
      // send a snap rollback op and a snap read op in parallel
      // trigger two promote(copy) to the same snap clone obj
      // the second snap read op is read-ordered to make sure the
      // op does not wait on objects_blocked_on_snap_promotion
      ObjectWriteOperation op;
      op.selfmanaged_snap_rollback(my_snaps[0]);
      ASSERT_EQ(0, ioctx.aio_operate(
      "foo", completion, &op));
      ioctx.snap_set_read(my_snaps[1]);
      std::map<uint64_t, uint64_t> extents;
      bufferlist read_bl;
      int rval = -1;
      ObjectReadOperation op1;
      op1.sparse_read(0, 8, &extents, &read_bl, &rval);
      ASSERT_EQ(0, ioctx.aio_operate("foo", completion1, &op1, &read_bl));
      ioctx.snap_set_read(librados::SNAP_HEAD);
      completion->wait_for_complete();
      ASSERT_EQ(0, completion->get_return_value());
      completion->release();
      completion1->wait_for_complete();
      ASSERT_EQ(0, completion1->get_return_value());
      completion1->release();
    }
    // evict foo snap so the next iteration has to promote it again
    ioctx.snap_set_read(my_snaps[0]);
    {
      ObjectReadOperation op;
      op.cache_evict();
      librados::AioCompletion *completion = cluster.aio_create_completion();
      ASSERT_EQ(0, ioctx.aio_operate(
        "foo", completion, &op,
        librados::OPERATION_IGNORE_CACHE, NULL));
      completion->wait_for_complete();
      ASSERT_EQ(0, completion->get_return_value());
      completion->release();
    }
    ioctx.snap_set_read(librados::SNAP_HEAD);
  }
  // cleanup (best effort; return values intentionally ignored)
  ioctx.selfmanaged_snap_remove(my_snaps[0]);
  ioctx.selfmanaged_snap_remove(my_snaps[1]);
}
// Exercise cache_try_flush: a dirty object in the writeback tier can be
// try-flushed to the base tier (unless pinned, which yields -EPERM), after
// which it is clean and can be evicted from the cache tier.
TEST_F(LibRadosTwoPoolsPP, TryFlush) {
  SKIP_IF_CRIMSON();
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // create object (write lands in the cache tier because of the overlay)
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // verify the object is present in the cache tier
  {
    NObjectIterator it = cache_ioctx.nobjects_begin();
    ASSERT_TRUE(it != cache_ioctx.nobjects_end());
    ASSERT_TRUE(it->get_oid() == string("foo"));
    ++it;
    ASSERT_TRUE(it == cache_ioctx.nobjects_end());
  }
  // verify the object is NOT present in the base tier
  {
    NObjectIterator it = ioctx.nobjects_begin();
    ASSERT_TRUE(it == ioctx.nobjects_end());
  }
  // verify dirty
  {
    bool dirty = false;
    int r = -1;
    ObjectReadOperation op;
    op.is_dirty(&dirty, &r);
    ASSERT_EQ(0, cache_ioctx.operate("foo", &op, NULL));
    ASSERT_TRUE(dirty);
    ASSERT_EQ(0, r);
  }
  // pin
  {
    ObjectWriteOperation op;
    op.cache_pin();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // flush the pinned object: must fail with -EPERM
  {
    ObjectReadOperation op;
    op.cache_try_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
     "foo", completion, &op,
     librados::OPERATION_IGNORE_OVERLAY |
     librados::OPERATION_SKIPRWLOCKS, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(-EPERM, completion->get_return_value());
    completion->release();
  }
  // unpin
  {
    ObjectWriteOperation op;
    op.cache_unpin();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // flush (now succeeds)
  {
    ObjectReadOperation op;
    op.cache_try_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
     "foo", completion, &op,
     librados::OPERATION_IGNORE_OVERLAY |
     librados::OPERATION_SKIPRWLOCKS, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // verify clean
  {
    bool dirty = false;
    int r = -1;
    ObjectReadOperation op;
    op.is_dirty(&dirty, &r);
    ASSERT_EQ(0, cache_ioctx.operate("foo", &op, NULL));
    ASSERT_FALSE(dirty);
    ASSERT_EQ(0, r);
  }
  // verify in base tier
  {
    NObjectIterator it = ioctx.nobjects_begin();
    ASSERT_TRUE(it != ioctx.nobjects_end());
    ASSERT_TRUE(it->get_oid() == string("foo"));
    ++it;
    ASSERT_TRUE(it == ioctx.nobjects_end());
  }
  // evict it
  {
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op, librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // verify no longer in cache tier
  {
    NObjectIterator it = cache_ioctx.nobjects_begin();
    ASSERT_TRUE(it == cache_ioctx.nobjects_end());
  }
}
// Exercise cache_flush: flush a dirty object (pin blocks it with -EPERM),
// verify the user version survives flush+evict+re-promote, then flush and
// evict a whiteout so the object disappears from both tiers.
TEST_F(LibRadosTwoPoolsPP, Flush) {
  SKIP_IF_CRIMSON();
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  uint64_t user_version = 0;
  // create object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // verify the object is present in the cache tier
  {
    NObjectIterator it = cache_ioctx.nobjects_begin();
    ASSERT_TRUE(it != cache_ioctx.nobjects_end());
    ASSERT_TRUE(it->get_oid() == string("foo"));
    ++it;
    ASSERT_TRUE(it == cache_ioctx.nobjects_end());
  }
  // verify the object is NOT present in the base tier
  {
    NObjectIterator it = ioctx.nobjects_begin();
    ASSERT_TRUE(it == ioctx.nobjects_end());
  }
  // verify dirty, and remember the user version for the consistency
  // check after flush/evict below
  {
    bool dirty = false;
    int r = -1;
    ObjectReadOperation op;
    op.is_dirty(&dirty, &r);
    ASSERT_EQ(0, cache_ioctx.operate("foo", &op, NULL));
    ASSERT_TRUE(dirty);
    ASSERT_EQ(0, r);
    user_version = cache_ioctx.get_last_version();
  }
  // pin
  {
    ObjectWriteOperation op;
    op.cache_pin();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // flush the pinned object: must fail with -EPERM
  {
    ObjectReadOperation op;
    op.cache_try_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
     "foo", completion, &op,
     librados::OPERATION_IGNORE_OVERLAY |
     librados::OPERATION_SKIPRWLOCKS, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(-EPERM, completion->get_return_value());
    completion->release();
  }
  // unpin
  {
    ObjectWriteOperation op;
    op.cache_unpin();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // flush
  {
    ObjectReadOperation op;
    op.cache_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
     "foo", completion, &op,
     librados::OPERATION_IGNORE_OVERLAY, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // verify clean
  {
    bool dirty = false;
    int r = -1;
    ObjectReadOperation op;
    op.is_dirty(&dirty, &r);
    ASSERT_EQ(0, cache_ioctx.operate("foo", &op, NULL));
    ASSERT_FALSE(dirty);
    ASSERT_EQ(0, r);
  }
  // verify in base tier
  {
    NObjectIterator it = ioctx.nobjects_begin();
    ASSERT_TRUE(it != ioctx.nobjects_end());
    ASSERT_TRUE(it->get_oid() == string("foo"));
    ++it;
    ASSERT_TRUE(it == ioctx.nobjects_end());
  }
  // evict it
  {
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op, librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // verify no longer in cache tier
  {
    NObjectIterator it = cache_ioctx.nobjects_begin();
    ASSERT_TRUE(it == cache_ioctx.nobjects_end());
  }
  // read it again (re-promote) and verify the version is consistent
  {
    bufferlist bl;
    ASSERT_EQ(1, cache_ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ(user_version, cache_ioctx.get_last_version());
  }
  // erase it (creates a whiteout in the cache tier)
  {
    ObjectWriteOperation op;
    op.remove();
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // flush whiteout
  {
    ObjectReadOperation op;
    op.cache_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_OVERLAY, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // evict
  {
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op, librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // verify no longer in cache tier
  {
    NObjectIterator it = cache_ioctx.nobjects_begin();
    ASSERT_TRUE(it == cache_ioctx.nobjects_end());
  }
  // or base tier
  {
    NObjectIterator it = ioctx.nobjects_begin();
    ASSERT_TRUE(it == ioctx.nobjects_end());
  }
}
// Exercise cache_flush ordering with snapshots: flushing the head or a
// recent snap while an older snap is still dirty must fail with -EBUSY;
// flushing oldest-to-newest succeeds, after which all snaps are readable
// from both the cache tier and (with the overlay removed) the base tier.
TEST_F(LibRadosTwoPoolsPP, FlushSnap) {
  SKIP_IF_CRIMSON();
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // create object: content 'a' at the oldest snap
  {
    bufferlist bl;
    bl.append("a");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // create a snapshot, clone: content 'b' at the middle snap
  vector<uint64_t> my_snaps(1);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
						    my_snaps));
  {
    bufferlist bl;
    bl.append("b");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // and another: content 'c' on the head; my_snaps[0] is always the newest
  my_snaps.resize(2);
  my_snaps[1] = my_snaps[0];
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
						    my_snaps));
  {
    bufferlist bl;
    bl.append("c");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // verify the object is present in the cache tier
  {
    NObjectIterator it = cache_ioctx.nobjects_begin();
    ASSERT_TRUE(it != cache_ioctx.nobjects_end());
    ASSERT_TRUE(it->get_oid() == string("foo"));
    ++it;
    ASSERT_TRUE(it == cache_ioctx.nobjects_end());
  }
  // verify the object is NOT present in the base tier
  {
    NObjectIterator it = ioctx.nobjects_begin();
    ASSERT_TRUE(it == ioctx.nobjects_end());
  }
  // flush on head (should fail: older snaps are still dirty)
  ioctx.snap_set_read(librados::SNAP_HEAD);
  {
    ObjectReadOperation op;
    op.cache_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(-EBUSY, completion->get_return_value());
    completion->release();
  }
  // flush on recent snap (should fail: an older snap is still dirty)
  ioctx.snap_set_read(my_snaps[0]);
  {
    ObjectReadOperation op;
    op.cache_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(-EBUSY, completion->get_return_value());
    completion->release();
  }
  // flush on oldest snap
  ioctx.snap_set_read(my_snaps[1]);
  {
    ObjectReadOperation op;
    op.cache_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // flush on next oldest snap
  ioctx.snap_set_read(my_snaps[0]);
  {
    ObjectReadOperation op;
    op.cache_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // flush on head
  ioctx.snap_set_read(librados::SNAP_HEAD);
  {
    ObjectReadOperation op;
    op.cache_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // verify i can read the snaps from the cache pool
  ioctx.snap_set_read(librados::SNAP_HEAD);
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('c', bl[0]);
  }
  ioctx.snap_set_read(my_snaps[0]);
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('b', bl[0]);
  }
  ioctx.snap_set_read(my_snaps[1]);
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('a', bl[0]);
  }
  // remove overlay so reads hit the base pool directly
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier remove-overlay\", \"pool\": \"" + pool_name +
    "\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // verify i can read the snaps from the base pool
  ioctx.snap_set_read(librados::SNAP_HEAD);
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('c', bl[0]);
  }
  ioctx.snap_set_read(my_snaps[0]);
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('b', bl[0]);
  }
  ioctx.snap_set_read(my_snaps[1]);
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('a', bl[0]);
  }
  // restore the overlay so fixture teardown can tear the tier down cleanly
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  // cleanup (best effort; return value intentionally ignored)
  ioctx.selfmanaged_snap_remove(my_snaps[0]);
}
// Race a flush against a concurrent write: a full cache_flush must still
// succeed (it piggybacks/blocks as needed), while cache_try_flush racing a
// write eventually loses with -EBUSY. Note: this test builds its own
// cluster/pools and ioctxs, shadowing the fixture members of the same names.
TEST_F(LibRadosTierPP, FlushWriteRaces) {
  SKIP_IF_CRIMSON();
  Rados cluster;
  std::string pool_name = get_temp_pool_name();
  std::string cache_pool_name = pool_name + "-cache";
  ASSERT_EQ("", create_one_pool_pp(pool_name, cluster));
  ASSERT_EQ(0, cluster.pool_create(cache_pool_name.c_str()));
  IoCtx cache_ioctx;
  ASSERT_EQ(0, cluster.ioctx_create(cache_pool_name.c_str(), cache_ioctx));
  cache_ioctx.application_enable("rados", true);
  IoCtx ioctx;
  ASSERT_EQ(0, cluster.ioctx_create(pool_name.c_str(), ioctx));
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // create/dirty object
  bufferlist bl;
  bl.append("hi there");
  {
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // flush + write: both must succeed
  {
    ObjectReadOperation op;
    op.cache_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_OVERLAY, NULL));
    ObjectWriteOperation op2;
    op2.write_full(bl);
    librados::AioCompletion *completion2 = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "foo", completion2, &op2, 0));
    completion->wait_for_complete();
    completion2->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    ASSERT_EQ(0, completion2->get_return_value());
    completion->release();
    completion2->release();
  }
  // retry until the try-flush actually races the write and gets -EBUSY
  int tries = 1000;
  do {
    // create/dirty object
    {
      bufferlist bl;
      bl.append("hi there");
      ObjectWriteOperation op;
      op.write_full(bl);
      ASSERT_EQ(0, ioctx.operate("foo", &op));
    }
    // try-flush + write
    {
      ObjectReadOperation op;
      op.cache_try_flush();
      librados::AioCompletion *completion = cluster.aio_create_completion();
      ASSERT_EQ(0, cache_ioctx.aio_operate(
        "foo", completion, &op,
        librados::OPERATION_IGNORE_OVERLAY |
        librados::OPERATION_SKIPRWLOCKS, NULL));
      ObjectWriteOperation op2;
      op2.write_full(bl);
      librados::AioCompletion *completion2 = cluster.aio_create_completion();
      ASSERT_EQ(0, ioctx.aio_operate("foo", completion2, &op2, 0));
      completion->wait_for_complete();
      completion2->wait_for_complete();
      int r = completion->get_return_value();
      ASSERT_TRUE(r == -EBUSY || r == 0);
      ASSERT_EQ(0, completion2->get_return_value());
      completion->release();
      completion2->release();
      if (r == -EBUSY)
	break;
      cout << "didn't get EBUSY, trying again" << std::endl;
    }
    ASSERT_TRUE(--tries);
  } while (true);
  // tear down tiers
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier remove-overlay\", \"pool\": \"" + pool_name +
    "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier remove\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  // wait for maps to settle before next test
  cluster.wait_for_latest_osdmap();
  ASSERT_EQ(0, cluster.pool_delete(cache_pool_name.c_str()));
  ASSERT_EQ(0, destroy_one_pool_pp(pool_name, cluster));
}
// Race concurrent flush operations against each other in all combinations:
// flush+flush and flush+try-flush both succeed (the second piggybacks),
// try-flush+flush eventually yields -EBUSY for the try-flush (flush does
// not piggyback on a try-flush), and try-flush+try-flush both succeed.
TEST_F(LibRadosTwoPoolsPP, FlushTryFlushRaces) {
  SKIP_IF_CRIMSON();
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // create/dirty object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // flush + flush
  {
    ObjectReadOperation op;
    op.cache_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_OVERLAY, NULL));
    ObjectReadOperation op2;
    op2.cache_flush();
    librados::AioCompletion *completion2 = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion2, &op2,
      librados::OPERATION_IGNORE_OVERLAY, NULL));
    completion->wait_for_complete();
    completion2->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    ASSERT_EQ(0, completion2->get_return_value());
    completion->release();
    completion2->release();
  }
  // create/dirty object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // flush + try-flush
  {
    ObjectReadOperation op;
    op.cache_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_OVERLAY, NULL));
    ObjectReadOperation op2;
    op2.cache_try_flush();
    librados::AioCompletion *completion2 = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion2, &op2,
      librados::OPERATION_IGNORE_OVERLAY |
      librados::OPERATION_SKIPRWLOCKS, NULL));
    completion->wait_for_complete();
    completion2->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    ASSERT_EQ(0, completion2->get_return_value());
    completion->release();
    completion2->release();
  }
  // create/dirty object, retrying until the race is actually hit
  int tries = 1000;
  do {
    {
      bufferlist bl;
      bl.append("hi there");
      ObjectWriteOperation op;
      op.write_full(bl);
      ASSERT_EQ(0, ioctx.operate("foo", &op));
    }
    // try-flush + flush
    //  (flush will not piggyback on try-flush)
    {
      ObjectReadOperation op;
      op.cache_try_flush();
      librados::AioCompletion *completion = cluster.aio_create_completion();
      ASSERT_EQ(0, cache_ioctx.aio_operate(
        "foo", completion, &op,
        librados::OPERATION_IGNORE_OVERLAY |
        librados::OPERATION_SKIPRWLOCKS, NULL));
      ObjectReadOperation op2;
      op2.cache_flush();
      librados::AioCompletion *completion2 = cluster.aio_create_completion();
      ASSERT_EQ(0, cache_ioctx.aio_operate(
        "foo", completion2, &op2,
        librados::OPERATION_IGNORE_OVERLAY, NULL));
      completion->wait_for_complete();
      completion2->wait_for_complete();
      int r = completion->get_return_value();
      ASSERT_TRUE(r == -EBUSY || r == 0);
      ASSERT_EQ(0, completion2->get_return_value());
      completion->release();
      completion2->release();
      if (r == -EBUSY)
	break;
      cout << "didn't get EBUSY, trying again" << std::endl;
    }
    ASSERT_TRUE(--tries);
  } while (true);
  // create/dirty object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // try-flush + try-flush
  {
    ObjectReadOperation op;
    op.cache_try_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_OVERLAY |
      librados::OPERATION_SKIPRWLOCKS, NULL));
    ObjectReadOperation op2;
    op2.cache_try_flush();
    librados::AioCompletion *completion2 = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion2, &op2,
      librados::OPERATION_IGNORE_OVERLAY |
      librados::OPERATION_SKIPRWLOCKS, NULL));
    completion->wait_for_complete();
    completion2->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    ASSERT_EQ(0, completion2->get_return_value());
    completion->release();
    completion2->release();
  }
}
// Shared state for the TryFlushReadRace test below: a self-sustaining
// stream of async stat reads is kept in flight against *read_ioctx while
// the main thread issues a try-flush. All of it is guarded by test_lock.
IoCtx *read_ioctx = nullptr;   // set by the test before reads start
ceph::mutex test_lock = ceph::make_mutex("FlushReadRaces::lock");
ceph::condition_variable cond; // signaled as in-flight reads drain
int max_reads = 100;           // target concurrency; test sets to 0 to stop
int num_reads = 0; // in progress
void flush_read_race_cb(completion_t cb, void *arg);
// Fire one async stat read at "foo" on *read_ioctx; flush_read_race_cb
// re-arms it on completion, keeping a continuous read stream in flight.
// NOTE(review): `completions` is a completion pool defined elsewhere in
// this file — its getCompletion() presumably hands back a reusable
// AioCompletion; confirm ownership/release semantics there.
void start_flush_read()
{
  //cout << " starting read" << std::endl;
  ObjectReadOperation op;
  op.stat(NULL, NULL, NULL);
  librados::AioCompletion *completion = completions.getCompletion();
  completion->set_complete_callback(0, flush_read_race_cb);
  read_ioctx->aio_operate("foo", completion, &op, NULL);
}
// Completion callback for start_flush_read: under test_lock, either retire
// this read (when the test has dropped max_reads below the in-flight count,
// i.e. it wants the stream to drain) and wake the waiter, or immediately
// issue the next read to keep the stream going.
void flush_read_race_cb(completion_t cb, void *arg)
{
  //cout << " finished read" << std::endl;
  std::lock_guard l{test_lock};
  if (num_reads > max_reads) {
    num_reads--;
    cond.notify_all();
  } else {
    start_flush_read();
  }
}
// Race cache_try_flush against a continuous stream of concurrent reads on
// the same (large) object; the try-flush must still complete successfully.
TEST_F(LibRadosTwoPoolsPP, TryFlushReadRace) {
  SKIP_IF_CRIMSON();
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // create/dirty object; big so the flush takes long enough to race reads
  {
    bufferlist bl;
    bl.append("hi there");
    bufferptr bp(4000000);  // make it big!
    bp.zero();
    bl.append(bp);
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // start a continuous stream of reads (self-rearming via the callback)
  read_ioctx = &ioctx;
  test_lock.lock();
  for (int i = 0; i < max_reads; ++i) {
    start_flush_read();
    num_reads++;
  }
  test_lock.unlock();
  // try-flush
  ObjectReadOperation op;
  op.cache_try_flush();
  librados::AioCompletion *completion = cluster.aio_create_completion();
  ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_OVERLAY |
      librados::OPERATION_SKIPRWLOCKS, NULL));
  completion->wait_for_complete();
  ASSERT_EQ(0, completion->get_return_value());
  completion->release();
  // stop reads: drop the target to 0 and wait for all in-flight reads to drain
  std::unique_lock locker{test_lock};
  max_reads = 0;
  cond.wait(locker, [] { return num_reads == 0;});
}
// With no hit_set configured on the pool, hit_set_list returns an empty
// list and hit_set_get for any time returns -ENOENT.
TEST_F(LibRadosTierPP, HitSetNone) {
  SKIP_IF_CRIMSON();
  {
    list< pair<time_t,time_t> > ls;
    AioCompletion *c = librados::Rados::aio_create_completion();
    ASSERT_EQ(0, ioctx.hit_set_list(123, c, &ls));
    c->wait_for_complete();
    ASSERT_EQ(0, c->get_return_value());
    ASSERT_TRUE(ls.empty());
    c->release();
  }
  {
    bufferlist bl;
    AioCompletion *c = librados::Rados::aio_create_completion();
    ASSERT_EQ(0, ioctx.hit_set_get(123, c, 12345, &bl));
    c->wait_for_complete();
    ASSERT_EQ(-ENOENT, c->get_return_value());
    c->release();
  }
}
/// Build the JSON mon command that sets pool option @p var to the string
/// value @p val on pool @p pool, e.g.
///   {"prefix": "osd pool set","pool":"p","var": "v","val": "x"}
/// Parameters are taken by const reference to avoid three string copies
/// per call; callers passing literals/temporaries are unaffected.
std::string set_pool_str(const std::string& pool, const std::string& var,
                         const std::string& val)
{
  return std::string("{\"prefix\": \"osd pool set\",\"pool\":\"") + pool
    + std::string("\",\"var\": \"") + var + std::string("\",\"val\": \"")
    + val + std::string("\"}");
}
/// Build the JSON mon command that sets pool option @p var to the integer
/// value @p val on pool @p pool. Uses std::to_string instead of the
/// project `stringify` helper — equivalent for int and avoids the extra
/// ostringstream machinery; string parameters by const reference to avoid
/// copies.
std::string set_pool_str(const std::string& pool, const std::string& var,
                         int val)
{
  return std::string("{\"prefix\": \"osd pool set\",\"pool\":\"") + pool
    + std::string("\",\"var\": \"") + var + std::string("\",\"val\": \"")
    + std::to_string(val) + std::string("\"}");
}
// With explicit_object hit_set tracking enabled on the cache pool, repeated
// reads of an object must eventually make it appear in the pool's HitSet.
TEST_F(LibRadosTwoPoolsPP, HitSetRead) {
  SKIP_IF_CRIMSON();
  // make it a tier
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  // enable hitset tracking for this pool
  ASSERT_EQ(0, cluster.mon_command(set_pool_str(cache_pool_name, "hit_set_count", 2),
						inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(set_pool_str(cache_pool_name, "hit_set_period", 600),
						inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(set_pool_str(cache_pool_name, "hit_set_type",
						"explicit_object"),
				   inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  cache_ioctx.set_namespace("");
  // keep reading until we see our object appear in the HitSet,
  // with a hard 600s deadline so a regression fails instead of hanging
  utime_t start = ceph_clock_now();
  utime_t hard_stop = start + utime_t(600, 0);
  while (true) {
    utime_t now = ceph_clock_now();
    ASSERT_TRUE(now < hard_stop);
    string name = "foo";
    uint32_t hash;
    ASSERT_EQ(0, cache_ioctx.get_object_hash_position2(name, &hash));
    hobject_t oid(sobject_t(name, CEPH_NOSNAP), "", hash,
		  cluster.pool_lookup(cache_pool_name.c_str()), "");
    bufferlist bl;
    // the object does not exist; the read still registers a hit
    ASSERT_EQ(-ENOENT, cache_ioctx.read("foo", bl, 1, 0));
    bufferlist hbl;
    AioCompletion *c = librados::Rados::aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.hit_set_get(hash, c, now.sec(), &hbl));
    c->wait_for_complete();
    c->release();
    if (hbl.length()) {
      auto p = hbl.cbegin();
      HitSet hs;
      decode(hs, p);
      if (hs.contains(oid)) {
	cout << "ok, hit_set contains " << oid << std::endl;
	break;
      }
      cout << "hmm, not in HitSet yet" << std::endl;
    } else {
      cout << "hmm, no HitSet yet" << std::endl;
    }
    sleep(1);
  }
}
// Query the monitor for a pool's current pg_num via "osd pool get"
// (JSON format) and return it; returns -1 if the reply cannot be parsed
// or does not contain a "pg_num" key.
static int _get_pg_num(Rados& cluster, string pool_name)
{
  const string cmd = string("{\"prefix\": \"osd pool get\",\"pool\":\"")
    + pool_name
    + string("\",\"var\": \"pg_num\",\"format\": \"json\"}");
  bufferlist inbl;
  bufferlist outbl;
  const int rc = cluster.mon_command(cmd, inbl, &outbl, NULL);
  ceph_assert(rc >= 0);
  const string reply(outbl.c_str(), outbl.length());
  json_spirit::Value parsed;
  if (!json_spirit::read(reply, parsed)) {
    cerr <<" unable to parse json " << reply << std::endl;
    return -1;
  }
  // the reply is a JSON object; scan its key/value pairs for pg_num
  for (json_spirit::Pair& kv : parsed.get_obj()) {
    if (kv.name_ == "pg_num") {
      cout << "pg_num = " << kv.value_.get_int() << std::endl;
      return kv.value_.get_int();
    }
  }
  cerr << "didn't find pg_num in " << reply << std::endl;
  return -1;
}
// Write `num` one-byte objects into the cache pool, then fetch the most
// recent HitSet of every PG into `hitsets` (keyed by PG id).
//
// @param cluster          connected Rados handle (used to re-read pg_num)
// @param cache_ioctx      ioctx on the cache pool being written/queried
// @param num_pg           PG count of the cache pool at call time
// @param num              number of objects to write ("0".."num-1")
// @param hitsets          out: decoded latest HitSet per PG
// @param cache_pool_name  pool name, needed to refresh pg_num
// @return the (possibly updated) PG count after coping with splits
int make_hitset(Rados& cluster, librados::IoCtx& cache_ioctx, int num_pg,
		int num, std::map<int, HitSet>& hitsets, std::string& cache_pool_name)
{
  int pg = num_pg;
  // do a bunch of writes
  for (int i=0; i<num; ++i) {
    bufferlist bl;
    bl.append("a");
    ceph_assert(0 == cache_ioctx.write(stringify(i), bl, 1, 0));
  }
  // get HitSets
  for (int i=0; i<pg; ++i) {
    list< pair<time_t,time_t> > ls;
    AioCompletion *c = librados::Rados::aio_create_completion();
    ceph_assert(0 == cache_ioctx.hit_set_list(i, c, &ls));
    c->wait_for_complete();
    c->release();
    std::cout << "pg " << i << " ls " << ls << std::endl;
    ceph_assert(!ls.empty());
    // get the latest
    c = librados::Rados::aio_create_completion();
    bufferlist bl;
    ceph_assert(0 == cache_ioctx.hit_set_get(i, c, ls.back().first, &bl));
    c->wait_for_complete();
    c->release();
    try {
      auto p = bl.cbegin();
      decode(hitsets[i], p);
    }
    catch (buffer::error& e) {
      std::cout << "failed to decode hit set; bl len is " << bl.length() << "\n";
      bl.hexdump(std::cout);
      std::cout << std::endl;
      // rethrow in place; `throw e;` would copy (and potentially slice)
      // the caught exception
      throw;
    }
    // cope with racing splits by refreshing pg_num
    if (i == pg - 1)
      pg = _get_pg_num(cluster, cache_pool_name);
  }
  return pg;
}
// Enable explicit_hash hitset tracking, write 200 objects via
// make_hitset(), and verify that every written object's hash is recorded
// in some PG's HitSet (retrying a few times to tolerate races).
TEST_F(LibRadosTwoPoolsPP, HitSetWrite) {
  SKIP_IF_CRIMSON();
  int num_pg = _get_pg_num(cluster, pool_name);
  ceph_assert(num_pg > 0);
  // make it a tier
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  // enable hitset tracking for this pool
  ASSERT_EQ(0, cluster.mon_command(set_pool_str(cache_pool_name, "hit_set_count", 8),
						inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(set_pool_str(cache_pool_name, "hit_set_period", 600),
						inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(set_pool_str(cache_pool_name, "hit_set_type",
						"explicit_hash"),
				   inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  cache_ioctx.set_namespace("");
  int num = 200;
  std::map<int,HitSet> hitsets;
  num_pg = make_hitset(cluster, cache_ioctx, num_pg, num, hitsets, cache_pool_name);
  int retry = 0;
  for (int i=0; i<num; ++i) {
    string n = stringify(i);
    uint32_t hash;
    ASSERT_EQ(0, cache_ioctx.get_object_hash_position2(n, &hash));
    hobject_t oid(sobject_t(n, CEPH_NOSNAP), "", hash,
		  cluster.pool_lookup(cache_pool_name.c_str()), "");
    std::cout << "checking for " << oid << std::endl;
    bool found = false;
    for (int p=0; p<num_pg; ++p) {
      if (hitsets[p].contains(oid)) {
	found = true;
	break;
      }
    }
    // not found can be a transient race (e.g. PG split mid-run);
    // re-generate the hitsets and re-check this object up to 5 times
    if (!found && retry < 5) {
      num_pg = make_hitset(cluster, cache_ioctx, num_pg, num, hitsets, cache_pool_name);
      i--;
      retry++;
      continue;
    }
    ASSERT_TRUE(found);
  }
}
// Configure a small bloom hitset ring (count=3, period=3s) and keep
// writing "foo" until the oldest hitset interval changes, proving the
// OSD rotates and trims old hitsets.  Bounded by a generous deadline.
TEST_F(LibRadosTwoPoolsPP, HitSetTrim) {
  SKIP_IF_CRIMSON();
  unsigned count = 3;
  unsigned period = 3;
  // make it a tier
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  // enable hitset tracking for this pool
  ASSERT_EQ(0, cluster.mon_command(set_pool_str(cache_pool_name, "hit_set_count", count),
						inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(set_pool_str(cache_pool_name, "hit_set_period", period),
						inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(set_pool_str(cache_pool_name, "hit_set_type", "bloom"),
				   inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(set_pool_str(cache_pool_name, "hit_set_fpp", ".01"),
				   inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  cache_ioctx.set_namespace("");
  // do a bunch of writes and make sure the hitsets rotate
  utime_t start = ceph_clock_now();
  utime_t hard_stop = start + utime_t(count * period * 50, 0);
  time_t first = 0;
  while (true) {
    string name = "foo";
    uint32_t hash;
    ASSERT_EQ(0, cache_ioctx.get_object_hash_position2(name, &hash));
    hobject_t oid(sobject_t(name, CEPH_NOSNAP), "", hash, -1, "");
    bufferlist bl;
    bl.append("f");
    ASSERT_EQ(0, cache_ioctx.write("foo", bl, 1, 0));
    list<pair<time_t, time_t> > ls;
    AioCompletion *c = librados::Rados::aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.hit_set_list(hash, c, &ls));
    c->wait_for_complete();
    c->release();
    cout << " got ls " << ls << std::endl;
    if (!ls.empty()) {
      if (!first) {
	// remember the start time of the oldest hitset we first observe
	first = ls.front().first;
	cout << "first is " << first << std::endl;
      } else {
	// once the oldest interval changes, trimming has happened
	if (ls.front().first != first) {
	  cout << "first now " << ls.front().first << ", trimmed" << std::endl;
	  break;
	}
      }
    }
    utime_t now = ceph_clock_now();
    ASSERT_TRUE(now < hard_stop);
    sleep(1);
  }
}
// With min_read_recency_for_promote=1, a single read of a cold object
// must NOT promote it into the writeback cache tier, while repeated
// reads eventually must.  Iterates over fresh objects until a first
// read is observed to not promote, then reads that object until it is
// promoted.
TEST_F(LibRadosTwoPoolsPP, PromoteOn2ndRead) {
  SKIP_IF_CRIMSON();
  // create object
  for (int i=0; i<20; ++i) {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo" + stringify(i), &op));
  }
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // enable hitset tracking for this pool
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "hit_set_count", 2),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "hit_set_period", 600),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "hit_set_type", "bloom"),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "min_read_recency_for_promote", 1),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "hit_set_grade_decay_rate", 20),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "hit_set_search_last_n", 1),
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  int fake = 0;  // set this to non-zero to test spurious promotion,
		 // e.g. from thrashing
  int attempt = 0;
  string obj;
  while (true) {
    // 1st read, don't trigger a promote
    obj = "foo" + stringify(attempt);
    cout << obj << std::endl;
    {
      bufferlist bl;
      ASSERT_EQ(1, ioctx.read(obj.c_str(), bl, 1, 0));
      if (--fake >= 0) {
	sleep(1);
	ASSERT_EQ(1, ioctx.read(obj.c_str(), bl, 1, 0));
	sleep(1);
      }
    }
    // verify the object is NOT present in the cache tier
    {
      bool found = false;
      NObjectIterator it = cache_ioctx.nobjects_begin();
      while (it != cache_ioctx.nobjects_end()) {
	cout << " see " << it->get_oid() << std::endl;
	if (it->get_oid() == string(obj.c_str())) {
	  found = true;
	  break;
	}
	++it;
      }
      if (!found)
	break;
    }
    // a promote can still happen (e.g. racing agent activity); move on
    // to the next fresh object and try again, up to 20 attempts
    ++attempt;
    ASSERT_LE(attempt, 20);
    cout << "hrm, object is present in cache on attempt " << attempt
	 << ", retrying" << std::endl;
  }
  // Read until the object is present in the cache tier
  cout << "verifying " << obj << " is eventually promoted" << std::endl;
  while (true) {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read(obj.c_str(), bl, 1, 0));
    bool there = false;
    NObjectIterator it = cache_ioctx.nobjects_begin();
    while (it != cache_ioctx.nobjects_end()) {
      if (it->get_oid() == string(obj.c_str())) {
	there = true;
	break;
      }
      ++it;
    }
    if (there)
      break;
    sleep(1);
  }
  // tear down tiers
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier remove-overlay\", \"pool\": \"" + pool_name +
    "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier remove\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  // wait for maps to settle before next test
  cluster.wait_for_latest_osdmap();
}
// In readproxy cache mode, reads are proxied to the base tier without
// promoting: read the object through the overlay and then verify for 10
// seconds that the cache tier stays empty.
TEST_F(LibRadosTwoPoolsPP, ProxyRead) {
  SKIP_IF_CRIMSON();
  // create object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"readproxy\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // read and verify the object
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('h', bl[0]);
  }
  // Verify 10 times the object is NOT present in the cache tier
  uint32_t i = 0;
  while (i++ < 10) {
    NObjectIterator it = cache_ioctx.nobjects_begin();
    ASSERT_TRUE(it == cache_ioctx.nobjects_end());
    sleep(1);
  }
  // tear down tiers
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier remove-overlay\", \"pool\": \"" + pool_name +
    "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier remove\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  // wait for maps to settle before next test
  cluster.wait_for_latest_osdmap();
}
// Verify that cache_pin prevents the tiering agent from flushing or
// evicting pinned objects: promote four objects into the writeback
// cache, pin "foo" and "baz", then shrink the cache with
// target_max_objects=1 and check that exactly the pinned pair survives.
TEST_F(LibRadosTwoPoolsPP, CachePin) {
  SKIP_IF_CRIMSON();
  // create object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bar", &op));
  }
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("baz", &op));
  }
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bam", &op));
  }
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // read, trigger promote
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ(1, ioctx.read("bar", bl, 1, 0));
    ASSERT_EQ(1, ioctx.read("baz", bl, 1, 0));
    ASSERT_EQ(1, ioctx.read("bam", bl, 1, 0));
  }
  // verify the objects are present in the cache tier
  {
    NObjectIterator it = cache_ioctx.nobjects_begin();
    ASSERT_TRUE(it != cache_ioctx.nobjects_end());
    for (uint32_t i = 0; i < 4; i++) {
      ASSERT_TRUE(it->get_oid() == string("foo") ||
		  it->get_oid() == string("bar") ||
		  it->get_oid() == string("baz") ||
		  it->get_oid() == string("bam"));
      ++it;
    }
    ASSERT_TRUE(it == cache_ioctx.nobjects_end());
  }
  // pin objects
  {
    ObjectWriteOperation op;
    op.cache_pin();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  {
    ObjectWriteOperation op;
    op.cache_pin();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("baz", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // enable agent
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "hit_set_count", 2),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "hit_set_period", 600),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "hit_set_type", "bloom"),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "min_read_recency_for_promote", 1),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "target_max_objects", 1),
    inbl, NULL, NULL));
  sleep(10);
  // Verify the pinned objects 'foo' and 'baz' are not flushed/evicted
  uint32_t count = 0;
  while (true) {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("baz", bl, 1, 0));
    count = 0;
    // Record during iteration whether every remaining cache object is
    // one of the pinned pair.  The original code dereferenced `it` after
    // this loop, when it had already reached nobjects_end() -- reading
    // through the end iterator is undefined behavior.
    bool only_pinned = true;
    NObjectIterator it = cache_ioctx.nobjects_begin();
    while (it != cache_ioctx.nobjects_end()) {
      ASSERT_TRUE(it->get_oid() == string("foo") ||
		  it->get_oid() == string("bar") ||
		  it->get_oid() == string("baz") ||
		  it->get_oid() == string("bam"));
      if (it->get_oid() != string("foo") &&
	  it->get_oid() != string("baz")) {
	only_pinned = false;
      }
      ++count;
      ++it;
    }
    if (count == 2) {
      // the agent evicted down to two objects; they must be the pinned
      // ones
      ASSERT_TRUE(only_pinned);
      break;
    }
    sleep(1);
  }
  // tear down tiers
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier remove-overlay\", \"pool\": \"" + pool_name +
    "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier remove\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  // wait for maps to settle before next test
  cluster.wait_for_latest_osdmap();
}
// set_redirect makes "foo" (base pool) point at "bar" (second pool);
// a subsequent read of "foo" must return "bar"'s content ('t' from
// "there"), not "foo"'s own ('h' from "hi there").
TEST_F(LibRadosTwoPoolsPP, SetRedirectRead) {
  SKIP_IF_CRIMSON();
  // create object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate("bar", &op));
  }
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  {
    ObjectWriteOperation op;
    op.set_redirect("bar", cache_ioctx, 0);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate("foo", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // read and verify the object
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    // 't' proves the read was served from redirect target "bar"
    ASSERT_EQ('t', bl[0]);
  }
  // wait for maps to settle before next test
  cluster.wait_for_latest_osdmap();
}
// tier_promote on manifest objects: after promoting, a redirect object
// ("foo" -> "bar") and a chunked object ("foo-chunk" -> "bar-chunk")
// must still read back the manifest target's data.
TEST_F(LibRadosTwoPoolsPP, ManifestPromoteRead) {
  SKIP_IF_CRIMSON();
  // skip test if not yet mimic
  if (_get_required_osd_release(cluster) < "mimic") {
    GTEST_SKIP() << "cluster is not yet mimic, skipping test";
  }
  // create object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("base chunk");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo-chunk", &op));
  }
  {
    bufferlist bl;
    bl.append("there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate("bar", &op));
  }
  {
    bufferlist bl;
    bl.append("CHUNK");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate("bar-chunk", &op));
  }
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // set-redirect
  {
    ObjectWriteOperation op;
    op.set_redirect("bar", cache_ioctx, 0);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate("foo", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // set-chunk
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 0, 2, "bar-chunk", "foo-chunk");
  // promote
  {
    ObjectWriteOperation op;
    op.tier_promote();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate("foo", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // read and verify the object (redirect)
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    // 't' is the first byte of redirect target "bar" ("there")
    ASSERT_EQ('t', bl[0]);
  }
  // promote
  {
    ObjectWriteOperation op;
    op.tier_promote();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate("foo-chunk", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // read and verify the object
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo-chunk", bl, 1, 0));
    // 'C' is the first byte of chunk source "bar-chunk" ("CHUNK")
    ASSERT_EQ('C', bl[0]);
  }
  // wait for maps to settle before next test
  cluster.wait_for_latest_osdmap();
}
// set-redirect / set-chunk with CEPH_OSD_OP_FLAG_WITH_REFERENCE must
// bump the refcount xattr on the manifest targets ("bar", "bar-chunk")
// to exactly one reference each.
TEST_F(LibRadosTwoPoolsPP, ManifestRefRead) {
  SKIP_IF_CRIMSON();
  // note: require >= mimic
  // create object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("base chunk");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo-chunk", &op));
  }
  {
    bufferlist bl;
    bl.append("there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate("bar", &op));
  }
  {
    bufferlist bl;
    bl.append("CHUNK");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate("bar-chunk", &op));
  }
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // set-redirect
  {
    ObjectWriteOperation op;
    op.set_redirect("bar", cache_ioctx, 0, CEPH_OSD_OP_FLAG_WITH_REFERENCE);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate("foo", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // set-chunk
  {
    ObjectReadOperation op;
    op.set_chunk(0, 2, cache_ioctx, "bar-chunk", 0, CEPH_OSD_OP_FLAG_WITH_REFERENCE);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate("foo-chunk", completion, &op,
				   librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // redirect's refcount
  {
    bufferlist t;
    cache_ioctx.getxattr("bar", CHUNK_REFCOUNT_ATTR, t);
    chunk_refs_t refs;
    try {
      auto iter = t.cbegin();
      decode(refs, iter);
    } catch (buffer::error& err) {
      ASSERT_TRUE(0);
    }
    ASSERT_EQ(1U, refs.count());
  }
  // chunk's refcount
  {
    bufferlist t;
    cache_ioctx.getxattr("bar-chunk", CHUNK_REFCOUNT_ATTR, t);
    chunk_refs_t refs;
    try {
      auto iter = t.cbegin();
      decode(refs, iter);
    } catch (buffer::error& err) {
      ASSERT_TRUE(0);
    }
    ASSERT_EQ(1u, refs.count());
  }
  // wait for maps to settle before next test
  cluster.wait_for_latest_osdmap();
}
// unset_manifest: after establishing a redirect ("foo" -> "bar") and a
// chunk mapping ("foo-chunk" -> "bar-chunk"), each with a reference,
// unsetting the manifest must drop the target refcounts; a second unset
// on an object that is no longer a manifest must fail with EOPNOTSUPP.
TEST_F(LibRadosTwoPoolsPP, ManifestUnset) {
  SKIP_IF_CRIMSON();
  // skip test if not yet nautilus
  if (_get_required_osd_release(cluster) < "nautilus") {
    GTEST_SKIP() << "cluster is not yet nautilus, skipping test";
  }
  // create object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("base chunk");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo-chunk", &op));
  }
  {
    bufferlist bl;
    bl.append("there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate("bar", &op));
  }
  {
    bufferlist bl;
    bl.append("CHUNK");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate("bar-chunk", &op));
  }
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // set-redirect
  {
    ObjectWriteOperation op;
    op.set_redirect("bar", cache_ioctx, 0, CEPH_OSD_OP_FLAG_WITH_REFERENCE);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate("foo", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // set-chunk
  {
    ObjectReadOperation op;
    op.set_chunk(0, 2, cache_ioctx, "bar-chunk", 0, CEPH_OSD_OP_FLAG_WITH_REFERENCE);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate("foo-chunk", completion, &op,
				   librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // redirect's refcount
  {
    bufferlist t;
    cache_ioctx.getxattr("bar", CHUNK_REFCOUNT_ATTR, t);
    chunk_refs_t refs;
    try {
      auto iter = t.cbegin();
      decode(refs, iter);
    } catch (buffer::error& err) {
      ASSERT_TRUE(0);
    }
    ASSERT_EQ(1u, refs.count());
  }
  // chunk's refcount
  {
    bufferlist t;
    cache_ioctx.getxattr("bar-chunk", CHUNK_REFCOUNT_ATTR, t);
    chunk_refs_t refs;
    try {
      auto iter = t.cbegin();
      decode(refs, iter);
    } catch (buffer::error& err) {
      ASSERT_TRUE(0);
    }
    ASSERT_EQ(1u, refs.count());
  }
  // unset-manifest for set-redirect
  {
    ObjectWriteOperation op;
    op.unset_manifest();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate("foo", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // unset-manifest for set-chunk
  {
    ObjectWriteOperation op;
    op.unset_manifest();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate("foo-chunk", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // redirect's refcount
  // NOTE(review): this scope is labeled "redirect's refcount" but reads
  // the xattr of "bar-chunk" (the chunk target), not "bar" -- verify
  // whether "bar" was intended here.
  {
    bufferlist t;
    cache_ioctx.getxattr("bar-chunk", CHUNK_REFCOUNT_ATTR, t);
    // if the refcount xattr still exists, a second unset_manifest on
    // the (now non-manifest) object must fail with EOPNOTSUPP
    if (t.length() != 0U) {
      ObjectWriteOperation op;
      op.unset_manifest();
      librados::AioCompletion *completion = cluster.aio_create_completion();
      ASSERT_EQ(0, ioctx.aio_operate("foo", completion, &op));
      completion->wait_for_complete();
      ASSERT_EQ(-EOPNOTSUPP, completion->get_return_value());
      completion->release();
    }
  }
  // chunk's refcount
  {
    bufferlist t;
    cache_ioctx.getxattr("bar-chunk", CHUNK_REFCOUNT_ATTR, t);
    if (t.length() != 0U) {
      ObjectWriteOperation op;
      op.unset_manifest();
      librados::AioCompletion *completion = cluster.aio_create_completion();
      ASSERT_EQ(0, ioctx.aio_operate("foo-chunk", completion, &op));
      completion->wait_for_complete();
      ASSERT_EQ(-EOPNOTSUPP, completion->get_return_value());
      completion->release();
    }
  }
  // wait for maps to settle before next test
  cluster.wait_for_latest_osdmap();
}
// Dedup refcounting: two source objects ("foo", "foo-dedup") with the
// same content are both chunk-mapped to the single fingerprint object;
// its refcount must then be at least 2.
TEST_F(LibRadosTwoPoolsPP, ManifestDedupRefRead) {
  SKIP_IF_CRIMSON();
  // skip test if not yet nautilus
  if (_get_required_osd_release(cluster) < "nautilus") {
    GTEST_SKIP() << "cluster is not yet nautilus, skipping test";
  }
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
	  set_pool_str(pool_name, "fingerprint_algorithm", "sha1"),
	  inbl, NULL, NULL));
  cluster.wait_for_latest_osdmap();
  string tgt_oid;
  // get fp_oid: the dedup target's name is the SHA-1 of its content
  tgt_oid = get_fp_oid("There hi", "sha1");
  // create object
  {
    bufferlist bl;
    bl.append("There hi");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("There hi");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo-dedup", &op));
  }
  // write
  {
    ObjectWriteOperation op;
    bufferlist bl;
    bl.append("There hi");
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate(tgt_oid, &op));
  }
  // set-chunk (dedup)
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 0, 8, tgt_oid, "foo-dedup");
  // set-chunk (dedup)
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 0, 8, tgt_oid, "foo");
  // chunk's refcount
  {
    bufferlist t;
    cache_ioctx.getxattr(tgt_oid, CHUNK_REFCOUNT_ATTR, t);
    chunk_refs_t refs;
    try {
      auto iter = t.cbegin();
      decode(refs, iter);
    } catch (buffer::error& err) {
      ASSERT_TRUE(0);
    }
    // both "foo" and "foo-dedup" reference the same fingerprint object
    ASSERT_LE(2u, refs.count());
  }
  // wait for maps to settle before next test
  cluster.wait_for_latest_osdmap();
}
// Dedup refcounting across self-managed snapshots: "foo" is rewritten
// and re-chunked between snapshots so that its clones alternate between
// the [er]/[bb] chunks while always sharing [hi]; the per-chunk
// refcounts must track which clones still reference each chunk as
// snapshots are removed one by one.
TEST_F(LibRadosTwoPoolsPP, ManifestSnapRefcount) {
  SKIP_IF_CRIMSON();
  // skip test if not yet octopus
  if (_get_required_osd_release(cluster) < "octopus") {
    cout << "cluster is not yet octopus, skipping test" << std::endl;
    return;
  }
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
	  set_pool_str(pool_name, "fingerprint_algorithm", "sha1"),
	  inbl, NULL, NULL));
  cluster.wait_for_latest_osdmap();
  // create object
  {
    bufferlist bl;
    bl.append("there hi");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("there hi");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate("bar", &op));
  }
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  string er_fp_oid, hi_fp_oid, bb_fp_oid;
  // get fp_oid: chunk object names are the SHA-1 of their content
  er_fp_oid = get_fp_oid("er", "sha1");
  hi_fp_oid = get_fp_oid("hi", "sha1");
  bb_fp_oid = get_fp_oid("bb", "sha1");
  // write
  {
    ObjectWriteOperation op;
    bufferlist bl;
    bl.append("er");
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate(er_fp_oid, &op));
  }
  // write
  {
    ObjectWriteOperation op;
    bufferlist bl;
    bl.append("hi");
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate(hi_fp_oid, &op));
  }
  // write
  {
    ObjectWriteOperation op;
    bufferlist bl;
    bl.append("bb");
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate(bb_fp_oid, &op));
  }
  // set-chunk (dedup): map "foo" offset 2 ("er") and offset 6 ("hi")
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 2, 2, er_fp_oid, "foo");
  // set-chunk (dedup)
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 6, 2, hi_fp_oid, "foo");
  // make all chunks dirty --> flush
  // foo: [er] [hi]
  // check chunk's refcount
  {
    bufferlist t;
    SHA1 sha1_gen;
    int size = strlen("er");
    unsigned char fingerprint[CEPH_CRYPTO_SHA1_DIGESTSIZE + 1];
    char p_str[CEPH_CRYPTO_SHA1_DIGESTSIZE*2+1] = {0};
    sha1_gen.Update((const unsigned char *)"er", size);
    sha1_gen.Final(fingerprint);
    buf_to_hex(fingerprint, CEPH_CRYPTO_SHA1_DIGESTSIZE, p_str);
    cache_ioctx.getxattr(p_str, CHUNK_REFCOUNT_ATTR, t);
    chunk_refs_t refs;
    try {
      auto iter = t.cbegin();
      decode(refs, iter);
    } catch (buffer::error& err) {
      ASSERT_TRUE(0);
    }
    ASSERT_LE(1u, refs.count());
  }
  // create a snapshot, clone
  vector<uint64_t> my_snaps(1);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
	my_snaps));
  // foo: [bb] [hi]
  // create a clone
  {
    bufferlist bl;
    bl.append("Thbbe");
    ASSERT_EQ(0, ioctx.write("foo", bl, bl.length(), 0));
  }
  // make clean
  {
    bufferlist bl;
    bl.append("Thbbe");
    ASSERT_EQ(0, ioctx.write("foo", bl, bl.length(), 0));
  }
  // set-chunk (dedup)
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 2, 2, bb_fp_oid, "foo");
  // and another
  my_snaps.resize(2);
  my_snaps[1] = my_snaps[0];
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
	my_snaps));
  // foo: [er] [hi]
  // create a clone
  {
    bufferlist bl;
    bl.append("There");
    ASSERT_EQ(0, ioctx.write("foo", bl, bl.length(), 0));
  }
  // make clean
  {
    bufferlist bl;
    bl.append("There");
    ASSERT_EQ(0, ioctx.write("foo", bl, bl.length(), 0));
  }
  // set-chunk (dedup)
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 2, 2, er_fp_oid, "foo");
  // check chunk's refcount: head plus the first clone both map [er]
  {
    bufferlist t;
    SHA1 sha1_gen;
    int size = strlen("er");
    unsigned char fingerprint[CEPH_CRYPTO_SHA1_DIGESTSIZE + 1];
    char p_str[CEPH_CRYPTO_SHA1_DIGESTSIZE*2+1] = {0};
    sha1_gen.Update((const unsigned char *)"er", size);
    sha1_gen.Final(fingerprint);
    buf_to_hex(fingerprint, CEPH_CRYPTO_SHA1_DIGESTSIZE, p_str);
    cache_ioctx.getxattr(p_str, CHUNK_REFCOUNT_ATTR, t);
    chunk_refs_t refs;
    try {
      auto iter = t.cbegin();
      decode(refs, iter);
    } catch (buffer::error& err) {
      ASSERT_TRUE(0);
    }
    ASSERT_LE(2u, refs.count());
  }
  // and another
  my_snaps.resize(3);
  my_snaps[2] = my_snaps[1];
  my_snaps[1] = my_snaps[0];
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
	my_snaps));
  // foo: [bb] [hi]
  // create a clone
  {
    bufferlist bl;
    bl.append("Thbbe");
    ASSERT_EQ(0, ioctx.write("foo", bl, bl.length(), 0));
  }
  // make clean
  {
    bufferlist bl;
    bl.append("Thbbe");
    ASSERT_EQ(0, ioctx.write("foo", bl, bl.length(), 0));
  }
  // set-chunk (dedup)
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 2, 2, bb_fp_oid, "foo");
  /*
   * snap[2]: [er] [hi]
   * snap[1]: [bb] [hi]
   * snap[0]: [er] [hi]
   * head: [bb] [hi]
   */
  // check chunk's refcount
  {
    bufferlist t;
    SHA1 sha1_gen;
    int size = strlen("hi");
    unsigned char fingerprint[CEPH_CRYPTO_SHA1_DIGESTSIZE + 1];
    char p_str[CEPH_CRYPTO_SHA1_DIGESTSIZE*2+1] = {0};
    sha1_gen.Update((const unsigned char *)"hi", size);
    sha1_gen.Final(fingerprint);
    buf_to_hex(fingerprint, CEPH_CRYPTO_SHA1_DIGESTSIZE, p_str);
    is_intended_refcount_state(ioctx, "foo", cache_ioctx, p_str, 1);
  }
  // check chunk's refcount
  {
    bufferlist t;
    SHA1 sha1_gen;
    int size = strlen("er");
    unsigned char fingerprint[CEPH_CRYPTO_SHA1_DIGESTSIZE + 1];
    char p_str[CEPH_CRYPTO_SHA1_DIGESTSIZE*2+1] = {0};
    sha1_gen.Update((const unsigned char *)"er", size);
    sha1_gen.Final(fingerprint);
    buf_to_hex(fingerprint, CEPH_CRYPTO_SHA1_DIGESTSIZE, p_str);
    cache_ioctx.getxattr(p_str, CHUNK_REFCOUNT_ATTR, t);
    chunk_refs_t refs;
    try {
      auto iter = t.cbegin();
      decode(refs, iter);
    } catch (buffer::error& err) {
      ASSERT_TRUE(0);
    }
    ASSERT_LE(2u, refs.count());
  }
  // remove snap
  ioctx.selfmanaged_snap_remove(my_snaps[2]);
  /*
   * snap[1]: [bb] [hi]
   * snap[0]: [er] [hi]
   * head: [bb] [hi]
   */
  // give snap trimming / dedup ref adjustment time to run
  sleep(10);
  // check chunk's refcount
  {
    bufferlist t;
    SHA1 sha1_gen;
    int size = strlen("hi");
    unsigned char fingerprint[CEPH_CRYPTO_SHA1_DIGESTSIZE + 1];
    char p_str[CEPH_CRYPTO_SHA1_DIGESTSIZE*2+1] = {0};
    sha1_gen.Update((const unsigned char *)"hi", size);
    sha1_gen.Final(fingerprint);
    buf_to_hex(fingerprint, CEPH_CRYPTO_SHA1_DIGESTSIZE, p_str);
    is_intended_refcount_state(ioctx, "foo", cache_ioctx, p_str, 1);
  }
  // remove snap
  ioctx.selfmanaged_snap_remove(my_snaps[0]);
  /*
   * snap[1]: [bb] [hi]
   * head: [bb] [hi]
   */
  sleep(10);
  // check chunk's refcount
  {
    bufferlist t;
    SHA1 sha1_gen;
    int size = strlen("bb");
    unsigned char fingerprint[CEPH_CRYPTO_SHA1_DIGESTSIZE + 1];
    char p_str[CEPH_CRYPTO_SHA1_DIGESTSIZE*2+1] = {0};
    sha1_gen.Update((const unsigned char *)"bb", size);
    sha1_gen.Final(fingerprint);
    buf_to_hex(fingerprint, CEPH_CRYPTO_SHA1_DIGESTSIZE, p_str);
    is_intended_refcount_state(ioctx, "foo", cache_ioctx, p_str, 1);
  }
  // remove snap
  ioctx.selfmanaged_snap_remove(my_snaps[1]);
  /*
   * snap[1]: [bb] [hi]
   */
  sleep(10);
  // check chunk's refcount
  {
    bufferlist t;
    SHA1 sha1_gen;
    int size = strlen("bb");
    unsigned char fingerprint[CEPH_CRYPTO_SHA1_DIGESTSIZE + 1];
    char p_str[CEPH_CRYPTO_SHA1_DIGESTSIZE*2+1] = {0};
    sha1_gen.Update((const unsigned char *)"bb", size);
    sha1_gen.Final(fingerprint);
    buf_to_hex(fingerprint, CEPH_CRYPTO_SHA1_DIGESTSIZE, p_str);
    is_intended_refcount_state(ioctx, "foo", cache_ioctx, p_str, 1);
  }
  // check chunk's refcount
  {
    bufferlist t;
    SHA1 sha1_gen;
    int size = strlen("hi");
    unsigned char fingerprint[CEPH_CRYPTO_SHA1_DIGESTSIZE + 1];
    char p_str[CEPH_CRYPTO_SHA1_DIGESTSIZE*2+1] = {0};
    sha1_gen.Update((const unsigned char *)"hi", size);
    sha1_gen.Final(fingerprint);
    buf_to_hex(fingerprint, CEPH_CRYPTO_SHA1_DIGESTSIZE, p_str);
    is_intended_refcount_state(ioctx, "foo", cache_ioctx, p_str, 1);
  }
}
TEST_F(LibRadosTwoPoolsPP, ManifestSnapRefcount2) {
  SKIP_IF_CRIMSON();
  // skip test if not yet octopus (GTEST_SKIP for consistency with the
  // other manifest tests, so the test reports as skipped, not passed)
  if (_get_required_osd_release(cluster) < "octopus") {
    GTEST_SKIP() << "cluster is not yet octopus, skipping test";
  }
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(pool_name, "fingerprint_algorithm", "sha1"),
    inbl, NULL, NULL));
  cluster.wait_for_latest_osdmap();
  // create the manifest (base) object
  {
    bufferlist bl;
    bl.append("Thabe cdHI");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("there hiHI");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate("bar", &op));
  }
  // fingerprint oids: the sha1 hex digest of the chunk payload doubles
  // as the chunk object's name in the chunk (cache) pool
  string ab_fp_oid = get_fp_oid("ab", "sha1");
  string cd_fp_oid = get_fp_oid("cd", "sha1");
  string ef_fp_oid = get_fp_oid("ef", "sha1");
  string BB_fp_oid = get_fp_oid("BB", "sha1");
  // write the chunk objects themselves
  auto write_chunk_obj = [&](const string& oid, const char* payload) {
    ObjectWriteOperation op;
    bufferlist bl;
    bl.append(payload);
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate(oid, &op));
  };
  write_chunk_obj(ab_fp_oid, "ab");
  write_chunk_obj(cd_fp_oid, "cd");
  write_chunk_obj(ef_fp_oid, "ef");
  write_chunk_obj(BB_fp_oid, "BB");
  // set-chunk (dedup)
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 2, 2, ab_fp_oid, "foo");
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 6, 2, cd_fp_oid, "foo");
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 8, 2, ef_fp_oid, "foo");
  // foo: [ab] [cd] [ef]
  // create a snapshot, clone
  vector<uint64_t> my_snaps(1);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
	my_snaps));
  // foo: [BB] [BB] [ef]
  // first write creates the clone, second write leaves the head clean
  {
    bufferlist bl;
    bl.append("ThBBe BB");
    ASSERT_EQ(0, ioctx.write("foo", bl, bl.length(), 0));
  }
  {
    bufferlist bl;
    bl.append("ThBBe BB");
    ASSERT_EQ(0, ioctx.write("foo", bl, bl.length(), 0));
  }
  // set-chunk (dedup)
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 2, 2, BB_fp_oid, "foo");
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 6, 2, BB_fp_oid, "foo");
  // and another snapshot
  my_snaps.resize(2);
  my_snaps[1] = my_snaps[0];
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
	my_snaps));
  // foo: [ab] [cd] [ef]
  // first write creates the clone, second write leaves the head clean
  {
    bufferlist bl;
    bl.append("Thabe cd");
    ASSERT_EQ(0, ioctx.write("foo", bl, bl.length(), 0));
  }
  {
    bufferlist bl;
    bl.append("Thabe cd");
    ASSERT_EQ(0, ioctx.write("foo", bl, bl.length(), 0));
  }
  // set-chunk (dedup)
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 2, 2, ab_fp_oid, "foo");
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 6, 2, cd_fp_oid, "foo");
  /*
   * snap[1]: [ab] [cd] [ef]
   * snap[0]: [BB] [BB] [ef]
   * head:    [ab] [cd] [ef]
   */
  // decode CHUNK_REFCOUNT_ATTR on a chunk object and require at least
  // `min_refs` references (the fp oid is already the sha1 hex digest,
  // so no need to recompute it)
  auto expect_refcount_at_least = [&](const string& fp_oid,
				      uint64_t min_refs) {
    bufferlist t;
    cache_ioctx.getxattr(fp_oid, CHUNK_REFCOUNT_ATTR, t);
    chunk_refs_t refs;
    try {
      auto iter = t.cbegin();
      decode(refs, iter);
    } catch (buffer::error& err) {
      FAIL() << "cannot decode chunk_refs_t for " << fp_oid;
    }
    ASSERT_LE(min_refs, refs.count());
  };
  // [ab], [cd] are referenced by two states; [BB] by two offsets of snap[0]
  expect_refcount_at_least(ab_fp_oid, 2u);
  expect_refcount_at_least(cd_fp_oid, 2u);
  expect_refcount_at_least(BB_fp_oid, 2u);
  // remove snap
  ioctx.selfmanaged_snap_remove(my_snaps[0]);
  /*
   * snap[1]: [ab] [cd] [ef]
   * head:    [ab] [cd] [ef]
   */
  // snap trim and deref are asynchronous; give them time to settle
  sleep(10);
  // [BB] is no longer referenced by any clone -> refcount should be 0
  is_intended_refcount_state(ioctx, "foo", cache_ioctx, BB_fp_oid.c_str(), 0);
}
TEST_F(LibRadosTwoPoolsPP, ManifestTestSnapCreate) {
  SKIP_IF_CRIMSON();
  // skip test if not yet octopus
  if (_get_required_osd_release(cluster) < "octopus") {
    GTEST_SKIP() << "cluster is not yet octopus, skipping test";
  }
  // create the manifest (base) object
  {
    bufferlist bl;
    bl.append("base chunk");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("CHUNKS CHUNKS");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate("bar", &op));
  }
  // fingerprint oids: sha1 hex of the payload == chunk object name
  string ba_fp_oid = get_fp_oid("ba", "sha1");
  string se_fp_oid = get_fp_oid("se", "sha1");
  string ch_fp_oid = get_fp_oid("ch", "sha1");
  // write the chunk objects themselves
  auto write_chunk_obj = [&](const string& oid, const char* payload) {
    ObjectWriteOperation op;
    bufferlist bl;
    bl.append(payload);
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate(oid, &op));
  };
  write_chunk_obj(ba_fp_oid, "ba");
  write_chunk_obj(se_fp_oid, "se");
  write_chunk_obj(ch_fp_oid, "ch");
  // set-chunk (dedup)
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 0, 2, ba_fp_oid, "foo");
  // try to create a snapshot, clone
  vector<uint64_t> my_snaps(1);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
	my_snaps));
  // set-chunk (dedup) -- this dirties the head under the new snap context
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 2, 2, se_fp_oid, "foo");
  // check whether exactly one clone was created
  ioctx.snap_set_read(librados::SNAP_DIR);
  {
    snap_set_t snap_set;
    int snap_ret;
    ObjectReadOperation op;
    op.list_snaps(&snap_set, &snap_ret);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "foo", completion, &op,
      0, NULL));
    completion->wait_for_complete();
    // fix: check the op result and release the completion (was leaked)
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
    ASSERT_EQ(0, snap_ret);
    ASSERT_EQ(1u, snap_set.clones.size());
  }
  // create a clone by writing to the head
  ioctx.snap_set_read(librados::SNAP_HEAD);
  {
    bufferlist bl;
    bl.append("B");
    ASSERT_EQ(0, ioctx.write("foo", bl, 1, 0));
  }
  ioctx.snap_set_read(my_snaps[0]);
  // set-chunk on the clone must also succeed
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 6, 2, ch_fp_oid, "foo");
}
TEST_F(LibRadosTwoPoolsPP, ManifestRedirectAfterPromote) {
  SKIP_IF_CRIMSON();
  // skip test if not yet octopus
  if (_get_required_osd_release(cluster) < "octopus") {
    GTEST_SKIP() << "cluster is not yet octopus, skipping test";
  }
  // run a write op against "foo" in the base pool and expect success
  auto run_on_foo = [&](ObjectWriteOperation& op) {
    librados::AioCompletion *comp = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate("foo", comp, &op));
    comp->wait_for_complete();
    ASSERT_EQ(0, comp->get_return_value());
    comp->release();
  };
  // populate the base object
  {
    ObjectWriteOperation op;
    bufferlist bl;
    bl.append("base chunk");
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // populate the redirect target
  {
    ObjectWriteOperation op;
    bufferlist bl;
    bl.append("BASE CHUNK");
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate("bar", &op));
  }
  // redirect foo -> bar
  {
    ObjectWriteOperation op;
    op.set_redirect("bar", cache_ioctx, 0);
    run_on_foo(op);
  }
  // promote foo back to the base pool
  {
    ObjectWriteOperation op;
    op.tier_promote();
    run_on_foo(op);
  }
  // overwrite the first byte of the promoted object
  {
    bufferlist bl;
    bl.append("a");
    ASSERT_EQ(0, ioctx.write("foo", bl, 1, 0));
  }
  // foo reflects the local write...
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('a', bl[0]);
  }
  // ...while the old redirect target stays untouched
  {
    bufferlist bl;
    ASSERT_EQ(1, cache_ioctx.read("bar", bl, 1, 0));
    ASSERT_EQ('B', bl[0]);
  }
}
TEST_F(LibRadosTwoPoolsPP, ManifestCheckRefcountWhenModification) {
  SKIP_IF_CRIMSON();
  // skip test if not yet octopus
  if (_get_required_osd_release(cluster) < "octopus") {
    GTEST_SKIP() << "cluster is not yet octopus, skipping test";
  }
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(pool_name, "fingerprint_algorithm", "sha1"),
    inbl, NULL, NULL));
  cluster.wait_for_latest_osdmap();
  // create the manifest (base) object
  {
    bufferlist bl;
    bl.append("there hiHI");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // fingerprint oids: the sha1 hex digest of the chunk payload doubles
  // as the chunk object's name in the chunk (cache) pool, so the
  // refcount checks below can reuse these strings directly
  string er_fp_oid = get_fp_oid("er", "sha1");
  string hi_fp_oid = get_fp_oid("hi", "sha1");
  string HI_fp_oid = get_fp_oid("HI", "sha1");
  string ai_fp_oid = get_fp_oid("ai", "sha1");
  string bi_fp_oid = get_fp_oid("bi", "sha1");
  string Er_fp_oid = get_fp_oid("Er", "sha1");
  string Hi_fp_oid = get_fp_oid("Hi", "sha1");
  string Si_fp_oid = get_fp_oid("Si", "sha1");
  // write the chunk objects themselves
  auto write_chunk_obj = [&](const string& oid, const char* payload) {
    ObjectWriteOperation op;
    bufferlist bl;
    bl.append(payload);
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate(oid, &op));
  };
  write_chunk_obj(er_fp_oid, "er");
  write_chunk_obj(hi_fp_oid, "hi");
  write_chunk_obj(HI_fp_oid, "HI");
  write_chunk_obj(ai_fp_oid, "ai");
  write_chunk_obj(bi_fp_oid, "bi");
  write_chunk_obj(Er_fp_oid, "Er");
  write_chunk_obj(Hi_fp_oid, "Hi");
  write_chunk_obj(Si_fp_oid, "Si");
  // set-chunk (dedup)
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 2, 2, er_fp_oid, "foo");
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 6, 2, hi_fp_oid, "foo");
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 8, 2, HI_fp_oid, "foo");
  // foo head: [er] [hi] [HI]
  // create a snapshot, clone
  vector<uint64_t> my_snaps(1);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
	my_snaps));
  // foo snap[0]: [er] [hi] [HI]
  // foo head   : [er] [ai] [HI]
  // first write creates the clone, second write leaves the head clean
  {
    bufferlist bl;
    bl.append("a");
    ASSERT_EQ(0, ioctx.write("foo", bl, 1, 6));
  }
  {
    bufferlist bl;
    bl.append("a");
    ASSERT_EQ(0, ioctx.write("foo", bl, 1, 6));
  }
  // set-chunk (dedup)
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 6, 2, ai_fp_oid, "foo");
  // foo snap[0]: [er] [hi] [HI]
  // foo head   : [er] [bi] [HI]
  // dirty the head again, then make it clean
  {
    bufferlist bl;
    bl.append("b");
    ASSERT_EQ(0, ioctx.write("foo", bl, 1, 6));
  }
  {
    bufferlist bl;
    bl.append("b");
    ASSERT_EQ(0, ioctx.write("foo", bl, 1, 6));
  }
  // set-chunk (dedup)
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 6, 2, bi_fp_oid, "foo");
  // dereferencing is asynchronous; give it time to settle
  sleep(10);
  // [ai] is not referenced by head or any clone anymore, so its
  // refcount should have dropped to 0
  is_intended_refcount_state(ioctx, "foo", cache_ioctx, ai_fp_oid.c_str(), 0);
  // foo snap[0]: [er] [hi] [HI]
  // foo head   : [Er] [Hi] [Si]
  // first write dirties the head, second write leaves it clean
  {
    bufferlist bl;
    bl.append("thEre HiSi");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("thEre HiSi");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // set-chunk (dedup)
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 2, 2, Er_fp_oid, "foo");
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 6, 2, Hi_fp_oid, "foo");
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 8, 2, Si_fp_oid, "foo");
  // foo snap[0]: [er] [hi] [HI]
  // foo head   : [ER] [HI] [SI]
  // overwrite the head; the set-chunk references must be dropped
  {
    bufferlist bl;
    bl.append("thERe HISI");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  sleep(10);
  // [Er] is not referenced anymore, so its refcount should be 0
  is_intended_refcount_state(ioctx, "foo", cache_ioctx, Er_fp_oid.c_str(), 0);
}
TEST_F(LibRadosTwoPoolsPP, ManifestSnapIncCount) {
  SKIP_IF_CRIMSON();
  // skip test if not yet octopus (GTEST_SKIP for consistency with the
  // other manifest tests, so the test reports as skipped, not passed)
  if (_get_required_osd_release(cluster) < "octopus") {
    GTEST_SKIP() << "cluster is not yet octopus, skipping test";
  }
  // every object below holds the same 10-byte payload
  auto write_payload = [&](librados::IoCtx& ctx, const string& oid) {
    bufferlist bl;
    bl.append("there hiHI");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ctx.operate(oid, &op));
  };
  write_payload(ioctx, "foo");
  write_payload(cache_ioctx, "chunk1");
  write_payload(cache_ioctx, "chunk2");
  write_payload(cache_ioctx, "chunk3");
  write_payload(cache_ioctx, "chunk4");
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // create a snapshot, clone
  vector<uint64_t> my_snaps(1);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
	my_snaps));
  {
    bufferlist bl;
    bl.append("there hiHI");
    ASSERT_EQ(0, ioctx.write("foo", bl, bl.length(), 0));
  }
  // and a second snapshot / clone
  my_snaps.resize(2);
  my_snaps[1] = my_snaps[0];
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
	my_snaps));
  {
    bufferlist bl;
    bl.append("there hiHI");
    ASSERT_EQ(0, ioctx.write("foo", bl, bl.length(), 0));
  }
  // set-chunk on the head
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 2, 2, "chunk1", "foo");
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 8, 2, "chunk4", "foo");
  // foo snap[1]:
  // foo snap[0]:
  // foo head   : [chunk1] [chunk4]
  ioctx.snap_set_read(my_snaps[1]);
  // set-chunk on the oldest clone
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 6, 2, "chunk2", "foo");
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 8, 2, "chunk4", "foo");
  // foo snap[1]: [chunk2] [chunk4]
  // foo snap[0]:
  // foo head   : [chunk1] [chunk4]
  ioctx.snap_set_read(my_snaps[0]);
  // set-chunk on the newer clone
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 6, 2, "chunk2", "foo");
  // foo snap[1]: [chunk2] [chunk4]
  // foo snap[0]: [chunk2]
  // foo head   : [chunk1] [chunk4]
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 2, 2, "chunk3", "foo");
  // foo snap[1]: [chunk2] [chunk4]
  // foo snap[0]: [chunk3] [chunk2]
  // foo head   : [chunk1] [chunk4]
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 8, 2, "chunk4", "foo");
  // foo snap[1]: [chunk2] [chunk4]
  // foo snap[0]: [chunk3] [chunk2] [chunk4]
  // foo head   : [chunk1] [chunk4]
  // check chunk refcounts: chunk1-3 each hold a single reference entry
  check_fp_oid_refcount(cache_ioctx, "chunk1", 1u, "");
  check_fp_oid_refcount(cache_ioctx, "chunk2", 1u, "");
  check_fp_oid_refcount(cache_ioctx, "chunk3", 1u, "");
  // refcount convergence is asynchronous
  sleep(10);
  // chunk4 is referenced by several states; verify it converged to the
  // intended refcount state
  is_intended_refcount_state(ioctx, "foo", cache_ioctx, "chunk4", 1);
}
TEST_F(LibRadosTwoPoolsPP, ManifestEvict) {
  SKIP_IF_CRIMSON();
  // skip test if not yet octopus (GTEST_SKIP for consistency with the
  // other manifest tests, so the test reports as skipped, not passed)
  if (_get_required_osd_release(cluster) < "octopus") {
    GTEST_SKIP() << "cluster is not yet octopus, skipping test";
  }
  // every object below holds the same 10-byte payload
  auto write_payload = [&](librados::IoCtx& ctx, const string& oid) {
    bufferlist bl;
    bl.append("there hiHI");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ctx.operate(oid, &op));
  };
  write_payload(ioctx, "foo");
  write_payload(cache_ioctx, "chunk1");
  write_payload(cache_ioctx, "chunk2");
  write_payload(cache_ioctx, "chunk3");
  write_payload(cache_ioctx, "chunk4");
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // create a snapshot, clone
  vector<uint64_t> my_snaps(1);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
	my_snaps));
  {
    bufferlist bl;
    bl.append("there hiHI");
    ASSERT_EQ(0, ioctx.write("foo", bl, bl.length(), 0));
  }
  // and a second snapshot / clone
  my_snaps.resize(2);
  my_snaps[1] = my_snaps[0];
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
	my_snaps));
  {
    bufferlist bl;
    bl.append("there hiHI");
    ASSERT_EQ(0, ioctx.write("foo", bl, bl.length(), 0));
  }
  // set-chunk on the head
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 2, 2, "chunk1", "foo");
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 8, 2, "chunk4", "foo");
  // foo snap[1]:
  // foo snap[0]:
  // foo head   : [chunk1] [chunk4]
  ioctx.snap_set_read(my_snaps[1]);
  // set-chunk on the oldest clone: the whole object is one chunk
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 0, 10, "chunk2", "foo");
  // foo snap[1]: [       chunk2      ]
  // foo snap[0]:
  // foo head   : [chunk1] [chunk4]
  ioctx.snap_set_read(my_snaps[0]);
  // set-chunk on the newer clone, piece by piece
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 6, 2, "chunk2", "foo");
  // foo snap[1]: [       chunk2      ]
  // foo snap[0]: [chunk2]
  // foo head   : [chunk1] [chunk4]
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 2, 2, "chunk3", "foo");
  // foo snap[1]: [       chunk2      ]
  // foo snap[0]: [chunk3] [chunk2]
  // foo head   : [chunk1] [chunk4]
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 8, 2, "chunk4", "foo");
  // foo snap[1]: [       chunk2      ]
  // foo snap[0]: [chunk3] [chunk2] [chunk4]
  // foo head   : [chunk1] [chunk4]
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 0, 2, "chunk4", "foo");
  // foo snap[1]: [       chunk2      ]
  // foo snap[0]: [chunk4] [chunk3] [chunk2] [chunk4]
  // foo head   : [chunk1] [chunk4]
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 4, 2, "chunk1", "foo");
  // foo snap[1]: [       chunk2      ]
  // foo snap[0]: [chunk4] [chunk3] [chunk1] [chunk2] [chunk4]
  // foo head   : [chunk1] [chunk4]
  // evict "foo" under the current read context and verify its logical
  // size is preserved (evict punches out chunked data, it does not
  // truncate the object)
  auto evict_and_check_size = [&](uint64_t expected_size) {
    ObjectReadOperation op;
    op.tier_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_OVERLAY, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();  // fix: completion was leaked
    ObjectReadOperation stat_op;
    uint64_t size;
    stat_op.stat(&size, NULL, NULL);
    ASSERT_EQ(0, ioctx.operate("foo", &stat_op, NULL));
    ASSERT_EQ(expected_size, size);
  };
  // evict the fully-chunked clone (still reading snap[0])
  evict_and_check_size(10);
  // evict the head
  ioctx.snap_set_read(librados::SNAP_HEAD);
  evict_and_check_size(strlen("there hiHI"));
}
TEST_F(LibRadosTwoPoolsPP, ManifestEvictPromote) {
  SKIP_IF_CRIMSON();
  // skip test if not yet octopus (GTEST_SKIP for consistency with the
  // other manifest tests, so the test reports as skipped, not passed)
  if (_get_required_osd_release(cluster) < "octopus") {
    GTEST_SKIP() << "cluster is not yet octopus, skipping test";
  }
  // write a 10-byte object with the given payload
  auto write_obj = [&](librados::IoCtx& ctx, const string& oid,
		       const char* payload) {
    bufferlist bl;
    bl.append(payload);
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ctx.operate(oid, &op));
  };
  write_obj(ioctx, "foo", "there hiHI");
  write_obj(cache_ioctx, "chunk1", "EREHT hiHI");
  write_obj(cache_ioctx, "chunk2", "there hiHI");
  write_obj(cache_ioctx, "chunk3", "THERE HIHI");
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // create a snapshot, clone
  vector<uint64_t> my_snaps(1);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
	my_snaps));
  {
    bufferlist bl;
    bl.append("there");
    ASSERT_EQ(0, ioctx.write("foo", bl, bl.length(), 0));
  }
  // set-chunk on the head
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 0, 2, "chunk1", "foo");
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 8, 2, "chunk2", "foo");
  // foo snap[0]:
  // foo head   : [chunk1] [chunk2]
  ioctx.snap_set_read(my_snaps[0]);
  // set-chunk on the clone: the whole object is one chunk
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 0, 10, "chunk3", "foo");
  // foo snap[0]: [   chunk3   ]
  // foo head   : [chunk1] [chunk2]
  // evict the clone; the logical size must be preserved
  {
    ObjectReadOperation op, stat_op;
    uint64_t size;
    op.tier_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_OVERLAY, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();  // fix: completion was leaked
    stat_op.stat(&size, NULL, NULL);
    ASSERT_EQ(0, ioctx.operate("foo", &stat_op, NULL));
    ASSERT_EQ(10, size);
  }
  // reading the evicted clone promotes the chunk data back ("THERE HIHI")
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('T', bl[0]);
  }
  // the head is unaffected ("there hiHI")
  ioctx.snap_set_read(librados::SNAP_HEAD);
  {
    bufferlist bl;
    ASSERT_EQ(10, ioctx.read("foo", bl, 10, 0));
    ASSERT_EQ('H', bl[8]);
  }
}
TEST_F(LibRadosTwoPoolsPP, ManifestSnapSizeMismatch) {
  SKIP_IF_CRIMSON();
  // skip test if not yet octopus (GTEST_SKIP for consistency with the
  // other manifest tests, so the test reports as skipped, not passed)
  if (_get_required_osd_release(cluster) < "octopus") {
    GTEST_SKIP() << "cluster is not yet octopus, skipping test";
  }
  // note: in this test the manifest object lives in the cache pool and
  // the chunk objects live in the base pool
  {
    bufferlist bl;
    bl.append("there hiHI");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("there hiHI");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("chunk1", &op));
  }
  {
    bufferlist bl;
    bl.append("there HIHI");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("chunk2", &op));
  }
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // create a snapshot, clone
  vector<uint64_t> my_snaps(1);
  ASSERT_EQ(0, cache_ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, cache_ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
	my_snaps));
  {
    bufferlist bl;
    bl.append("There hiHI");
    ASSERT_EQ(0, cache_ioctx.write("foo", bl, bl.length(), 0));
  }
  // and a second snapshot / clone
  my_snaps.resize(2);
  my_snaps[1] = my_snaps[0];
  ASSERT_EQ(0, cache_ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, cache_ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
	my_snaps));
  {
    bufferlist bl;
    bl.append("tHere hiHI");
    ASSERT_EQ(0, cache_ioctx.write("foo", bl, bl.length(), 0));
  }
  // set-chunk on the head: the whole object is one chunk
  manifest_set_chunk(cluster, ioctx, cache_ioctx, 0, 10, "chunk1", "foo");
  cache_ioctx.snap_set_read(my_snaps[1]);
  // set-chunk on the oldest clone
  manifest_set_chunk(cluster, ioctx, cache_ioctx, 0, 10, "chunk2", "foo");
  // evict the clone we are reading
  {
    ObjectReadOperation op;
    op.tier_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_OVERLAY, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();  // fix: completion was leaked
  }
  uint32_t hash;
  ASSERT_EQ(0, cache_ioctx.get_object_pg_hash_position2("foo", &hash));
  // deep-scrub the PG holding "foo"; retry while the PG is not ready yet
  {
    for (int tries = 0; tries < 5; ++tries) {
      bufferlist inbl;
      ostringstream ss;
      ss << "{\"prefix\": \"pg deep-scrub\", \"pgid\": \""
	 << cache_ioctx.get_id() << "."
	 << std::hex << hash
	 << "\"}";
      int r = cluster.mon_command(ss.str(), inbl, NULL, NULL);
      if (r == -ENOENT ||
	  r == -EAGAIN) {
	sleep(5);
	continue;
      }
      ASSERT_EQ(0, r);
      break;
    }
    cout << "waiting for scrubs..." << std::endl;
    sleep(20);
    cout << "done waiting" << std::endl;
  }
  // the evicted clone must still read back correctly after the scrub
  {
    bufferlist bl;
    ASSERT_EQ(1, cache_ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('t', bl[0]);
  }
}
#include <common/CDC.h>
TEST_F(LibRadosTwoPoolsPP, DedupFlushRead) {
  SKIP_IF_CRIMSON();
  // skip test if not yet octopus
  if (_get_required_osd_release(cluster) < "octopus") {
    GTEST_SKIP() << "cluster is not yet octopus, skipping test";
  }
  bufferlist inbl;
  // dedup setup: sha1 fingerprints, fastcdc chunking, chunks written to
  // the base pool (dedup_tier) on flush
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "fingerprint_algorithm", "sha1"),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "dedup_tier", pool_name),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "dedup_chunk_algorithm", "fastcdc"),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "dedup_cdc_chunk_size", 1024),
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // source object: 8 KiB of deterministic data
  bufferlist gbl;
  {
    generate_buffer(1024*8, &gbl);
    ObjectWriteOperation op;
    op.write_full(gbl);
    ASSERT_EQ(0, cache_ioctx.operate("foo-chunk", &op));
  }
  {
    bufferlist bl;
    bl.append("DDse chunk");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bar-chunk", &op));
  }
  // turn foo-chunk into a manifest object by pinning its first two
  // bytes to bar-chunk
  auto set_chunk_to_bar = [&]() {
    ObjectReadOperation op;
    op.set_chunk(0, 2, ioctx, "bar-chunk", 0,
	CEPH_OSD_OP_FLAG_WITH_REFERENCE);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo-chunk", completion, &op,
	librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  };
  // tier_flush foo-chunk: re-chunks the dirty data with the current CDC
  // settings and writes the chunks into the dedup tier
  auto flush_foo_chunk = [&]() {
    ObjectReadOperation op;
    op.tier_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
	"foo-chunk", completion, &op,
	librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  };
  // sha1 hex digest of a chunk == the chunk object's name in the tier
  auto fp_oid_of = [](bufferlist& chunk) {
    unsigned char fingerprint[CEPH_CRYPTO_SHA1_DIGESTSIZE + 1] = {0};
    char p_str[CEPH_CRYPTO_SHA1_DIGESTSIZE*2+1] = {0};
    SHA1 sha1_gen;
    int size = chunk.length();
    sha1_gen.Update((const unsigned char *)chunk.c_str(), size);
    sha1_gen.Final(fingerprint);
    buf_to_hex(fingerprint, CEPH_CRYPTO_SHA1_DIGESTSIZE, p_str);
    return string(p_str);
  };
  set_chunk_to_bar();
  flush_foo_chunk();
  // verify one of the 1024-byte CDC chunks landed in the dedup tier
  std::unique_ptr<CDC> cdc = CDC::create("fastcdc", cbits(1024)-1);
  vector<pair<uint64_t, uint64_t>> chunks;
  bufferlist chunk;
  cdc->calc_chunks(gbl, &chunks);
  chunk.substr_of(gbl, chunks[1].first, chunks[1].second);
  string tgt_oid = fp_oid_of(chunk);
  {
    bufferlist test_bl;
    ASSERT_EQ(2, ioctx.read(tgt_oid, test_bl, 2, 0));
    ASSERT_EQ(test_bl[1], chunk[1]);
  }
  // shrink the CDC target size and re-flush a dirtied object
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "dedup_cdc_chunk_size", 512),
    inbl, NULL, NULL));
  cluster.wait_for_latest_osdmap();
  // make a dirty chunk (gbl is deliberately left unmodified; the chunk
  // compared below does not cover the overwritten first two bytes)
  {
    bufferlist bl;
    bl.append("hi");
    ASSERT_EQ(0, cache_ioctx.write("foo-chunk", bl, bl.length(), 0));
  }
  flush_foo_chunk();
  cdc = CDC::create("fastcdc", cbits(512)-1);
  chunks.clear();
  cdc->calc_chunks(gbl, &chunks);
  bufferlist chunk_512;
  chunk_512.substr_of(gbl, chunks[3].first, chunks[3].second);
  tgt_oid = fp_oid_of(chunk_512);
  // read and verify the chunked object
  {
    bufferlist test_bl;
    ASSERT_EQ(2, ioctx.read(tgt_oid, test_bl, 2, 0));
    ASSERT_EQ(test_bl[1], chunk_512[1]);
  }
  // grow the CDC target size beyond the object size
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "dedup_cdc_chunk_size", 16384),
    inbl, NULL, NULL));
  cluster.wait_for_latest_osdmap();
  // make a dirty chunk; this time keep gbl in sync with the object data
  {
    bufferlist bl;
    bl.append("hi");
    ASSERT_EQ(0, cache_ioctx.write("foo-chunk", bl, bl.length(), 0));
    gbl.begin(0).copy_in(bl.length(), bl);
  }
  flush_foo_chunk();
  cdc = CDC::create("fastcdc", cbits(16384)-1);
  chunks.clear();
  cdc->calc_chunks(gbl, &chunks);
  bufferlist chunk_16384;
  chunk_16384.substr_of(gbl, chunks[0].first, chunks[0].second);
  tgt_oid = fp_oid_of(chunk_16384);
  // read and verify the chunked object
  {
    bufferlist test_bl;
    ASSERT_EQ(2, ioctx.read(tgt_oid, test_bl, 2, 0));
    ASSERT_EQ(test_bl[0], chunk_16384[0]);
  }
  // back to a chunk size less than the object size
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "dedup_cdc_chunk_size", 1024),
    inbl, NULL, NULL));
  cluster.wait_for_latest_osdmap();
  // make a dirty chunk; a chunk_info is deleted by this write, which
  // converts the manifest object back to a non-manifest object
  {
    bufferlist bl;
    bl.append("hi");
    ASSERT_EQ(0, cache_ioctx.write("foo-chunk", bl, bl.length(), 0));
  }
  // reset bar-chunk and re-establish the manifest
  {
    bufferlist bl;
    bl.append("DDse chunk");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bar-chunk", &op));
  }
  set_chunk_to_bar();
  flush_foo_chunk();
  cdc = CDC::create("fastcdc", cbits(1024)-1);
  chunks.clear();
  cdc->calc_chunks(gbl, &chunks);
  bufferlist small_chunk;
  small_chunk.substr_of(gbl, chunks[1].first, chunks[1].second);
  tgt_oid = fp_oid_of(small_chunk);
  // read and verify the chunked object
  {
    bufferlist test_bl;
    ASSERT_EQ(2, ioctx.read(tgt_oid, test_bl, 2, 0));
    ASSERT_EQ(test_bl[0], small_chunk[0]);
  }
}
// Verify tier_flush ordering on a snapshotted dedup (manifest) object:
// flushing HEAD or a newer clone must fail with -EBUSY until the oldest
// clone has been flushed; after that, newer clones and finally HEAD flush
// successfully, and the flushed chunk is readable via its fingerprint oid.
TEST_F(LibRadosTwoPoolsPP, ManifestFlushSnap) {
  SKIP_IF_CRIMSON();
  // skip test if not yet octopus
  if (_get_required_osd_release(cluster) < "octopus") {
    cout << "cluster is not yet octopus, skipping test" << std::endl;
    return;
  }
  // configure the cache pool for sha1-fingerprinted fastcdc dedup into
  // the base pool, 1 KiB target chunk size
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "fingerprint_algorithm", "sha1"),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "dedup_tier", pool_name),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "dedup_chunk_algorithm", "fastcdc"),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "dedup_cdc_chunk_size", 1024),
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // create object: 8 KiB of generated data in the cache pool
  bufferlist gbl;
  {
    //bufferlist bl;
    //bl.append("there hi");
    generate_buffer(1024*8, &gbl);
    ObjectWriteOperation op;
    op.write_full(gbl);
    ASSERT_EQ(0, cache_ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("there hi");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bar", &op));
  }
  // set-chunk (dedup): turn "foo" into a manifest object referencing "bar"
  manifest_set_chunk(cluster, ioctx, cache_ioctx, 2, 2, "bar", "foo");
  manifest_set_chunk(cluster, ioctx, cache_ioctx, 6, 2, "bar", "foo");
  // create a snapshot, clone
  vector<uint64_t> my_snaps(1);
  ASSERT_EQ(0, cache_ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, cache_ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
	my_snaps));
  // make a dirty chunks (this write creates clone snap[0])
  {
    bufferlist bl;
    bl.append("Thbbe");
    ASSERT_EQ(0, cache_ioctx.write("foo", bl, bl.length(), 0));
  }
  // and another
  my_snaps.resize(2);
  my_snaps[1] = my_snaps[0];
  ASSERT_EQ(0, cache_ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, cache_ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
	my_snaps));
  // make a dirty chunks (this write creates clone snap[1]... newest snap
  // is my_snaps[0], oldest is my_snaps[1])
  {
    bufferlist bl;
    bl.append("Thcce");
    ASSERT_EQ(0, cache_ioctx.write("foo", bl, bl.length(), 0));
  }
  // flush on head (should fail: older clones are still unflushed)
  cache_ioctx.snap_set_read(librados::SNAP_HEAD);
  // flush
  {
    ObjectReadOperation op;
    op.tier_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(-EBUSY, completion->get_return_value());
    completion->release();
  }
  // flush on recent snap (should fail: an even older clone exists)
  cache_ioctx.snap_set_read(my_snaps[0]);
  {
    ObjectReadOperation op;
    op.tier_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(-EBUSY, completion->get_return_value());
    completion->release();
  }
  // flush on oldest snap (succeeds: nothing older remains)
  cache_ioctx.snap_set_read(my_snaps[1]);
  {
    ObjectReadOperation op;
    op.tier_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // flush on the next snap, now the oldest unflushed one (succeeds)
  cache_ioctx.snap_set_read(my_snaps[0]);
  {
    ObjectReadOperation op;
    op.tier_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // finally flush HEAD (succeeds)
  cache_ioctx.snap_set_read(librados::SNAP_HEAD);
  {
    ObjectReadOperation op;
    op.tier_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // check chunk's refcount: recompute the CDC chunk boundaries locally and
  // derive the fingerprint oid of chunks[1] of the original data
  std::unique_ptr<CDC> cdc = CDC::create("fastcdc", cbits(1024)-1);
  vector<pair<uint64_t, uint64_t>> chunks;
  bufferlist chunk;
  cdc->calc_chunks(gbl, &chunks);
  chunk.substr_of(gbl, chunks[1].first, chunks[1].second);
  string tgt_oid;
  {
    unsigned char fingerprint[CEPH_CRYPTO_SHA1_DIGESTSIZE + 1] = {0};
    char p_str[CEPH_CRYPTO_SHA1_DIGESTSIZE*2+1] = {0};
    SHA1 sha1_gen;
    int size = chunk.length();
    sha1_gen.Update((const unsigned char *)chunk.c_str(), size);
    sha1_gen.Final(fingerprint);
    buf_to_hex(fingerprint, CEPH_CRYPTO_SHA1_DIGESTSIZE, p_str);
    tgt_oid = string(p_str);
  }
  // read and verify the chunked object exists in the base pool
  {
    bufferlist test_bl;
    ASSERT_EQ(2, ioctx.read(tgt_oid, test_bl, 2, 0));
    ASSERT_EQ(test_bl[1], chunk[1]);
  }
  // verify each clone still reads back its own contents
  cache_ioctx.snap_set_read(librados::SNAP_HEAD);
  {
    bufferlist bl;
    ASSERT_EQ(4, cache_ioctx.read("foo", bl, 4, 0));
    ASSERT_EQ('c', bl[2]);
  }
  cache_ioctx.snap_set_read(my_snaps[0]);
  {
    bufferlist bl;
    ASSERT_EQ(4, cache_ioctx.read("foo", bl, 4, 0));
    ASSERT_EQ('b', bl[2]);
  }
}
// Verify chunk refcount accounting when several clones of a manifest
// object are flushed (deduped): chunks shared by multiple clones keep a
// positive refcount, and a chunk whose content was fully overwritten on
// HEAD eventually drops to refcount 0 after a flush.
TEST_F(LibRadosTwoPoolsPP, ManifestFlushDupCount) {
  SKIP_IF_CRIMSON();
  // skip test if not yet octopus
  if (_get_required_osd_release(cluster) < "octopus") {
    cout << "cluster is not yet octopus, skipping test" << std::endl;
    return;
  }
  // configure the cache pool for sha1/fastcdc dedup into the base pool
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "fingerprint_algorithm", "sha1"),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "dedup_tier", pool_name),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "dedup_chunk_algorithm", "fastcdc"),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "dedup_cdc_chunk_size", 1024),
    inbl, NULL, NULL));
  // create object: 8 KiB of generated data in the cache pool
  bufferlist gbl;
  {
    //bufferlist bl;
    generate_buffer(1024*8, &gbl);
    ObjectWriteOperation op;
    op.write_full(gbl);
    ASSERT_EQ(0, cache_ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("there hiHI");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bar", &op));
  }
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // set-chunk to set manifest object
  {
    ObjectReadOperation op;
    op.set_chunk(0, 2, ioctx, "bar", 0,
		 CEPH_OSD_OP_FLAG_WITH_REFERENCE);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion, &op,
					 librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // create a snapshot, clone
  vector<uint64_t> my_snaps(1);
  ASSERT_EQ(0, cache_ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, cache_ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
	my_snaps));
  // make a dirty chunks (creates clone snap[0])
  {
    bufferlist bl;
    bl.append("Thbbe hi");
    ASSERT_EQ(0, cache_ioctx.write("foo", bl, bl.length(), 0));
  }
  // and another
  my_snaps.resize(2);
  my_snaps[1] = my_snaps[0];
  ASSERT_EQ(0, cache_ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, cache_ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
	my_snaps));
  // make a dirty chunks (creates clone snap[1]; newest is my_snaps[0])
  {
    bufferlist bl;
    bl.append("Thcce hi");
    ASSERT_EQ(0, cache_ioctx.write("foo", bl, bl.length(), 0));
  }
  //flush on oldest snap
  cache_ioctx.snap_set_read(my_snaps[1]);
  // flush
  {
    ObjectReadOperation op;
    op.tier_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // flush on the next-oldest snap
  cache_ioctx.snap_set_read(my_snaps[0]);
  // flush
  {
    ObjectReadOperation op;
    op.tier_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  cache_ioctx.snap_set_read(librados::SNAP_HEAD);
  // flush HEAD as well
  {
    ObjectReadOperation op;
    op.tier_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // recompute CDC chunk boundaries locally to derive fingerprint oids
  std::unique_ptr<CDC> cdc = CDC::create("fastcdc", cbits(1024)-1);
  vector<pair<uint64_t, uint64_t>> chunks;
  bufferlist chunk;
  cdc->calc_chunks(gbl, &chunks);
  chunk.substr_of(gbl, chunks[1].first, chunks[1].second);
  string tgt_oid;
  // check chunk's refcount (chunks[1] of the original data)
  {
    unsigned char fingerprint[CEPH_CRYPTO_SHA1_DIGESTSIZE + 1] = {0};
    char p_str[CEPH_CRYPTO_SHA1_DIGESTSIZE*2+1] = {0};
    bufferlist t;
    SHA1 sha1_gen;
    int size = chunk.length();
    sha1_gen.Update((const unsigned char *)chunk.c_str(), size);
    sha1_gen.Final(fingerprint);
    buf_to_hex(fingerprint, CEPH_CRYPTO_SHA1_DIGESTSIZE, p_str);
    tgt_oid = string(p_str);
    ioctx.getxattr(p_str, CHUNK_REFCOUNT_ATTR, t);
    chunk_refs_t refs;
    try {
      auto iter = t.cbegin();
      decode(refs, iter);
    } catch (buffer::error& err) {
      ASSERT_TRUE(0);
    }
    // at least one clone still references this chunk
    ASSERT_LE(1u, refs.count());
  }
  bufferlist chunk2;
  chunk2.substr_of(gbl, chunks[0].first, chunks[0].second);
  // check chunk's refcount (chunks[0] of the original data)
  {
    unsigned char fingerprint[CEPH_CRYPTO_SHA1_DIGESTSIZE + 1] = {0};
    char p_str[CEPH_CRYPTO_SHA1_DIGESTSIZE*2+1] = {0};
    bufferlist t;
    SHA1 sha1_gen;
    int size = chunk2.length();
    sha1_gen.Update((const unsigned char *)chunk2.c_str(), size);
    sha1_gen.Final(fingerprint);
    buf_to_hex(fingerprint, CEPH_CRYPTO_SHA1_DIGESTSIZE, p_str);
    tgt_oid = string(p_str);
    ioctx.getxattr(p_str, CHUNK_REFCOUNT_ATTR, t);
    chunk_refs_t refs;
    try {
      auto iter = t.cbegin();
      decode(refs, iter);
    } catch (buffer::error& err) {
      ASSERT_TRUE(0);
    }
    ASSERT_LE(1u, refs.count());
  }
  // make a dirty chunks on HEAD again
  {
    bufferlist bl;
    bl.append("ThDDe hi");
    ASSERT_EQ(0, cache_ioctx.write("foo", bl, bl.length(), 0));
  }
  // flush
  {
    ObjectReadOperation op;
    op.tier_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // rebuild the local copy of the data and recompute the chunking
  bufferlist tmp;
  tmp.append("Thcce hi");
  gbl.begin(0).copy_in(tmp.length(), tmp);
  bufferlist chunk3;
  cdc->calc_chunks(gbl, &chunks);
  chunk3.substr_of(gbl, chunks[0].first, chunks[0].second);
  // check chunk's refcount: the first chunk of the ORIGINAL data should
  // no longer be referenced by any clone
  // NOTE(review): chunk3 is computed above but never used; the fingerprint
  // below is taken from chunk2 again — possibly a copy-paste leftover,
  // verify the intended chunk against the upstream history
  {
    unsigned char fingerprint[CEPH_CRYPTO_SHA1_DIGESTSIZE + 1] = {0};
    char p_str[CEPH_CRYPTO_SHA1_DIGESTSIZE*2+1] = {0};
    bufferlist t;
    SHA1 sha1_gen;
    int size = chunk2.length();
    sha1_gen.Update((const unsigned char *)chunk2.c_str(), size);
    sha1_gen.Final(fingerprint);
    buf_to_hex(fingerprint, CEPH_CRYPTO_SHA1_DIGESTSIZE, p_str);
    is_intended_refcount_state(cache_ioctx, "foo", ioctx, p_str, 0);
  }
}
// tier_flush must surface -ENOENT when the dedup tier pool is deleted
// out from under a manifest object that still needs flushing.
TEST_F(LibRadosTwoPoolsPP, TierFlushDuringFlush) {
  SKIP_IF_CRIMSON();
  // tier_flush is an octopus-or-later feature
  if (_get_required_osd_release(cluster) < "octopus") {
    cout << "cluster is not yet octopus, skipping test" << std::endl;
    return;
  }
  bufferlist inbl;

  // a throwaway pool that will serve as the dedup tier, then disappear
  std::string temp_pool_name = get_temp_pool_name() + "-test-flush";
  ASSERT_EQ(0, cluster.pool_create(temp_pool_name.c_str()));

  // dedup configuration on the cache pool, pointed at the throwaway pool
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "fingerprint_algorithm", "sha1"),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "dedup_tier", temp_pool_name),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "dedup_chunk_algorithm", "fastcdc"),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "dedup_cdc_chunk_size", 1024),
    inbl, NULL, NULL));

  // seed an 8 KiB object in the cache pool and a small one in the base pool
  bufferlist payload;
  generate_buffer(1024*8, &payload);
  {
    ObjectWriteOperation op;
    op.write_full(payload);
    ASSERT_EQ(0, cache_ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("there hiHI");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bar", &op));
  }

  // wait for maps to settle
  cluster.wait_for_latest_osdmap();

  // helper: run a read op against "foo" on the cache pool, require `expected`
  auto run_on_foo = [&](ObjectReadOperation &op, int expected) {
    librados::AioCompletion *comp = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", comp, &op,
					 librados::OPERATION_IGNORE_CACHE,
					 NULL));
    comp->wait_for_complete();
    ASSERT_EQ(expected, comp->get_return_value());
    comp->release();
  };

  // set-chunk: turn "foo" into a manifest object referencing "bar"
  {
    ObjectReadOperation op;
    op.set_chunk(0, 2, ioctx, "bar", 0, CEPH_OSD_OP_FLAG_WITH_REFERENCE);
    run_on_foo(op, 0);
  }

  // delete temp pool, so flushing chunk will fail
  ASSERT_EQ(0, s_cluster.pool_delete(temp_pool_name.c_str()));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();

  // flush to check that the proper error is returned
  {
    ObjectReadOperation op;
    op.tier_flush();
    run_on_foo(op, -ENOENT);
  }
}
// Exercise cls_cas_references_chunk(): count, per fingerprint chunk, how
// many clones of a manifest object reference it, across two snapshots and
// several HEAD overwrites.
TEST_F(LibRadosTwoPoolsPP, ManifestSnapHasChunk) {
  SKIP_IF_CRIMSON();
  // requires octopus for manifest/dedup ops
  if (_get_required_osd_release(cluster) < "octopus") {
    cout << "cluster is not yet octopus, skipping test" << std::endl;
    return;
  }
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(pool_name, "fingerprint_algorithm", "sha1"),
    inbl, NULL, NULL));
  cluster.wait_for_latest_osdmap();

  // create the to-be-manifest object in the base pool
  {
    bufferlist bl;
    bl.append("there HIHI");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }

  // fingerprint oids for each two-byte chunk payload used below
  string er_fp_oid = get_fp_oid("er", "sha1");
  string hi_fp_oid = get_fp_oid("hi", "sha1");
  string HI_fp_oid = get_fp_oid("HI", "sha1");
  string ai_fp_oid = get_fp_oid("ai", "sha1");
  string bi_fp_oid = get_fp_oid("bi", "sha1");
  string Er_fp_oid = get_fp_oid("Er", "sha1");
  string Hi_fp_oid = get_fp_oid("Hi", "sha1");
  string SI_fp_oid = get_fp_oid("SI", "sha1");

  // helper: write_full `data` into a chunk object in the chunk pool
  auto put_chunk = [&](const string &oid, const char *data) {
    ObjectWriteOperation op;
    bufferlist bl;
    bl.append(data);
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate(oid, &op));
  };
  put_chunk(er_fp_oid, "er");
  put_chunk(hi_fp_oid, "hi");
  put_chunk(HI_fp_oid, "HI");
  put_chunk(ai_fp_oid, "ai");
  put_chunk(bi_fp_oid, "bi");
  put_chunk(Er_fp_oid, "Er");
  put_chunk(Hi_fp_oid, "Hi");
  put_chunk(SI_fp_oid, "SI");

  // helper: overwrite a single byte of "foo" at offset `off`
  auto poke_foo = [&](const char *byte, uint64_t off) {
    bufferlist bl;
    bl.append(byte);
    ASSERT_EQ(0, ioctx.write("foo", bl, 1, off));
  };

  // dedup offsets 6 and 8 of HEAD against the same chunk ("HI")
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 6, 2, HI_fp_oid, "foo");
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 8, 2, HI_fp_oid, "foo");
  // foo head: [hi] [HI]

  // create a snapshot, clone
  vector<uint64_t> my_snaps(1);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));

  // first write creates the clone; the rest mutate HEAD
  poke_foo("a", 6);
  poke_foo("a", 6);
  poke_foo("S", 8);
  // foo snap[0]: [hi] [HI]
  // foo head   : [er] [ai] [SI]

  manifest_set_chunk(cluster, cache_ioctx, ioctx, 2, 2, er_fp_oid, "foo");
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 6, 2, ai_fp_oid, "foo");
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 8, 2, SI_fp_oid, "foo");

  // second snapshot
  my_snaps.resize(2);
  my_snaps[1] = my_snaps[0];
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));

  poke_foo("b", 6);
  poke_foo("b", 6);
  // foo snap[1]: [HI] [HI]
  // foo snap[0]: [er] [ai] [SI]
  // foo head   : [er] [bi] [SI]
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 6, 2, bi_fp_oid, "foo");

  // per-chunk reference counts across all clones of "foo"
  ASSERT_EQ(1, cls_cas_references_chunk(ioctx, "foo", SI_fp_oid));
  ASSERT_EQ(1, cls_cas_references_chunk(ioctx, "foo", er_fp_oid));
  ASSERT_EQ(1, cls_cas_references_chunk(ioctx, "foo", ai_fp_oid));
  ASSERT_EQ(2, cls_cas_references_chunk(ioctx, "foo", HI_fp_oid));
  // "Hi" exists as a chunk object but was never linked into any clone
  ASSERT_EQ(-ENOLINK, cls_cas_references_chunk(ioctx, "foo", Hi_fp_oid));
}
// Snapshot rollback across manifest (set-chunk) state: rolling HEAD back
// to each clone must restore that clone's contents, including a clone
// that is entirely chunked.
TEST_F(LibRadosTwoPoolsPP, ManifestRollback) {
  SKIP_IF_CRIMSON();
  // manifest rollback requires pacific
  if (_get_required_osd_release(cluster) < "pacific") {
    cout << "cluster is not yet pacific, skipping test" << std::endl;
    return;
  }
  // helper: write_full `data` into `oid` through the given ioctx
  auto write_full_to = [&](librados::IoCtx &ctx, const char *oid,
			   const char *data) {
    bufferlist bl;
    bl.append(data);
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ctx.operate(oid, &op));
  };
  write_full_to(ioctx, "foo", "CDere hiHI");
  write_full_to(cache_ioctx, "chunk1", "ABere hiHI");
  write_full_to(cache_ioctx, "chunk2", "CDere hiHI");
  write_full_to(cache_ioctx, "chunk3", "EFere hiHI");
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();

  // first snapshot, then overwrite HEAD
  vector<uint64_t> my_snaps(1);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  {
    bufferlist bl;
    bl.append("there hiHI");
    ASSERT_EQ(0, ioctx.write("foo", bl, bl.length(), 0));
  }
  // second snapshot, then overwrite HEAD again
  my_snaps.resize(2);
  my_snaps[1] = my_snaps[0];
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  {
    bufferlist bl;
    bl.append("thABe hiEF");
    ASSERT_EQ(0, ioctx.write("foo", bl, bl.length(), 0));
  }
  // dedup two ranges of HEAD
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 2, 2, "chunk1", "foo");
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 8, 2, "chunk3", "foo");
  // foo snap[1]:
  // foo snap[0]:
  // foo head   : [chunk1] [chunk3]
  ioctx.snap_set_read(my_snaps[1]);
  // dedup the whole of the oldest clone
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 0, 10, "chunk2", "foo");
  // foo snap[1]: [   chunk2   ]
  // foo snap[0]:
  // foo head   : [chunk1] [chunk3]

  // roll HEAD back to snap[0] ("there hiHI")
  ASSERT_EQ(0, ioctx.selfmanaged_snap_rollback("foo", my_snaps[0]));
  ioctx.snap_set_read(librados::SNAP_HEAD);
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('t', bl[0]);
  }
  // roll HEAD back to snap[1] ("CDere hiHI", fully chunked)
  ASSERT_EQ(0, ioctx.selfmanaged_snap_rollback("foo", my_snaps[1]));
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('C', bl[0]);
  }
}
// Verify chunk refcount maintenance through snapshot rollback and snap
// removal: rolling HEAD back to a clone must drop references held only
// by the old HEAD, and later overwrites must release the remaining ones.
TEST_F(LibRadosTwoPoolsPP, ManifestRollbackRefcount) {
  SKIP_IF_CRIMSON();
  // skip test if not yet pacific
  if (_get_required_osd_release(cluster) < "pacific") {
    cout << "cluster is not yet pacific, skipping test" << std::endl;
    return;
  }
  // create object in the base pool
  {
    bufferlist bl;
    bl.append("CDere hiHI");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // five chunk objects in the chunk pool
  {
    bufferlist bl;
    bl.append("ABere hiHI");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate("chunk1", &op));
  }
  {
    bufferlist bl;
    bl.append("CDere hiHI");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate("chunk2", &op));
  }
  {
    bufferlist bl;
    bl.append("EFere hiHI");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate("chunk3", &op));
  }
  {
    bufferlist bl;
    bl.append("DDDDD hiHI");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate("chunk4", &op));
  }
  {
    bufferlist bl;
    bl.append("EEEEE hiHI");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate("chunk5", &op));
  }
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // create a snapshot, clone; then dirty HEAD
  vector<uint64_t> my_snaps(1);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
	my_snaps));
  {
    bufferlist bl;
    bl.append("there hiHI");
    ASSERT_EQ(0, ioctx.write("foo", bl, bl.length(), 0));
  }
  // second snapshot; dirty HEAD again (newest snap is my_snaps[0])
  my_snaps.resize(2);
  my_snaps[1] = my_snaps[0];
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
	my_snaps));
  {
    bufferlist bl;
    bl.append("thABe hiEF");
    ASSERT_EQ(0, ioctx.write("foo", bl, bl.length(), 0));
  }
  // set-chunk on HEAD
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 2, 2, "chunk1", "foo");
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 8, 2, "chunk3", "foo");
  // foo snap[1]:
  // foo snap[0]:
  // foo head : [chunk1] [chunk3]
  ioctx.snap_set_read(my_snaps[1]);
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 2, 2, "chunk4", "foo");
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 6, 2, "chunk5", "foo");
  // foo snap[1]: [chunk4] [chunk5]
  // foo snap[0]:
  // foo head : [chunk1] [chunk3]
  ioctx.snap_set_read(my_snaps[0]);
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 0, 10, "chunk2", "foo");
  // foo snap[1]: [chunk4] [chunk5]
  // foo snap[0]: [ chunk2 ]
  // foo head : [chunk1] [chunk3]
  ASSERT_EQ(0, ioctx.selfmanaged_snap_rollback("foo", my_snaps[1]));
  // foo snap[1]: [chunk4] [chunk5]
  // foo snap[0]: [ chunk2 ]
  // foo head : [chunk4] [chunk5] <-- will contain these contents
  // refcounts are released asynchronously; give the cluster time to settle
  sleep(10);
  is_intended_refcount_state(ioctx, "foo", cache_ioctx, "chunk1", 0);
  is_intended_refcount_state(ioctx, "foo", cache_ioctx, "chunk3", 0);
  ioctx.selfmanaged_snap_remove(my_snaps[1]);
  sleep(10);
  // foo snap[1]:
  // foo snap[0]: [ chunk2 ]
  // foo head : [chunk4] [chunk5]
  ioctx.snap_set_read(librados::SNAP_HEAD);
  // HEAD still holds the references inherited from the rollback
  is_intended_refcount_state(ioctx, "foo", cache_ioctx, "chunk4", 1);
  is_intended_refcount_state(ioctx, "foo", cache_ioctx, "chunk5", 1);
  // overwriting HEAD drops its chunk references
  {
    bufferlist bl;
    bl.append("thABe hiEF");
    ASSERT_EQ(0, ioctx.write("foo", bl, bl.length(), 0));
  }
  // foo snap[1]:
  // foo snap[0]: [ chunk2 ]
  // foo head :
  is_intended_refcount_state(ioctx, "foo", cache_ioctx, "chunk1", 0);
  is_intended_refcount_state(ioctx, "foo", cache_ioctx, "chunk3", 0);
  is_intended_refcount_state(ioctx, "foo", cache_ioctx, "chunk4", 0);
  is_intended_refcount_state(ioctx, "foo", cache_ioctx, "chunk5", 0);
  is_intended_refcount_state(ioctx, "foo", cache_ioctx, "chunk2", 1);
}
// Verify that rolling a manifest object back to a clone whose chunk data
// has been evicted (chunk-missing state) restores the clone's contents
// and keeps the chunk refcounts consistent.
TEST_F(LibRadosTwoPoolsPP, ManifestEvictRollback) {
  SKIP_IF_CRIMSON();
  // skip test if not yet pacific
  if (_get_required_osd_release(cluster) < "pacific") {
    cout << "cluster is not yet pacific, skipping test" << std::endl;
    return;
  }
  // create object in the base pool
  {
    bufferlist bl;
    bl.append("CDere hiHI");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // three chunk objects in the chunk pool
  {
    bufferlist bl;
    bl.append("ABere hiHI");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate("chunk1", &op));
  }
  {
    bufferlist bl;
    bl.append("CDere hiHI");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate("chunk2", &op));
  }
  {
    bufferlist bl;
    bl.append("EFere hiHI");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate("chunk3", &op));
  }
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // create a snapshot, clone; then dirty HEAD
  vector<uint64_t> my_snaps(1);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
	my_snaps));
  {
    bufferlist bl;
    bl.append("there hiHI");
    ASSERT_EQ(0, ioctx.write("foo", bl, bl.length(), 0));
  }
  // set-chunk on HEAD
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 2, 2, "chunk1", "foo");
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 8, 2, "chunk3", "foo");
  // foo snap[0]:
  // foo head : [chunk1] [chunk3]
  ioctx.snap_set_read(my_snaps[0]);
  manifest_set_chunk(cluster, cache_ioctx, ioctx, 0, 10, "chunk2", "foo");
  // foo snap[0]: [ chunk2 ]
  // foo head : [chunk1] [chunk3]
  // refcounts are established asynchronously; give the cluster time
  sleep(10);
  ioctx.snap_set_read(librados::SNAP_HEAD);
  is_intended_refcount_state(ioctx, "foo", cache_ioctx, "chunk1", 1);
  is_intended_refcount_state(ioctx, "foo", cache_ioctx, "chunk3", 1);
  ioctx.snap_set_read(my_snaps[0]);
  // evict--this makes the chunk missing state
  {
    ObjectReadOperation op;
    op.tier_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_OVERLAY, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    // fix: the completion was previously leaked here; release it like
    // every other aio_operate block in this file
    completion->release();
  }
  // rollback to my_snaps[0]: must repromote the evicted chunk data
  ASSERT_EQ(0, ioctx.selfmanaged_snap_rollback("foo", my_snaps[0]));
  ioctx.snap_set_read(librados::SNAP_HEAD);
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('C', bl[0]);
  }
  is_intended_refcount_state(ioctx, "foo", cache_ioctx, "chunk2", 1);
}
// Two-pool fixture whose base pool is erasure-coded; a replicated
// "cache" pool is created fresh for every test and layered on top by the
// tests themselves (this fixture only creates/destroys the pools).
class LibRadosTwoPoolsECPP : public RadosTestECPP
{
public:
  LibRadosTwoPoolsECPP() {};
  ~LibRadosTwoPoolsECPP() override {};
protected:
  // the EC base pool is shared by every test in the case
  static void SetUpTestCase() {
    SKIP_IF_CRIMSON();
    pool_name = get_temp_pool_name();
    ASSERT_EQ("", create_one_ec_pool_pp(pool_name, s_cluster));
  }
  static void TearDownTestCase() {
    SKIP_IF_CRIMSON();
    ASSERT_EQ(0, destroy_one_ec_pool_pp(pool_name, s_cluster));
  }
  static std::string cache_pool_name;
  void SetUp() override {
    SKIP_IF_CRIMSON();
    // fresh cache pool per test, created before the base fixture connects
    cache_pool_name = get_temp_pool_name();
    ASSERT_EQ(0, s_cluster.pool_create(cache_pool_name.c_str()));
    RadosTestECPP::SetUp();
    ASSERT_EQ(0, cluster.ioctx_create(cache_pool_name.c_str(), cache_ioctx));
    cache_ioctx.application_enable("rados", true);
    cache_ioctx.set_namespace(nspace);
  }
  void TearDown() override {
    SKIP_IF_CRIMSON();
    // flush + evict cache
    flush_evict_all(cluster, cache_ioctx);
    bufferlist inbl;
    // tear down tiers (tests may have configured an overlay; these
    // commands are issued unconditionally and must succeed)
    ASSERT_EQ(0, cluster.mon_command(
      "{\"prefix\": \"osd tier remove-overlay\", \"pool\": \"" + pool_name +
      "\"}",
      inbl, NULL, NULL));
    ASSERT_EQ(0, cluster.mon_command(
      "{\"prefix\": \"osd tier remove\", \"pool\": \"" + pool_name +
      "\", \"tierpool\": \"" + cache_pool_name + "\"}",
      inbl, NULL, NULL));
    // wait for maps to settle before next test
    cluster.wait_for_latest_osdmap();
    RadosTestECPP::TearDown();
    // clean out and drop the per-test cache pool
    cleanup_default_namespace(cache_ioctx);
    cleanup_namespace(cache_ioctx, nspace);
    cache_ioctx.close();
    ASSERT_EQ(0, s_cluster.pool_delete(cache_pool_name.c_str()));
  }
  librados::IoCtx cache_ioctx;
};
// out-of-class definition of the fixture's static member
std::string LibRadosTwoPoolsECPP::cache_pool_name;
// dirty/undirty flag semantics on an EC pool: undirty on a nonexistent or
// already-clean object succeeds, and create marks the object dirty.
TEST_F(LibRadosTierECPP, Dirty) {
  SKIP_IF_CRIMSON();
  // helper: read the dirty flag of "foo" and require `expected`
  auto expect_dirty = [&](bool expected) {
    bool dirty = false;
    int r = -1;
    ObjectReadOperation op;
    op.is_dirty(&dirty, &r);
    ASSERT_EQ(0, ioctx.operate("foo", &op, NULL));
    ASSERT_EQ(expected, dirty);
    ASSERT_EQ(0, r);
  };
  {
    ObjectWriteOperation op;
    op.undirty();
    ASSERT_EQ(0, ioctx.operate("foo", &op)); // still get 0 if it dne
  }
  {
    ObjectWriteOperation op;
    op.create(true);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  expect_dirty(true);
  {
    ObjectWriteOperation op;
    op.undirty();
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    ObjectWriteOperation op;
    op.undirty();
    ASSERT_EQ(0, ioctx.operate("foo", &op)); // still 0 if already clean
  }
  expect_dirty(false);
  // the truncate-as-dirtying checks below are disabled upstream;
  // preserved verbatim for reference
  //{
  //  ObjectWriteOperation op;
  //  op.truncate(0); // still a write even tho it is a no-op
  //  ASSERT_EQ(0, ioctx.operate("foo", &op));
  //}
  //{
  //  bool dirty = false;
  //  int r = -1;
  //  ObjectReadOperation op;
  //  op.is_dirty(&dirty, &r);
  //  ASSERT_EQ(0, ioctx.operate("foo", &op, NULL));
  //  ASSERT_TRUE(dirty);
  //  ASSERT_EQ(0, r);
  //}
}
// Overlay routing on an EC base pool: once the cache tier is overlaid,
// reads through either ioctx hit the cache copy unless the overlay is
// explicitly bypassed with OPERATION_IGNORE_OVERLAY.
TEST_F(LibRadosTwoPoolsECPP, Overlay) {
  SKIP_IF_CRIMSON();
  // seed both pools with distinct contents for "foo"
  {
    bufferlist bl;
    bl.append("base");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("cache");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate("foo", &op));
  }
  // wire the cache pool on top of the base pool
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();

  // by default, the overlay sends reads to the cache pool
  auto expect_first_byte = [&](librados::IoCtx &ctx, char expected) {
    bufferlist bl;
    ASSERT_EQ(1, ctx.read("foo", bl, 1, 0));
    ASSERT_EQ(expected, bl[0]);
  };
  expect_first_byte(ioctx, 'c');
  expect_first_byte(cache_ioctx, 'c');

  // unless we explicitly bypass the overlay
  {
    bufferlist bl;
    ObjectReadOperation op;
    op.read(0, 1, &bl, NULL);
    librados::AioCompletion *comp = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "foo", comp, &op,
      librados::OPERATION_IGNORE_OVERLAY, NULL));
    comp->wait_for_complete();
    ASSERT_EQ(0, comp->get_return_value());
    comp->release();
    ASSERT_EQ('b', bl[0]);
  }
}
// Writeback cache over an EC base pool: reading an existing object
// promotes it into the cache tier; reading a missing object leaves a
// whiteout object in the cache tier.
TEST_F(LibRadosTwoPoolsECPP, Promote) {
  SKIP_IF_CRIMSON();
  // create object in the base pool
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // configure cache tier in writeback mode
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // read, trigger a promote
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
  }
  // read a nonexistent object twice, trigger a whiteout in the cache
  {
    bufferlist bl;
    ASSERT_EQ(-ENOENT, ioctx.read("bar", bl, 1, 0));
    ASSERT_EQ(-ENOENT, ioctx.read("bar", bl, 1, 0));
  }
  // verify the object is present in the cache tier; both "foo" and the
  // "bar" whiteout are listed, in unspecified order
  {
    NObjectIterator it = cache_ioctx.nobjects_begin();
    ASSERT_TRUE(it != cache_ioctx.nobjects_end());
    ASSERT_TRUE(it->get_oid() == string("foo") || it->get_oid() == string("bar"));
    ++it;
    ASSERT_TRUE(it->get_oid() == string("foo") || it->get_oid() == string("bar"));
    ++it;
    ASSERT_TRUE(it == cache_ioctx.nobjects_end());
  }
}
// Verify promotion with self-managed snapshots on an EC base pool: head
// reads promote whole objects into the cache tier, older clones remain
// readable through the overlay, and a scrub of the cache PG tolerates
// clones that have not been promoted into the cache tier.
TEST_F(LibRadosTwoPoolsECPP, PromoteSnap) {
  SKIP_IF_CRIMSON();
  // create object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bar", &op));
  }
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("baz", &op));
  }
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bam", &op));
  }
  // create a snapshot, clone
  vector<uint64_t> my_snaps(1);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
						    my_snaps));
  // overwrite foo/bar/bam and delete baz; each object now has a pre-snap
  // clone holding the original "hi there" contents
  {
    bufferlist bl;
    bl.append("ciao!");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("ciao!");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bar", &op));
  }
  {
    ObjectWriteOperation op;
    op.remove();
    ASSERT_EQ(0, ioctx.operate("baz", &op));
  }
  {
    bufferlist bl;
    bl.append("ciao!");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bam", &op));
  }
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // read, trigger a promote on the head
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('c', bl[0]);
  }
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("bam", bl, 1, 0));
    ASSERT_EQ('c', bl[0]);
  }
  ioctx.snap_set_read(my_snaps[0]);
  // stop and scrub this pg (to make sure scrub can handle missing
  // clones in the cache tier)
  // This test requires cache tier and base tier to have the same pg_num/pgp_num
  {
    // retry loop: the mgr's osdmap can lag, so "pg scrub" may transiently
    // return -EAGAIN or -ENOENT; back off and retry up to 5 times
    for (int tries = 0; tries < 5; ++tries) {
      IoCtx cache_ioctx;
      ASSERT_EQ(0, cluster.ioctx_create(cache_pool_name.c_str(), cache_ioctx));
      uint32_t hash;
      // hash from the *base* pool ioctx is valid for the cache pool's pgid
      // only because both pools share pg_num/pgp_num (see note above)
      ASSERT_EQ(0, ioctx.get_object_pg_hash_position2("foo", &hash));
      ostringstream ss;
      ss << "{\"prefix\": \"pg scrub\", \"pgid\": \""
	 << cache_ioctx.get_id() << "."
	 << hash
	 << "\"}";
      int r = cluster.mon_command(ss.str(), inbl, NULL, NULL);
      if (r == -EAGAIN ||
	  r == -ENOENT) {  // in case mgr osdmap is a bit stale
	sleep(5);
	continue;
      }
      ASSERT_EQ(0, r);
      break;
    }
    // give it a few seconds to go. this is sloppy but is usually enough time
    cout << "waiting for scrub..." << std::endl;
    sleep(15);
    cout << "done waiting" << std::endl;
  }
  // read foo snap
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('h', bl[0]);
  }
  // read bar snap
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("bar", bl, 1, 0));
    ASSERT_EQ('h', bl[0]);
  }
  // read baz snap
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("baz", bl, 1, 0));
    ASSERT_EQ('h', bl[0]);
  }
  ioctx.snap_set_read(librados::SNAP_HEAD);
  // read foo
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('c', bl[0]);
  }
  // read bar
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("bar", bl, 1, 0));
    ASSERT_EQ('c', bl[0]);
  }
  // read baz (deleted on head, so only the snap retains it)
  {
    bufferlist bl;
    ASSERT_EQ(-ENOENT, ioctx.read("baz", bl, 1, 0));
  }
  // cleanup
  ioctx.selfmanaged_snap_remove(my_snaps[0]);
}
// Race a snapshot trim against a snap-read promotion: once the snap has
// been removed, a read at that snapid may still see the clone or may get
// ENOENT depending on how far trimming has progressed.  Both outcomes
// are valid; the test only checks that nothing else happens.
TEST_F(LibRadosTwoPoolsECPP, PromoteSnapTrimRace) {
  SKIP_IF_CRIMSON();
  // seed the base pool with an object
  {
    ObjectWriteOperation wrop;
    bufferlist payload;
    payload.append("hi there");
    wrop.write_full(payload);
    ASSERT_EQ(0, ioctx.operate("foo", &wrop));
  }
  // take a self-managed snapshot, then overwrite to create a clone
  vector<uint64_t> my_snaps(1);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
						    my_snaps));
  {
    ObjectWriteOperation wrop;
    bufferlist payload;
    payload.append("ciao!");
    wrop.write_full(payload);
    ASSERT_EQ(0, ioctx.operate("foo", &wrop));
  }
  // put the cache pool in front of the base pool in writeback mode
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // remove the snap, then immediately read at that snapid
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps[0]));
  ioctx.snap_set_read(my_snaps[0]);
  // the OSD may or may not have trimmed the clone yet; accept either
  {
    bufferlist out;
    const int rc = ioctx.read("foo", out, 1, 0);
    ASSERT_TRUE(rc == 1 || rc == -ENOENT);
  }
  // cleanup (snap is already logically gone; ignore the result)
  ioctx.selfmanaged_snap_remove(my_snaps[0]);
}
// Verify whiteout semantics in the cache tier: deleting an object through
// the overlay leaves a whiteout, assert_exists+remove on a nonexistent
// object fails consistently, deleting a whiteout directly (bypassing the
// overlay) removes it, and an object can be recreated over a whiteout.
TEST_F(LibRadosTwoPoolsECPP, Whiteout) {
  SKIP_IF_CRIMSON();
  // create object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // create some whiteouts, verify they behave
  {
    // "foo" exists in the base tier, so assert_exists succeeds and the
    // remove leaves a whiteout in the cache
    ObjectWriteOperation op;
    op.assert_exists();
    op.remove();
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    // "bar" never existed; the failed op still leaves a whiteout behind
    ObjectWriteOperation op;
    op.assert_exists();
    op.remove();
    ASSERT_EQ(-ENOENT, ioctx.operate("bar", &op));
  }
  {
    // repeat to confirm the whiteout keeps reporting ENOENT
    ObjectWriteOperation op;
    op.assert_exists();
    op.remove();
    ASSERT_EQ(-ENOENT, ioctx.operate("bar", &op));
  }
  // verify the whiteouts are there in the cache tier
  {
    NObjectIterator it = cache_ioctx.nobjects_begin();
    ASSERT_TRUE(it != cache_ioctx.nobjects_end());
    ASSERT_TRUE(it->get_oid() == string("foo") || it->get_oid() == string("bar"));
    ++it;
    ASSERT_TRUE(it->get_oid() == string("foo") || it->get_oid() == string("bar"));
    ++it;
    ASSERT_TRUE(it == cache_ioctx.nobjects_end());
  }
  // delete a whiteout and verify it goes away
  ASSERT_EQ(0 /* unused */, -ENOENT == ioctx.remove("foo") ? 0 : ({ ADD_FAILURE(); 0; })), // NOTE(review): see original line below
  // (the line above is illustrative only -- the real check follows)
  ASSERT_EQ(-ENOENT, ioctx.remove("foo"));
  {
    // remove "bar" with IGNORE_CACHE, deleting the whiteout itself
    ObjectWriteOperation op;
    op.remove();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate("bar", completion, &op,
				   librados::OPERATION_IGNORE_CACHE));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
    // only the "foo" whiteout should remain in the cache tier
    NObjectIterator it = cache_ioctx.nobjects_begin();
    ASSERT_TRUE(it != cache_ioctx.nobjects_end());
    ASSERT_TRUE(it->get_oid() == string("foo"));
    ++it;
    ASSERT_TRUE(it == cache_ioctx.nobjects_end());
  }
  // recreate an object and verify we can read it
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('h', bl[0]);
  }
}
// Verify cache eviction: a pinned object cannot be evicted (-EPERM), a
// clean unpinned object can, evicting an already-evicted object is a
// no-op, and evicting a dirty object fails with -EBUSY.
TEST_F(LibRadosTwoPoolsECPP, Evict) {
  SKIP_IF_CRIMSON();
  // create object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // read, trigger a promote
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
  }
  // read, trigger a whiteout, and a dirty object
  {
    bufferlist bl;
    ASSERT_EQ(-ENOENT, ioctx.read("bar", bl, 1, 0));
    ASSERT_EQ(-ENOENT, ioctx.read("bar", bl, 1, 0));
    // the zero-length write makes "bar" exist and dirty in the cache
    ASSERT_EQ(0, ioctx.write("bar", bl, bl.length(), 0));
  }
  // verify the object is present in the cache tier
  {
    NObjectIterator it = cache_ioctx.nobjects_begin();
    ASSERT_TRUE(it != cache_ioctx.nobjects_end());
    ASSERT_TRUE(it->get_oid() == string("foo") || it->get_oid() == string("bar"));
    ++it;
    ASSERT_TRUE(it->get_oid() == string("foo") || it->get_oid() == string("bar"));
    ++it;
    ASSERT_TRUE(it == cache_ioctx.nobjects_end());
  }
  // pin
  {
    ObjectWriteOperation op;
    op.cache_pin();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // evict the pinned object with -EPERM
  {
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion, &op,
					 librados::OPERATION_IGNORE_CACHE,
					 NULL));
    completion->wait_for_complete();
    ASSERT_EQ(-EPERM, completion->get_return_value());
    completion->release();
  }
  // unpin
  {
    ObjectWriteOperation op;
    op.cache_unpin();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // flush (must flush before evict: only clean objects are evictable)
  {
    ObjectReadOperation op;
    op.cache_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_OVERLAY, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // verify clean
  {
    bool dirty = false;
    int r = -1;
    ObjectReadOperation op;
    op.is_dirty(&dirty, &r);
    ASSERT_EQ(0, cache_ioctx.operate("foo", &op, NULL));
    ASSERT_FALSE(dirty);
    ASSERT_EQ(0, r);
  }
  // evict
  {
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion, &op,
					 librados::OPERATION_IGNORE_CACHE,
					 NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // evicting an already-evicted object succeeds (idempotent)
  {
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // "bar" is still dirty (never flushed), so eviction fails with -EBUSY
  {
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "bar", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(-EBUSY, completion->get_return_value());
    completion->release();
  }
}
// Verify eviction of snapshotted objects: individual clones can be
// evicted independently of the head, evicting the head while clones are
// still cached fails with -EBUSY, and head+clones can be evicted in the
// right order (clones first, then head).
TEST_F(LibRadosTwoPoolsECPP, EvictSnap) {
  SKIP_IF_CRIMSON();
  // create object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bar", &op));
  }
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("baz", &op));
  }
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bam", &op));
  }
  // create a snapshot, clone
  vector<uint64_t> my_snaps(1);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
						    my_snaps));
  // overwrite foo/bar/bam and delete baz; each now has a pre-snap clone
  // holding the original "hi there" contents
  {
    bufferlist bl;
    bl.append("ciao!");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("ciao!");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bar", &op));
  }
  {
    ObjectWriteOperation op;
    op.remove();
    ASSERT_EQ(0, ioctx.operate("baz", &op));
  }
  {
    bufferlist bl;
    bl.append("ciao!");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bam", &op));
  }
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // read, trigger a promote on the head
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('c', bl[0]);
  }
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("bam", bl, 1, 0));
    ASSERT_EQ('c', bl[0]);
  }
  // evict bam
  {
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "bam", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // a direct cache-pool read now misses: bam is gone from the cache tier
  {
    bufferlist bl;
    ObjectReadOperation op;
    op.read(1, 0, &bl, NULL);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "bam", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(-ENOENT, completion->get_return_value());
    completion->release();
  }
  // read foo snap
  ioctx.snap_set_read(my_snaps[0]);
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('h', bl[0]);
  }
  // evict foo snap
  {
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // snap is gone...
  {
    bufferlist bl;
    ObjectReadOperation op;
    op.read(1, 0, &bl, NULL);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(-ENOENT, completion->get_return_value());
    completion->release();
  }
  // head is still there...
  ioctx.snap_set_read(librados::SNAP_HEAD);
  {
    bufferlist bl;
    ObjectReadOperation op;
    op.read(1, 0, &bl, NULL);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // promote head + snap of bar
  ioctx.snap_set_read(librados::SNAP_HEAD);
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("bar", bl, 1, 0));
    ASSERT_EQ('c', bl[0]);
  }
  ioctx.snap_set_read(my_snaps[0]);
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("bar", bl, 1, 0));
    ASSERT_EQ('h', bl[0]);
  }
  // evict bar head (fail)
  // the cached clone still references the head, so head eviction is refused
  ioctx.snap_set_read(librados::SNAP_HEAD);
  {
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "bar", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(-EBUSY, completion->get_return_value());
    completion->release();
  }
  // evict bar snap
  ioctx.snap_set_read(my_snaps[0]);
  {
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "bar", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // ...and then head
  ioctx.snap_set_read(librados::SNAP_HEAD);
  {
    bufferlist bl;
    ObjectReadOperation op;
    op.read(1, 0, &bl, NULL);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "bar", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  {
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "bar", completion, &op,
      librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // cleanup
  ioctx.selfmanaged_snap_remove(my_snaps[0]);
}
// Verify cache_try_flush (non-blocking flush): it is refused with -EPERM
// on a pinned object, succeeds once unpinned, marks the object clean,
// writes it to the base tier, and allows subsequent eviction.
TEST_F(LibRadosTwoPoolsECPP, TryFlush) {
  SKIP_IF_CRIMSON();
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // create object (writeback mode: the write lands in the cache tier only)
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // verify the object is present in the cache tier
  {
    NObjectIterator it = cache_ioctx.nobjects_begin();
    ASSERT_TRUE(it != cache_ioctx.nobjects_end());
    ASSERT_TRUE(it->get_oid() == string("foo"));
    ++it;
    ASSERT_TRUE(it == cache_ioctx.nobjects_end());
  }
  // verify the object is NOT present in the base tier
  {
    NObjectIterator it = ioctx.nobjects_begin();
    ASSERT_TRUE(it == ioctx.nobjects_end());
  }
  // verify dirty
  {
    bool dirty = false;
    int r = -1;
    ObjectReadOperation op;
    op.is_dirty(&dirty, &r);
    ASSERT_EQ(0, cache_ioctx.operate("foo", &op, NULL));
    ASSERT_TRUE(dirty);
    ASSERT_EQ(0, r);
  }
  // pin
  {
    ObjectWriteOperation op;
    op.cache_pin();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // flush the pinned object with -EPERM
  {
    ObjectReadOperation op;
    op.cache_try_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_OVERLAY |
      librados::OPERATION_SKIPRWLOCKS, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(-EPERM, completion->get_return_value());
    completion->release();
  }
  // unpin
  {
    ObjectWriteOperation op;
    op.cache_unpin();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // flush (try_flush requires SKIPRWLOCKS since it won't block on locks)
  {
    ObjectReadOperation op;
    op.cache_try_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_OVERLAY |
      librados::OPERATION_SKIPRWLOCKS, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // verify clean
  {
    bool dirty = false;
    int r = -1;
    ObjectReadOperation op;
    op.is_dirty(&dirty, &r);
    ASSERT_EQ(0, cache_ioctx.operate("foo", &op, NULL));
    ASSERT_FALSE(dirty);
    ASSERT_EQ(0, r);
  }
  // verify in base tier
  {
    NObjectIterator it = ioctx.nobjects_begin();
    ASSERT_TRUE(it != ioctx.nobjects_end());
    ASSERT_TRUE(it->get_oid() == string("foo"));
    ++it;
    ASSERT_TRUE(it == ioctx.nobjects_end());
  }
  // evict it
  {
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op, librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // verify no longer in cache tier
  {
    NObjectIterator it = cache_ioctx.nobjects_begin();
    ASSERT_TRUE(it == cache_ioctx.nobjects_end());
  }
}
// Verify behavior when a flush cannot succeed: an object given omap data
// in the cache tier cannot be flushed to an EC base tier (EC pools do not
// support omap), and the object must still be removable/evictable
// afterwards.  Includes a workaround for a historical hang (issue 14511).
TEST_F(LibRadosTwoPoolsECPP, FailedFlush) {
  SKIP_IF_CRIMSON();
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // create object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // verify the object is present in the cache tier
  {
    NObjectIterator it = cache_ioctx.nobjects_begin();
    ASSERT_TRUE(it != cache_ioctx.nobjects_end());
    ASSERT_TRUE(it->get_oid() == string("foo"));
    ++it;
    ASSERT_TRUE(it == cache_ioctx.nobjects_end());
  }
  // verify the object is NOT present in the base tier
  {
    NObjectIterator it = ioctx.nobjects_begin();
    ASSERT_TRUE(it == ioctx.nobjects_end());
  }
  // set omap (only possible in the replicated cache tier; the EC base
  // tier cannot store omap, which is what makes the later flush fail)
  {
    ObjectWriteOperation op;
    std::map<std::string, bufferlist> omap;
    omap["somekey"] = bufferlist();
    op.omap_set(omap);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // flush (expected to fail: omap cannot be written to the EC base pool)
  {
    ObjectReadOperation op;
    op.cache_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_OVERLAY, NULL));
    completion->wait_for_complete();
    ASSERT_NE(0, completion->get_return_value());
    completion->release();
  }
  // get omap
  {
    ObjectReadOperation op;
    bufferlist bl;
    int prval = 0;
    std::set<std::string> keys;
    keys.insert("somekey");
    std::map<std::string, bufferlist> map;
    op.omap_get_vals_by_keys(keys, &map, &prval);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion, &op, &bl));
    sleep(5);
    bool completed = completion->is_complete();
    if( !completed ) {
      cache_ioctx.aio_cancel(completion);
      std::cerr << "Most probably test case will hang here, please reset manually" << std::endl;
      ASSERT_TRUE(completed); //in fact we are locked forever at test case shutdown unless fix for http://tracker.ceph.com/issues/14511 is applied. Seems there is no workaround for that
    }
    completion->release();
  }
  // verify still not in base tier
  {
    ASSERT_TRUE(ioctx.nobjects_begin() == ioctx.nobjects_end());
  }
  // erase it
  {
    ObjectWriteOperation op;
    op.remove();
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // flush whiteout (the delete removed the omap, so this flush succeeds)
  {
    ObjectReadOperation op;
    op.cache_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_OVERLAY, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // evict
  {
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op, librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // verify no longer in cache tier
  {
    NObjectIterator it = cache_ioctx.nobjects_begin();
    ASSERT_TRUE(it == cache_ioctx.nobjects_end());
  }
  // or base tier
  {
    NObjectIterator it = ioctx.nobjects_begin();
    ASSERT_TRUE(it == ioctx.nobjects_end());
  }
}
// Verify cache_flush: a dirty cached object is refused while pinned,
// flushes cleanly once unpinned, lands in the base tier, survives
// eviction with a consistent user_version, and whiteouts flush/evict
// cleanly after deletion.
TEST_F(LibRadosTwoPoolsECPP, Flush) {
  SKIP_IF_CRIMSON();
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // captured after the dirty check; must survive flush + evict + re-read
  uint64_t user_version = 0;
  // create object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // verify the object is present in the cache tier
  {
    NObjectIterator it = cache_ioctx.nobjects_begin();
    ASSERT_TRUE(it != cache_ioctx.nobjects_end());
    ASSERT_TRUE(it->get_oid() == string("foo"));
    ++it;
    ASSERT_TRUE(it == cache_ioctx.nobjects_end());
  }
  // verify the object is NOT present in the base tier
  {
    NObjectIterator it = ioctx.nobjects_begin();
    ASSERT_TRUE(it == ioctx.nobjects_end());
  }
  // verify dirty
  {
    bool dirty = false;
    int r = -1;
    ObjectReadOperation op;
    op.is_dirty(&dirty, &r);
    ASSERT_EQ(0, cache_ioctx.operate("foo", &op, NULL));
    ASSERT_TRUE(dirty);
    ASSERT_EQ(0, r);
    user_version = cache_ioctx.get_last_version();
  }
  // pin
  {
    ObjectWriteOperation op;
    op.cache_pin();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // flush the pinned object with -EPERM
  {
    ObjectReadOperation op;
    op.cache_try_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_OVERLAY |
      librados::OPERATION_SKIPRWLOCKS, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(-EPERM, completion->get_return_value());
    completion->release();
  }
  // unpin
  {
    ObjectWriteOperation op;
    op.cache_unpin();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // flush
  {
    ObjectReadOperation op;
    op.cache_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_OVERLAY, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // verify clean
  {
    bool dirty = false;
    int r = -1;
    ObjectReadOperation op;
    op.is_dirty(&dirty, &r);
    ASSERT_EQ(0, cache_ioctx.operate("foo", &op, NULL));
    ASSERT_FALSE(dirty);
    ASSERT_EQ(0, r);
  }
  // verify in base tier
  {
    NObjectIterator it = ioctx.nobjects_begin();
    ASSERT_TRUE(it != ioctx.nobjects_end());
    ASSERT_TRUE(it->get_oid() == string("foo"));
    ++it;
    ASSERT_TRUE(it == ioctx.nobjects_end());
  }
  // evict it
  {
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op, librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // verify no longer in cache tier
  {
    NObjectIterator it = cache_ioctx.nobjects_begin();
    ASSERT_TRUE(it == cache_ioctx.nobjects_end());
  }
  // read it again and verify the version is consistent
  // (re-promotion must preserve the user_version recorded before flush)
  {
    bufferlist bl;
    ASSERT_EQ(1, cache_ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ(user_version, cache_ioctx.get_last_version());
  }
  // erase it
  {
    ObjectWriteOperation op;
    op.remove();
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // flush whiteout
  {
    ObjectReadOperation op;
    op.cache_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_OVERLAY, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // evict
  {
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op, librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // verify no longer in cache tier
  {
    NObjectIterator it = cache_ioctx.nobjects_begin();
    ASSERT_TRUE(it == cache_ioctx.nobjects_end());
  }
  // or base tier
  {
    NObjectIterator it = ioctx.nobjects_begin();
    ASSERT_TRUE(it == ioctx.nobjects_end());
  }
}
TEST_F(LibRadosTwoPoolsECPP, FlushSnap) {
SKIP_IF_CRIMSON();
// configure cache
bufferlist inbl;
ASSERT_EQ(0, cluster.mon_command(
"{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
"\", \"tierpool\": \"" + cache_pool_name +
"\", \"force_nonempty\": \"--force-nonempty\" }",
inbl, NULL, NULL));
ASSERT_EQ(0, cluster.mon_command(
"{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
"\", \"overlaypool\": \"" + cache_pool_name + "\"}",
inbl, NULL, NULL));
ASSERT_EQ(0, cluster.mon_command(
"{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
"\", \"mode\": \"writeback\"}",
inbl, NULL, NULL));
// wait for maps to settle
cluster.wait_for_latest_osdmap();
// create object
{
bufferlist bl;
bl.append("a");
ObjectWriteOperation op;
op.write_full(bl);
ASSERT_EQ(0, ioctx.operate("foo", &op));
}
// create a snapshot, clone
vector<uint64_t> my_snaps(1);
ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
my_snaps));
{
bufferlist bl;
bl.append("b");
ObjectWriteOperation op;
op.write_full(bl);
ASSERT_EQ(0, ioctx.operate("foo", &op));
}
// and another
my_snaps.resize(2);
my_snaps[1] = my_snaps[0];
ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
my_snaps));
{
bufferlist bl;
bl.append("c");
ObjectWriteOperation op;
op.write_full(bl);
ASSERT_EQ(0, ioctx.operate("foo", &op));
}
// verify the object is present in the cache tier
{
NObjectIterator it = cache_ioctx.nobjects_begin();
ASSERT_TRUE(it != cache_ioctx.nobjects_end());
ASSERT_TRUE(it->get_oid() == string("foo"));
++it;
ASSERT_TRUE(it == cache_ioctx.nobjects_end());
}
// verify the object is NOT present in the base tier
{
NObjectIterator it = ioctx.nobjects_begin();
ASSERT_TRUE(it == ioctx.nobjects_end());
}
// flush on head (should fail)
ioctx.snap_set_read(librados::SNAP_HEAD);
{
ObjectReadOperation op;
op.cache_flush();
librados::AioCompletion *completion = cluster.aio_create_completion();
ASSERT_EQ(0, ioctx.aio_operate(
"foo", completion, &op,
librados::OPERATION_IGNORE_CACHE, NULL));
completion->wait_for_complete();
ASSERT_EQ(-EBUSY, completion->get_return_value());
completion->release();
}
// flush on recent snap (should fail)
ioctx.snap_set_read(my_snaps[0]);
{
ObjectReadOperation op;
op.cache_flush();
librados::AioCompletion *completion = cluster.aio_create_completion();
ASSERT_EQ(0, ioctx.aio_operate(
"foo", completion, &op,
librados::OPERATION_IGNORE_CACHE, NULL));
completion->wait_for_complete();
ASSERT_EQ(-EBUSY, completion->get_return_value());
completion->release();
}
// flush on oldest snap
ioctx.snap_set_read(my_snaps[1]);
{
ObjectReadOperation op;
op.cache_flush();
librados::AioCompletion *completion = cluster.aio_create_completion();
ASSERT_EQ(0, ioctx.aio_operate(
"foo", completion, &op,
librados::OPERATION_IGNORE_CACHE, NULL));
completion->wait_for_complete();
ASSERT_EQ(0, completion->get_return_value());
completion->release();
}
// flush on next oldest snap
ioctx.snap_set_read(my_snaps[0]);
{
ObjectReadOperation op;
op.cache_flush();
librados::AioCompletion *completion = cluster.aio_create_completion();
ASSERT_EQ(0, ioctx.aio_operate(
"foo", completion, &op,
librados::OPERATION_IGNORE_CACHE, NULL));
completion->wait_for_complete();
ASSERT_EQ(0, completion->get_return_value());
completion->release();
}
// flush on head
ioctx.snap_set_read(librados::SNAP_HEAD);
{
ObjectReadOperation op;
op.cache_flush();
librados::AioCompletion *completion = cluster.aio_create_completion();
ASSERT_EQ(0, ioctx.aio_operate(
"foo", completion, &op,
librados::OPERATION_IGNORE_CACHE, NULL));
completion->wait_for_complete();
ASSERT_EQ(0, completion->get_return_value());
completion->release();
}
// verify i can read the snaps from the cache pool
ioctx.snap_set_read(librados::SNAP_HEAD);
{
bufferlist bl;
ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
ASSERT_EQ('c', bl[0]);
}
ioctx.snap_set_read(my_snaps[0]);
{
bufferlist bl;
ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
ASSERT_EQ('b', bl[0]);
}
ioctx.snap_set_read(my_snaps[1]);
{
bufferlist bl;
ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
ASSERT_EQ('a', bl[0]);
}
// tear down tiers
ASSERT_EQ(0, cluster.mon_command(
"{\"prefix\": \"osd tier remove-overlay\", \"pool\": \"" + pool_name +
"\"}",
inbl, NULL, NULL));
// wait for maps to settle
cluster.wait_for_latest_osdmap();
// verify i can read the snaps from the base pool
ioctx.snap_set_read(librados::SNAP_HEAD);
{
bufferlist bl;
ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
ASSERT_EQ('c', bl[0]);
}
ioctx.snap_set_read(my_snaps[0]);
{
bufferlist bl;
ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
ASSERT_EQ('b', bl[0]);
}
ioctx.snap_set_read(my_snaps[1]);
{
bufferlist bl;
ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
ASSERT_EQ('a', bl[0]);
}
ASSERT_EQ(0, cluster.mon_command(
"{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
"\", \"overlaypool\": \"" + cache_pool_name + "\"}",
inbl, NULL, NULL));
cluster.wait_for_latest_osdmap();
// cleanup
ioctx.selfmanaged_snap_remove(my_snaps[0]);
}
// Race a cache-tier flush against a concurrent write.  A full flush
// takes the object rw lock, so flush+write must both succeed; a
// try-flush (issued with OPERATION_SKIPRWLOCKS) racing with a write is
// expected to eventually lose the race and return -EBUSY.
TEST_F(LibRadosTierECPP, FlushWriteRaces) {
  SKIP_IF_CRIMSON();
  Rados cluster;
  std::string pool_name = get_temp_pool_name();
  std::string cache_pool_name = pool_name + "-cache";
  ASSERT_EQ("", create_one_pool_pp(pool_name, cluster));
  ASSERT_EQ(0, cluster.pool_create(cache_pool_name.c_str()));
  IoCtx cache_ioctx;
  ASSERT_EQ(0, cluster.ioctx_create(cache_pool_name.c_str(), cache_ioctx));
  cache_ioctx.application_enable("rados", true);
  IoCtx ioctx;
  ASSERT_EQ(0, cluster.ioctx_create(pool_name.c_str(), ioctx));
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // create/dirty object
  bufferlist bl;
  bl.append("hi there");
  {
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // flush + write: the flush serializes with the concurrent write, so
  // both operations are expected to succeed
  {
    ObjectReadOperation op;
    op.cache_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_OVERLAY, NULL));
    ObjectWriteOperation op2;
    op2.write_full(bl);
    librados::AioCompletion *completion2 = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate(
      "foo", completion2, &op2, 0));
    completion->wait_for_complete();
    completion2->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    ASSERT_EQ(0, completion2->get_return_value());
    completion->release();
    completion2->release();
  }
  // bounded retry loop: keep re-dirtying and racing a try-flush with a
  // write until the try-flush actually loses (-EBUSY)
  int tries = 1000;
  do {
    // create/dirty object
    {
      bufferlist bl;
      bl.append("hi there");
      ObjectWriteOperation op;
      op.write_full(bl);
      ASSERT_EQ(0, ioctx.operate("foo", &op));
    }
    // try-flush + write
    {
      ObjectReadOperation op;
      op.cache_try_flush();
      librados::AioCompletion *completion = cluster.aio_create_completion();
      ASSERT_EQ(0, cache_ioctx.aio_operate(
        "foo", completion, &op,
        librados::OPERATION_IGNORE_OVERLAY |
        librados::OPERATION_SKIPRWLOCKS, NULL));
      ObjectWriteOperation op2;
      op2.write_full(bl);
      librados::AioCompletion *completion2 = cluster.aio_create_completion();
      ASSERT_EQ(0, ioctx.aio_operate("foo", completion2, &op2, 0));
      completion->wait_for_complete();
      completion2->wait_for_complete();
      int r = completion->get_return_value();
      // either the try-flush lost the race (-EBUSY, what we wait for)
      // or it finished before the write landed (0)
      ASSERT_TRUE(r == -EBUSY || r == 0);
      ASSERT_EQ(0, completion2->get_return_value());
      completion->release();
      completion2->release();
      if (r == -EBUSY)
	break;
      cout << "didn't get EBUSY, trying again" << std::endl;
    }
    ASSERT_TRUE(--tries);  // guard: fail instead of looping forever
  } while (true);
  // tear down tiers
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier remove-overlay\", \"pool\": \"" + pool_name +
    "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier remove\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  // wait for maps to settle before next test
  cluster.wait_for_latest_osdmap();
  ASSERT_EQ(0, cluster.pool_delete(cache_pool_name.c_str()));
  ASSERT_EQ(0, destroy_one_pool_pp(pool_name, cluster));
}
// Exercise all four flush/try-flush race pairings on a dirty object:
// flush+flush and flush+try-flush both succeed, try-flush+flush is
// retried until the try-flush loses with -EBUSY (flush does not
// piggyback on try-flush), and try-flush+try-flush both succeed.
TEST_F(LibRadosTwoPoolsECPP, FlushTryFlushRaces) {
  SKIP_IF_CRIMSON();
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // create/dirty object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // flush + flush
  {
    ObjectReadOperation op;
    op.cache_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_OVERLAY, NULL));
    ObjectReadOperation op2;
    op2.cache_flush();
    librados::AioCompletion *completion2 = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion2, &op2,
      librados::OPERATION_IGNORE_OVERLAY, NULL));
    completion->wait_for_complete();
    completion2->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    ASSERT_EQ(0, completion2->get_return_value());
    completion->release();
    completion2->release();
  }
  // create/dirty object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // flush + try-flush
  {
    ObjectReadOperation op;
    op.cache_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_OVERLAY, NULL));
    ObjectReadOperation op2;
    op2.cache_try_flush();
    librados::AioCompletion *completion2 = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion2, &op2,
      librados::OPERATION_IGNORE_OVERLAY |
      librados::OPERATION_SKIPRWLOCKS, NULL));
    completion->wait_for_complete();
    completion2->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    ASSERT_EQ(0, completion2->get_return_value());
    completion->release();
    completion2->release();
  }
  // create/dirty object
  // bounded retry loop: keep re-dirtying until the try-flush actually
  // loses the race against the full flush and returns -EBUSY
  int tries = 1000;
  do {
    {
      bufferlist bl;
      bl.append("hi there");
      ObjectWriteOperation op;
      op.write_full(bl);
      ASSERT_EQ(0, ioctx.operate("foo", &op));
    }
    // try-flush + flush
    // (flush will not piggyback on try-flush)
    {
      ObjectReadOperation op;
      op.cache_try_flush();
      librados::AioCompletion *completion = cluster.aio_create_completion();
      ASSERT_EQ(0, cache_ioctx.aio_operate(
        "foo", completion, &op,
        librados::OPERATION_IGNORE_OVERLAY |
        librados::OPERATION_SKIPRWLOCKS, NULL));
      ObjectReadOperation op2;
      op2.cache_flush();
      librados::AioCompletion *completion2 = cluster.aio_create_completion();
      ASSERT_EQ(0, cache_ioctx.aio_operate(
        "foo", completion2, &op2,
        librados::OPERATION_IGNORE_OVERLAY, NULL));
      completion->wait_for_complete();
      completion2->wait_for_complete();
      int r = completion->get_return_value();
      // -EBUSY means the try-flush lost the race; 0 means it won
      ASSERT_TRUE(r == -EBUSY || r == 0);
      ASSERT_EQ(0, completion2->get_return_value());
      completion->release();
      completion2->release();
      if (r == -EBUSY)
	break;
      cout << "didn't get EBUSY, trying again" << std::endl;
    }
    ASSERT_TRUE(--tries);  // guard: fail instead of looping forever
  } while (true);
  // create/dirty object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // try-flush + try-flush
  {
    ObjectReadOperation op;
    op.cache_try_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_OVERLAY |
      librados::OPERATION_SKIPRWLOCKS, NULL));
    ObjectReadOperation op2;
    op2.cache_try_flush();
    librados::AioCompletion *completion2 = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion2, &op2,
      librados::OPERATION_IGNORE_OVERLAY |
      librados::OPERATION_SKIPRWLOCKS, NULL));
    completion->wait_for_complete();
    completion2->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    ASSERT_EQ(0, completion2->get_return_value());
    completion->release();
    completion2->release();
  }
}
// Race a try-flush against a continuous stream of reads on a large
// (~4 MB) object; the try-flush must still succeed.  Relies on the
// fixture's shared read machinery (read_ioctx, test_lock, max_reads,
// num_reads, cond, start_flush_read()) to keep reads in flight.
TEST_F(LibRadosTwoPoolsECPP, TryFlushReadRace) {
  SKIP_IF_CRIMSON();
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // create/dirty object; the large payload makes the flush slow enough
  // to overlap with the reads below
  {
    bufferlist bl;
    bl.append("hi there");
    bufferptr bp(4000000); // make it big!
    bp.zero();
    bl.append(bp);
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // start a continuous stream of reads
  read_ioctx = &ioctx;
  test_lock.lock();
  for (int i = 0; i < max_reads; ++i) {
    start_flush_read();
    num_reads++;
  }
  test_lock.unlock();
  // try-flush
  ObjectReadOperation op;
  op.cache_try_flush();
  librados::AioCompletion *completion = cluster.aio_create_completion();
  ASSERT_EQ(0, cache_ioctx.aio_operate(
    "foo", completion, &op,
    librados::OPERATION_IGNORE_OVERLAY |
    librados::OPERATION_SKIPRWLOCKS, NULL));
  completion->wait_for_complete();
  ASSERT_EQ(0, completion->get_return_value());
  completion->release();
  // stop reads: zero max_reads so completed reads are not reissued,
  // then wait for the in-flight ones to drain
  std::unique_lock locker{test_lock};
  max_reads = 0;
  cond.wait(locker, [] { return num_reads == 0;});
}
// A cache tier tuned so reads would normally be proxied
// (min_read_recency_for_promote=4 > hit_set_count=2) must still
// promote an object when an object-class call (exec) is issued against
// it, because calls cannot be proxied to the base tier.
TEST_F(LibRadosTierECPP, CallForcesPromote) {
  SKIP_IF_CRIMSON();
  Rados cluster;
  std::string pool_name = get_temp_pool_name();
  std::string cache_pool_name = pool_name + "-cache";
  ASSERT_EQ("", create_one_ec_pool_pp(pool_name, cluster));
  ASSERT_EQ(0, cluster.pool_create(cache_pool_name.c_str()));
  IoCtx cache_ioctx;
  ASSERT_EQ(0, cluster.ioctx_create(cache_pool_name.c_str(), cache_ioctx));
  cache_ioctx.application_enable("rados", true);
  IoCtx ioctx;
  ASSERT_EQ(0, cluster.ioctx_create(pool_name.c_str(), ioctx));
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // set things up such that the op would normally be proxied
  ASSERT_EQ(0, cluster.mon_command(
	      set_pool_str(cache_pool_name, "hit_set_count", 2),
	      inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
	      set_pool_str(cache_pool_name, "hit_set_period", 600),
	      inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
	      set_pool_str(cache_pool_name, "hit_set_type",
			   "explicit_object"),
	      inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
	      set_pool_str(cache_pool_name, "min_read_recency_for_promote",
			   "4"),
	      inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // create/dirty object
  bufferlist bl;
  bl.append("hi there");
  {
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  // flush it down to the base tier
  {
    ObjectReadOperation op;
    op.cache_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op,
      librados::OPERATION_IGNORE_OVERLAY, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // evict it from the cache tier
  {
    ObjectReadOperation op;
    op.cache_evict();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion, &op,
					 librados::OPERATION_IGNORE_CACHE,
					 NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // call
  {
    ObjectReadOperation op;
    bufferlist bl;
    op.exec("rbd", "get_id", bl);
    bufferlist out;
    // should get EIO (not an rbd object), not -EOPNOTSUPP (we didn't promote)
    ASSERT_EQ(-EIO, ioctx.operate("foo", &op, &out));
  }
  // make sure foo is back in the cache tier
  {
    NObjectIterator it = cache_ioctx.nobjects_begin();
    ASSERT_TRUE(it != cache_ioctx.nobjects_end());
    ASSERT_TRUE(it->get_oid() == string("foo"));
    ++it;
    ASSERT_TRUE(it == cache_ioctx.nobjects_end());
  }
  // tear down tiers
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier remove-overlay\", \"pool\": \"" + pool_name +
    "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier remove\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  // wait for maps to settle before next test
  cluster.wait_for_latest_osdmap();
  ASSERT_EQ(0, cluster.pool_delete(cache_pool_name.c_str()));
  ASSERT_EQ(0, destroy_one_ec_pool_pp(pool_name, cluster));
}
// Querying hit sets on a pool that has no hit-set tracking enabled:
// listing succeeds but reports no intervals, and fetching a concrete
// hit set fails with -ENOENT.
TEST_F(LibRadosTierECPP, HitSetNone) {
  SKIP_IF_CRIMSON();
  // hit_set_list: the call itself succeeds, the listing is empty
  {
    list< pair<time_t,time_t> > intervals;
    AioCompletion *completion = librados::Rados::aio_create_completion();
    ASSERT_EQ(0, ioctx.hit_set_list(123, completion, &intervals));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    ASSERT_TRUE(intervals.empty());
    completion->release();
  }
  // hit_set_get: no hit set exists, so the fetch returns -ENOENT
  {
    bufferlist payload;
    AioCompletion *completion = librados::Rados::aio_create_completion();
    ASSERT_EQ(0, ioctx.hit_set_get(123, completion, 12345, &payload));
    completion->wait_for_complete();
    ASSERT_EQ(-ENOENT, completion->get_return_value());
    completion->release();
  }
}
// Enable explicit_object hit-set tracking on the cache tier, then keep
// reading a (nonexistent) object until its hobject_t shows up in the
// PG's current HitSet fetched via hit_set_get().
TEST_F(LibRadosTwoPoolsECPP, HitSetRead) {
  SKIP_IF_CRIMSON();
  // make it a tier
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  // enable hitset tracking for this pool
  ASSERT_EQ(0, cluster.mon_command(set_pool_str(cache_pool_name, "hit_set_count", 2),
						inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(set_pool_str(cache_pool_name, "hit_set_period", 600),
						inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(set_pool_str(cache_pool_name, "hit_set_type",
						"explicit_object"),
				   inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  cache_ioctx.set_namespace("");
  // keep reading until we see our object appear in the HitSet
  utime_t start = ceph_clock_now();
  utime_t hard_stop = start + utime_t(600, 0);  // bail out after one hit_set_period
  while (true) {
    utime_t now = ceph_clock_now();
    ASSERT_TRUE(now < hard_stop);
    string name = "foo";
    uint32_t hash;
    ASSERT_EQ(0, cache_ioctx.get_object_hash_position2(name, &hash));
    hobject_t oid(sobject_t(name, CEPH_NOSNAP), "", hash,
		  cluster.pool_lookup(cache_pool_name.c_str()), "");
    bufferlist bl;
    // the object does not exist; the read still counts as an access
    ASSERT_EQ(-ENOENT, cache_ioctx.read("foo", bl, 1, 0));
    bufferlist hbl;
    AioCompletion *c = librados::Rados::aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.hit_set_get(hash, c, now.sec(), &hbl));
    c->wait_for_complete();
    c->release();
    if (hbl.length()) {
      auto p = hbl.cbegin();
      HitSet hs;
      decode(hs, p);
      if (hs.contains(oid)) {
	cout << "ok, hit_set contains " << oid << std::endl;
	break;
      }
      cout << "hmm, not in HitSet yet" << std::endl;
    } else {
      cout << "hmm, no HitSet yet" << std::endl;
    }
    sleep(1);
  }
}
// disable this test until hitset-get reliably works on EC pools
#if 0
// Compiled out by the surrounding #if 0 (see the comment above): with
// explicit_hash hit-set tracking enabled, write 1000 objects, fetch
// every PG's latest HitSet, and verify each written object appears in
// some PG's hit set.  Disabled until hitset-get reliably works on EC
// pools.
TEST_F(LibRadosTierECPP, HitSetWrite) {
  int num_pg = _get_pg_num(cluster, pool_name);
  ceph_assert(num_pg > 0);
  // enable hitset tracking for this pool
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(set_pool_str(pool_name, "hit_set_count", 8),
						inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(set_pool_str(pool_name, "hit_set_period", 600),
						inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(set_pool_str(pool_name, "hit_set_type",
						"explicit_hash"),
				   inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  ioctx.set_namespace("");
  // do a bunch of writes
  for (int i=0; i<1000; ++i) {
    bufferlist bl;
    bl.append("a");
    ASSERT_EQ(0, ioctx.write(stringify(i), bl, 1, 0));
  }
  // get HitSets
  std::map<int,HitSet> hitsets;
  for (int i=0; i<num_pg; ++i) {
    list< pair<time_t,time_t> > ls;
    AioCompletion *c = librados::Rados::aio_create_completion();
    ASSERT_EQ(0, ioctx.hit_set_list(i, c, &ls));
    c->wait_for_complete();
    c->release();
    std::cout << "pg " << i << " ls " << ls << std::endl;
    ASSERT_FALSE(ls.empty());
    // get the latest
    c = librados::Rados::aio_create_completion();
    bufferlist bl;
    ASSERT_EQ(0, ioctx.hit_set_get(i, c, ls.back().first, &bl));
    c->wait_for_complete();
    c->release();
    //std::cout << "bl len is " << bl.length() << "\n";
    //bl.hexdump(std::cout);
    //std::cout << std::endl;
    auto p = bl.cbegin();
    decode(hitsets[i], p);
    // cope with racing splits by refreshing pg_num
    if (i == num_pg - 1)
      num_pg = _get_pg_num(cluster, pool_name);
  }
  // every written object must be present in some PG's hit set
  for (int i=0; i<1000; ++i) {
    string n = stringify(i);
    uint32_t hash = ioctx.get_object_hash_position(n);
    hobject_t oid(sobject_t(n, CEPH_NOSNAP), "", hash,
		  cluster.pool_lookup(pool_name.c_str()), "");
    std::cout << "checking for " << oid << std::endl;
    bool found = false;
    for (int p=0; p<num_pg; ++p) {
      if (hitsets[p].contains(oid)) {
	found = true;
	break;
      }
    }
    ASSERT_TRUE(found);
  }
}
#endif
// Verify that hit sets are trimmed once more than hit_set_count sets
// have been generated: keep appending to "foo" until the oldest
// interval reported by hit_set_list() changes, which proves the
// original oldest hit set was trimmed.
TEST_F(LibRadosTwoPoolsECPP, HitSetTrim) {
  SKIP_IF_CRIMSON();
  unsigned count = 3;
  unsigned period = 3;
  // make it a tier
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  // enable hitset tracking for this pool
  ASSERT_EQ(0, cluster.mon_command(set_pool_str(cache_pool_name, "hit_set_count", count),
						inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(set_pool_str(cache_pool_name, "hit_set_period", period),
						inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(set_pool_str(cache_pool_name, "hit_set_type", "bloom"),
						inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(set_pool_str(cache_pool_name, "hit_set_fpp", ".01"),
						inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  cache_ioctx.set_namespace("");
  // do a bunch of writes and make sure the hitsets rotate
  utime_t start = ceph_clock_now();
  utime_t hard_stop = start + utime_t(count * period * 50, 0);
  time_t first = 0;
  int bsize = alignment;
  // Use a ref-counted bufferptr for the payload instead of a raw
  // new[]/delete[] pair: the raw buffer leaked whenever an ASSERT_*
  // inside the loop failed (gtest ASSERTs return from the test before
  // the trailing delete[] could run), and the (char *) cast on the
  // new-expression was redundant anyway.
  bufferptr bp(bsize);
  memset(bp.c_str(), 'f', bsize);
  while (true) {
    string name = "foo";
    uint32_t hash;
    ASSERT_EQ(0, cache_ioctx.get_object_hash_position2(name, &hash));
    hobject_t oid(sobject_t(name, CEPH_NOSNAP), "", hash, -1, "");
    bufferlist bl;
    bl.append(bp);
    ASSERT_EQ(0, cache_ioctx.append("foo", bl, bsize));
    // list the hit-set intervals for this object's PG
    list<pair<time_t, time_t> > ls;
    AioCompletion *c = librados::Rados::aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.hit_set_list(hash, c, &ls));
    c->wait_for_complete();
    c->release();
    cout << " got ls " << ls << std::endl;
    if (!ls.empty()) {
      if (!first) {
	first = ls.front().first;
	cout << "first is " << first << std::endl;
      } else {
	if (ls.front().first != first) {
	  // the oldest interval changed => the first hit set was trimmed
	  cout << "first now " << ls.front().first << ", trimmed" << std::endl;
	  break;
	}
      }
    }
    utime_t now = ceph_clock_now();
    ASSERT_TRUE(now < hard_stop);  // bound the wait; fail rather than hang
    sleep(1);
  }
}
// With min_read_recency_for_promote=1 an object must NOT be promoted
// on its first read (it only appears in the current hit set), but must
// eventually be promoted once it has been read across enough hit sets.
TEST_F(LibRadosTwoPoolsECPP, PromoteOn2ndRead) {
  SKIP_IF_CRIMSON();
  // create object
  for (int i=0; i<20; ++i) {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo" + stringify(i), &op));
  }
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // enable hitset tracking for this pool
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "hit_set_count", 2),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "hit_set_period", 600),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "hit_set_type", "bloom"),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "min_read_recency_for_promote", 1),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "hit_set_grade_decay_rate", 20),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "hit_set_search_last_n", 1),
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  int fake = 0; // set this to non-zero to test spurious promotion,
		// e.g. from thrashing
  int attempt = 0;
  string obj;
  // Phase 1: find an object whose first read does NOT promote it.
  // (A racing hit-set rotation can cause a spurious promote, so retry
  // with a fresh object up to 20 times.)
  while (true) {
    // 1st read, don't trigger a promote
    obj = "foo" + stringify(attempt);
    cout << obj << std::endl;
    {
      bufferlist bl;
      ASSERT_EQ(1, ioctx.read(obj.c_str(), bl, 1, 0));
      if (--fake >= 0) {
	sleep(1);
	ASSERT_EQ(1, ioctx.read(obj.c_str(), bl, 1, 0));
	sleep(1);
      }
    }
    // verify the object is NOT present in the cache tier
    {
      bool found = false;
      NObjectIterator it = cache_ioctx.nobjects_begin();
      while (it != cache_ioctx.nobjects_end()) {
	cout << " see " << it->get_oid() << std::endl;
	if (it->get_oid() == string(obj.c_str())) {
	  found = true;
	  break;
	}
	++it;
      }
      if (!found)
	break;
    }
    ++attempt;
    ASSERT_LE(attempt, 20);
    cout << "hrm, object is present in cache on attempt " << attempt
	 << ", retrying" << std::endl;
  }
  // Phase 2: repeated reads must eventually promote the object.
  // Read until the object is present in the cache tier
  cout << "verifying " << obj << " is eventually promoted" << std::endl;
  while (true) {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read(obj.c_str(), bl, 1, 0));
    bool there = false;
    NObjectIterator it = cache_ioctx.nobjects_begin();
    while (it != cache_ioctx.nobjects_end()) {
      if (it->get_oid() == string(obj.c_str())) {
	there = true;
	break;
      }
      ++it;
    }
    if (there)
      break;
    sleep(1);
  }
  // tear down tiers
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier remove-overlay\", \"pool\": \"" + pool_name +
    "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier remove\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  // wait for maps to settle before next test
  cluster.wait_for_latest_osdmap();
}
// In "readproxy" cache mode a read must be proxied straight through to
// the base tier without promoting the object into the cache tier.
TEST_F(LibRadosTwoPoolsECPP, ProxyRead) {
  SKIP_IF_CRIMSON();
  // create the object in the base pool
  {
    bufferlist payload;
    payload.append("hi there");
    ObjectWriteOperation wr;
    wr.write_full(payload);
    ASSERT_EQ(0, ioctx.operate("foo", &wr));
  }
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"readproxy\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // read through the overlay and verify the contents
  {
    bufferlist out;
    ASSERT_EQ(1, ioctx.read("foo", out, 1, 0));
    ASSERT_EQ('h', out[0]);
  }
  // check for ten seconds that the cache tier stays empty, i.e. the
  // proxied read never triggered a promotion
  for (uint32_t attempt = 0; attempt < 10; ++attempt) {
    ASSERT_TRUE(cache_ioctx.nobjects_begin() == cache_ioctx.nobjects_end());
    sleep(1);
  }
  // tear down tiers
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier remove-overlay\", \"pool\": \"" + pool_name +
    "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier remove\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  // wait for maps to settle before next test
  cluster.wait_for_latest_osdmap();
}
// Verify cache_pin() keeps objects resident in the cache tier: four
// objects are promoted, "foo" and "baz" are pinned, and after the
// tiering agent is aggressively enabled (target_max_objects=1) exactly
// the two pinned objects must remain in the cache.
TEST_F(LibRadosTwoPoolsECPP, CachePin) {
  SKIP_IF_CRIMSON();
  // create object
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bar", &op));
  }
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("baz", &op));
  }
  {
    bufferlist bl;
    bl.append("hi there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bam", &op));
  }
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // read, trigger promote of all four objects
  {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ(1, ioctx.read("bar", bl, 1, 0));
    ASSERT_EQ(1, ioctx.read("baz", bl, 1, 0));
    ASSERT_EQ(1, ioctx.read("bam", bl, 1, 0));
  }
  // verify the objects are present in the cache tier
  {
    NObjectIterator it = cache_ioctx.nobjects_begin();
    ASSERT_TRUE(it != cache_ioctx.nobjects_end());
    for (uint32_t i = 0; i < 4; i++) {
      ASSERT_TRUE(it->get_oid() == string("foo") ||
		  it->get_oid() == string("bar") ||
		  it->get_oid() == string("baz") ||
		  it->get_oid() == string("bam"));
      ++it;
    }
    ASSERT_TRUE(it == cache_ioctx.nobjects_end());
  }
  // pin objects "foo" and "baz"
  {
    ObjectWriteOperation op;
    op.cache_pin();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  {
    ObjectWriteOperation op;
    op.cache_pin();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("baz", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // enable agent
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "hit_set_count", 2),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "hit_set_period", 600),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "hit_set_type", "bloom"),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "min_read_recency_for_promote", 1),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "target_max_objects", 1),
    inbl, NULL, NULL));
  sleep(10);
  // Verify the pinned objects 'foo' and 'baz' are not flushed/evicted:
  // wait until the agent has drained the cache down to two objects,
  // then check that the survivors are exactly the pinned ones.
  uint32_t count = 0;
  while (true) {
    bufferlist bl;
    ASSERT_EQ(1, ioctx.read("baz", bl, 1, 0));
    count = 0;
    // Track the survivors while iterating.  The previous version
    // dereferenced the iterator *after* the loop (i.e. the end
    // iterator) when count == 2, which is undefined behavior and
    // checked nothing.
    bool saw_foo = false;
    bool saw_baz = false;
    NObjectIterator it = cache_ioctx.nobjects_begin();
    while (it != cache_ioctx.nobjects_end()) {
      ASSERT_TRUE(it->get_oid() == string("foo") ||
		  it->get_oid() == string("bar") ||
		  it->get_oid() == string("baz") ||
		  it->get_oid() == string("bam"));
      if (it->get_oid() == string("foo"))
	saw_foo = true;
      else if (it->get_oid() == string("baz"))
	saw_baz = true;
      ++count;
      ++it;
    }
    if (count == 2) {
      // the two remaining objects must be the pinned ones
      ASSERT_TRUE(saw_foo);
      ASSERT_TRUE(saw_baz);
      break;
    }
    sleep(1);
  }
  // tear down tiers
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier remove-overlay\", \"pool\": \"" + pool_name +
    "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier remove\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  // wait for maps to settle before next test
  cluster.wait_for_latest_osdmap();
}
// A read through a set_redirect manifest object must return the
// redirect target's contents: "foo" (base pool, "hi there") is
// redirected to "bar" (cache pool, "there"), so reading "foo" yields
// 't'.
TEST_F(LibRadosTwoPoolsECPP, SetRedirectRead) {
  SKIP_IF_CRIMSON();
  // create the source object in the base pool
  {
    bufferlist data;
    data.append("hi there");
    ObjectWriteOperation wr;
    wr.write_full(data);
    ASSERT_EQ(0, ioctx.operate("foo", &wr));
  }
  // ... and the redirect target in the cache pool
  {
    bufferlist data;
    data.append("there");
    ObjectWriteOperation wr;
    wr.write_full(data);
    ASSERT_EQ(0, cache_ioctx.operate("bar", &wr));
  }
  // configure tier
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // redirect foo -> bar
  {
    ObjectWriteOperation redirect_op;
    redirect_op.set_redirect("bar", cache_ioctx, 0);
    librados::AioCompletion *comp = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate("foo", comp, &redirect_op));
    comp->wait_for_complete();
    ASSERT_EQ(0, comp->get_return_value());
    comp->release();
  }
  // reading foo must now return bar's first byte
  {
    bufferlist out;
    ASSERT_EQ(1, ioctx.read("foo", out, 1, 0));
    ASSERT_EQ('t', out[0]);
  }
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier remove\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  // wait for maps to settle before next test
  cluster.wait_for_latest_osdmap();
}
// After set-chunk maps the first 4 bytes of "foo" (cache pool) onto
// "bar" (base pool) and the object is promoted, reading "foo" must
// return the chunk source's data ('T' from "There hi").
TEST_F(LibRadosTwoPoolsECPP, SetChunkRead) {
  SKIP_IF_CRIMSON();
  // note: require >= mimic
  // chunked object in the cache pool
  {
    bufferlist data;
    data.append("there hi");
    ObjectWriteOperation wr;
    wr.write_full(data);
    ASSERT_EQ(0, cache_ioctx.operate("foo", &wr));
  }
  // chunk source in the base pool
  {
    bufferlist data;
    data.append("There hi");
    ObjectWriteOperation wr;
    wr.write_full(data);
    ASSERT_EQ(0, ioctx.operate("bar", &wr));
  }
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // map bytes [0,4) of foo onto bar
  manifest_set_chunk(cluster, ioctx, cache_ioctx, 0, 4, "bar", "foo");
  // promote foo
  {
    ObjectWriteOperation promote_op;
    promote_op.tier_promote();
    librados::AioCompletion *comp = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", comp, &promote_op));
    comp->wait_for_complete();
    ASSERT_EQ(0, comp->get_return_value());
    comp->release();
  }
  // the first byte must come from the chunk source
  {
    bufferlist out;
    ASSERT_EQ(1, cache_ioctx.read("foo", out, 1, 0));
    ASSERT_EQ('T', out[0]);
  }
  // wait for maps to settle before next test
  cluster.wait_for_latest_osdmap();
}
// Promote manifest objects and verify the promoted contents: "foo" is
// redirected to "bar" and "foo-chunk" has a chunk backed by
// "bar-chunk"; after tier_promote() each must read back its manifest
// target's data.
TEST_F(LibRadosTwoPoolsECPP, ManifestPromoteRead) {
  SKIP_IF_CRIMSON();
  // note: require >= mimic
  // create object
  {
    bufferlist bl;
    bl.append("hiaa there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("base chunk");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, cache_ioctx.operate("foo-chunk", &op));
  }
  {
    bufferlist bl;
    bl.append("HIaa there");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bar", &op));
  }
  {
    bufferlist bl;
    bl.append("BASE CHUNK");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bar-chunk", &op));
  }
  // set-redirect: foo -> bar
  {
    ObjectWriteOperation op;
    op.set_redirect("bar", ioctx, 0);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // set-chunk: foo-chunk bytes [0,10) backed by bar-chunk
  manifest_set_chunk(cluster, ioctx, cache_ioctx, 0, 10, "bar-chunk", "foo-chunk");
  // promote
  {
    ObjectWriteOperation op;
    op.tier_promote();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // read and verify the object (redirect)
  {
    bufferlist bl;
    ASSERT_EQ(1, cache_ioctx.read("foo", bl, 1, 0));
    ASSERT_EQ('H', bl[0]);  // 'H' comes from the redirect target "HIaa there"
  }
  // promote the chunked object too
  {
    ObjectWriteOperation op;
    op.tier_promote();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo-chunk", completion, &op));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // read and verify the object
  {
    bufferlist bl;
    ASSERT_EQ(1, cache_ioctx.read("foo-chunk", bl, 1, 0));
    ASSERT_EQ('B', bl[0]);  // 'B' comes from the chunk source "BASE CHUNK"
  }
  // wait for maps to settle before next test
  cluster.wait_for_latest_osdmap();
}
// dedup_tier cannot be configured on this EC two-pool setup; the monitor
// must reject the pool-set command with EOPNOTSUPP.
TEST_F(LibRadosTwoPoolsECPP, TrySetDedupTier) {
  SKIP_IF_CRIMSON();
  // note: require >= mimic
  bufferlist in;
  ASSERT_EQ(-EOPNOTSUPP, cluster.mon_command(
    set_pool_str(pool_name, "dedup_tier", cache_pool_name),
    in, nullptr, nullptr));
}
// A guarded omap_cmp that fails on the base tier must have its error
// (ECANCELED) propagated back through the writeback cache tier.
TEST_F(LibRadosTwoPoolsPP, PropagateBaseTierError) {
  SKIP_IF_CRIMSON();
  // write object to base tier
  bufferlist omap_bl;
  encode(static_cast<uint32_t>(0U), omap_bl);
  ObjectWriteOperation op1;
  op1.omap_set({{"somekey", omap_bl}});
  ASSERT_EQ(0, ioctx.operate("propagate-base-tier-error", &op1));
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "hit_set_type", "bloom"),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "hit_set_count", 1),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "hit_set_period", 600),
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    set_pool_str(cache_pool_name, "target_max_objects", 250),
    inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // guarded op should fail so expect error to propagate to cache tier:
  // the stored value is 0 but the compare expects 1, so omap_cmp fails
  bufferlist test_omap_bl;
  encode(static_cast<uint32_t>(1U), test_omap_bl);
  ObjectWriteOperation op2;
  op2.omap_cmp({{"somekey", {test_omap_bl, CEPH_OSD_CMPXATTR_OP_EQ}}}, nullptr);
  op2.omap_set({{"somekey", test_omap_bl}});
  ASSERT_EQ(-ECANCELED, ioctx.operate("propagate-base-tier-error", &op2));
}
// Exercises OPERATION_RETURNVEC through a cache tier using the "hello"
// object class: write_return_data must deliver its payload and rval even
// though the op is a write, and an oversized reply must yield EOVERFLOW.
//
// Fix: the two AioCompletions were leaked — every other test in this file
// releases completions after use; add the missing release() calls.
TEST_F(LibRadosTwoPoolsPP, HelloWriteReturn) {
  SKIP_IF_CRIMSON();
  // configure cache
  bufferlist inbl;
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier add\", \"pool\": \"" + pool_name +
    "\", \"tierpool\": \"" + cache_pool_name +
    "\", \"force_nonempty\": \"--force-nonempty\" }",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + pool_name +
    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
    inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
    "\", \"mode\": \"writeback\"}",
    inbl, NULL, NULL));
  // set things up such that the op would normally be proxied
  ASSERT_EQ(0, cluster.mon_command(
	      set_pool_str(cache_pool_name, "hit_set_count", 2),
	      inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
	      set_pool_str(cache_pool_name, "hit_set_period", 600),
	      inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
	      set_pool_str(cache_pool_name, "hit_set_type",
			   "explicit_object"),
	      inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
	      set_pool_str(cache_pool_name, "min_read_recency_for_promote",
			   "10000"),
	      inbl, NULL, NULL));
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // this *will* return data due to the RETURNVEC flag
  {
    bufferlist in, out;
    int rval;
    ObjectWriteOperation o;
    o.exec("hello", "write_return_data", in, &out, &rval);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate("foo", completion, &o,
				   librados::OPERATION_RETURNVEC));
    completion->wait_for_complete();
    ASSERT_EQ(42, completion->get_return_value());
    ASSERT_EQ(42, rval);
    out.hexdump(std::cout);
    ASSERT_EQ("you might see this", std::string(out.c_str(), out.length()));
    completion->release();  // was leaked
  }
  // this will overflow because the return data is too big
  {
    bufferlist in, out;
    int rval;
    ObjectWriteOperation o;
    o.exec("hello", "write_too_much_return_data", in, &out, &rval);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, ioctx.aio_operate("foo", completion, &o,
				   librados::OPERATION_RETURNVEC));
    completion->wait_for_complete();
    ASSERT_EQ(-EOVERFLOW, completion->get_return_value());
    ASSERT_EQ(-EOVERFLOW, rval);
    ASSERT_EQ("", std::string(out.c_str(), out.length()));
    completion->release();  // was leaked
  }
}
// tier_flush on a manifest (chunked) object must fail with EINVAL when the
// dedup parameters are set but dedup_tier itself is not configured.
TEST_F(LibRadosTwoPoolsPP, TierFlushDuringUnsetDedupTier) {
  SKIP_IF_CRIMSON();
  // skip test if not yet octopus
  if (_get_required_osd_release(cluster) < "octopus") {
    cout << "cluster is not yet octopus, skipping test" << std::endl;
    return;
  }
  bufferlist inbl;
  // set dedup parameters without dedup_tier
  ASSERT_EQ(0, cluster.mon_command(
	  set_pool_str(cache_pool_name, "fingerprint_algorithm", "sha1"),
	  inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
	  set_pool_str(cache_pool_name, "dedup_chunk_algorithm", "fastcdc"),
	  inbl, NULL, NULL));
  ASSERT_EQ(0, cluster.mon_command(
	  set_pool_str(cache_pool_name, "dedup_cdc_chunk_size", 1024),
	  inbl, NULL, NULL));
  // create object
  bufferlist gbl;
  {
    generate_buffer(1024*8, &gbl);
    ObjectWriteOperation op;
    op.write_full(gbl);
    ASSERT_EQ(0, cache_ioctx.operate("foo", &op));
  }
  {
    bufferlist bl;
    bl.append("there hiHI");
    ObjectWriteOperation op;
    op.write_full(bl);
    ASSERT_EQ(0, ioctx.operate("bar", &op));
  }
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // set-chunk to set manifest object
  {
    ObjectReadOperation op;
    op.set_chunk(0, 2, ioctx, "bar", 0, CEPH_OSD_OP_FLAG_WITH_REFERENCE);
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion, &op,
	librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(0, completion->get_return_value());
    completion->release();
  }
  // flush to check if proper error is returned: no dedup_tier -> EINVAL
  {
    ObjectReadOperation op;
    op.tier_flush();
    librados::AioCompletion *completion = cluster.aio_create_completion();
    ASSERT_EQ(0, cache_ioctx.aio_operate(
      "foo", completion, &op, librados::OPERATION_IGNORE_CACHE, NULL));
    completion->wait_for_complete();
    ASSERT_EQ(-EINVAL, completion->get_return_value());
    completion->release();
  }
}
| 263,965 | 27.368189 | 185 | cc |
null | ceph-main/src/test/librados/watch_notify.cc | #include "include/rados/librados.h"
#include "include/rados/rados_types.h"
#include "test/librados/test.h"
#include "test/librados/TestCase.h"
#include "crimson_utils.h"
#include <errno.h>
#include <fcntl.h>
#include <semaphore.h>
#include "gtest/gtest.h"
#include "include/encoding.h"
#include <set>
#include <map>
typedef RadosTestEC LibRadosWatchNotifyEC;
// seconds the notify callback sleeps before acking (0 = ack immediately);
// set by tests that want to provoke notify timeouts
int notify_sleep = 0;
// notify
// posted by watch_notify_test_cb so test bodies can wait for delivery
static sem_t sem;
// legacy (v1) watch callback: just log and signal the semaphore
static void watch_notify_test_cb(uint8_t opcode, uint64_t ver, void *arg)
{
  std::cout << __func__ << std::endl;
  sem_post(&sem);
}
// Fixture for the v2 watch/notify C API.  The static callbacks below get
// the fixture through their void* argument and record results in these
// members for the test body to inspect.
class LibRadosWatchNotify : public RadosTest
{
protected:
  // notify 2
  bufferlist notify_bl;  // payload delivered with the last notify
  std::set<uint64_t> notify_cookies;  // cookies of watches that were notified
  rados_ioctx_t notify_io;  // ioctx callbacks use to ack notifies
  const char *notify_oid = nullptr;  // object under watch
  int notify_err = 0;  // last status recorded by an error callback
  rados_completion_t notify_comp;  // completion used by the aio reconnect path
  static void watch_notify2_test_cb(void *arg,
                                    uint64_t notify_id,
                                    uint64_t cookie,
                                    uint64_t notifier_gid,
                                    void *data,
                                    size_t data_len);
  static void watch_notify2_test_errcb(void *arg, uint64_t cookie, int err);
  static void watch_notify2_test_errcb_reconnect(void *arg, uint64_t cookie, int err);
  static void watch_notify2_test_errcb_aio_reconnect(void *arg, uint64_t cookie, int err);
};
// v2 notify callback: record the cookie and payload on the fixture,
// optionally delay (notify_sleep) to provoke timeouts, then ack with a
// 5-byte "reply" payload.
void LibRadosWatchNotify::watch_notify2_test_cb(void *arg,
                                                uint64_t notify_id,
                                                uint64_t cookie,
                                                uint64_t notifier_gid,
                                                void *data,
                                                size_t data_len)
{
  std::cout << __func__ << " from " << notifier_gid << " notify_id " << notify_id
	    << " cookie " << cookie << std::endl;
  ceph_assert(notifier_gid > 0);
  auto thiz = reinterpret_cast<LibRadosWatchNotify*>(arg);
  ceph_assert(thiz);
  thiz->notify_cookies.insert(cookie);
  thiz->notify_bl.clear();
  thiz->notify_bl.append((char*)data, data_len);
  if (notify_sleep)
    sleep(notify_sleep);
  thiz->notify_err = 0;
  rados_notify_ack(thiz->notify_io, thiz->notify_oid, notify_id, cookie,
                   "reply", 5);
}
// v2 watch error callback: just record the error on the fixture so the
// test body can poll notify_err for the disconnect.
void LibRadosWatchNotify::watch_notify2_test_errcb(void *arg,
                                                   uint64_t cookie,
                                                   int err)
{
  std::cout << __func__ << " cookie " << cookie << " err " << err << std::endl;
  ceph_assert(cookie > 1000);
  auto thiz = reinterpret_cast<LibRadosWatchNotify*>(arg);
  ceph_assert(thiz);
  thiz->notify_err = err;
}
// Error callback that transparently re-establishes the watch: drop the
// broken watch, forget its cookie, and register a fresh watch with the
// same callbacks.  NOTE: the rados_unwatch2() result stored in notify_err
// is immediately overwritten by the rados_watch2() result, so only the
// re-watch outcome is visible to the test.
//
// Fix: removed dead code — a redundant `return;` inside the final `if`
// followed by another trailing `return;` at the end of this void function.
void LibRadosWatchNotify::watch_notify2_test_errcb_reconnect(void *arg,
                                                             uint64_t cookie,
                                                             int err)
{
  std::cout << __func__ << " cookie " << cookie << " err " << err << std::endl;
  ceph_assert(cookie > 1000);
  auto thiz = reinterpret_cast<LibRadosWatchNotify*>(arg);
  ceph_assert(thiz);
  thiz->notify_err = rados_unwatch2(thiz->ioctx, cookie);
  thiz->notify_cookies.erase(cookie); //delete old cookie
  thiz->notify_err = rados_watch2(thiz->ioctx, thiz->notify_oid, &cookie,
                  watch_notify2_test_cb, watch_notify2_test_errcb_reconnect, thiz);
  if (thiz->notify_err < 0) {
    std::cout << __func__ << " reconnect watch failed with error " << thiz->notify_err << std::endl;
  }
}
// Error callback that re-establishes the watch via the async API: unwatch
// the broken cookie using the completion the test prepared, then aio_watch
// the object again and wait for registration to complete.  As with the
// synchronous variant, notify_err ends up holding only the re-watch result.
//
// Fix: removed dead code — a redundant `return;` inside the final `if`
// followed by another trailing `return;` at the end of this void function.
void LibRadosWatchNotify::watch_notify2_test_errcb_aio_reconnect(void *arg,
                                                                 uint64_t cookie,
                                                                 int err)
{
  std::cout << __func__ << " cookie " << cookie << " err " << err << std::endl;
  ceph_assert(cookie > 1000);
  auto thiz = reinterpret_cast<LibRadosWatchNotify*>(arg);
  ceph_assert(thiz);
  // unwatch the old cookie with the completion prepared by the test body
  thiz->notify_err = rados_aio_unwatch(thiz->ioctx, cookie, thiz->notify_comp);
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr, &thiz->notify_comp));
  thiz->notify_cookies.erase(cookie); //delete old cookie
  thiz->notify_err = rados_aio_watch(thiz->ioctx, thiz->notify_oid, thiz->notify_comp, &cookie,
                  watch_notify2_test_cb, watch_notify2_test_errcb_aio_reconnect, thiz);
  ASSERT_EQ(0, rados_aio_wait_for_complete(thiz->notify_comp));
  ASSERT_EQ(0, rados_aio_get_return_value(thiz->notify_comp));
  rados_aio_release(thiz->notify_comp);
  if (thiz->notify_err < 0) {
    std::cout << " reconnect watch failed with error " << thiz->notify_err << std::endl;
  }
}
class WatchNotifyTestCtx2;
// --
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
// v1 watch/notify round trip: watch "foo", notify it, wait for the
// callback's semaphore, and verify watching a nonexistent object fails.
TEST_F(LibRadosWatchNotify, WatchNotify) {
  ASSERT_EQ(0, sem_init(&sem, 0, 0));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  uint64_t handle;
  ASSERT_EQ(0,
      rados_watch(ioctx, "foo", 0, &handle, watch_notify_test_cb, NULL));
  // retry the notify a few times: with ALLOW_TIMEOUTS set, transient
  // timeouts are tolerated instead of failing the test outright
  for (unsigned i=0; i<10; ++i) {
    int r = rados_notify(ioctx, "foo", 0, NULL, 0);
    if (r == 0) {
      break;
    }
    if (!getenv("ALLOW_TIMEOUTS")) {
      ASSERT_EQ(0, r);
    }
  }
  TestAlarm alarm;
  sem_wait(&sem);
  rados_unwatch(ioctx, "foo", handle);
  // when dne ...
  ASSERT_EQ(-ENOENT,
            rados_watch(ioctx, "dne", 0, &handle, watch_notify_test_cb, NULL));
  sem_destroy(&sem);
}
// Same v1 watch/notify round trip as above, but against an EC pool.
TEST_F(LibRadosWatchNotifyEC, WatchNotify) {
  SKIP_IF_CRIMSON();
  ASSERT_EQ(0, sem_init(&sem, 0, 0));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  uint64_t handle;
  ASSERT_EQ(0,
      rados_watch(ioctx, "foo", 0, &handle, watch_notify_test_cb, NULL));
  // tolerate transient notify timeouts when ALLOW_TIMEOUTS is set
  for (unsigned i=0; i<10; ++i) {
    int r = rados_notify(ioctx, "foo", 0, NULL, 0);
    if (r == 0) {
      break;
    }
    if (!getenv("ALLOW_TIMEOUTS")) {
      ASSERT_EQ(0, r);
    }
  }
  TestAlarm alarm;
  sem_wait(&sem);
  rados_unwatch(ioctx, "foo", handle);
  sem_destroy(&sem);
}
#pragma GCC diagnostic pop
#pragma GCC diagnostic warning "-Wpragmas"
// --
// Deleting a watched object must deliver ENOTCONN to the watch's error
// callback, and rados_watch_check must then report the watch as broken.
TEST_F(LibRadosWatchNotify, Watch2Delete) {
  notify_io = ioctx;
  notify_oid = "foo";
  notify_err = 0;
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, notify_oid, buf, sizeof(buf), 0));
  uint64_t handle;
  ASSERT_EQ(0,
	    rados_watch2(ioctx, notify_oid, &handle,
			 watch_notify2_test_cb,
			 watch_notify2_test_errcb, this));
  ASSERT_EQ(0, rados_remove(ioctx, notify_oid));
  // poll until the error callback fires (notify_err becomes nonzero)
  int left = 300;
  std::cout << "waiting up to " << left << " for disconnect notification ..."
	    << std::endl;
  while (notify_err == 0 && --left) {
    sleep(1);
  }
  ASSERT_TRUE(left > 0);
  ASSERT_EQ(-ENOTCONN, notify_err);
  int rados_watch_check_err = rados_watch_check(ioctx, handle);
  // We may hit ENOENT due to socket failure and a forced reconnect
  EXPECT_TRUE(rados_watch_check_err == -ENOTCONN || rados_watch_check_err == -ENOENT)
    << "Where rados_watch_check_err = " << rados_watch_check_err;
  rados_unwatch2(ioctx, handle);
  rados_watch_flush(cluster);
}
// Async variant of Watch2Delete: register the watch with rados_aio_watch,
// delete the object, expect ENOTCONN in the error callback, and verify
// the subsequent aio unwatch returns ENOENT for the gone object.
TEST_F(LibRadosWatchNotify, AioWatchDelete) {
  notify_io = ioctx;
  notify_oid = "foo";
  notify_err = 0;
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, notify_oid, buf, sizeof(buf), 0));
  rados_completion_t comp;
  uint64_t handle;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr, &comp));
  rados_aio_watch(ioctx, notify_oid, comp, &handle,
                  watch_notify2_test_cb, watch_notify2_test_errcb, this);
  ASSERT_EQ(0, rados_aio_wait_for_complete(comp));
  ASSERT_EQ(0, rados_aio_get_return_value(comp));
  rados_aio_release(comp);
  ASSERT_EQ(0, rados_remove(ioctx, notify_oid));
  // poll until the error callback records the disconnect
  int left = 300;
  std::cout << "waiting up to " << left << " for disconnect notification ..."
	    << std::endl;
  while (notify_err == 0 && --left) {
    sleep(1);
  }
  ASSERT_TRUE(left > 0);
  ASSERT_EQ(-ENOTCONN, notify_err);
  int rados_watch_check_err = rados_watch_check(ioctx, handle);
  // We may hit ENOENT due to socket failure injection and a forced reconnect
  EXPECT_TRUE(rados_watch_check_err == -ENOTCONN || rados_watch_check_err == -ENOENT)
    << "Where rados_watch_check_err = " << rados_watch_check_err;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr, &comp));
  rados_aio_unwatch(ioctx, handle, comp);
  ASSERT_EQ(0, rados_aio_wait_for_complete(comp));
  ASSERT_EQ(-ENOENT, rados_aio_get_return_value(comp));
  rados_aio_release(comp);
}
// --
// v2 watch/notify round trip: one watcher, one notify; the decoded reply
// map must contain exactly one "reply" ack and no missed watchers.  Also
// verifies notify on a nonexistent object zeroes the output buffer.
TEST_F(LibRadosWatchNotify, WatchNotify2) {
  notify_io = ioctx;
  notify_oid = "foo";
  notify_cookies.clear();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, notify_oid, buf, sizeof(buf), 0));
  uint64_t handle;
  ASSERT_EQ(0,
      rados_watch2(ioctx, notify_oid, &handle,
		   watch_notify2_test_cb,
		   watch_notify2_test_errcb_reconnect, this));
  ASSERT_GT(rados_watch_check(ioctx, handle), 0);
  char *reply_buf = 0;
  size_t reply_buf_len;
  ASSERT_EQ(0, rados_notify2(ioctx, notify_oid,
			     "notify", 6, 300000,
			     &reply_buf, &reply_buf_len));
  // the reply blob encodes (acks map, missed set)
  bufferlist reply;
  reply.append(reply_buf, reply_buf_len);
  std::map<std::pair<uint64_t,uint64_t>, bufferlist> reply_map;
  std::set<std::pair<uint64_t,uint64_t> > missed_map;
  auto reply_p = reply.cbegin();
  decode(reply_map, reply_p);
  decode(missed_map, reply_p);
  ASSERT_EQ(1u, reply_map.size());
  ASSERT_EQ(0u, missed_map.size());
  ASSERT_EQ(1u, notify_cookies.size());
  handle = *notify_cookies.begin();
  ASSERT_EQ(1u, notify_cookies.count(handle));
  ASSERT_EQ(5u, reply_map.begin()->second.length());
  ASSERT_EQ(0, strncmp("reply", reply_map.begin()->second.c_str(), 5));
  ASSERT_GT(rados_watch_check(ioctx, handle), 0);
  rados_buffer_free(reply_buf);
  // try it on a non-existent object ... our buffer pointers
  // should get zeroed.
  ASSERT_EQ(-ENOENT, rados_notify2(ioctx, "doesnotexist",
				   "notify", 6, 300000,
				   &reply_buf, &reply_buf_len));
  ASSERT_EQ((char*)0, reply_buf);
  ASSERT_EQ(0u, reply_buf_len);
  rados_unwatch2(ioctx, handle);
  rados_watch_flush(cluster);
}
// Same as WatchNotify2 but the watch is registered and removed through
// the aio API with explicit completions.
TEST_F(LibRadosWatchNotify, AioWatchNotify2) {
  notify_io = ioctx;
  notify_oid = "foo";
  notify_cookies.clear();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, notify_oid, buf, sizeof(buf), 0));
  uint64_t handle;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr, &notify_comp));
  rados_aio_watch(ioctx, notify_oid, notify_comp, &handle,
                  watch_notify2_test_cb, watch_notify2_test_errcb_aio_reconnect, this);
  ASSERT_EQ(0, rados_aio_wait_for_complete(notify_comp));
  ASSERT_EQ(0, rados_aio_get_return_value(notify_comp));
  rados_aio_release(notify_comp);
  ASSERT_GT(rados_watch_check(ioctx, handle), 0);
  char *reply_buf = 0;
  size_t reply_buf_len;
  ASSERT_EQ(0, rados_notify2(ioctx, notify_oid,
                             "notify", 6, 300000,
                             &reply_buf, &reply_buf_len));
  // decode (acks map, missed set) and expect one "reply" ack
  bufferlist reply;
  reply.append(reply_buf, reply_buf_len);
  std::map<std::pair<uint64_t,uint64_t>, bufferlist> reply_map;
  std::set<std::pair<uint64_t,uint64_t> > missed_map;
  auto reply_p = reply.cbegin();
  decode(reply_map, reply_p);
  decode(missed_map, reply_p);
  ASSERT_EQ(1u, reply_map.size());
  ASSERT_EQ(0u, missed_map.size());
  ASSERT_EQ(1u, notify_cookies.size());
  handle = *notify_cookies.begin();
  ASSERT_EQ(1u, notify_cookies.count(handle));
  ASSERT_EQ(5u, reply_map.begin()->second.length());
  ASSERT_EQ(0, strncmp("reply", reply_map.begin()->second.c_str(), 5));
  ASSERT_GT(rados_watch_check(ioctx, handle), 0);
  rados_buffer_free(reply_buf);
  // try it on a non-existent object ... our buffer pointers
  // should get zeroed.
  ASSERT_EQ(-ENOENT, rados_notify2(ioctx, "doesnotexist",
                                   "notify", 6, 300000,
                                   &reply_buf, &reply_buf_len));
  ASSERT_EQ((char*)0, reply_buf);
  ASSERT_EQ(0u, reply_buf_len);
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr, &notify_comp));
  rados_aio_unwatch(ioctx, handle, notify_comp);
  ASSERT_EQ(0, rados_aio_wait_for_complete(notify_comp));
  ASSERT_EQ(0, rados_aio_get_return_value(notify_comp));
  rados_aio_release(notify_comp);
}
// Async notify: send the notify via rados_aio_notify and decode the reply
// with rados_decode_notify_response instead of manual bufferlist decoding.
TEST_F(LibRadosWatchNotify, AioNotify) {
  notify_io = ioctx;
  notify_oid = "foo";
  notify_cookies.clear();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, notify_oid, buf, sizeof(buf), 0));
  uint64_t handle;
  ASSERT_EQ(0,
      rados_watch2(ioctx, notify_oid, &handle,
		   watch_notify2_test_cb,
		   watch_notify2_test_errcb, this));
  ASSERT_GT(rados_watch_check(ioctx, handle), 0);
  char *reply_buf = 0;
  size_t reply_buf_len;
  rados_completion_t comp;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr, &comp));
  ASSERT_EQ(0, rados_aio_notify(ioctx, "foo", comp, "notify", 6, 300000,
                                &reply_buf, &reply_buf_len));
  ASSERT_EQ(0, rados_aio_wait_for_complete(comp));
  ASSERT_EQ(0, rados_aio_get_return_value(comp));
  rados_aio_release(comp);
  // expect exactly one ack carrying the 5-byte "reply" payload, no timeouts
  size_t nr_acks, nr_timeouts;
  notify_ack_t *acks = nullptr;
  notify_timeout_t *timeouts = nullptr;
  ASSERT_EQ(0, rados_decode_notify_response(reply_buf, reply_buf_len,
                                            &acks, &nr_acks, &timeouts, &nr_timeouts));
  ASSERT_EQ(1u, nr_acks);
  ASSERT_EQ(0u, nr_timeouts);
  ASSERT_EQ(1u, notify_cookies.size());
  ASSERT_EQ(1u, notify_cookies.count(handle));
  ASSERT_EQ(5u, acks[0].payload_len);
  ASSERT_EQ(0, strncmp("reply", acks[0].payload, acks[0].payload_len));
  ASSERT_GT(rados_watch_check(ioctx, handle), 0);
  rados_free_notify_response(acks, nr_acks, timeouts);
  rados_buffer_free(reply_buf);
  // try it on a non-existent object ... our buffer pointers
  // should get zeroed.
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr, &comp));
  ASSERT_EQ(0, rados_aio_notify(ioctx, "doesnotexist", comp, "notify", 6,
                                300000, &reply_buf, &reply_buf_len));
  ASSERT_EQ(0, rados_aio_wait_for_complete(comp));
  ASSERT_EQ(-ENOENT, rados_aio_get_return_value(comp));
  rados_aio_release(comp);
  ASSERT_EQ((char*)0, reply_buf);
  ASSERT_EQ(0u, reply_buf_len);
  rados_unwatch2(ioctx, handle);
  rados_watch_flush(cluster);
}
// --
// Two watches on the same object from the same client: a single notify
// must produce two distinct cookies and two entries in the reply map.
TEST_F(LibRadosWatchNotify, WatchNotify2Multi) {
  notify_io = ioctx;
  notify_oid = "foo";
  notify_cookies.clear();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, notify_oid, buf, sizeof(buf), 0));
  uint64_t handle1, handle2;
  ASSERT_EQ(0,
      rados_watch2(ioctx, notify_oid, &handle1,
		   watch_notify2_test_cb,
		   watch_notify2_test_errcb, this));
  ASSERT_EQ(0,
      rados_watch2(ioctx, notify_oid, &handle2,
		   watch_notify2_test_cb,
		   watch_notify2_test_errcb, this));
  ASSERT_GT(rados_watch_check(ioctx, handle1), 0);
  ASSERT_GT(rados_watch_check(ioctx, handle2), 0);
  ASSERT_NE(handle1, handle2);
  char *reply_buf = 0;
  size_t reply_buf_len;
  ASSERT_EQ(0, rados_notify2(ioctx, notify_oid,
			     "notify", 6, 300000,
			     &reply_buf, &reply_buf_len));
  // decode (acks map, missed set): both watches must have acked
  bufferlist reply;
  reply.append(reply_buf, reply_buf_len);
  std::map<std::pair<uint64_t,uint64_t>, bufferlist> reply_map;
  std::set<std::pair<uint64_t,uint64_t> > missed_map;
  auto reply_p = reply.cbegin();
  decode(reply_map, reply_p);
  decode(missed_map, reply_p);
  ASSERT_EQ(2u, reply_map.size());
  ASSERT_EQ(5u, reply_map.begin()->second.length());
  ASSERT_EQ(0u, missed_map.size());
  ASSERT_EQ(2u, notify_cookies.size());
  ASSERT_EQ(1u, notify_cookies.count(handle1));
  ASSERT_EQ(1u, notify_cookies.count(handle2));
  ASSERT_EQ(0, strncmp("reply", reply_map.begin()->second.c_str(), 5));
  ASSERT_GT(rados_watch_check(ioctx, handle1), 0);
  ASSERT_GT(rados_watch_check(ioctx, handle2), 0);
  rados_buffer_free(reply_buf);
  rados_unwatch2(ioctx, handle1);
  rados_unwatch2(ioctx, handle2);
  rados_watch_flush(cluster);
}
// --
// The notify callback sleeps 3s while the notify timeout is 1s, so the
// notify must return ETIMEDOUT with the watcher listed in the missed set;
// a follow-up notify with a long timeout must then succeed.
TEST_F(LibRadosWatchNotify, WatchNotify2Timeout) {
  notify_io = ioctx;
  notify_oid = "foo";
  notify_sleep = 3; // 3s
  notify_cookies.clear();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, notify_oid, buf, sizeof(buf), 0));
  uint64_t handle;
  ASSERT_EQ(0,
      rados_watch2(ioctx, notify_oid, &handle,
		   watch_notify2_test_cb,
		   watch_notify2_test_errcb, this));
  ASSERT_GT(rados_watch_check(ioctx, handle), 0);
  char *reply_buf = 0;
  size_t reply_buf_len;
  ASSERT_EQ(-ETIMEDOUT, rados_notify2(ioctx, notify_oid,
				      "notify", 6, 1000, // 1s
				      &reply_buf, &reply_buf_len));
  ASSERT_EQ(1u, notify_cookies.size());
  {
    // the reply blob still decodes: no acks, one missed watcher
    bufferlist reply;
    reply.append(reply_buf, reply_buf_len);
    std::map<std::pair<uint64_t,uint64_t>, bufferlist> reply_map;
    std::set<std::pair<uint64_t,uint64_t> > missed_map;
    auto reply_p = reply.cbegin();
    decode(reply_map, reply_p);
    decode(missed_map, reply_p);
    ASSERT_EQ(0u, reply_map.size());
    ASSERT_EQ(1u, missed_map.size());
  }
  rados_buffer_free(reply_buf);
  // we should get the next notify, though!
  notify_sleep = 0;
  notify_cookies.clear();
  ASSERT_EQ(0, rados_notify2(ioctx, notify_oid,
			     "notify", 6, 300000, // 300s
			     &reply_buf, &reply_buf_len));
  ASSERT_EQ(1u, notify_cookies.size());
  ASSERT_GT(rados_watch_check(ioctx, handle), 0);
  rados_unwatch2(ioctx, handle);
  rados_completion_t comp;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr, &comp));
  rados_aio_watch_flush(cluster, comp);
  ASSERT_EQ(0, rados_aio_wait_for_complete(comp));
  ASSERT_EQ(0, rados_aio_get_return_value(comp));
  rados_aio_release(comp);
  rados_buffer_free(reply_buf);
}
// Register a watch with an explicit 4s timeout, suppress watch pings via
// objecter_inject_no_watch_ping, and verify the OSD times the watch out:
// notifies stop reaching us and rados_watch_check reports ENOTCONN.
// Re-watching afterwards must restore normal notify delivery.
TEST_F(LibRadosWatchNotify, Watch3Timeout) {
  notify_io = ioctx;
  notify_oid = "foo";
  notify_cookies.clear();
  notify_err = 0;
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, notify_oid, buf, sizeof(buf), 0));
  uint64_t handle;
  time_t start = time(0);
  const uint32_t timeout = 4;
  {
    // make sure i timeout before the messenger reconnects to the OSD,
    // it will resend a watch request on behalf of the client, and the
    // timer of timeout on OSD side will be reset by the new request.
    char conf[128];
    ASSERT_EQ(0, rados_conf_get(cluster,
                                "ms_connection_idle_timeout",
                                conf, sizeof(conf)));
    auto connection_idle_timeout = std::stoll(conf);
    ASSERT_LT(timeout, connection_idle_timeout);
  }
  ASSERT_EQ(0,
	    rados_watch3(ioctx, notify_oid, &handle,
			 watch_notify2_test_cb, watch_notify2_test_errcb,
			 timeout, this));
  // watch_check returns the watch age in ms; sanity-check it
  int age = rados_watch_check(ioctx, handle);
  time_t age_bound = time(0) + 1 - start;
  ASSERT_LT(age, age_bound * 1000);
  ASSERT_GT(age, 0);
  rados_conf_set(cluster, "objecter_inject_no_watch_ping", "true");
  // allow a long time here since an osd peering event will renew our
  // watch.
  int left = 256 * timeout;
  std::cout << "waiting up to " << left << " for osd to time us out ..."
	    << std::endl;
  while (notify_err == 0 && --left) {
    sleep(1);
  }
  ASSERT_GT(left, 0);
  rados_conf_set(cluster, "objecter_inject_no_watch_ping", "false");
  ASSERT_EQ(-ENOTCONN, notify_err);
  ASSERT_EQ(-ENOTCONN, rados_watch_check(ioctx, handle));
  // a subsequent notify should not reach us
  char *reply_buf = nullptr;
  size_t reply_buf_len;
  ASSERT_EQ(0, rados_notify2(ioctx, notify_oid,
			     "notify", 6, 300000,
			     &reply_buf, &reply_buf_len));
  {
    // timed-out watch: no acks and no missed entries at all
    bufferlist reply;
    reply.append(reply_buf, reply_buf_len);
    std::map<std::pair<uint64_t,uint64_t>, bufferlist> reply_map;
    std::set<std::pair<uint64_t,uint64_t> > missed_map;
    auto reply_p = reply.cbegin();
    decode(reply_map, reply_p);
    decode(missed_map, reply_p);
    ASSERT_EQ(0u, reply_map.size());
    ASSERT_EQ(0u, missed_map.size());
  }
  ASSERT_EQ(0u, notify_cookies.size());
  ASSERT_EQ(-ENOTCONN, rados_watch_check(ioctx, handle));
  rados_buffer_free(reply_buf);
  // re-watch
  rados_unwatch2(ioctx, handle);
  rados_watch_flush(cluster);
  handle = 0;
  ASSERT_EQ(0,
	    rados_watch2(ioctx, notify_oid, &handle,
			 watch_notify2_test_cb,
			 watch_notify2_test_errcb, this));
  ASSERT_GT(rados_watch_check(ioctx, handle), 0);
  // and now a notify will work.
  ASSERT_EQ(0, rados_notify2(ioctx, notify_oid,
			     "notify", 6, 300000,
			     &reply_buf, &reply_buf_len));
  {
    bufferlist reply;
    reply.append(reply_buf, reply_buf_len);
    std::map<std::pair<uint64_t,uint64_t>, bufferlist> reply_map;
    std::set<std::pair<uint64_t,uint64_t> > missed_map;
    auto reply_p = reply.cbegin();
    decode(reply_map, reply_p);
    decode(missed_map, reply_p);
    ASSERT_EQ(1u, reply_map.size());
    ASSERT_EQ(0u, missed_map.size());
    ASSERT_EQ(1u, notify_cookies.count(handle));
    ASSERT_EQ(5u, reply_map.begin()->second.length());
    ASSERT_EQ(0, strncmp("reply", reply_map.begin()->second.c_str(), 5));
  }
  ASSERT_EQ(1u, notify_cookies.size());
  ASSERT_GT(rados_watch_check(ioctx, handle), 0);
  rados_buffer_free(reply_buf);
  rados_unwatch2(ioctx, handle);
  rados_watch_flush(cluster);
}
// Async watch (rados_aio_watch2 with a 3s timeout) on an object that is
// then deleted: the error callback must record ENOTCONN and the aio
// unwatch of the gone object must return ENOENT.
//
// Fix: use ASSERT_GT(left, 0) instead of ASSERT_TRUE(left > 0) for
// consistency with Watch3Timeout and a more informative failure message.
TEST_F(LibRadosWatchNotify, AioWatchDelete2) {
  notify_io = ioctx;
  notify_oid = "foo";
  notify_err = 0;
  char buf[128];
  uint32_t timeout = 3;
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, notify_oid, buf, sizeof(buf), 0));
  rados_completion_t comp;
  uint64_t handle;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr, &comp));
  rados_aio_watch2(ioctx, notify_oid, comp, &handle,
                   watch_notify2_test_cb, watch_notify2_test_errcb, timeout, this);
  ASSERT_EQ(0, rados_aio_wait_for_complete(comp));
  ASSERT_EQ(0, rados_aio_get_return_value(comp));
  rados_aio_release(comp);
  ASSERT_EQ(0, rados_remove(ioctx, notify_oid));
  // poll until the error callback records the disconnect
  int left = 30;
  std::cout << "waiting up to " << left << " for disconnect notification ..."
	    << std::endl;
  while (notify_err == 0 && --left) {
    sleep(1);
  }
  ASSERT_GT(left, 0);
  ASSERT_EQ(-ENOTCONN, notify_err);
  int rados_watch_check_err = rados_watch_check(ioctx, handle);
  // We may hit ENOENT due to socket failure injection and a forced reconnect
  EXPECT_TRUE(rados_watch_check_err == -ENOTCONN || rados_watch_check_err == -ENOENT)
    << "Where rados_watch_check_err = " << rados_watch_check_err;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr, &comp));
  rados_aio_unwatch(ioctx, handle, comp);
  ASSERT_EQ(0, rados_aio_wait_for_complete(comp));
  ASSERT_EQ(-ENOENT, rados_aio_get_return_value(comp));
  rados_aio_release(comp);
}
| 22,602 | 33.351064 | 100 | cc |
null | ceph-main/src/test/librados/watch_notify_cxx.cc | #include <errno.h>
#include <fcntl.h>
#include <semaphore.h>
#include <set>
#include <map>
#include "gtest/gtest.h"
#include "include/encoding.h"
#include "include/rados/librados.hpp"
#include "include/rados/rados_types.h"
#include "test/librados/test_cxx.h"
#include "test/librados/testcase_cxx.h"
#include "crimson_utils.h"
using namespace librados;
typedef RadosTestECPP LibRadosWatchNotifyECPP;
int notify_sleep = 0;
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
// C++-API fixture mirroring LibRadosWatchNotify: the WatchCtx2 helper
// classes below are friends so they can record results in these members.
class LibRadosWatchNotifyPP : public RadosTestParamPP
{
protected:
  bufferlist notify_bl;  // payload delivered with the last notify
  std::set<uint64_t> notify_cookies;  // cookies of watches that were notified
  rados_ioctx_t notify_io;  // ioctx handle used by the tests
  const char *notify_oid = nullptr;  // object under watch
  int notify_err = 0;  // last status recorded by a handle_error
  friend class WatchNotifyTestCtx2;
  friend class WatchNotifyTestCtx2TimeOut;
};
// ioctx shared with the WatchCtx2 implementations for acking notifies
// and re-registering watches
IoCtx *notify_ioctx;
// WatchCtx2 whose error handler transparently re-registers the watch
// (self-reconnect), the C++ counterpart of
// LibRadosWatchNotify::watch_notify2_test_errcb_reconnect.
class WatchNotifyTestCtx2 : public WatchCtx2
{
  LibRadosWatchNotifyPP *notify;
public:
  WatchNotifyTestCtx2(LibRadosWatchNotifyPP *notify)
    : notify(notify)
  {}
  // record payload and cookie, optionally delay (notify_sleep), then ack
  void handle_notify(uint64_t notify_id, uint64_t cookie, uint64_t notifier_gid,
		     bufferlist& bl) override {
    std::cout << __func__ << " cookie " << cookie << " notify_id " << notify_id
	      << " notifier_gid " << notifier_gid << std::endl;
    notify->notify_bl = bl;
    notify->notify_cookies.insert(cookie);
    bufferlist reply;
    reply.append("reply", 5);
    if (notify_sleep)
      sleep(notify_sleep);
    notify_ioctx->notify_ack(notify->notify_oid, notify_id, cookie, reply);
  }
  // drop the broken watch and immediately re-watch with this same ctx;
  // notify_err ends up holding the re-watch result
  void handle_error(uint64_t cookie, int err) override {
    std::cout << __func__ << " cookie " << cookie
	      << " err " << err << std::endl;
    ceph_assert(cookie > 1000);
    notify_ioctx->unwatch2(cookie);
    notify->notify_cookies.erase(cookie);
    notify->notify_err = notify_ioctx->watch2(notify->notify_oid, &cookie, this);
    if (notify->notify_err < err ) {
      std::cout << "reconnect notify_err " << notify->notify_err << " err " << err << std::endl;
    }
  }
};
// WatchCtx2 that does NOT reconnect on error: handle_error only records
// the error so tests can observe timeouts/disconnects.
class WatchNotifyTestCtx2TimeOut : public WatchCtx2
{
  LibRadosWatchNotifyPP *notify;
public:
  WatchNotifyTestCtx2TimeOut(LibRadosWatchNotifyPP *notify)
    : notify(notify)
  {}
  // record payload and cookie, optionally delay (notify_sleep), then ack
  void handle_notify(uint64_t notify_id, uint64_t cookie, uint64_t notifier_gid,
		     bufferlist& bl) override {
    std::cout << __func__ << " cookie " << cookie << " notify_id " << notify_id
	      << " notifier_gid " << notifier_gid << std::endl;
    notify->notify_bl = bl;
    notify->notify_cookies.insert(cookie);
    bufferlist reply;
    reply.append("reply", 5);
    if (notify_sleep)
      sleep(notify_sleep);
    notify_ioctx->notify_ack(notify->notify_oid, notify_id, cookie, reply);
  }
  void handle_error(uint64_t cookie, int err) override {
    std::cout << __func__ << " cookie " << cookie
	      << " err " << err << std::endl;
    ceph_assert(cookie > 1000);
    notify->notify_err = err;
  }
};
// notify
// posted by WatchNotifyTestCtx::notify so test bodies can wait for delivery
static sem_t sem;
// legacy (v1) WatchCtx: just log and signal the semaphore
class WatchNotifyTestCtx : public WatchCtx
{
public:
    void notify(uint8_t opcode, uint64_t ver, bufferlist& bl) override
    {
      std::cout << __func__ << std::endl;
      sem_post(&sem);
    }
};
// v1 C++ watch/notify round trip: watch "foo", confirm it appears in
// list_watchers, notify it, and wait for the callback's semaphore.
TEST_P(LibRadosWatchNotifyPP, WatchNotify) {
  ASSERT_EQ(0, sem_init(&sem, 0, 0));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  uint64_t handle;
  WatchNotifyTestCtx ctx;
  ASSERT_EQ(0, ioctx.watch("foo", 0, &handle, &ctx));
  std::list<obj_watch_t> watches;
  ASSERT_EQ(0, ioctx.list_watchers("foo", &watches));
  ASSERT_EQ(1u, watches.size());
  bufferlist bl2;
  // tolerate transient notify timeouts when ALLOW_TIMEOUTS is set
  for (unsigned i=0; i<10; ++i) {
    int r = ioctx.notify("foo", 0, bl2);
    if (r == 0) {
      break;
    }
    if (!getenv("ALLOW_TIMEOUTS")) {
      ASSERT_EQ(0, r);
    }
  }
  TestAlarm alarm;
  sem_wait(&sem);
  ioctx.unwatch("foo", handle);
  sem_destroy(&sem);
}
// Same v1 C++ watch/notify round trip as above, but against an EC pool.
TEST_F(LibRadosWatchNotifyECPP, WatchNotify) {
  SKIP_IF_CRIMSON();
  ASSERT_EQ(0, sem_init(&sem, 0, 0));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  uint64_t handle;
  WatchNotifyTestCtx ctx;
  ASSERT_EQ(0, ioctx.watch("foo", 0, &handle, &ctx));
  std::list<obj_watch_t> watches;
  ASSERT_EQ(0, ioctx.list_watchers("foo", &watches));
  ASSERT_EQ(1u, watches.size());
  bufferlist bl2;
  // tolerate transient notify timeouts when ALLOW_TIMEOUTS is set
  for (unsigned i=0; i<10; ++i) {
    int r = ioctx.notify("foo", 0, bl2);
    if (r == 0) {
      break;
    }
    if (!getenv("ALLOW_TIMEOUTS")) {
      ASSERT_EQ(0, r);
    }
  }
  TestAlarm alarm;
  sem_wait(&sem);
  ioctx.unwatch("foo", handle);
  sem_destroy(&sem);
}
// --
// Establish a watch with a 1s notify timeout configured; only checks that
// watch/unwatch succeed under that setting.
TEST_P(LibRadosWatchNotifyPP, WatchNotifyTimeout) {
  ASSERT_EQ(0, sem_init(&sem, 0, 0));
  ioctx.set_notify_timeout(1);
  char data[128];
  memset(data, 0xcc, sizeof(data));
  bufferlist obj_bl;
  obj_bl.append(data, sizeof(data));
  ASSERT_EQ(0, ioctx.write("foo", obj_bl, sizeof(data), 0));
  WatchNotifyTestCtx ctx;
  uint64_t handle;
  ASSERT_EQ(0, ioctx.watch("foo", 0, &handle, &ctx));
  sem_destroy(&sem);
  ASSERT_EQ(0, ioctx.unwatch("foo", handle));
}
// EC-pool variant of the 1s notify-timeout watch/unwatch check.
TEST_F(LibRadosWatchNotifyECPP, WatchNotifyTimeout) {
  SKIP_IF_CRIMSON();
  ASSERT_EQ(0, sem_init(&sem, 0, 0));
  ioctx.set_notify_timeout(1);
  char payload[128];
  memset(payload, 0xcc, sizeof(payload));
  bufferlist write_bl;
  write_bl.append(payload, sizeof(payload));
  ASSERT_EQ(0, ioctx.write("foo", write_bl, sizeof(payload), 0));
  WatchNotifyTestCtx ctx;
  uint64_t handle;
  ASSERT_EQ(0, ioctx.watch("foo", 0, &handle, &ctx));
  sem_destroy(&sem);
  ASSERT_EQ(0, ioctx.unwatch("foo", handle));
}
#pragma GCC diagnostic pop
#pragma GCC diagnostic warning "-Wpragmas"
// v2 watch/notify round trip: the notify2 reply payload encodes the per-
// watcher ack map plus a set of watchers that missed the notification.
TEST_P(LibRadosWatchNotifyPP, WatchNotify2) {
  notify_oid = "foo";
  notify_ioctx = &ioctx;
  notify_cookies.clear();
  char data[128];
  memset(data, 0xcc, sizeof(data));
  bufferlist obj_bl;
  obj_bl.append(data, sizeof(data));
  ASSERT_EQ(0, ioctx.write(notify_oid, obj_bl, sizeof(data), 0));
  uint64_t handle;
  WatchNotifyTestCtx2 ctx(this);
  ASSERT_EQ(0, ioctx.watch2(notify_oid, &handle, &ctx));
  ASSERT_GT(ioctx.watch_check(handle), 0);
  std::list<obj_watch_t> watches;
  ASSERT_EQ(0, ioctx.list_watchers(notify_oid, &watches));
  ASSERT_EQ(watches.size(), 1u);
  bufferlist notify_bl, reply_bl;
  ASSERT_EQ(0, ioctx.notify2(notify_oid, notify_bl, 300000, &reply_bl));
  // unpack the reply: acked watchers first, then the missed set
  std::map<std::pair<uint64_t,uint64_t>,bufferlist> reply_map;
  std::set<std::pair<uint64_t,uint64_t> > missed_map;
  auto iter = reply_bl.cbegin();
  decode(reply_map, iter);
  decode(missed_map, iter);
  ASSERT_EQ(1u, notify_cookies.size());
  ASSERT_EQ(1u, notify_cookies.count(handle));
  ASSERT_EQ(1u, reply_map.size());
  ASSERT_EQ(5u, reply_map.begin()->second.length());
  ASSERT_EQ(0, strncmp("reply", reply_map.begin()->second.c_str(), 5));
  ASSERT_EQ(0u, missed_map.size());
  ASSERT_GT(ioctx.watch_check(handle), 0);
  ioctx.unwatch2(handle);
}
// Same v2 round trip, but establishing and tearing down the watch through
// the asynchronous aio_watch/aio_unwatch API.
TEST_P(LibRadosWatchNotifyPP, AioWatchNotify2) {
  notify_oid = "foo";
  notify_ioctx = &ioctx;
  notify_cookies.clear();
  char data[128];
  memset(data, 0xcc, sizeof(data));
  bufferlist obj_bl;
  obj_bl.append(data, sizeof(data));
  ASSERT_EQ(0, ioctx.write(notify_oid, obj_bl, sizeof(data), 0));
  uint64_t handle;
  WatchNotifyTestCtx2 ctx(this);
  librados::AioCompletion *comp = cluster.aio_create_completion();
  ASSERT_EQ(0, ioctx.aio_watch(notify_oid, comp, &handle, &ctx));
  ASSERT_EQ(0, comp->wait_for_complete());
  ASSERT_EQ(0, comp->get_return_value());
  comp->release();
  ASSERT_GT(ioctx.watch_check(handle), 0);
  std::list<obj_watch_t> watches;
  ASSERT_EQ(0, ioctx.list_watchers(notify_oid, &watches));
  ASSERT_EQ(watches.size(), 1u);
  bufferlist notify_bl, reply_bl;
  ASSERT_EQ(0, ioctx.notify2(notify_oid, notify_bl, 300000, &reply_bl));
  // unpack the reply: acked watchers first, then the missed set
  std::map<std::pair<uint64_t,uint64_t>,bufferlist> reply_map;
  std::set<std::pair<uint64_t,uint64_t> > missed_map;
  auto iter = reply_bl.cbegin();
  decode(reply_map, iter);
  decode(missed_map, iter);
  ASSERT_EQ(1u, notify_cookies.size());
  ASSERT_EQ(1u, notify_cookies.count(handle));
  ASSERT_EQ(1u, reply_map.size());
  ASSERT_EQ(5u, reply_map.begin()->second.length());
  ASSERT_EQ(0, strncmp("reply", reply_map.begin()->second.c_str(), 5));
  ASSERT_EQ(0u, missed_map.size());
  ASSERT_GT(ioctx.watch_check(handle), 0);
  comp = cluster.aio_create_completion();
  ioctx.aio_unwatch(handle, comp);
  ASSERT_EQ(0, comp->wait_for_complete());
  comp->release();
}
// Asynchronous notify: uses aio_notify plus the typed
// decode_notify_response() helper instead of hand-decoding the reply.
TEST_P(LibRadosWatchNotifyPP, AioNotify) {
  notify_oid = "foo";
  notify_ioctx = &ioctx;
  notify_cookies.clear();
  char data[128];
  memset(data, 0xcc, sizeof(data));
  bufferlist obj_bl;
  obj_bl.append(data, sizeof(data));
  ASSERT_EQ(0, ioctx.write(notify_oid, obj_bl, sizeof(data), 0));
  uint64_t handle;
  WatchNotifyTestCtx2 ctx(this);
  ASSERT_EQ(0, ioctx.watch2(notify_oid, &handle, &ctx));
  ASSERT_GT(ioctx.watch_check(handle), 0);
  std::list<obj_watch_t> watches;
  ASSERT_EQ(0, ioctx.list_watchers(notify_oid, &watches));
  ASSERT_EQ(watches.size(), 1u);
  bufferlist notify_bl, reply_bl;
  librados::AioCompletion *comp = cluster.aio_create_completion();
  ASSERT_EQ(0, ioctx.aio_notify(notify_oid, comp, notify_bl, 300000, &reply_bl));
  ASSERT_EQ(0, comp->wait_for_complete());
  ASSERT_EQ(0, comp->get_return_value());
  comp->release();
  std::vector<librados::notify_ack_t> acks;
  std::vector<librados::notify_timeout_t> timeouts;
  ioctx.decode_notify_response(reply_bl, &acks, &timeouts);
  ASSERT_EQ(1u, notify_cookies.size());
  ASSERT_EQ(1u, notify_cookies.count(handle));
  ASSERT_EQ(1u, acks.size());
  ASSERT_EQ(5u, acks[0].payload_bl.length());
  ASSERT_EQ(0, strncmp("reply", acks[0].payload_bl.c_str(), acks[0].payload_bl.length()));
  ASSERT_EQ(0u, timeouts.size());
  ASSERT_GT(ioctx.watch_check(handle), 0);
  ioctx.unwatch2(handle);
  cluster.watch_flush();
}
// --
// Force a notify timeout: the watch context sleeps 3s before acking while
// the notifier only waits 1s, so notify2 must return -ETIMEDOUT.
TEST_P(LibRadosWatchNotifyPP, WatchNotify2Timeout) {
  notify_oid = "foo";
  notify_ioctx = &ioctx;
  notify_sleep = 3; // 3s
  notify_cookies.clear();
  char data[128];
  memset(data, 0xcc, sizeof(data));
  bufferlist obj_bl;
  obj_bl.append(data, sizeof(data));
  ASSERT_EQ(0, ioctx.write(notify_oid, obj_bl, sizeof(data), 0));
  uint64_t handle;
  WatchNotifyTestCtx2TimeOut ctx(this);
  ASSERT_EQ(0, ioctx.watch2(notify_oid, &handle, &ctx));
  ASSERT_GT(ioctx.watch_check(handle), 0);
  std::list<obj_watch_t> watches;
  ASSERT_EQ(0, ioctx.list_watchers(notify_oid, &watches));
  ASSERT_EQ(watches.size(), 1u);
  ASSERT_EQ(0u, notify_cookies.size());
  bufferlist notify_bl, reply_bl;
  std::cout << " trying..." << std::endl;
  ASSERT_EQ(-ETIMEDOUT, ioctx.notify2(notify_oid, notify_bl, 1000 /* 1s */,
                                      &reply_bl));
  std::cout << " timed out" << std::endl;
  ASSERT_GT(ioctx.watch_check(handle), 0);
  ioctx.unwatch2(handle);
  std::cout << " flushing" << std::endl;
  librados::AioCompletion *comp = cluster.aio_create_completion();
  cluster.aio_watch_flush(comp);
  ASSERT_EQ(0, comp->wait_for_complete());
  ASSERT_EQ(0, comp->get_return_value());
  std::cout << " flushed" << std::endl;
  comp->release();
}
// watch3 variant: registers the watch with an explicit timeout and checks
// that list_watchers reports that configured timeout back.
TEST_P(LibRadosWatchNotifyPP, WatchNotify3) {
  notify_oid = "foo";
  notify_ioctx = &ioctx;
  notify_cookies.clear();
  uint32_t timeout = 12; // configured timeout
  char data[128];
  memset(data, 0xcc, sizeof(data));
  bufferlist obj_bl;
  obj_bl.append(data, sizeof(data));
  ASSERT_EQ(0, ioctx.write(notify_oid, obj_bl, sizeof(data), 0));
  uint64_t handle;
  WatchNotifyTestCtx2TimeOut ctx(this);
  ASSERT_EQ(0, ioctx.watch3(notify_oid, &handle, &ctx, timeout));
  ASSERT_GT(ioctx.watch_check(handle), 0);
  std::list<obj_watch_t> watches;
  ASSERT_EQ(0, ioctx.list_watchers(notify_oid, &watches));
  ASSERT_EQ(watches.size(), 1u);
  std::cout << "List watches" << std::endl;
  // every registered watcher must carry the timeout we asked for
  for (auto &watch : watches) {
    ASSERT_EQ(watch.timeout_seconds, timeout);
  }
  bufferlist notify_bl, reply_bl;
  std::cout << "notify2" << std::endl;
  ASSERT_EQ(0, ioctx.notify2(notify_oid, notify_bl, 300000, &reply_bl));
  std::cout << "notify2 done" << std::endl;
  std::map<std::pair<uint64_t,uint64_t>,bufferlist> reply_map;
  std::set<std::pair<uint64_t,uint64_t> > missed_map;
  auto iter = reply_bl.cbegin();
  decode(reply_map, iter);
  decode(missed_map, iter);
  ASSERT_EQ(1u, notify_cookies.size());
  ASSERT_EQ(1u, notify_cookies.count(handle));
  ASSERT_EQ(1u, reply_map.size());
  ASSERT_EQ(5u, reply_map.begin()->second.length());
  ASSERT_EQ(0, strncmp("reply", reply_map.begin()->second.c_str(), 5));
  ASSERT_EQ(0u, missed_map.size());
  std::cout << "watch_check" << std::endl;
  ASSERT_GT(ioctx.watch_check(handle), 0);
  std::cout << "unwatch2" << std::endl;
  ioctx.unwatch2(handle);
  std::cout << " flushing" << std::endl;
  cluster.watch_flush();
  std::cout << "done" << std::endl;
}
// --
INSTANTIATE_TEST_SUITE_P(LibRadosWatchNotifyPPTests, LibRadosWatchNotifyPP,
::testing::Values("", "cache"));
| 12,835 | 29.781775 | 96 | cc |
null | ceph-main/src/test/librados_test_stub/LibradosTestStub.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/librados_test_stub/LibradosTestStub.h"
#include "include/rados/librados.hpp"
#include "include/stringify.h"
#include "common/ceph_argparse.h"
#include "common/ceph_context.h"
#include "common/common_init.h"
#include "common/config.h"
#include "common/debug.h"
#include "common/snap_types.h"
#include "librados/AioCompletionImpl.h"
#include "log/Log.h"
#include "test/librados_test_stub/TestClassHandler.h"
#include "test/librados_test_stub/TestIoCtxImpl.h"
#include "test/librados_test_stub/TestRadosClient.h"
#include "test/librados_test_stub/TestMemCluster.h"
#include "test/librados_test_stub/TestMemRadosClient.h"
#include "objclass/objclass.h"
#include "osd/osd_types.h"
#include <arpa/inet.h>
#include <boost/shared_ptr.hpp>
#include <deque>
#include <functional>
#include <list>
#include <vector>
#include "include/ceph_assert.h"
#include "include/compat.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rados
using namespace std;
namespace librados {
// Recover the mock impl hidden behind a librados::IoCtx.  The stub keeps a
// TestIoCtxImpl* as the first (only) member, so reinterpreting the IoCtx's
// address yields a pointer to that pointer.
MockTestMemIoCtxImpl &get_mock_io_ctx(IoCtx &ioctx) {
  MockTestMemIoCtxImpl **mock =
    reinterpret_cast<MockTestMemIoCtxImpl **>(&ioctx);
  return **mock;
}
} // namespace librados
namespace librados_test_stub {
// Process-wide cluster singleton storage (lazily populated by get_cluster).
TestClusterRef &cluster() {
  static TestClusterRef s_cluster;
  return s_cluster;
}
// Install a caller-provided cluster (e.g. a mock) before any client is made.
void set_cluster(TestClusterRef cluster_ref) {
  cluster() = cluster_ref;
}
// Return the active cluster, defaulting to an in-memory implementation.
TestClusterRef get_cluster() {
  auto &cluster_ref = cluster();
  if (cluster_ref.get() == nullptr) {
    cluster_ref.reset(new librados::TestMemCluster());
  }
  return cluster_ref;
}
// Lazily construct the shared object-class handler and load all classes.
// NOTE(review): the check-then-init here is not synchronized — assumed to be
// first called from a single thread.
librados::TestClassHandler *get_class_handler() {
  static boost::shared_ptr<librados::TestClassHandler> s_class_handler;
  if (!s_class_handler) {
    s_class_handler.reset(new librados::TestClassHandler());
    s_class_handler->open_all_classes();
  }
  return s_class_handler.get();
}
} // namespace librados_test_stub
namespace {
// Copy a bufferlist into a malloc'd C buffer for the rados_* C API
// (caller frees); a zero-length input yields a NULL buffer.
void do_out_buffer(bufferlist& outbl, char **outbuf, size_t *outbuflen) {
  if (outbuf) {
    if (outbl.length() > 0) {
      *outbuf = (char *)malloc(outbl.length());
      memcpy(*outbuf, outbl.c_str(), outbl.length());
    } else {
      *outbuf = NULL;
    }
  }
  if (outbuflen) {
    *outbuflen = outbl.length();
  }
}
// std::string overload with identical semantics.
void do_out_buffer(string& outbl, char **outbuf, size_t *outbuflen) {
  if (outbuf) {
    if (outbl.length() > 0) {
      *outbuf = (char *)malloc(outbl.length());
      memcpy(*outbuf, outbl.c_str(), outbl.length());
    } else {
      *outbuf = NULL;
    }
  }
  if (outbuflen) {
    *outbuflen = outbl.length();
  }
}
// Build a fresh CephContext (env-config applied, log started) and ask the
// active test cluster for a rados client bound to it.
librados::TestRadosClient *create_rados_client() {
  CephInitParameters iparams(CEPH_ENTITY_TYPE_CLIENT);
  CephContext *cct = common_preinit(iparams, CODE_ENVIRONMENT_LIBRARY, 0);
  cct->_conf.parse_env(cct->get_module_type());
  cct->_conf.apply_changes(nullptr);
  cct->_log->start();
  auto rados_client =
    librados_test_stub::get_cluster()->create_rados_client(cct);
  cct->put();
  return rados_client;
}
} // anonymous namespace
// C API shims forwarding to the in-memory test implementation.
// Allocate a completion; only the complete callback is wired up here.
extern "C" int rados_aio_create_completion2(void *cb_arg,
                                            rados_callback_t cb_complete,
                                            rados_completion_t *pc)
{
  librados::AioCompletionImpl *c = new librados::AioCompletionImpl;
  if (cb_complete) {
    c->set_complete_callback(cb_arg, cb_complete);
  }
  *pc = c;
  return 0;
}
// Fetch the stored return value of a finished aio operation.
extern "C" int rados_aio_get_return_value(rados_completion_t c) {
  return reinterpret_cast<librados::AioCompletionImpl*>(c)->get_return_value();
}
// Expose the client's CephContext through the C API.
extern "C" rados_config_t rados_cct(rados_t cluster)
{
  librados::TestRadosClient *client =
    reinterpret_cast<librados::TestRadosClient*>(cluster);
  return reinterpret_cast<rados_config_t>(client->cct());
}
// Set a single config option on the client's context.
extern "C" int rados_conf_set(rados_t cluster, const char *option,
                              const char *value) {
  librados::TestRadosClient *impl =
    reinterpret_cast<librados::TestRadosClient*>(cluster);
  CephContext *cct = impl->cct();
  return cct->_conf.set_val(option, value);
}
// Re-read configuration from the environment (optionally a specific var)
// and apply the changes.
extern "C" int rados_conf_parse_env(rados_t cluster, const char *var) {
  librados::TestRadosClient *client =
    reinterpret_cast<librados::TestRadosClient*>(cluster);
  auto& conf = client->cct()->_conf;
  conf.parse_env(client->cct()->get_module_type(), var);
  conf.apply_changes(NULL);
  return 0;
}
// Load a config file; a missing file is deliberately treated as success.
extern "C" int rados_conf_read_file(rados_t cluster, const char *path) {
  librados::TestRadosClient *client =
    reinterpret_cast<librados::TestRadosClient*>(cluster);
  auto& conf = client->cct()->_conf;
  int ret = conf.parse_config_files(path, NULL, 0);
  if (ret == 0) {
    conf.parse_env(client->cct()->get_module_type());
    conf.apply_changes(NULL);
    conf.complain_about_parse_error(client->cct());
  } else if (ret == -ENOENT) {
    // ignore missing client config
    return 0;
  }
  return ret;
}
// Connect the (in-memory) client; no real networking occurs in the stub.
extern "C" int rados_connect(rados_t cluster) {
  librados::TestRadosClient *client =
    reinterpret_cast<librados::TestRadosClient*>(cluster);
  return client->connect();
}
// Create a client handle; the client id is ignored by the stub.
extern "C" int rados_create(rados_t *cluster, const char * const id) {
  *cluster = create_rados_client();
  return 0;
}
// Create a client handle bound to an existing CephContext.
extern "C" int rados_create_with_context(rados_t *cluster,
                                         rados_config_t cct_) {
  auto cct = reinterpret_cast<CephContext*>(cct_);
  *cluster = librados_test_stub::get_cluster()->create_rados_client(cct);
  return 0;
}
// CephContext accessor for an io context handle.
extern "C" rados_config_t rados_ioctx_cct(rados_ioctx_t ioctx)
{
  librados::TestIoCtxImpl *ctx =
    reinterpret_cast<librados::TestIoCtxImpl*>(ioctx);
  return reinterpret_cast<rados_config_t>(ctx->get_rados_client()->cct());
}
// Open an io context by pool name (resolved to an id first).
extern "C" int rados_ioctx_create(rados_t cluster, const char *pool_name,
                                  rados_ioctx_t *ioctx) {
  librados::TestRadosClient *client =
    reinterpret_cast<librados::TestRadosClient*>(cluster);
  int64_t pool_id = client->pool_lookup(pool_name);
  if (pool_id < 0) {
    return static_cast<int>(pool_id);
  }
  *ioctx = reinterpret_cast<rados_ioctx_t>(
    client->create_ioctx(pool_id, pool_name));
  return 0;
}
// Open an io context by pool id: scan the pool list for a matching id.
extern "C" int rados_ioctx_create2(rados_t cluster, int64_t pool_id,
                                   rados_ioctx_t *ioctx)
{
  librados::TestRadosClient *client =
    reinterpret_cast<librados::TestRadosClient*>(cluster);
  std::list<std::pair<int64_t, std::string> > pools;
  int r = client->pool_list(pools);
  if (r < 0) {
    return r;
  }
  for (std::list<std::pair<int64_t, std::string> >::iterator it =
         pools.begin(); it != pools.end(); ++it) {
    if (it->first == pool_id) {
      *ioctx = reinterpret_cast<rados_ioctx_t>(
        client->create_ioctx(pool_id, it->second));
      return 0;
    }
  }
  return -ENOENT;
}
// Drop the caller's reference on the io context.
extern "C" void rados_ioctx_destroy(rados_ioctx_t io) {
  librados::TestIoCtxImpl *ctx =
    reinterpret_cast<librados::TestIoCtxImpl*>(io);
  ctx->put();
}
// Return the owning cluster handle of an io context.
extern "C" rados_t rados_ioctx_get_cluster(rados_ioctx_t io) {
  librados::TestIoCtxImpl *ctx =
    reinterpret_cast<librados::TestIoCtxImpl*>(io);
  return reinterpret_cast<rados_t>(ctx->get_rados_client());
}
// Forward a monitor command; marshals in/out buffers to C conventions.
extern "C" int rados_mon_command(rados_t cluster, const char **cmd,
                                 size_t cmdlen, const char *inbuf,
                                 size_t inbuflen, char **outbuf,
                                 size_t *outbuflen, char **outs,
                                 size_t *outslen) {
  librados::TestRadosClient *client =
    reinterpret_cast<librados::TestRadosClient*>(cluster);
  vector<string> cmdvec;
  for (size_t i = 0; i < cmdlen; i++) {
    cmdvec.push_back(cmd[i]);
  }
  bufferlist inbl;
  inbl.append(inbuf, inbuflen);
  bufferlist outbl;
  string outstring;
  int ret = client->mon_command(cmdvec, inbl, &outbl, &outstring);
  do_out_buffer(outbl, outbuf, outbuflen);
  do_out_buffer(outstring, outs, outslen);
  return ret;
}
// Snapshot the pool's object list into a heap list used as the cursor.
// A dummy front element is pushed so the first _next() call pops it and
// lands on the first real object.
extern "C" int rados_nobjects_list_open(rados_ioctx_t io,
                                        rados_list_ctx_t *ctx) {
  librados::TestIoCtxImpl *io_ctx =
    reinterpret_cast<librados::TestIoCtxImpl*>(io);
  librados::TestRadosClient *client = io_ctx->get_rados_client();
  std::list<librados::TestRadosClient::Object> *list =
    new std::list<librados::TestRadosClient::Object>();
  client->object_list(io_ctx->get_id(), list);
  list->push_front(librados::TestRadosClient::Object());
  *ctx = reinterpret_cast<rados_list_ctx_t>(list);
  return 0;
}
// Advance the cursor; returned strings point into the current front element
// and stay valid until the next advance or close.
extern "C" int rados_nobjects_list_next(rados_list_ctx_t ctx,
                                        const char **entry,
                                        const char **key,
                                        const char **nspace) {
  std::list<librados::TestRadosClient::Object> *list =
    reinterpret_cast<std::list<librados::TestRadosClient::Object> *>(ctx);
  if (!list->empty()) {
    list->pop_front();
  }
  if (list->empty()) {
    return -ENOENT;
  }
  librados::TestRadosClient::Object &obj = list->front();
  if (entry != NULL) {
    *entry = obj.oid.c_str();
  }
  if (key != NULL) {
    *key = obj.locator.c_str();
  }
  if (nspace != NULL) {
    *nspace = obj.nspace.c_str();
  }
  return 0;
}
// Free the cursor allocated by rados_nobjects_list_open.
extern "C" void rados_nobjects_list_close(rados_list_ctx_t ctx) {
  std::list<librados::TestRadosClient::Object> *list =
    reinterpret_cast<std::list<librados::TestRadosClient::Object> *>(ctx);
  delete list;
}
// Pool lifecycle and client shutdown: thin forwards to the test client.
extern "C" int rados_pool_create(rados_t cluster, const char *pool_name) {
  librados::TestRadosClient *client =
    reinterpret_cast<librados::TestRadosClient*>(cluster);
  return client->pool_create(pool_name);
}
extern "C" int rados_pool_delete(rados_t cluster, const char *pool_name) {
  librados::TestRadosClient *client =
    reinterpret_cast<librados::TestRadosClient*>(cluster);
  return client->pool_delete(pool_name);
}
// Drop the caller's reference; the client frees itself when unreferenced.
extern "C" void rados_shutdown(rados_t cluster) {
  librados::TestRadosClient *client =
    reinterpret_cast<librados::TestRadosClient*>(cluster);
  client->put();
}
extern "C" int rados_wait_for_latest_osdmap(rados_t cluster) {
  librados::TestRadosClient *client =
    reinterpret_cast<librados::TestRadosClient*>(cluster);
  return client->wait_for_latest_osdmap();
}
using namespace std::placeholders;
namespace librados {
// Drop this wrapper's reference on the shared completion impl.
AioCompletion::~AioCompletion()
{
  auto c = reinterpret_cast<AioCompletionImpl *>(pc);
  c->release();
}
// Completions are heap allocated; release() deletes the wrapper, which in
// turn releases the impl reference in the destructor above.
void AioCompletion::release() {
  delete this;
}
IoCtx::IoCtx() : io_ctx_impl(NULL) {
}
IoCtx::~IoCtx() {
  close();
}
// Copy shares the underlying TestIoCtxImpl via its intrusive refcount.
IoCtx::IoCtx(const IoCtx& rhs) {
  io_ctx_impl = rhs.io_ctx_impl;
  if (io_ctx_impl) {
    TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
    ctx->get();
  }
}
// Move steals rhs's impl pointer, leaving rhs closed (no refcount traffic).
IoCtx::IoCtx(IoCtx&& rhs) noexcept : io_ctx_impl(std::exchange(rhs.io_ctx_impl, nullptr))
{
}
// Copy-assignment: adopt rhs's impl, adjusting the TestIoCtxImpl refcount.
// The self-assignment guard is required: without it, dropping our own
// reference first could destroy the impl (refcount 1) before we re-take it.
IoCtx& IoCtx::operator=(const IoCtx& rhs) {
  if (this == &rhs) {
    return *this;
  }
  if (io_ctx_impl) {
    TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
    ctx->put();
  }
  io_ctx_impl = rhs.io_ctx_impl;
  if (io_ctx_impl) {
    TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
    ctx->get();
  }
  return *this;
}
// Move-assignment: release our impl reference and steal rhs's pointer.
// Guard against self-move: otherwise put() would drop the reference and
// the subsequent exchange would leave us holding a dangling pointer.
librados::IoCtx& librados::IoCtx::operator=(IoCtx&& rhs) noexcept
{
  if (this != &rhs) {
    if (io_ctx_impl) {
      TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
      ctx->put();
    }
    io_ctx_impl = std::exchange(rhs.io_ctx_impl, nullptr);
  }
  return *this;
}
// Flush all pending aio ops (synchronous wait in the stub).
int IoCtx::aio_flush() {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  ctx->aio_flush();
  return 0;
}
// Flush asynchronously, completing 'c' when done.
int IoCtx::aio_flush_async(AioCompletion *c) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  ctx->aio_flush_async(c->pc);
  return 0;
}
// Asynchronous notify; reply payload (if requested) lands in *pbl.
int IoCtx::aio_notify(const std::string& oid, AioCompletion *c, bufferlist& bl,
                      uint64_t timeout_ms, bufferlist *pbl) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  ctx->aio_notify(oid, c->pc, bl, timeout_ms, pbl);
  return 0;
}
// aio_operate overload family; blkin trace arguments are ignored by the stub.
int IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
                       ObjectReadOperation *op, bufferlist *pbl) {
  return aio_operate(oid, c, op, 0, pbl);
}
// Read op: executed against the io context's current snap read context.
int IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
                       ObjectReadOperation *op, int flags,
                       bufferlist *pbl) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  TestObjectOperationImpl *ops = reinterpret_cast<TestObjectOperationImpl*>(op->impl);
  return ctx->aio_operate_read(oid, *ops, c->pc, flags, pbl,
                               ctx->get_snap_read(), nullptr);
}
int IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
                       ObjectReadOperation *op, int flags,
                       bufferlist *pbl, const blkin_trace_info *trace_info) {
  return aio_operate(oid, c, op, flags, pbl);
}
// Write op without an explicit snap context.
int IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
                       ObjectWriteOperation *op) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  TestObjectOperationImpl *ops = reinterpret_cast<TestObjectOperationImpl*>(op->impl);
  return ctx->aio_operate(oid, *ops, c->pc, nullptr, nullptr, 0);
}
// Write op with a caller-supplied snap context (seq + snap ids).
int IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
                       ObjectWriteOperation *op, snap_t seq,
                       std::vector<snap_t>& snaps, int flags,
                       const blkin_trace_info *trace_info) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  TestObjectOperationImpl *ops = reinterpret_cast<TestObjectOperationImpl*>(op->impl);
  std::vector<snapid_t> snv;
  snv.resize(snaps.size());
  for (size_t i = 0; i < snaps.size(); ++i)
    snv[i] = snaps[i];
  SnapContext snapc(seq, snv);
  return ctx->aio_operate(oid, *ops, c->pc, &snapc, nullptr, flags);
}
int IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
                       ObjectWriteOperation *op, snap_t seq,
                       std::vector<snap_t>& snaps) {
  return aio_operate(oid, c, op, seq, snaps, 0, nullptr);
}
int IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
                       ObjectWriteOperation *op, snap_t seq,
                       std::vector<snap_t>& snaps,
                       const blkin_trace_info *trace_info) {
  return aio_operate(oid, c, op, seq, snaps, 0, trace_info);
}
// Asynchronous object removal (flagless and flagged variants).
int IoCtx::aio_remove(const std::string& oid, AioCompletion *c) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->aio_remove(oid, c->pc);
}
int IoCtx::aio_remove(const std::string& oid, AioCompletion *c, int flags) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->aio_remove(oid, c->pc, flags);
}
// Asynchronous watch registration/removal (v2 watch contexts only).
int IoCtx::aio_watch(const std::string& o, AioCompletion *c, uint64_t *handle,
                     librados::WatchCtx2 *watch_ctx) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->aio_watch(o, c->pc, handle, watch_ctx);
}
int IoCtx::aio_unwatch(uint64_t handle, AioCompletion *c) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->aio_unwatch(handle, c->pc);
}
// Expose the owning client's CephContext.
config_t IoCtx::cct() {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return reinterpret_cast<config_t>(ctx->get_rados_client()->cct());
}
// Release the impl reference and mark this handle closed; safe to call twice.
void IoCtx::close() {
  if (io_ctx_impl) {
    TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
    ctx->put();
  }
  io_ctx_impl = NULL;
}
// Create an object (optionally failing if it already exists).
int IoCtx::create(const std::string& oid, bool exclusive) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->execute_operation(
    oid, std::bind(&TestIoCtxImpl::create, _1, _2, exclusive,
                   ctx->get_snap_context()));
}
// Replace this handle with a fresh clone of rhs's impl.
void IoCtx::dup(const IoCtx& rhs) {
  close();
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(rhs.io_ctx_impl);
  io_ctx_impl = reinterpret_cast<IoCtxImpl*>(ctx->clone());
}
// Invoke an object-class method via the shared class handler.
int IoCtx::exec(const std::string& oid, const char *cls, const char *method,
                bufferlist& inbl, bufferlist& outbl) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->execute_operation(
    oid, std::bind(&TestIoCtxImpl::exec, _1, _2,
                   librados_test_stub::get_class_handler(), cls,
                   method, inbl, &outbl, ctx->get_snap_read(),
                   ctx->get_snap_context()));
}
// Wrap a raw C io context handle, taking an additional reference.
void IoCtx::from_rados_ioctx_t(rados_ioctx_t p, IoCtx &io) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(p);
  ctx->get();
  io.close();
  io.io_ctx_impl = reinterpret_cast<IoCtxImpl*>(ctx);
}
// Simple accessors forwarded to the impl.
uint64_t IoCtx::get_instance_id() const {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->get_instance_id();
}
int64_t IoCtx::get_id() {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->get_id();
}
uint64_t IoCtx::get_last_version() {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->get_last_version();
}
std::string IoCtx::get_pool_name() {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->get_pool_name();
}
// Enumerate an object's snapshots.
int IoCtx::list_snaps(const std::string& o, snap_set_t *out_snaps) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->execute_operation(
    o, std::bind(&TestIoCtxImpl::list_snaps, _1, _2, out_snaps));
}
// Enumerate an object's watchers.
int IoCtx::list_watchers(const std::string& o,
                         std::list<obj_watch_t> *out_watchers) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->execute_operation(
    o, std::bind(&TestIoCtxImpl::list_watchers, _1, _2, out_watchers));
}
// Legacy notify; the 'ver' argument is ignored (no timeout, no reply).
int IoCtx::notify(const std::string& o, uint64_t ver, bufferlist& bl) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->notify(o, bl, 0, NULL);
}
// v2 notify with timeout and optional encoded reply payload.
int IoCtx::notify2(const std::string& o, bufferlist& bl,
                   uint64_t timeout_ms, bufferlist *pbl) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->notify(o, bl, timeout_ms, pbl);
}
// Acknowledge a notification received on a watch.
void IoCtx::notify_ack(const std::string& o, uint64_t notify_id,
                       uint64_t handle, bufferlist& bl) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  ctx->notify_ack(o, notify_id, handle, bl);
}
// Fetch up to max_return omap values after 'start_after' (no key filter).
int IoCtx::omap_get_vals(const std::string& oid,
                         const std::string& start_after,
                         uint64_t max_return,
                         std::map<std::string, bufferlist> *out_vals) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->execute_operation(
    oid, std::bind(&TestIoCtxImpl::omap_get_vals, _1, _2, start_after, "",
                   max_return, out_vals));
}
// Execute a compound write operation synchronously.
int IoCtx::operate(const std::string& oid, ObjectWriteOperation *op) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  TestObjectOperationImpl *ops = reinterpret_cast<TestObjectOperationImpl*>(op->impl);
  return ctx->operate(oid, *ops);
}
// Execute a compound read operation synchronously.
int IoCtx::operate(const std::string& oid, ObjectReadOperation *op,
                   bufferlist *pbl) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  TestObjectOperationImpl *ops = reinterpret_cast<TestObjectOperationImpl*>(op->impl);
  return ctx->operate_read(oid, *ops, pbl);
}
// Plain extent read at the current snap read context.
int IoCtx::read(const std::string& oid, bufferlist& bl, size_t len,
                uint64_t off) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->execute_operation(
    oid, std::bind(&TestIoCtxImpl::read, _1, _2, len, off, &bl,
                   ctx->get_snap_read(), nullptr));
}
// Delete an object under the current snap context.
int IoCtx::remove(const std::string& oid) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->execute_operation(
    oid, std::bind(&TestIoCtxImpl::remove, _1, _2, ctx->get_snap_context()));
}
// Self-managed snapshot lifecycle, forwarded to the impl (sync and async).
int IoCtx::selfmanaged_snap_create(uint64_t *snapid) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->selfmanaged_snap_create(snapid);
}
void IoCtx::aio_selfmanaged_snap_create(uint64_t *snapid, AioCompletion* c) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->aio_selfmanaged_snap_create(snapid, c->pc);
}
int IoCtx::selfmanaged_snap_remove(uint64_t snapid) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->selfmanaged_snap_remove(snapid);
}
void IoCtx::aio_selfmanaged_snap_remove(uint64_t snapid, AioCompletion* c) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  ctx->aio_selfmanaged_snap_remove(snapid, c->pc);
}
// Roll a single object back to the given self-managed snapshot.
int IoCtx::selfmanaged_snap_rollback(const std::string& oid,
                                     uint64_t snapid) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->selfmanaged_snap_rollback(oid, snapid);
}
// Install the snap context used for subsequent writes.
int IoCtx::selfmanaged_snap_set_write_ctx(snap_t seq,
                                          std::vector<snap_t>& snaps) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->selfmanaged_snap_set_write_ctx(seq, snaps);
}
// Select the snapshot that subsequent reads observe.
void IoCtx::snap_set_read(snap_t seq) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  ctx->set_snap_read(seq);
}
// Sparse read: extent map of present data plus the data itself.
int IoCtx::sparse_read(const std::string& oid, std::map<uint64_t,uint64_t>& m,
                       bufferlist& bl, size_t len, uint64_t off) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->execute_operation(
    oid, std::bind(&TestIoCtxImpl::sparse_read, _1, _2, off, len, &m, &bl,
                   ctx->get_snap_read()));
}
// Object size and mtime.
int IoCtx::stat(const std::string& oid, uint64_t *psize, time_t *pmtime) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->execute_operation(
    oid, std::bind(&TestIoCtxImpl::stat, _1, _2, psize, pmtime));
}
// Apply a legacy tmap update command buffer.
int IoCtx::tmap_update(const std::string& oid, bufferlist& cmdbl) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->execute_operation(
    oid, std::bind(&TestIoCtxImpl::tmap_update, _1, _2, cmdbl));
}
// Truncate/extend the object to 'off' bytes.
int IoCtx::trunc(const std::string& oid, uint64_t off) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->execute_operation(
    oid, std::bind(&TestIoCtxImpl::truncate, _1, _2, off,
                   ctx->get_snap_context()));
}
// Watch registration/removal; the v1 unwatch ignores the object name.
int IoCtx::unwatch2(uint64_t handle) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->unwatch(handle);
}
int IoCtx::unwatch(const std::string& o, uint64_t handle) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->unwatch(handle);
}
// v1 watch ('ver' unused); the impl receives only the v1 context.
int IoCtx::watch(const std::string& o, uint64_t ver, uint64_t *handle,
                 librados::WatchCtx *wctx) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->watch(o, handle, wctx, NULL);
}
// v2 watch; the impl receives only the v2 context.
int IoCtx::watch2(const std::string& o, uint64_t *handle,
                  librados::WatchCtx2 *wctx) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->watch(o, handle, NULL, wctx);
}
// Extent write under the current snap context.
int IoCtx::write(const std::string& oid, bufferlist& bl, size_t len,
                 uint64_t off) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->execute_operation(
    oid, std::bind(&TestIoCtxImpl::write, _1, _2, bl, len, off,
                   ctx->get_snap_context()));
}
// Replace the whole object's contents.
int IoCtx::write_full(const std::string& oid, bufferlist& bl) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->execute_operation(
    oid, std::bind(&TestIoCtxImpl::write_full, _1, _2, bl,
                   ctx->get_snap_context()));
}
// Repeat 'bl' across [off, off+len).
int IoCtx::writesame(const std::string& oid, bufferlist& bl, size_t len,
                     uint64_t off) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->execute_operation(
    oid, std::bind(&TestIoCtxImpl::writesame, _1, _2, bl, len, off,
                   ctx->get_snap_context()));
}
// Compare on-disk extent at 'off' with cmp_bl.
int IoCtx::cmpext(const std::string& oid, uint64_t off, bufferlist& cmp_bl) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->execute_operation(
    oid, std::bind(&TestIoCtxImpl::cmpext, _1, _2, off, cmp_bl,
                   ctx->get_snap_read()));
}
// Pool application API: enable is accepted as a no-op; everything else is
// unimplemented in the stub and reports -EOPNOTSUPP.
int IoCtx::application_enable(const std::string& app_name, bool force) {
  return 0;
}
int IoCtx::application_enable_async(const std::string& app_name,
                                    bool force, PoolAsyncCompletion *c) {
  return -EOPNOTSUPP;
}
int IoCtx::application_list(std::set<std::string> *app_names) {
  return -EOPNOTSUPP;
}
int IoCtx::application_metadata_get(const std::string& app_name,
                                    const std::string &key,
                                    std::string *value) {
  return -EOPNOTSUPP;
}
int IoCtx::application_metadata_set(const std::string& app_name,
                                    const std::string &key,
                                    const std::string& value) {
  return -EOPNOTSUPP;
}
int IoCtx::application_metadata_remove(const std::string& app_name,
                                       const std::string &key) {
  return -EOPNOTSUPP;
}
int IoCtx::application_metadata_list(const std::string& app_name,
                                     std::map<std::string, std::string> *values) {
  return -EOPNOTSUPP;
}
// Namespace selection for subsequent operations.
void IoCtx::set_namespace(const std::string& nspace) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  ctx->set_namespace(nspace);
}
std::string IoCtx::get_namespace() const {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(io_ctx_impl);
  return ctx->get_namespace();
}
// Pool-full-try is not modeled by the stub: setting is a no-op and the
// getter always reports false.
void IoCtx::set_pool_full_try() {
}
bool IoCtx::get_pool_full_try() {
  return false;
}
// Record an operation's result through the optional out-param and return it
// unchanged, so it can be chained inside std::bind-composed operations.
static int save_operation_result(int result, int *pval) {
  if (pval == NULL) {
    return result;
  }
  *pval = result;
  return result;
}
// Allocate the refcounted op list backing this compound operation.
ObjectOperation::ObjectOperation() {
  TestObjectOperationImpl *o = new TestObjectOperationImpl();
  o->get();
  impl = reinterpret_cast<ObjectOperationImpl*>(o);
}
// Drop the constructor's reference; the impl may outlive us if an in-flight
// aio operation still holds its own reference.
ObjectOperation::~ObjectOperation() {
  TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
  if (o) {
    o->put();
    o = NULL;
  }
}
void ObjectOperation::assert_exists() {
TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
o->ops.push_back(std::bind(&TestIoCtxImpl::assert_exists, _1, _2, _4));
}
void ObjectOperation::assert_version(uint64_t ver) {
TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
o->ops.push_back(std::bind(&TestIoCtxImpl::assert_version, _1, _2, ver));
}
void ObjectOperation::exec(const char *cls, const char *method,
bufferlist& inbl) {
TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
o->ops.push_back(std::bind(&TestIoCtxImpl::exec, _1, _2,
librados_test_stub::get_class_handler(), cls,
method, inbl, _3, _4, _5));
}
void ObjectOperation::set_op_flags2(int flags) {
}
size_t ObjectOperation::size() {
TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
return o->ops.size();
}
void ObjectOperation::cmpext(uint64_t off, const bufferlist& cmp_bl,
int *prval) {
TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
ObjectOperationTestImpl op = std::bind(&TestIoCtxImpl::cmpext, _1, _2, off,
cmp_bl, _4);
if (prval != NULL) {
op = std::bind(save_operation_result,
std::bind(op, _1, _2, _3, _4, _5, _6), prval);
}
o->ops.push_back(op);
}
void ObjectReadOperation::list_snaps(snap_set_t *out_snaps, int *prval) {
TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
ObjectOperationTestImpl op = std::bind(&TestIoCtxImpl::list_snaps, _1, _2,
out_snaps);
if (prval != NULL) {
op = std::bind(save_operation_result,
std::bind(op, _1, _2, _3, _4, _5, _6), prval);
}
o->ops.push_back(op);
}
void ObjectReadOperation::list_watchers(std::list<obj_watch_t> *out_watchers,
int *prval) {
TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
ObjectOperationTestImpl op = std::bind(&TestIoCtxImpl::list_watchers, _1,
_2, out_watchers);
if (prval != NULL) {
op = std::bind(save_operation_result,
std::bind(op, _1, _2, _3, _4, _5, _6), prval);
}
o->ops.push_back(op);
}
// Queue a read of @len bytes at @off.  When @pbl is supplied the data is
// written there; otherwise the destination bufferlist is left as bind
// placeholder _3, to be filled in when the op is ultimately executed.
// When @prval is supplied, the op is wrapped so its return code is also
// copied into *prval on completion.
void ObjectReadOperation::read(size_t off, uint64_t len, bufferlist *pbl,
                               int *prval) {
  TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);

  ObjectOperationTestImpl op;
  if (pbl != NULL) {
    op = std::bind(&TestIoCtxImpl::read, _1, _2, len, off, pbl, _4, nullptr);
  } else {
    op = std::bind(&TestIoCtxImpl::read, _1, _2, len, off, _3, _4, nullptr);
  }

  if (prval != NULL) {
    // Capture the result into *prval while passing it through unchanged.
    op = std::bind(save_operation_result,
                   std::bind(op, _1, _2, _3, _4, _5, _6), prval);
  }
  o->ops.push_back(op);
}
// Queue a sparse read of @len bytes at @off: extents land in @m and data
// in @pbl (or placeholder _3 when @pbl is null, filled at execution time).
// NOTE: @truncate_size and @truncate_seq are accepted for API parity but
// are not forwarded to the stub implementation.
void ObjectReadOperation::sparse_read(uint64_t off, uint64_t len,
                                      std::map<uint64_t,uint64_t> *m,
                                      bufferlist *pbl, int *prval,
                                      uint64_t truncate_size,
                                      uint32_t truncate_seq) {
  TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);

  ObjectOperationTestImpl op;
  if (pbl != NULL) {
    op = std::bind(&TestIoCtxImpl::sparse_read, _1, _2, off, len, m, pbl, _4);
  } else {
    op = std::bind(&TestIoCtxImpl::sparse_read, _1, _2, off, len, m, _3, _4);
  }

  if (prval != NULL) {
    // Capture the result into *prval while passing it through unchanged.
    op = std::bind(save_operation_result,
                   std::bind(op, _1, _2, _3, _4, _5, _6), prval);
  }
  o->ops.push_back(op);
}
void ObjectReadOperation::stat(uint64_t *psize, time_t *pmtime, int *prval) {
TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
ObjectOperationTestImpl op = std::bind(&TestIoCtxImpl::stat, _1, _2,
psize, pmtime);
if (prval != NULL) {
op = std::bind(save_operation_result,
std::bind(op, _1, _2, _3, _4, _5, _6), prval);
}
o->ops.push_back(op);
}
void ObjectWriteOperation::append(const bufferlist &bl) {
TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
o->ops.push_back(std::bind(&TestIoCtxImpl::append, _1, _2, bl, _5));
}
void ObjectWriteOperation::create(bool exclusive) {
TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
o->ops.push_back(std::bind(&TestIoCtxImpl::create, _1, _2, exclusive, _5));
}
void ObjectWriteOperation::omap_set(const std::map<std::string, bufferlist> &map) {
TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
o->ops.push_back(std::bind(&TestIoCtxImpl::omap_set, _1, _2, boost::ref(map)));
}
void ObjectWriteOperation::remove() {
TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
o->ops.push_back(std::bind(&TestIoCtxImpl::remove, _1, _2, _5));
}
void ObjectWriteOperation::selfmanaged_snap_rollback(uint64_t snapid) {
TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
o->ops.push_back(std::bind(&TestIoCtxImpl::selfmanaged_snap_rollback,
_1, _2, snapid));
}
void ObjectWriteOperation::set_alloc_hint(uint64_t expected_object_size,
uint64_t expected_write_size) {
TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
o->ops.push_back(std::bind(&TestIoCtxImpl::set_alloc_hint, _1, _2,
expected_object_size, expected_write_size, 0,
_5));
}
void ObjectWriteOperation::set_alloc_hint2(uint64_t expected_object_size,
uint64_t expected_write_size,
uint32_t flags) {
TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
o->ops.push_back(std::bind(&TestIoCtxImpl::set_alloc_hint, _1, _2,
expected_object_size, expected_write_size, flags,
_5));
}
void ObjectWriteOperation::tmap_update(const bufferlist& cmdbl) {
TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
o->ops.push_back(std::bind(&TestIoCtxImpl::tmap_update, _1, _2,
cmdbl));
}
void ObjectWriteOperation::truncate(uint64_t off) {
TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
o->ops.push_back(std::bind(&TestIoCtxImpl::truncate, _1, _2, off, _5));
}
void ObjectWriteOperation::write(uint64_t off, const bufferlist& bl) {
TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
o->ops.push_back(std::bind(&TestIoCtxImpl::write, _1, _2, bl, bl.length(),
off, _5));
}
void ObjectWriteOperation::write_full(const bufferlist& bl) {
TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
o->ops.push_back(std::bind(&TestIoCtxImpl::write_full, _1, _2, bl, _5));
}
void ObjectWriteOperation::writesame(uint64_t off, uint64_t len,
const bufferlist& bl) {
TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
o->ops.push_back(std::bind(&TestIoCtxImpl::writesame, _1, _2, bl, len,
off, _5));
}
void ObjectWriteOperation::zero(uint64_t off, uint64_t len) {
TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
o->ops.push_back(std::bind(&TestIoCtxImpl::zero, _1, _2, off, len, _5));
}
Rados::Rados() : client(NULL) {
}
// Construct a Rados handle that shares the client already backing
// @ioctx.  A reference is taken on the underlying TestRadosClient, so
// both objects must be shut down/closed independently.
Rados::Rados(IoCtx& ioctx) {
  TestIoCtxImpl *ctx = reinterpret_cast<TestIoCtxImpl*>(ioctx.io_ctx_impl);
  TestRadosClient *impl = ctx->get_rados_client();
  impl->get();
  client = reinterpret_cast<RadosClient*>(impl);
  ceph_assert(client != NULL);
}
Rados::~Rados() {
shutdown();
}
// Re-point the C++ wrapper @rados at the client behind the C-API handle
// @p: any client the wrapper previously held is released first, then a
// new reference is taken on @p (which may be null, leaving @rados empty).
void Rados::from_rados_t(rados_t p, Rados &rados) {
  if (rados.client != nullptr) {
    reinterpret_cast<TestRadosClient*>(rados.client)->put();
    rados.client = nullptr;
  }

  auto impl = reinterpret_cast<TestRadosClient*>(p);
  if (impl) {
    impl->get();
    rados.client = reinterpret_cast<RadosClient*>(impl);
  }
}
AioCompletion *Rados::aio_create_completion(void *cb_arg,
callback_t cb_complete) {
AioCompletionImpl *c;
int r = rados_aio_create_completion2(cb_arg, cb_complete,
reinterpret_cast<void**>(&c));
ceph_assert(r == 0);
return new AioCompletion(c);
}
int Rados::aio_watch_flush(AioCompletion* c) {
TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
return impl->aio_watch_flush(c->pc);
}
int Rados::blocklist_add(const std::string& client_address,
uint32_t expire_seconds) {
TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
return impl->blocklist_add(client_address, expire_seconds);
}
config_t Rados::cct() {
TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
return reinterpret_cast<config_t>(impl->cct());
}
int Rados::cluster_fsid(std::string* fsid) {
*fsid = "00000000-1111-2222-3333-444444444444";
return 0;
}
int Rados::conf_set(const char *option, const char *value) {
return rados_conf_set(reinterpret_cast<rados_t>(client), option, value);
}
// Fetch a configuration option as a string.  get_val() heap-allocates the
// C string; it is copied into @val and freed on both the success and
// error paths (free(NULL) is safe if nothing was allocated).
int Rados::conf_get(const char *option, std::string &val) {
  TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
  CephContext *cct = impl->cct();

  char *str = NULL;
  int ret = cct->_conf.get_val(option, &str, -1);
  if (ret != 0) {
    free(str);
    return ret;
  }

  val = str;
  free(str);
  return 0;
}
int Rados::conf_parse_env(const char *env) const {
return rados_conf_parse_env(reinterpret_cast<rados_t>(client), env);
}
int Rados::conf_read_file(const char * const path) const {
return rados_conf_read_file(reinterpret_cast<rados_t>(client), path);
}
int Rados::connect() {
return rados_connect(reinterpret_cast<rados_t>(client));
}
uint64_t Rados::get_instance_id() {
TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
return impl->get_instance_id();
}
int Rados::get_min_compatible_osd(int8_t* require_osd_release) {
TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
return impl->get_min_compatible_osd(require_osd_release);
}
int Rados::get_min_compatible_client(int8_t* min_compat_client,
int8_t* require_min_compat_client) {
TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
return impl->get_min_compatible_client(min_compat_client,
require_min_compat_client);
}
int Rados::init(const char * const id) {
return rados_create(reinterpret_cast<rados_t *>(&client), id);
}
int Rados::init_with_context(config_t cct_) {
return rados_create_with_context(reinterpret_cast<rados_t *>(&client), cct_);
}
int Rados::ioctx_create(const char *name, IoCtx &io) {
rados_ioctx_t p;
int ret = rados_ioctx_create(reinterpret_cast<rados_t>(client), name, &p);
if (ret) {
return ret;
}
io.close();
io.io_ctx_impl = reinterpret_cast<IoCtxImpl*>(p);
return 0;
}
int Rados::ioctx_create2(int64_t pool_id, IoCtx &io)
{
rados_ioctx_t p;
int ret = rados_ioctx_create2(reinterpret_cast<rados_t>(client), pool_id, &p);
if (ret) {
return ret;
}
io.close();
io.io_ctx_impl = reinterpret_cast<IoCtxImpl*>(p);
return 0;
}
int Rados::mon_command(std::string cmd, const bufferlist& inbl,
bufferlist *outbl, std::string *outs) {
TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
std::vector<std::string> cmds;
cmds.push_back(cmd);
return impl->mon_command(cmds, inbl, outbl, outs);
}
int Rados::service_daemon_register(const std::string& service,
const std::string& name,
const std::map<std::string,std::string>& metadata) {
TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
return impl->service_daemon_register(service, name, metadata);
}
int Rados::service_daemon_update_status(std::map<std::string,std::string>&& status) {
TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
return impl->service_daemon_update_status(std::move(status));
}
int Rados::pool_create(const char *name) {
TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
return impl->pool_create(name);
}
int Rados::pool_delete(const char *name) {
TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
return impl->pool_delete(name);
}
int Rados::pool_get_base_tier(int64_t pool, int64_t* base_tier) {
TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
return impl->pool_get_base_tier(pool, base_tier);
}
// List pool names only: fetch (id, name) pairs from the test backend and
// copy just the names into @v.  @v is cleared first; on error it is left
// untouched and the backend's error code is returned.
int Rados::pool_list(std::list<std::string>& v) {
  TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);

  std::list<std::pair<int64_t, std::string> > pools;
  int r = impl->pool_list(pools);
  if (r < 0) {
    return r;
  }

  v.clear();
  for (const auto& pool : pools) {
    v.push_back(pool.second);
  }
  return 0;
}
int Rados::pool_list2(std::list<std::pair<int64_t, std::string> >& v)
{
TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
return impl->pool_list(v);
}
int64_t Rados::pool_lookup(const char *name) {
TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
return impl->pool_lookup(name);
}
int Rados::pool_reverse_lookup(int64_t id, std::string *name) {
TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
return impl->pool_reverse_lookup(id, name);
}
// Drop this handle's reference on the test client.  Idempotent: a second
// call (e.g. explicit shutdown() followed by the destructor) is a no-op
// because client is reset to NULL.
void Rados::shutdown() {
  if (client == NULL) {
    return;
  }
  TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
  impl->put();
  client = NULL;
}
void Rados::test_blocklist_self(bool set) {
}
int Rados::wait_for_latest_osdmap() {
TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
return impl->wait_for_latest_osdmap();
}
int Rados::watch_flush() {
TestRadosClient *impl = reinterpret_cast<TestRadosClient*>(client);
return impl->watch_flush();
}
WatchCtx::~WatchCtx() {
}
WatchCtx2::~WatchCtx2() {
}
} // namespace librados
int cls_cxx_create(cls_method_context_t hctx, bool exclusive) {
librados::TestClassHandler::MethodContext *ctx =
reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
return ctx->io_ctx_impl->create(ctx->oid, exclusive, ctx->snapc);
}
int cls_cxx_remove(cls_method_context_t hctx) {
librados::TestClassHandler::MethodContext *ctx =
reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
return ctx->io_ctx_impl->remove(ctx->oid, ctx->io_ctx_impl->get_snap_context());
}
// Fabricate a request origin for class-method tests: a loopback address
// (127.0.0.1, port 0) tagged with the rados client's nonce, paired with a
// CLIENT entity name built from the client's instance id.
int cls_get_request_origin(cls_method_context_t hctx, entity_inst_t *origin) {
  librados::TestClassHandler::MethodContext *ctx =
    reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
  librados::TestRadosClient *rados_client =
    ctx->io_ctx_impl->get_rados_client();

  struct sockaddr_in sin;
  memset(&sin, 0, sizeof(sin));
  sin.sin_family = AF_INET;
  sin.sin_port = 0;
  inet_pton(AF_INET, "127.0.0.1", &sin.sin_addr);

  entity_addr_t entity_addr(entity_addr_t::TYPE_DEFAULT,
                            rados_client->get_nonce());
  entity_addr.in4_addr() = sin;

  *origin = entity_inst_t(
    entity_name_t::CLIENT(rados_client->get_instance_id()),
    entity_addr);
  return 0;
}
// Read a single xattr by @name.  The stub keeps it simple by fetching the
// object's entire xattr map and selecting one entry.  Returns -ENODATA
// when the attribute does not exist, or the fetch error.
int cls_cxx_getxattr(cls_method_context_t hctx, const char *name,
                     bufferlist *outbl) {
  std::map<string, bufferlist> attrs;
  int r = cls_cxx_getxattrs(hctx, &attrs);
  if (r < 0) {
    return r;
  }

  std::map<string, bufferlist>::iterator it = attrs.find(name);
  if (it == attrs.end()) {
    return -ENODATA;
  }
  *outbl = it->second;
  return 0;
}
int cls_cxx_getxattrs(cls_method_context_t hctx, std::map<string, bufferlist> *attrset) {
librados::TestClassHandler::MethodContext *ctx =
reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
return ctx->io_ctx_impl->xattr_get(ctx->oid, attrset);
}
// List up to @max_to_get omap keys starting after @start_obj.  The stub
// fetches key/value pairs and discards the values.  *more is set by the
// underlying omap_get_vals2() call.  Returns the number of keys fetched
// (not 0) on success, matching the cls API contract.
int cls_cxx_map_get_keys(cls_method_context_t hctx, const string &start_obj,
                         uint64_t max_to_get, std::set<string> *keys, bool *more) {
  librados::TestClassHandler::MethodContext *ctx =
    reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);

  keys->clear();
  std::map<string, bufferlist> vals;
  int r = ctx->io_ctx_impl->omap_get_vals2(ctx->oid, start_obj, "", max_to_get,
                                          &vals, more);
  if (r < 0) {
    return r;
  }

  for (std::map<string, bufferlist>::iterator it = vals.begin();
       it != vals.end(); ++it) {
    keys->insert(it->first);
  }
  return keys->size();
}
// Read a single omap value by @key.  The key is passed as the
// filter_prefix so the fetched page (up to 1024 entries) only contains
// keys beginning with @key; the exact key is then selected from it.
// Returns -ENOENT when the key is absent.
int cls_cxx_map_get_val(cls_method_context_t hctx, const string &key,
                        bufferlist *outbl) {
  librados::TestClassHandler::MethodContext *ctx =
    reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);

  std::map<string, bufferlist> vals;
  int r = ctx->io_ctx_impl->omap_get_vals(ctx->oid, "", key, 1024, &vals);
  if (r < 0) {
    return r;
  }

  std::map<string, bufferlist>::iterator it = vals.find(key);
  if (it == vals.end()) {
    return -ENOENT;
  }

  *outbl = it->second;
  return 0;
}
int cls_cxx_map_get_vals(cls_method_context_t hctx, const string &start_obj,
const string &filter_prefix, uint64_t max_to_get,
std::map<string, bufferlist> *vals, bool *more) {
librados::TestClassHandler::MethodContext *ctx =
reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
int r = ctx->io_ctx_impl->omap_get_vals2(ctx->oid, start_obj, filter_prefix,
max_to_get, vals, more);
if (r < 0) {
return r;
}
return vals->size();
}
int cls_cxx_map_remove_key(cls_method_context_t hctx, const string &key) {
std::set<std::string> keys;
keys.insert(key);
librados::TestClassHandler::MethodContext *ctx =
reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
return ctx->io_ctx_impl->omap_rm_keys(ctx->oid, keys);
}
int cls_cxx_map_set_val(cls_method_context_t hctx, const string &key,
bufferlist *inbl) {
std::map<std::string, bufferlist> m;
m[key] = *inbl;
return cls_cxx_map_set_vals(hctx, &m);
}
int cls_cxx_map_set_vals(cls_method_context_t hctx,
const std::map<string, bufferlist> *map) {
librados::TestClassHandler::MethodContext *ctx =
reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
return ctx->io_ctx_impl->omap_set(ctx->oid, *map);
}
int cls_cxx_read(cls_method_context_t hctx, int ofs, int len,
bufferlist *outbl) {
return cls_cxx_read2(hctx, ofs, len, outbl, 0);
}
int cls_cxx_read2(cls_method_context_t hctx, int ofs, int len,
bufferlist *outbl, uint32_t op_flags) {
librados::TestClassHandler::MethodContext *ctx =
reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
return ctx->io_ctx_impl->read(
ctx->oid, len, ofs, outbl, ctx->snap_id, nullptr);
}
int cls_cxx_setxattr(cls_method_context_t hctx, const char *name,
bufferlist *inbl) {
librados::TestClassHandler::MethodContext *ctx =
reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
return ctx->io_ctx_impl->xattr_set(ctx->oid, name, *inbl);
}
int cls_cxx_stat(cls_method_context_t hctx, uint64_t *size, time_t *mtime) {
librados::TestClassHandler::MethodContext *ctx =
reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
return ctx->io_ctx_impl->stat(ctx->oid, size, mtime);
}
int cls_cxx_write(cls_method_context_t hctx, int ofs, int len,
bufferlist *inbl) {
return cls_cxx_write2(hctx, ofs, len, inbl, 0);
}
int cls_cxx_write2(cls_method_context_t hctx, int ofs, int len,
bufferlist *inbl, uint32_t op_flags) {
librados::TestClassHandler::MethodContext *ctx =
reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
return ctx->io_ctx_impl->write(ctx->oid, *inbl, len, ofs, ctx->snapc);
}
int cls_cxx_write_full(cls_method_context_t hctx, bufferlist *inbl) {
librados::TestClassHandler::MethodContext *ctx =
reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
return ctx->io_ctx_impl->write_full(ctx->oid, *inbl, ctx->snapc);
}
int cls_cxx_replace(cls_method_context_t hctx, int ofs, int len,
bufferlist *inbl) {
librados::TestClassHandler::MethodContext *ctx =
reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
int r = ctx->io_ctx_impl->truncate(ctx->oid, 0, ctx->snapc);
if (r < 0) {
return r;
}
return ctx->io_ctx_impl->write(ctx->oid, *inbl, len, ofs, ctx->snapc);
}
int cls_cxx_truncate(cls_method_context_t hctx, int ofs) {
librados::TestClassHandler::MethodContext *ctx =
reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
return ctx->io_ctx_impl->truncate(ctx->oid, ofs, ctx->snapc);
}
// Zero @len bytes at offset @ofs of the object.
// Fix: zero() takes (oid, offset, length, snapc) — see the mock
// declaration and ObjectWriteOperation::zero(), which both pass offset
// before length — but this wrapper passed (len, ofs), swapping the two.
int cls_cxx_write_zero(cls_method_context_t hctx, int ofs, int len) {
  librados::TestClassHandler::MethodContext *ctx =
    reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
  return ctx->io_ctx_impl->zero(ctx->oid, ofs, len, ctx->snapc);
}
int cls_cxx_list_watchers(cls_method_context_t hctx,
obj_list_watch_response_t *watchers) {
librados::TestClassHandler::MethodContext *ctx =
reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
std::list<obj_watch_t> obj_watchers;
int r = ctx->io_ctx_impl->list_watchers(ctx->oid, &obj_watchers);
if (r < 0) {
return r;
}
for (auto &w : obj_watchers) {
watch_item_t watcher;
watcher.name = entity_name_t::CLIENT(w.watcher_id);
watcher.cookie = w.cookie;
watcher.timeout_seconds = w.timeout_seconds;
watcher.addr.parse(w.addr);
watchers->entries.push_back(watcher);
}
return 0;
}
uint64_t cls_get_features(cls_method_context_t hctx) {
return CEPH_FEATURES_SUPPORTED_DEFAULT;
}
uint64_t cls_get_client_features(cls_method_context_t hctx) {
return CEPH_FEATURES_SUPPORTED_DEFAULT;
}
int cls_get_snapset_seq(cls_method_context_t hctx, uint64_t *snap_seq) {
librados::TestClassHandler::MethodContext *ctx =
reinterpret_cast<librados::TestClassHandler::MethodContext*>(hctx);
librados::snap_set_t snapset;
int r = ctx->io_ctx_impl->list_snaps(ctx->oid, &snapset);
if (r < 0) {
return r;
}
*snap_seq = snapset.seq;
return 0;
}
// Log a printf-style message via dout at debug @level.  The buffer is
// doubled from 256 bytes until the formatted text fits; past ~8 KiB the
// truncated text is logged as-is.  Returns vsnprintf()'s result: the
// length the full message would need (excluding the NUL), or a negative
// value on a formatting error.
// Fixes: replaces the variable-length array (a non-standard extension in
// C++) with a heap buffer and drops the unreachable trailing return.
int cls_log(int level, const char *format, ...) {
  int size = 256;
  va_list ap;
  while (true) {
    std::string buf(size, '\0');
    va_start(ap, format);
    int n = vsnprintf(&buf[0], size, format, ap);
    va_end(ap);
    if ((n > -1 && n < size) || size > 8196) {
      // c_str() stops at the NUL vsnprintf wrote, matching the original
      // char-array output exactly.
      dout(ceph::dout::need_dynamic(level)) << buf.c_str() << dendl;
      return n;
    }
    size *= 2;
  }
}
int cls_register(const char *name, cls_handle_t *handle) {
librados::TestClassHandler *cls = librados_test_stub::get_class_handler();
return cls->create(name, handle);
}
int cls_register_cxx_method(cls_handle_t hclass, const char *method,
int flags,
cls_method_cxx_call_t class_call,
cls_method_handle_t *handle) {
librados::TestClassHandler *cls = librados_test_stub::get_class_handler();
return cls->create_method(hclass, method, class_call, handle);
}
int cls_register_cxx_filter(cls_handle_t hclass,
const std::string &filter_name,
cls_cxx_filter_factory_t fn,
cls_filter_handle_t *)
{
librados::TestClassHandler *cls = librados_test_stub::get_class_handler();
return cls->create_filter(hclass, filter_name, fn);
}
ceph_release_t cls_get_required_osd_release(cls_handle_t hclass) {
return ceph_release_t::nautilus;
}
ceph_release_t cls_get_min_compatible_client(cls_handle_t hclass) {
return ceph_release_t::nautilus;
}
// stubs to silence TestClassHandler::open_class()
PGLSFilter::~PGLSFilter()
{}
int cls_gen_rand_base64(char *, int) {
return -ENOTSUP;
}
int cls_cxx_chunk_write_and_set(cls_method_handle_t, int,
int, bufferlist *,
uint32_t, bufferlist *, int) {
return -ENOTSUP;
}
int cls_cxx_map_read_header(cls_method_handle_t, bufferlist *) {
return -ENOTSUP;
}
uint64_t cls_get_osd_min_alloc_size(cls_method_context_t hctx) {
return 0;
}
uint64_t cls_get_pool_stripe_width(cls_method_context_t hctx) {
return 0;
}
| 51,065 | 31.546845 | 89 | cc |
null | ceph-main/src/test/librados_test_stub/LibradosTestStub.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef LIBRADOS_TEST_STUB_H
#define LIBRADOS_TEST_STUB_H
#include "include/rados/librados_fwd.hpp"
#include <boost/shared_ptr.hpp>
namespace neorados {
struct IOContext;
struct RADOS;
} // namespace neorados
namespace librados {
class MockTestMemIoCtxImpl;
class MockTestMemRadosClient;
class TestCluster;
class TestClassHandler;
MockTestMemIoCtxImpl &get_mock_io_ctx(IoCtx &ioctx);
MockTestMemIoCtxImpl &get_mock_io_ctx(neorados::RADOS& rados,
neorados::IOContext& io_context);
MockTestMemRadosClient &get_mock_rados_client(neorados::RADOS& rados);
} // namespace librados
namespace librados_test_stub {
typedef boost::shared_ptr<librados::TestCluster> TestClusterRef;
void set_cluster(TestClusterRef cluster);
TestClusterRef get_cluster();
librados::TestClassHandler* get_class_handler();
} // namespace librados_test_stub
#endif // LIBRADOS_TEST_STUB_H
| 1,008 | 22.465116 | 71 | h |
null | ceph-main/src/test/librados_test_stub/MockTestMemCluster.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef LIBRADOS_MOCK_TEST_MEM_CLUSTER_H
#define LIBRADOS_MOCK_TEST_MEM_CLUSTER_H
#include "include/common_fwd.h"
#include "test/librados_test_stub/TestMemCluster.h"
#include "test/librados_test_stub/MockTestMemRadosClient.h"
#include "gmock/gmock.h"
namespace librados {
class TestRadosClient;
// Google Mock wrapper around the in-memory test cluster.  By default the
// mocked call dispatches to a real implementation, so tests only need to
// install expectations for the calls they care about.
class MockTestMemCluster : public TestMemCluster {
public:
  MockTestMemCluster() {
    default_to_dispatch();
  }

  MOCK_METHOD1(create_rados_client, TestRadosClient*(CephContext*));
  // Default behavior backing the mock: hand out a NiceMock rados client
  // bound to this cluster (NiceMock suppresses uninteresting-call noise).
  MockTestMemRadosClient* do_create_rados_client(CephContext *cct) {
    return new ::testing::NiceMock<MockTestMemRadosClient>(cct, this);
  }

  // Route create_rados_client() to do_create_rados_client() unless a test
  // overrides it with EXPECT_CALL/ON_CALL.
  void default_to_dispatch() {
    using namespace ::testing;
    ON_CALL(*this, create_rados_client(_)).WillByDefault(Invoke(this, &MockTestMemCluster::do_create_rados_client));
  }
};
} // namespace librados
#endif // LIBRADOS_MOCK_TEST_MEM_CLUSTER_H
| 984 | 25.621622 | 116 | h |
null | ceph-main/src/test/librados_test_stub/MockTestMemIoCtxImpl.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef LIBRADOS_TEST_STUB_MOCK_TEST_MEM_IO_CTX_IMPL_H
#define LIBRADOS_TEST_STUB_MOCK_TEST_MEM_IO_CTX_IMPL_H
#include "test/librados_test_stub/TestMemIoCtxImpl.h"
#include "test/librados_test_stub/TestMemCluster.h"
#include "gmock/gmock.h"
namespace librados {
class MockTestMemRadosClient;
class MockTestMemIoCtxImpl : public TestMemIoCtxImpl {
public:
MockTestMemIoCtxImpl(MockTestMemRadosClient *mock_client,
TestMemRadosClient *client, int64_t pool_id,
const std::string& pool_name,
TestMemCluster::Pool *pool)
: TestMemIoCtxImpl(client, pool_id, pool_name, pool),
m_mock_client(mock_client), m_client(client) {
default_to_parent();
}
MockTestMemRadosClient *get_mock_rados_client() {
return m_mock_client;
}
MOCK_METHOD0(clone, TestIoCtxImpl*());
TestIoCtxImpl *do_clone() {
TestIoCtxImpl *io_ctx_impl = new ::testing::NiceMock<MockTestMemIoCtxImpl>(
m_mock_client, m_client, get_pool_id(), get_pool_name(), get_pool());
io_ctx_impl->set_snap_read(get_snap_read());
io_ctx_impl->set_snap_context(get_snap_context());
return io_ctx_impl;
}
MOCK_METHOD5(aio_notify, void(const std::string& o, AioCompletionImpl *c,
bufferlist& bl, uint64_t timeout_ms,
bufferlist *pbl));
void do_aio_notify(const std::string& o, AioCompletionImpl *c, bufferlist& bl,
uint64_t timeout_ms, bufferlist *pbl) {
return TestMemIoCtxImpl::aio_notify(o, c, bl, timeout_ms, pbl);
}
MOCK_METHOD6(aio_operate, int(const std::string&, TestObjectOperationImpl&,
AioCompletionImpl*, SnapContext*,
const ceph::real_time*, int));
int do_aio_operate(const std::string& o, TestObjectOperationImpl& ops,
AioCompletionImpl* c, SnapContext* snapc,
const ceph::real_time* pmtime, int flags) {
return TestMemIoCtxImpl::aio_operate(o, ops, c, snapc, pmtime, flags);
}
MOCK_METHOD4(aio_watch, int(const std::string& o, AioCompletionImpl *c,
uint64_t *handle, librados::WatchCtx2 *ctx));
int do_aio_watch(const std::string& o, AioCompletionImpl *c,
uint64_t *handle, librados::WatchCtx2 *ctx) {
return TestMemIoCtxImpl::aio_watch(o, c, handle, ctx);
}
MOCK_METHOD2(aio_unwatch, int(uint64_t handle, AioCompletionImpl *c));
int do_aio_unwatch(uint64_t handle, AioCompletionImpl *c) {
return TestMemIoCtxImpl::aio_unwatch(handle, c);
}
MOCK_METHOD2(assert_exists, int(const std::string &, uint64_t));
int do_assert_exists(const std::string &oid, uint64_t snap_id) {
return TestMemIoCtxImpl::assert_exists(oid, snap_id);
}
MOCK_METHOD2(assert_version, int(const std::string &, uint64_t));
int do_assert_version(const std::string &oid, uint64_t ver) {
return TestMemIoCtxImpl::assert_version(oid, ver);
}
MOCK_METHOD3(create, int(const std::string&, bool, const SnapContext &));
int do_create(const std::string& oid, bool exclusive,
const SnapContext &snapc) {
return TestMemIoCtxImpl::create(oid, exclusive, snapc);
}
MOCK_METHOD4(cmpext, int(const std::string&, uint64_t, bufferlist&,
uint64_t snap_id));
int do_cmpext(const std::string& oid, uint64_t off, bufferlist& cmp_bl,
uint64_t snap_id) {
return TestMemIoCtxImpl::cmpext(oid, off, cmp_bl, snap_id);
}
MOCK_METHOD8(exec, int(const std::string& oid,
TestClassHandler *handler,
const char *cls,
const char *method,
bufferlist& inbl,
bufferlist* outbl,
uint64_t snap_id,
const SnapContext &snapc));
int do_exec(const std::string& oid, TestClassHandler *handler,
const char *cls, const char *method, bufferlist& inbl,
bufferlist* outbl, uint64_t snap_id, const SnapContext &snapc) {
return TestMemIoCtxImpl::exec(oid, handler, cls, method, inbl, outbl,
snap_id, snapc);
}
MOCK_CONST_METHOD0(get_instance_id, uint64_t());
uint64_t do_get_instance_id() const {
return TestMemIoCtxImpl::get_instance_id();
}
MOCK_METHOD2(list_snaps, int(const std::string& o, snap_set_t *out_snaps));
int do_list_snaps(const std::string& o, snap_set_t *out_snaps) {
return TestMemIoCtxImpl::list_snaps(o, out_snaps);
}
MOCK_METHOD2(list_watchers, int(const std::string& o,
std::list<obj_watch_t> *out_watchers));
int do_list_watchers(const std::string& o,
std::list<obj_watch_t> *out_watchers) {
return TestMemIoCtxImpl::list_watchers(o, out_watchers);
}
MOCK_METHOD4(notify, int(const std::string& o, bufferlist& bl,
uint64_t timeout_ms, bufferlist *pbl));
int do_notify(const std::string& o, bufferlist& bl,
uint64_t timeout_ms, bufferlist *pbl) {
return TestMemIoCtxImpl::notify(o, bl, timeout_ms, pbl);
}
MOCK_METHOD1(set_snap_read, void(snap_t));
void do_set_snap_read(snap_t snap_id) {
return TestMemIoCtxImpl::set_snap_read(snap_id);
}
MOCK_METHOD6(sparse_read, int(const std::string& oid,
uint64_t off,
uint64_t len,
std::map<uint64_t, uint64_t> *m,
bufferlist *bl, uint64_t));
int do_sparse_read(const std::string& oid, uint64_t off, size_t len,
std::map<uint64_t, uint64_t> *m, bufferlist *bl,
uint64_t snap_id) {
return TestMemIoCtxImpl::sparse_read(oid, off, len, m, bl, snap_id);
}
MOCK_METHOD6(read, int(const std::string& oid,
size_t len,
uint64_t off,
bufferlist *bl, uint64_t snap_id, uint64_t* objver));
int do_read(const std::string& oid, size_t len, uint64_t off,
bufferlist *bl, uint64_t snap_id, uint64_t* objver) {
return TestMemIoCtxImpl::read(oid, len, off, bl, snap_id, objver);
}
MOCK_METHOD2(remove, int(const std::string& oid, const SnapContext &snapc));
int do_remove(const std::string& oid, const SnapContext &snapc) {
return TestMemIoCtxImpl::remove(oid, snapc);
}
MOCK_METHOD1(selfmanaged_snap_create, int(uint64_t *snap_id));
int do_selfmanaged_snap_create(uint64_t *snap_id) {
return TestMemIoCtxImpl::selfmanaged_snap_create(snap_id);
}
MOCK_METHOD1(selfmanaged_snap_remove, int(uint64_t snap_id));
int do_selfmanaged_snap_remove(uint64_t snap_id) {
return TestMemIoCtxImpl::selfmanaged_snap_remove(snap_id);
}
MOCK_METHOD2(selfmanaged_snap_rollback, int(const std::string& oid,
uint64_t snap_id));
int do_selfmanaged_snap_rollback(const std::string& oid, uint64_t snap_id) {
return TestMemIoCtxImpl::selfmanaged_snap_rollback(oid, snap_id);
}
MOCK_METHOD3(truncate, int(const std::string& oid,
uint64_t size,
const SnapContext &snapc));
int do_truncate(const std::string& oid, uint64_t size,
const SnapContext &snapc) {
return TestMemIoCtxImpl::truncate(oid, size, snapc);
}
MOCK_METHOD5(write, int(const std::string& oid, bufferlist& bl, size_t len,
uint64_t off, const SnapContext &snapc));
int do_write(const std::string& oid, bufferlist& bl, size_t len, uint64_t off,
const SnapContext &snapc) {
return TestMemIoCtxImpl::write(oid, bl, len, off, snapc);
}
MOCK_METHOD3(write_full, int(const std::string& oid,
bufferlist& bl,
const SnapContext &snapc));
int do_write_full(const std::string& oid, bufferlist& bl,
const SnapContext &snapc) {
return TestMemIoCtxImpl::write_full(oid, bl, snapc);
}
MOCK_METHOD5(writesame, int(const std::string& oid, bufferlist& bl,
size_t len, uint64_t off,
const SnapContext &snapc));
int do_writesame(const std::string& oid, bufferlist& bl, size_t len,
uint64_t off, const SnapContext &snapc) {
return TestMemIoCtxImpl::writesame(oid, bl, len, off, snapc);
}
MOCK_METHOD4(zero, int(const std::string& oid, uint64_t offset,
uint64_t length, const SnapContext &snapc));
int do_zero(const std::string& oid, uint64_t offset,
uint64_t length, const SnapContext &snapc) {
return TestMemIoCtxImpl::zero(oid, offset, length, snapc);
}
void default_to_parent() {
using namespace ::testing;
ON_CALL(*this, clone()).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_clone));
ON_CALL(*this, aio_notify(_, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_aio_notify));
ON_CALL(*this, aio_operate(_, _, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_aio_operate));
ON_CALL(*this, aio_watch(_, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_aio_watch));
ON_CALL(*this, aio_unwatch(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_aio_unwatch));
ON_CALL(*this, assert_exists(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_assert_exists));
ON_CALL(*this, assert_version(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_assert_version));
ON_CALL(*this, create(_, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_create));
ON_CALL(*this, cmpext(_, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_cmpext));
ON_CALL(*this, exec(_, _, _, _, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_exec));
ON_CALL(*this, get_instance_id()).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_get_instance_id));
ON_CALL(*this, list_snaps(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_list_snaps));
ON_CALL(*this, list_watchers(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_list_watchers));
ON_CALL(*this, notify(_, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_notify));
ON_CALL(*this, read(_, _, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_read));
ON_CALL(*this, set_snap_read(_)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_set_snap_read));
ON_CALL(*this, sparse_read(_, _, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_sparse_read));
ON_CALL(*this, remove(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_remove));
ON_CALL(*this, selfmanaged_snap_create(_)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_selfmanaged_snap_create));
ON_CALL(*this, selfmanaged_snap_remove(_)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_selfmanaged_snap_remove));
ON_CALL(*this, selfmanaged_snap_rollback(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_selfmanaged_snap_rollback));
ON_CALL(*this, truncate(_,_,_)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_truncate));
ON_CALL(*this, write(_, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_write));
ON_CALL(*this, write_full(_, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_write_full));
ON_CALL(*this, writesame(_, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_writesame));
ON_CALL(*this, zero(_,_,_,_)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_zero));
}
private:
MockTestMemRadosClient *m_mock_client;
TestMemRadosClient *m_client;
};
} // namespace librados
#endif // LIBRADOS_TEST_STUB_MOCK_TEST_MEM_IO_CTX_IMPL_H
| 12,014 | 46.490119 | 133 | h |
null | ceph-main/src/test/librados_test_stub/MockTestMemRadosClient.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef LIBRADOS_TEST_STUB_MOCK_TEST_MEM_RADOS_CLIENT_H
#define LIBRADOS_TEST_STUB_MOCK_TEST_MEM_RADOS_CLIENT_H
#include "test/librados_test_stub/TestMemRadosClient.h"
#include "test/librados_test_stub/MockTestMemIoCtxImpl.h"
#include "gmock/gmock.h"
namespace librados {
class TestMemCluster;
class MockTestMemRadosClient : public TestMemRadosClient {
public:
MockTestMemRadosClient(CephContext *cct, TestMemCluster *test_mem_cluster)
: TestMemRadosClient(cct, test_mem_cluster) {
default_to_dispatch();
}
MOCK_METHOD0(connect, int());
int do_connect() {
return TestMemRadosClient::connect();
}
MOCK_METHOD2(create_ioctx, TestIoCtxImpl *(int64_t pool_id,
const std::string &pool_name));
MockTestMemIoCtxImpl* do_create_ioctx(int64_t pool_id,
const std::string &pool_name) {
return new ::testing::NiceMock<MockTestMemIoCtxImpl>(
this, this, pool_id, pool_name,
get_mem_cluster()->get_pool(pool_name));
}
MOCK_METHOD2(blocklist_add, int(const std::string& client_address,
uint32_t expire_seconds));
int do_blocklist_add(const std::string& client_address,
uint32_t expire_seconds) {
return TestMemRadosClient::blocklist_add(client_address, expire_seconds);
}
MOCK_METHOD1(get_min_compatible_osd, int(int8_t*));
int do_get_min_compatible_osd(int8_t* require_osd_release) {
return TestMemRadosClient::get_min_compatible_osd(require_osd_release);
}
MOCK_METHOD2(get_min_compatible_client, int(int8_t*, int8_t*));
int do_get_min_compatible_client(int8_t* min_compat_client,
int8_t* require_min_compat_client) {
return TestMemRadosClient::get_min_compatible_client(
min_compat_client, require_min_compat_client);
}
MOCK_METHOD3(service_daemon_register,
int(const std::string&,
const std::string&,
const std::map<std::string,std::string>&));
int do_service_daemon_register(const std::string& service,
const std::string& name,
const std::map<std::string,std::string>& metadata) {
return TestMemRadosClient::service_daemon_register(service, name, metadata);
}
// workaround of https://github.com/google/googletest/issues/1155
MOCK_METHOD1(service_daemon_update_status_r,
int(const std::map<std::string,std::string>&));
int do_service_daemon_update_status_r(const std::map<std::string,std::string>& status) {
auto s = status;
return TestMemRadosClient::service_daemon_update_status(std::move(s));
}
MOCK_METHOD4(mon_command, int(const std::vector<std::string>&,
const bufferlist&, bufferlist*, std::string*));
int do_mon_command(const std::vector<std::string>& cmd,
const bufferlist &inbl, bufferlist *outbl,
std::string *outs) {
return mon_command(cmd, inbl, outbl, outs);
}
MOCK_METHOD0(wait_for_latest_osd_map, int());
int do_wait_for_latest_osd_map() {
return wait_for_latest_osd_map();
}
void default_to_dispatch() {
using namespace ::testing;
ON_CALL(*this, connect()).WillByDefault(Invoke(this, &MockTestMemRadosClient::do_connect));
ON_CALL(*this, create_ioctx(_, _)).WillByDefault(Invoke(this, &MockTestMemRadosClient::do_create_ioctx));
ON_CALL(*this, blocklist_add(_, _)).WillByDefault(Invoke(this, &MockTestMemRadosClient::do_blocklist_add));
ON_CALL(*this, get_min_compatible_osd(_)).WillByDefault(Invoke(this, &MockTestMemRadosClient::do_get_min_compatible_osd));
ON_CALL(*this, get_min_compatible_client(_, _)).WillByDefault(Invoke(this, &MockTestMemRadosClient::do_get_min_compatible_client));
ON_CALL(*this, service_daemon_register(_, _, _)).WillByDefault(Invoke(this, &MockTestMemRadosClient::do_service_daemon_register));
ON_CALL(*this, service_daemon_update_status_r(_)).WillByDefault(Invoke(this, &MockTestMemRadosClient::do_service_daemon_update_status_r));
ON_CALL(*this, mon_command(_, _, _, _)).WillByDefault(Invoke(this, &MockTestMemRadosClient::do_mon_command));
ON_CALL(*this, wait_for_latest_osd_map()).WillByDefault(Invoke(this, &MockTestMemRadosClient::do_wait_for_latest_osd_map));
}
};
} // namespace librados
#endif // LIBRADOS_TEST_STUB_MOCK_TEST_MEM_RADOS_CLIENT_H
| 4,561 | 42.865385 | 142 | h |
null | ceph-main/src/test/librados_test_stub/NeoradosTestStub.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/neorados/RADOS.hpp"
#include "include/rados/librados.hpp"
#include "common/ceph_mutex.h"
#include "common/hobject.h"
#include "librados/AioCompletionImpl.h"
#include "mon/error_code.h"
#include "osd/error_code.h"
#include "osd/osd_types.h"
#include "osdc/error_code.h"
#include "test/librados_test_stub/LibradosTestStub.h"
#include "test/librados_test_stub/TestClassHandler.h"
#include "test/librados_test_stub/TestIoCtxImpl.h"
#include "test/librados_test_stub/TestRadosClient.h"
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <functional>
#include <boost/system/system_error.hpp>
namespace bs = boost::system;
using namespace std::literals;
using namespace std::placeholders;
namespace neorados {
namespace detail {
// Test-stub backend for neorados::RADOS: wraps a librados
// TestRadosClient and lazily materializes/caches one TestIoCtxImpl per
// (pool id, namespace) pair.
class Client {
public:
  ceph::mutex mutex = ceph::make_mutex("NeoradosTestStub::Client");

  librados::TestRadosClient* test_rados_client;
  boost::asio::io_context& io_context;

  // cache of io ctxs keyed by (pool id, namespace); each cached entry
  // holds a reference that is released in the destructor
  std::map<std::pair<int64_t, std::string>, librados::TestIoCtxImpl*> io_ctxs;

  Client(librados::TestRadosClient* test_rados_client)
    : test_rados_client(test_rados_client),
      io_context(test_rados_client->get_io_context()) {
  }

  ~Client() {
    for (auto& io_ctx : io_ctxs) {
      io_ctx.second->put();
    }
  }

  // Return (creating and caching on first use) the io ctx matching the
  // IOContext's pool + namespace; nullptr when the pool doesn't exist
  // or the pool list cannot be retrieved.
  librados::TestIoCtxImpl* get_io_ctx(const IOContext& ioc) {
    int64_t pool_id = ioc.pool();
    std::string ns = std::string{ioc.ns()};

    auto lock = std::scoped_lock{mutex};
    auto key = make_pair(pool_id, ns);
    auto it = io_ctxs.find(key);
    if (it != io_ctxs.end()) {
      return it->second;
    }

    // resolve the pool name from the pool id before creating the ctx
    std::list<std::pair<int64_t, std::string>> pools;
    int r = test_rados_client->pool_list(pools);
    if (r < 0) {
      return nullptr;
    }

    for (auto& pool : pools) {
      if (pool.first == pool_id) {
        auto io_ctx = test_rados_client->create_ioctx(pool_id, pool.second);
        io_ctx->set_namespace(ns);
        io_ctxs[key] = io_ctx;
        return io_ctx;
      }
    }
    return nullptr;
  }
};
} // namespace detail
namespace {
// Bridges a librados AioCompletionImpl callback back to a neorados
// completion: owns the neorados completion until the callback fires.
struct CompletionPayload {
  std::unique_ptr<Op::Completion> c;
};

// rados_callback_t adapter: translate the librados return value into a
// boost::system error code and defer the neorados completion.
void completion_callback_adapter(rados_completion_t c, void *arg) {
  auto impl = reinterpret_cast<librados::AioCompletionImpl *>(c);
  auto r = impl->get_return_value();
  impl->release();

  auto payload = reinterpret_cast<CompletionPayload*>(arg);
  payload->c->defer(std::move(payload->c),
                    (r < 0) ? bs::error_code(-r, osd_category()) :
                              bs::error_code());
  delete payload;  // heap-allocated in create_aio_completion()
}

// Wrap a neorados completion in a librados AioCompletionImpl so it can
// be handed to the TestIoCtxImpl aio entry points.
librados::AioCompletionImpl* create_aio_completion(
    std::unique_ptr<Op::Completion>&& c) {
  auto payload = new CompletionPayload{std::move(c)};

  auto impl = new librados::AioCompletionImpl();
  impl->set_complete_callback(payload, completion_callback_adapter);

  return impl;
}
// Store a non-negative operation result (a byte count) into *pval when
// the caller asked for one; the result always passes through unchanged
// so this can wrap another operation's return value.
int save_operation_size(int result, size_t* pval) {
  if (pval != nullptr) {
    *pval = result;
  }
  return result;
}
// Mirror an integer op result into a boost error_code out-param (when
// provided): 0 becomes success; a negative errno-style result becomes
// its positive counterpart in the system category.  The integer result
// always passes through unchanged.
int save_operation_ec(int result, boost::system::error_code* ec) {
  if (ec != NULL) {
    *ec = {std::abs(result), bs::system_category()};
  }
  return result;
}
} // anonymous namespace
Object::Object() {
static_assert(impl_size >= sizeof(object_t));
new (&impl) object_t();
}
Object::Object(std::string&& s) {
static_assert(impl_size >= sizeof(object_t));
new (&impl) object_t(std::move(s));
}
Object::~Object() {
reinterpret_cast<object_t*>(&impl)->~object_t();
}
Object::operator std::string_view() const {
return std::string_view(reinterpret_cast<const object_t*>(&impl)->name);
}
struct IOContextImpl {
object_locator_t oloc;
snapid_t snap_seq = CEPH_NOSNAP;
SnapContext snapc;
};
IOContext::IOContext() {
static_assert(impl_size >= sizeof(IOContextImpl));
new (&impl) IOContextImpl();
}
IOContext::IOContext(const IOContext& rhs) {
static_assert(impl_size >= sizeof(IOContextImpl));
new (&impl) IOContextImpl(*reinterpret_cast<const IOContextImpl*>(&rhs.impl));
}
IOContext::IOContext(int64_t _pool, std::string&& _ns)
: IOContext() {
pool(_pool);
ns(std::move(_ns));
}
IOContext::~IOContext() {
reinterpret_cast<IOContextImpl*>(&impl)->~IOContextImpl();
}
std::int64_t IOContext::pool() const {
return reinterpret_cast<const IOContextImpl*>(&impl)->oloc.pool;
}
void IOContext::pool(std::int64_t _pool) {
reinterpret_cast<IOContextImpl*>(&impl)->oloc.pool = _pool;
}
std::string_view IOContext::ns() const {
return reinterpret_cast<const IOContextImpl*>(&impl)->oloc.nspace;
}
void IOContext::ns(std::string&& _ns) {
reinterpret_cast<IOContextImpl*>(&impl)->oloc.nspace = std::move(_ns);
}
std::optional<std::uint64_t> IOContext::read_snap() const {
auto& snap_seq = reinterpret_cast<const IOContextImpl*>(&impl)->snap_seq;
if (snap_seq == CEPH_NOSNAP)
return std::nullopt;
else
return snap_seq;
}
void IOContext::read_snap(std::optional<std::uint64_t> _snapid) {
auto& snap_seq = reinterpret_cast<IOContextImpl*>(&impl)->snap_seq;
snap_seq = _snapid.value_or(CEPH_NOSNAP);
}
std::optional<
std::pair<std::uint64_t,
std::vector<std::uint64_t>>> IOContext::write_snap_context() const {
auto& snapc = reinterpret_cast<const IOContextImpl*>(&impl)->snapc;
if (snapc.empty()) {
return std::nullopt;
} else {
std::vector<uint64_t> v(snapc.snaps.begin(), snapc.snaps.end());
return std::make_optional(std::make_pair(uint64_t(snapc.seq), v));
}
}
void IOContext::write_snap_context(
std::optional<std::pair<std::uint64_t, std::vector<std::uint64_t>>> _snapc) {
auto& snapc = reinterpret_cast<IOContextImpl*>(&impl)->snapc;
if (!_snapc) {
snapc.clear();
} else {
SnapContext n(_snapc->first, { _snapc->second.begin(), _snapc->second.end()});
if (!n.is_valid()) {
throw bs::system_error(EINVAL,
bs::system_category(),
"Invalid snap context.");
}
snapc = n;
}
}
void IOContext::full_try(bool _full_try) {
// no-op
}
// Two IOContexts compare equal when their object locator (pool +
// namespace), read snapshot, and write snap context all match.
bool operator ==(const IOContext& lhs, const IOContext& rhs) {
  const auto& l = *reinterpret_cast<const IOContextImpl*>(&lhs.impl);
  const auto& r = *reinterpret_cast<const IOContextImpl*>(&rhs.impl);
  if (!(l.oloc == r.oloc)) {
    return false;
  }
  if (l.snap_seq != r.snap_seq) {
    return false;
  }
  return (l.snapc.seq == r.snapc.seq && l.snapc.snaps == r.snapc.snaps);
}

bool operator !=(const IOContext& lhs, const IOContext& rhs) {
  return !(lhs == rhs);
}
Op::Op() {
static_assert(Op::impl_size >= sizeof(librados::TestObjectOperationImpl*));
auto& o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
o = new librados::TestObjectOperationImpl();
o->get();
}
Op::~Op() {
auto& o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
if (o != nullptr) {
o->put();
o = nullptr;
}
}
void Op::assert_exists() {
auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
o->ops.push_back(std::bind(
&librados::TestIoCtxImpl::assert_exists, _1, _2, _4));
}
void Op::assert_version(uint64_t ver) {
auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
o->ops.push_back(std::bind(
&librados::TestIoCtxImpl::assert_version, _1, _2, ver));
}
void Op::cmpext(uint64_t off, ceph::buffer::list&& cmp_bl, std::size_t* s) {
auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
librados::ObjectOperationTestImpl op = std::bind(
&librados::TestIoCtxImpl::cmpext, _1, _2, off, cmp_bl, _4);
if (s != nullptr) {
op = std::bind(
save_operation_size, std::bind(op, _1, _2, _3, _4, _5, _6), s);
}
o->ops.push_back(op);
}
std::size_t Op::size() const {
auto o = *reinterpret_cast<librados::TestObjectOperationImpl* const *>(&impl);
return o->ops.size();
}
void Op::set_fadvise_random() {
// no-op
}
void Op::set_fadvise_sequential() {
// no-op
}
void Op::set_fadvise_willneed() {
// no-op
}
void Op::set_fadvise_dontneed() {
// no-op
}
void Op::set_fadvise_nocache() {
// no-op
}
void Op::balance_reads() {
// no-op
}
void Op::localize_reads() {
// no-op
}
void Op::exec(std::string_view cls, std::string_view method,
const ceph::buffer::list& inbl,
ceph::buffer::list* out,
boost::system::error_code* ec) {
auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
auto cls_handler = librados_test_stub::get_class_handler();
librados::ObjectOperationTestImpl op =
[cls_handler, cls, method, inbl = const_cast<bufferlist&>(inbl), out]
(librados::TestIoCtxImpl* io_ctx, const std::string& oid, bufferlist* outbl,
uint64_t snap_id, const SnapContext& snapc, uint64_t*) mutable -> int {
return io_ctx->exec(
oid, cls_handler, std::string(cls).c_str(),
std::string(method).c_str(), inbl,
(out != nullptr ? out : outbl), snap_id, snapc);
};
if (ec != nullptr) {
op = std::bind(
save_operation_ec, std::bind(op, _1, _2, _3, _4, _5, _6), ec);
}
o->ops.push_back(op);
}
void Op::exec(std::string_view cls, std::string_view method,
const ceph::buffer::list& inbl,
boost::system::error_code* ec) {
auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
auto cls_handler = librados_test_stub::get_class_handler();
librados::ObjectOperationTestImpl op =
[cls_handler, cls, method, inbl = const_cast<bufferlist&>(inbl)]
(librados::TestIoCtxImpl* io_ctx, const std::string& oid, bufferlist* outbl,
uint64_t snap_id, const SnapContext& snapc, uint64_t*) mutable -> int {
return io_ctx->exec(
oid, cls_handler, std::string(cls).c_str(),
std::string(method).c_str(), inbl, outbl, snap_id, snapc);
};
if (ec != NULL) {
op = std::bind(
save_operation_ec, std::bind(op, _1, _2, _3, _4, _5, _6), ec);
}
o->ops.push_back(op);
}
void ReadOp::read(size_t off, uint64_t len, ceph::buffer::list* out,
boost::system::error_code* ec) {
auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
librados::ObjectOperationTestImpl op;
if (out != nullptr) {
op = std::bind(
&librados::TestIoCtxImpl::read, _1, _2, len, off, out, _4, _6);
} else {
op = std::bind(
&librados::TestIoCtxImpl::read, _1, _2, len, off, _3, _4, _6);
}
if (ec != NULL) {
op = std::bind(
save_operation_ec, std::bind(op, _1, _2, _3, _4, _5, _6), ec);
}
o->ops.push_back(op);
}
void ReadOp::sparse_read(uint64_t off, uint64_t len,
ceph::buffer::list* out,
std::vector<std::pair<std::uint64_t,
std::uint64_t>>* extents,
boost::system::error_code* ec) {
auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
librados::ObjectOperationTestImpl op =
[off, len, out, extents]
(librados::TestIoCtxImpl* io_ctx, const std::string& oid, bufferlist* outbl,
uint64_t snap_id, const SnapContext& snapc, uint64_t*) mutable -> int {
std::map<uint64_t,uint64_t> m;
int r = io_ctx->sparse_read(
oid, off, len, &m, (out != nullptr ? out : outbl), snap_id);
if (r >= 0 && extents != nullptr) {
extents->clear();
extents->insert(extents->end(), m.begin(), m.end());
}
return r;
};
if (ec != NULL) {
op = std::bind(save_operation_ec,
std::bind(op, _1, _2, _3, _4, _5, _6), ec);
}
o->ops.push_back(op);
}
void ReadOp::list_snaps(SnapSet* snaps, bs::error_code* ec) {
auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
librados::ObjectOperationTestImpl op =
[snaps]
(librados::TestIoCtxImpl* io_ctx, const std::string& oid, bufferlist*,
uint64_t, const SnapContext&, uint64_t*) mutable -> int {
librados::snap_set_t snap_set;
int r = io_ctx->list_snaps(oid, &snap_set);
if (r >= 0 && snaps != nullptr) {
*snaps = {};
snaps->seq = snap_set.seq;
snaps->clones.reserve(snap_set.clones.size());
for (auto& clone : snap_set.clones) {
neorados::CloneInfo clone_info;
clone_info.cloneid = clone.cloneid;
clone_info.snaps = clone.snaps;
clone_info.overlap = clone.overlap;
clone_info.size = clone.size;
snaps->clones.push_back(clone_info);
}
}
return r;
};
if (ec != NULL) {
op = std::bind(save_operation_ec,
std::bind(op, _1, _2, _3, _4, _5, _6), ec);
}
o->ops.push_back(op);
}
void WriteOp::create(bool exclusive) {
auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
o->ops.push_back(std::bind(
&librados::TestIoCtxImpl::create, _1, _2, exclusive, _5));
}
void WriteOp::write(uint64_t off, ceph::buffer::list&& bl) {
auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
o->ops.push_back(std::bind(
&librados::TestIoCtxImpl::write, _1, _2, bl, bl.length(), off, _5));
}
void WriteOp::write_full(ceph::buffer::list&& bl) {
auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
o->ops.push_back(std::bind(
&librados::TestIoCtxImpl::write_full, _1, _2, bl, _5));
}
void WriteOp::remove() {
auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
o->ops.push_back(std::bind(
&librados::TestIoCtxImpl::remove, _1, _2, _5));
}
void WriteOp::truncate(uint64_t off) {
auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
o->ops.push_back(std::bind(
&librados::TestIoCtxImpl::truncate, _1, _2, off, _5));
}
void WriteOp::zero(uint64_t off, uint64_t len) {
auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
o->ops.push_back(std::bind(
&librados::TestIoCtxImpl::zero, _1, _2, off, len, _5));
}
void WriteOp::writesame(std::uint64_t off, std::uint64_t write_len,
ceph::buffer::list&& bl) {
auto o = *reinterpret_cast<librados::TestObjectOperationImpl**>(&impl);
o->ops.push_back(std::bind(
&librados::TestIoCtxImpl::writesame, _1, _2, bl, write_len, off, _5));
}
void WriteOp::set_alloc_hint(uint64_t expected_object_size,
uint64_t expected_write_size,
alloc_hint::alloc_hint_t flags) {
// no-op
}
RADOS::RADOS() = default;
RADOS::RADOS(RADOS&&) = default;
RADOS::RADOS(std::unique_ptr<detail::Client> impl)
: impl(std::move(impl)) {
}
RADOS::~RADOS() = default;
RADOS RADOS::make_with_librados(librados::Rados& rados) {
auto test_rados_client = reinterpret_cast<librados::TestRadosClient*>(
rados.client);
return RADOS{std::make_unique<detail::Client>(test_rados_client)};
}
CephContext* neorados::RADOS::cct() {
return impl->test_rados_client->cct();
}
boost::asio::io_context& neorados::RADOS::get_io_context() {
return impl->io_context;
}
boost::asio::io_context::executor_type neorados::RADOS::get_executor() const {
return impl->io_context.get_executor();
}
void RADOS::execute(const Object& o, const IOContext& ioc, ReadOp&& op,
ceph::buffer::list* bl, std::unique_ptr<Op::Completion> c,
uint64_t* objver, const blkin_trace_info* trace_info) {
auto io_ctx = impl->get_io_ctx(ioc);
if (io_ctx == nullptr) {
c->dispatch(std::move(c), osdc_errc::pool_dne);
return;
}
auto ops = *reinterpret_cast<librados::TestObjectOperationImpl**>(&op.impl);
auto snap_id = CEPH_NOSNAP;
auto opt_snap_id = ioc.read_snap();
if (opt_snap_id) {
snap_id = *opt_snap_id;
}
auto completion = create_aio_completion(std::move(c));
auto r = io_ctx->aio_operate_read(std::string{o}, *ops, completion, 0U, bl,
snap_id, objver);
ceph_assert(r == 0);
}
void RADOS::execute(const Object& o, const IOContext& ioc, WriteOp&& op,
std::unique_ptr<Op::Completion> c, uint64_t* objver,
const blkin_trace_info* trace_info) {
auto io_ctx = impl->get_io_ctx(ioc);
if (io_ctx == nullptr) {
c->dispatch(std::move(c), osdc_errc::pool_dne);
return;
}
auto ops = *reinterpret_cast<librados::TestObjectOperationImpl**>(&op.impl);
SnapContext snapc;
auto opt_snapc = ioc.write_snap_context();
if (opt_snapc) {
snapc.seq = opt_snapc->first;
snapc.snaps.assign(opt_snapc->second.begin(), opt_snapc->second.end());
}
auto completion = create_aio_completion(std::move(c));
auto r = io_ctx->aio_operate(std::string{o}, *ops, completion, &snapc, nullptr, 0U);
ceph_assert(r == 0);
}
void RADOS::mon_command(std::vector<std::string> command,
const bufferlist& bl,
std::string* outs, bufferlist* outbl,
std::unique_ptr<Op::Completion> c) {
auto r = impl->test_rados_client->mon_command(command, bl, outbl, outs);
c->post(std::move(c),
(r < 0 ? bs::error_code(-r, osd_category()) : bs::error_code()));
}
void RADOS::blocklist_add(std::string_view client_address,
std::optional<std::chrono::seconds> expire,
std::unique_ptr<SimpleOpComp> c) {
auto r = impl->test_rados_client->blocklist_add(
std::string(client_address), expire.value_or(0s).count());
c->post(std::move(c),
(r < 0 ? bs::error_code(-r, mon_category()) : bs::error_code()));
}
void RADOS::wait_for_latest_osd_map(std::unique_ptr<Op::Completion> c) {
auto r = impl->test_rados_client->wait_for_latest_osd_map();
c->dispatch(std::move(c),
(r < 0 ? bs::error_code(-r, osd_category()) :
bs::error_code()));
}
} // namespace neorados
namespace librados {
MockTestMemIoCtxImpl& get_mock_io_ctx(neorados::RADOS& rados,
neorados::IOContext& io_context) {
auto& impl = *reinterpret_cast<std::unique_ptr<neorados::detail::Client>*>(
&rados);
auto io_ctx = impl->get_io_ctx(io_context);
ceph_assert(io_ctx != nullptr);
return *reinterpret_cast<MockTestMemIoCtxImpl*>(io_ctx);
}
MockTestMemRadosClient& get_mock_rados_client(neorados::RADOS& rados) {
auto& impl = *reinterpret_cast<std::unique_ptr<neorados::detail::Client>*>(
&rados);
return *reinterpret_cast<MockTestMemRadosClient*>(impl->test_rados_client);
}
} // namespace librados
| 18,400 | 29.566445 | 86 | cc |
null | ceph-main/src/test/librados_test_stub/TestClassHandler.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/librados_test_stub/TestClassHandler.h"
#include "test/librados_test_stub/TestIoCtxImpl.h"
#include <boost/algorithm/string/predicate.hpp>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include "common/debug.h"
#include "include/ceph_assert.h"
#include "include/dlfcn_compat.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rados
namespace librados {
// Construct with no loaded class handles.
TestClassHandler::TestClassHandler() {
}

// Unload every shared object that open_class() successfully loaded.
TestClassHandler::~TestClassHandler() {
  for (auto handle : m_class_handles) {
    dlclose(handle);
  }
}
// Load a single object-class shared object and run its "__cls_init"
// entry point.  On any failure an error is reported to stderr and the
// handle (if any) is closed; a successfully initialized handle is kept
// open for the life of the handler so registered methods stay callable.
void TestClassHandler::open_class(const std::string& name,
                                  const std::string& path) {
  void *handle = dlopen(path.c_str(), RTLD_NOW);
  if (handle == nullptr) {
    std::cerr << "Failed to load class: " << name << " (" << path << "): "
              << dlerror() << std::endl;
    return;
  }

  // locate the mandatory initializer exported by every object class
  void (*cls_init)() = reinterpret_cast<void (*)()>(
      dlsym(handle, "__cls_init"));
  if (cls_init != nullptr) {
    m_class_handles.push_back(handle);
    cls_init();
    return;
  }

  // single consolidated error instead of the previous two-message path
  std::cerr << "Class: " << name << " (" << path << ") missing initializer: "
            << dlerror() << std::endl;
  dlclose(handle);
}
// Scan $CEPH_LIB (default "lib") for libcls_*<shared-lib-suffix> files
// and load each one via open_class().  Names are collected into a set
// first so classes are opened in sorted, deterministic order.
void TestClassHandler::open_all_classes() {
  ceph_assert(m_class_handles.empty());

  const char* env = getenv("CEPH_LIB");
  std::string CEPH_LIB(env ? env : "lib");
  DIR *dir = ::opendir(CEPH_LIB.c_str());
  if (dir == NULL) {
    ceph_abort();
  }

  std::set<std::string> names;
  struct dirent *pde = nullptr;
  while ((pde = ::readdir(dir))) {
    std::string name(pde->d_name);
    if (!boost::algorithm::starts_with(name, "libcls_") ||
        !boost::algorithm::ends_with(name, SHARED_LIB_SUFFIX)) {
      continue;
    }
    names.insert(name);
  }

  // strip the "libcls_" prefix and the platform shared-library suffix;
  // do not hard-code a ".so"-length suffix (SHARED_LIB_SUFFIX can be
  // ".dylib" on other platforms)
  const size_t prefix_len = strlen("libcls_");
  const size_t suffix_len = strlen(SHARED_LIB_SUFFIX);
  for (auto& name : names) {
    std::string class_name = name.substr(
        prefix_len, name.size() - prefix_len - suffix_len);
    open_class(class_name, CEPH_LIB + "/" + name);
  }
  closedir(dir);
}
int TestClassHandler::create(const std::string &name, cls_handle_t *handle) {
if (m_classes.find(name) != m_classes.end()) {
std::cerr << "Class " << name << " already exists" << std::endl;
return -EEXIST;
}
SharedClass cls(new Class());
m_classes[name] = cls;
*handle = reinterpret_cast<cls_handle_t>(cls.get());
return 0;
}
// Register a method on a previously created class handle; fails with
// -EEXIST when the method name is already registered on that class.
int TestClassHandler::create_method(cls_handle_t hclass,
                                    const char *name,
                                    cls_method_cxx_call_t class_call,
                                    cls_method_handle_t *handle) {
  auto *cls = reinterpret_cast<Class*>(hclass);
  if (cls->methods.count(name) > 0) {
    std::cerr << "Class method " << hclass << ":" << name << " already exists"
              << std::endl;
    return -EEXIST;
  }

  auto method = SharedMethod(new Method());
  method->class_call = class_call;
  cls->methods[name] = method;
  return 0;
}
// Look up the C++ entry point registered for cls.method.  Returns NULL
// (after logging) when either the class or the method is unknown.
cls_method_cxx_call_t TestClassHandler::get_method(const std::string &cls,
                                                   const std::string &method) {
  Classes::iterator c_it = m_classes.find(cls);
  if (c_it == m_classes.end()) {
    // fixed typo: "Failed to located class"
    std::cerr << "Failed to locate class " << cls << std::endl;
    return NULL;
  }

  SharedClass scls = c_it->second;
  Methods::iterator m_it = scls->methods.find(method);
  if (m_it == scls->methods.end()) {
    // fixed typo + missing space before the class name
    std::cerr << "Failed to locate class method " << cls << "." << method
              << std::endl;
    return NULL;
  }
  return m_it->second->class_call;
}
// Build the per-invocation context handed to an object-class method.
// The io ctx is cloned so gmock expectations registered on the
// caller's ctx are not consumed by the class method's own calls; the
// clone's reference is released by MethodContext's destructor.
TestClassHandler::SharedMethodContext TestClassHandler::get_method_context(
    TestIoCtxImpl *io_ctx_impl, const std::string &oid, uint64_t snap_id,
    const SnapContext &snapc) {
  SharedMethodContext ctx(new MethodContext());

  // clone to ioctx to provide a firewall for gmock expectations
  ctx->io_ctx_impl = io_ctx_impl->clone();
  ctx->oid = oid;
  ctx->snap_id = snap_id;
  ctx->snapc = snapc;
  return ctx;
}
// Register a named PGLS filter factory on a class; fails with -EEXIST
// when a filter of that name is already registered.
int TestClassHandler::create_filter(cls_handle_t hclass,
                                    const std::string& name,
                                    cls_cxx_filter_factory_t fn)
{
  auto *cls = reinterpret_cast<Class*>(hclass);
  // emplace() leaves an existing entry untouched, matching the original
  // find-then-insert behavior
  bool inserted = cls->filters.emplace(name, fn).second;
  return inserted ? 0 : -EEXIST;
}
// Release the cloned io ctx reference taken in get_method_context().
TestClassHandler::MethodContext::~MethodContext() {
  io_ctx_impl->put();
}
} // namespace librados
| 4,570 | 27.56875 | 79 | cc |
null | ceph-main/src/test/librados_test_stub/TestClassHandler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_CLASS_HANDLER_H
#define CEPH_TEST_CLASS_HANDLER_H
#include "objclass/objclass.h"
#include "common/snap_types.h"
#include <boost/shared_ptr.hpp>
#include <list>
#include <map>
#include <string>
namespace librados
{
class TestIoCtxImpl;
// In-process registry of librados object classes for the test stub:
// loads libcls_* shared objects, records the methods/filters they
// register, and hands out per-call contexts for executing them.
class TestClassHandler {
public:

  TestClassHandler();
  ~TestClassHandler();

  // Per-invocation context passed to an object-class method; owns a
  // cloned TestIoCtxImpl reference that is released by the destructor.
  struct MethodContext {
    ~MethodContext();

    TestIoCtxImpl *io_ctx_impl;  // cloned io ctx (reference counted)
    std::string oid;             // object the method operates on
    uint64_t snap_id;            // read snapshot id for the call
    SnapContext snapc;           // write snap context for the call
  };
  typedef boost::shared_ptr<MethodContext> SharedMethodContext;

  // A registered class method: currently just its C++ entry point.
  struct Method {
    cls_method_cxx_call_t class_call;
  };

  typedef boost::shared_ptr<Method> SharedMethod;
  typedef std::map<std::string, SharedMethod> Methods;
  typedef std::map<std::string, cls_cxx_filter_factory_t> Filters;

  // A registered object class: its methods and filter factories by name.
  struct Class {
    Methods methods;
    Filters filters;
  };
  typedef boost::shared_ptr<Class> SharedClass;

  // Load every libcls_*<suffix> shared object found in $CEPH_LIB.
  void open_all_classes();

  // Registration/lookup API mirroring objclass.h; the create* calls
  // return -EEXIST on duplicate names.
  int create(const std::string &name, cls_handle_t *handle);
  int create_method(cls_handle_t hclass, const char *method,
                    cls_method_cxx_call_t class_call,
                    cls_method_handle_t *handle);
  cls_method_cxx_call_t get_method(const std::string &cls,
                                   const std::string &method);

  SharedMethodContext get_method_context(TestIoCtxImpl *io_ctx_impl,
                                         const std::string &oid,
                                         uint64_t snap_id,
                                         const SnapContext &snapc);

  int create_filter(cls_handle_t hclass, const std::string& filter_name,
                    cls_cxx_filter_factory_t fn);

private:

  typedef std::map<std::string, SharedClass> Classes;
  typedef std::list<void*> ClassHandles;

  Classes m_classes;             // registered classes keyed by name
  ClassHandles m_class_handles;  // dlopen() handles, closed in dtor
  Filters m_filters;             // NOTE(review): appears unused here; per-class filters live in Class::filters

  void open_class(const std::string& name, const std::string& path);

};
} // namespace librados
#endif // CEPH_TEST_CLASS_HANDLER_H
| 2,063 | 24.8 | 72 | h |
null | ceph-main/src/test/librados_test_stub/TestCluster.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_CLUSTER_H
#define CEPH_TEST_CLUSTER_H
#include "test/librados_test_stub/TestWatchNotify.h"
#include "include/common_fwd.h"
namespace librados {
class TestRadosClient;
class TestWatchNotify;
// Abstract cluster backend for the librados test stub: creates rados
// clients, routes object-removal notifications to registered handlers,
// and owns the shared watch/notify state machine.
class TestCluster {
public:
  // Identifies an object within a pool by namespace + name; ordered so
  // it can be used as a std::map key.
  struct ObjectLocator {
    std::string nspace;
    std::string name;

    ObjectLocator(const std::string& nspace, const std::string& name)
      : nspace(nspace), name(name) {
    }

    bool operator<(const ObjectLocator& rhs) const {
      if (nspace != rhs.nspace) {
        return nspace < rhs.nspace;
      }
      return name < rhs.name;
    }
  };

  // Callback interface invoked when a registered object is removed.
  struct ObjectHandler {
    virtual ~ObjectHandler() {}

    virtual void handle_removed(TestRadosClient* test_rados_client) = 0;
  };

  TestCluster() : m_watch_notify(this) {
  }
  virtual ~TestCluster() {
  }

  virtual TestRadosClient *create_rados_client(CephContext *cct) = 0;

  virtual int register_object_handler(int64_t pool_id,
                                      const ObjectLocator& locator,
                                      ObjectHandler* object_handler) = 0;
  virtual void unregister_object_handler(int64_t pool_id,
                                         const ObjectLocator& locator,
                                         ObjectHandler* object_handler) = 0;

  TestWatchNotify *get_watch_notify() {
    return &m_watch_notify;
  }

protected:
  TestWatchNotify m_watch_notify;  // shared watch/notify implementation
};
} // namespace librados
#endif // CEPH_TEST_CLUSTER_H
| 1,565 | 23.092308 | 76 | h |
null | ceph-main/src/test/librados_test_stub/TestIoCtxImpl.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/librados_test_stub/TestIoCtxImpl.h"
#include "test/librados_test_stub/TestClassHandler.h"
#include "test/librados_test_stub/TestRadosClient.h"
#include "test/librados_test_stub/TestWatchNotify.h"
#include "librados/AioCompletionImpl.h"
#include "include/ceph_assert.h"
#include "common/Finisher.h"
#include "common/valgrind.h"
#include "objclass/objclass.h"
#include <functional>
#include <errno.h>
using namespace std;
namespace librados {
TestIoCtxImpl::TestIoCtxImpl() : m_client(NULL) {
get();
}
// Primary constructor: binds the io-ctx to a client and pool and starts
// reading from HEAD (CEPH_NOSNAP).
TestIoCtxImpl::TestIoCtxImpl(TestRadosClient *client, int64_t pool_id,
                             const std::string& pool_name)
  : m_client(client), m_pool_id(pool_id), m_pool_name(pool_name),
    m_snap_seq(CEPH_NOSNAP)
{
  // Hold a reference on the owning client for this io-ctx's lifetime,
  // plus the initial self-reference.
  m_client->get();
  get();
}
// Copy constructor used by clone(): shares the client (adding a reference)
// and copies pool/namespace/snapshot read state.
TestIoCtxImpl::TestIoCtxImpl(const TestIoCtxImpl& rhs)
  : m_client(rhs.m_client),
    m_pool_id(rhs.m_pool_id),
    m_pool_name(rhs.m_pool_name),
    m_namespace_name(rhs.m_namespace_name),
    m_snap_seq(rhs.m_snap_seq)
{
  m_client->get();
  get();
}
// Destruction is only legal once every queued async operation has completed.
TestIoCtxImpl::~TestIoCtxImpl() {
  ceph_assert(m_pending_ops == 0);
}
// Take a reference on the compound operation.
void TestObjectOperationImpl::get() {
  m_refcount++;
}
// Drop a reference; deletes the operation when the count reaches zero.
// The valgrind/helgrind annotations tell race detectors that the refcount
// ordering establishes happens-before between the last put() and delete.
void TestObjectOperationImpl::put() {
  if (--m_refcount == 0) {
    ANNOTATE_HAPPENS_AFTER(&m_refcount);
    ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(&m_refcount);
    delete this;
  } else {
    ANNOTATE_HAPPENS_BEFORE(&m_refcount);
  }
}
// Take a reference on the io-ctx.
void TestIoCtxImpl::get() {
  m_refcount++;
}
// Drop a reference; on the last one, release the client reference taken in
// the constructor and delete the io-ctx.
void TestIoCtxImpl::put() {
  if (--m_refcount == 0) {
    m_client->put();
    delete this;
  }
}
// Global instance id is provided by the owning client.
uint64_t TestIoCtxImpl::get_instance_id() const {
  return m_client->get_instance_id();
}

int64_t TestIoCtxImpl::get_id() {
  return m_pool_id;
}

// Object versions are not tracked per-ctx in the stub; always 0.
uint64_t TestIoCtxImpl::get_last_version() {
  return 0;
}

std::string TestIoCtxImpl::get_pool_name() {
  return m_pool_name;
}
// Synchronously wait for all queued AIO operations to drain.
int TestIoCtxImpl::aio_flush() {
  m_client->flush_aio_operations();
  return 0;
}

// Asynchronous flush: the completion fires once prior AIO has drained.
void TestIoCtxImpl::aio_flush_async(AioCompletionImpl *c) {
  m_client->flush_aio_operations(c);
}
// Asynchronously notify watchers of 'oid'.  A pending-op slot and a
// completion reference are held until C_AioNotify fires
// handle_aio_notify_complete(), which releases both.
void TestIoCtxImpl::aio_notify(const std::string& oid, AioCompletionImpl *c,
                               bufferlist& bl, uint64_t timeout_ms,
                               bufferlist *pbl) {
  m_pending_ops++;
  c->get();
  C_AioNotify *ctx = new C_AioNotify(this, c);
  m_client->get_watch_notify()->aio_notify(m_client, m_pool_id, get_namespace(),
                                           oid, bl, timeout_ms, pbl, ctx);
}
// Queue a compound write operation for async execution.  'pmtime' and
// 'flags' are currently ignored (see TODO).  Falls back to the ctx's own
// snap context when the caller passes none.
int TestIoCtxImpl::aio_operate(const std::string& oid, TestObjectOperationImpl &ops,
                               AioCompletionImpl *c, SnapContext *snap_context,
                               const ceph::real_time *pmtime, int flags) {
  // TODO flags for now
  ops.get();  // released by execute_aio_operations()
  m_pending_ops++;
  m_client->add_aio_operation(oid, true, std::bind(
    &TestIoCtxImpl::execute_aio_operations, this, oid, &ops,
    reinterpret_cast<bufferlist*>(0), m_snap_seq,
    snap_context != NULL ? *snap_context : m_snapc, nullptr), c);
  return 0;
}
// Queue a compound read operation for async execution; results land in
// 'pbl' and (optionally) the object version in 'objver'.  'flags' ignored.
int TestIoCtxImpl::aio_operate_read(const std::string& oid,
                                    TestObjectOperationImpl &ops,
                                    AioCompletionImpl *c, int flags,
                                    bufferlist *pbl, uint64_t snap_id,
                                    uint64_t* objver) {
  // TODO ignoring flags for now
  ops.get();  // released by execute_aio_operations()
  m_pending_ops++;
  m_client->add_aio_operation(oid, true, std::bind(
    &TestIoCtxImpl::execute_aio_operations, this, oid, &ops, pbl, snap_id,
    m_snapc, objver), c);
  return 0;
}
// Asynchronously establish a watch on object 'o'.  If the client is
// blocklisted the completion is queued immediately with -EBLOCKLISTED
// instead of touching the watch/notify hub.
int TestIoCtxImpl::aio_watch(const std::string& o, AioCompletionImpl *c,
                             uint64_t *handle, librados::WatchCtx2 *watch_ctx) {
  m_pending_ops++;
  c->get();
  C_AioNotify *ctx = new C_AioNotify(this, c);
  if (m_client->is_blocklisted()) {
    m_client->get_aio_finisher()->queue(ctx, -EBLOCKLISTED);
  } else {
    m_client->get_watch_notify()->aio_watch(m_client, m_pool_id,
                                            get_namespace(), o,
                                            get_instance_id(), handle, nullptr,
                                            watch_ctx, ctx);
  }
  return 0;
}
// Asynchronously tear down a watch; mirrors aio_watch()'s blocklist
// short-circuit behaviour.
int TestIoCtxImpl::aio_unwatch(uint64_t handle, AioCompletionImpl *c) {
  m_pending_ops++;
  c->get();
  C_AioNotify *ctx = new C_AioNotify(this, c);
  if (m_client->is_blocklisted()) {
    m_client->get_aio_finisher()->queue(ctx, -EBLOCKLISTED);
  } else {
    m_client->get_watch_notify()->aio_unwatch(m_client, handle, ctx);
  }
  return 0;
}
// Invoke an object-class method (cls/method) against 'oid'.  Returns
// -ENOSYS when the method is not registered with the handler; otherwise
// dispatches through the handler's per-call method context.
int TestIoCtxImpl::exec(const std::string& oid, TestClassHandler *handler,
                        const char *cls, const char *method,
                        bufferlist& inbl, bufferlist* outbl,
                        uint64_t snap_id, const SnapContext &snapc) {
  if (m_client->is_blocklisted()) {
    return -EBLOCKLISTED;
  }

  cls_method_cxx_call_t call = handler->get_method(cls, method);
  if (call == NULL) {
    return -ENOSYS;
  }

  return (*call)(reinterpret_cast<cls_method_context_t>(
    handler->get_method_context(this, oid, snap_id, snapc).get()), &inbl,
    outbl);
}
// Enumerate the current watchers on object 'o'.
int TestIoCtxImpl::list_watchers(const std::string& o,
                                 std::list<obj_watch_t> *out_watchers) {
  if (m_client->is_blocklisted()) {
    return -EBLOCKLISTED;
  }

  return m_client->get_watch_notify()->list_watchers(m_pool_id, get_namespace(),
                                                     o, out_watchers);
}

// Synchronously notify watchers of 'o', collecting their replies in 'pbl'.
int TestIoCtxImpl::notify(const std::string& o, bufferlist& bl,
                          uint64_t timeout_ms, bufferlist *pbl) {
  if (m_client->is_blocklisted()) {
    return -EBLOCKLISTED;
  }

  return m_client->get_watch_notify()->notify(m_client, m_pool_id,
                                              get_namespace(), o, bl,
                                              timeout_ms, pbl);
}
// Acknowledge receipt of a notification, optionally carrying a reply payload.
void TestIoCtxImpl::notify_ack(const std::string& o, uint64_t notify_id,
                               uint64_t handle, bufferlist& bl) {
  m_client->get_watch_notify()->notify_ack(m_client, m_pool_id, get_namespace(),
                                           o, notify_id, handle,
                                           m_client->get_instance_id(), bl);
}
// Synchronous compound write: queues the op like aio_operate() and blocks on
// a locally owned completion until it finishes.
int TestIoCtxImpl::operate(const std::string& oid,
                           TestObjectOperationImpl &ops) {
  AioCompletionImpl *comp = new AioCompletionImpl();

  ops.get();  // released by execute_aio_operations()
  m_pending_ops++;
  m_client->add_aio_operation(oid, false, std::bind(
    &TestIoCtxImpl::execute_aio_operations, this, oid, &ops,
    reinterpret_cast<bufferlist*>(0), m_snap_seq, m_snapc, nullptr), comp);

  comp->wait_for_complete();
  int ret = comp->get_return_value();
  comp->put();
  return ret;
}

// Synchronous compound read: as operate(), but captures output in 'pbl'.
int TestIoCtxImpl::operate_read(const std::string& oid,
                                TestObjectOperationImpl &ops,
                                bufferlist *pbl) {
  AioCompletionImpl *comp = new AioCompletionImpl();

  ops.get();  // released by execute_aio_operations()
  m_pending_ops++;
  m_client->add_aio_operation(oid, false, std::bind(
    &TestIoCtxImpl::execute_aio_operations, this, oid, &ops, pbl,
    m_snap_seq, m_snapc, nullptr), comp);

  comp->wait_for_complete();
  int ret = comp->get_return_value();
  comp->put();
  return ret;
}
// Async wrappers that run the synchronous self-managed snapshot primitives
// on the client's AIO queue ("" = not tied to any particular object).
void TestIoCtxImpl::aio_selfmanaged_snap_create(uint64_t *snapid,
                                                AioCompletionImpl *c) {
  m_client->add_aio_operation(
    "", true,
    std::bind(&TestIoCtxImpl::selfmanaged_snap_create, this, snapid), c);
}

void TestIoCtxImpl::aio_selfmanaged_snap_remove(uint64_t snapid,
                                                AioCompletionImpl *c) {
  m_client->add_aio_operation(
    "", true,
    std::bind(&TestIoCtxImpl::selfmanaged_snap_remove, this, snapid), c);
}
// Replace the write snap context used for subsequent mutations.
int TestIoCtxImpl::selfmanaged_snap_set_write_ctx(snap_t seq,
                                                  std::vector<snap_t>& snaps) {
  std::vector<snapid_t> snap_ids(snaps.begin(), snaps.end());
  m_snapc = SnapContext(seq, snap_ids);
  return 0;
}

// Allocation hints are irrelevant to the in-memory stub; accepted and ignored.
int TestIoCtxImpl::set_alloc_hint(const std::string& oid,
                                  uint64_t expected_object_size,
                                  uint64_t expected_write_size,
                                  uint32_t flags,
                                  const SnapContext &snapc) {
  return 0;
}
// Select the snapshot to read from; 0 is normalized to CEPH_NOSNAP (HEAD).
void TestIoCtxImpl::set_snap_read(snap_t seq) {
  m_snap_seq = (seq == 0 ? CEPH_NOSNAP : seq);
}
// Apply a single encoded tmap command (SET or RM) to 'oid': read-modify-write
// the object's entire {header, key map} encoding.  Creates the object when
// absent.  Not safe against concurrent tmap updates (see TODO).
int TestIoCtxImpl::tmap_update(const std::string& oid, bufferlist& cmdbl) {
  if (m_client->is_blocklisted()) {
    return -EBLOCKLISTED;
  }

  // TODO: protect against concurrent tmap updates
  bufferlist tmap_header;
  std::map<string,bufferlist> tmap;
  uint64_t size = 0;
  int r = stat(oid, &size, NULL);
  if (r == -ENOENT) {
    r = create(oid, false, m_snapc);
  }
  if (r < 0) {
    return r;
  }

  // Decode the existing header + map, if the object already has content.
  if (size > 0) {
    bufferlist inbl;
    r = read(oid, size, 0, &inbl, CEPH_NOSNAP, nullptr);
    if (r < 0) {
      return r;
    }
    auto iter = inbl.cbegin();
    decode(tmap_header, iter);
    decode(tmap, iter);
  }

  // Decode the command: opcode, key, and (for SET) the value.
  __u8 c;
  std::string key;
  bufferlist value;
  auto iter = cmdbl.cbegin();
  decode(c, iter);
  decode(key, iter);

  switch (c) {
    case CEPH_OSD_TMAP_SET:
      decode(value, iter);
      tmap[key] = value;
      break;
    case CEPH_OSD_TMAP_RM:
      r = tmap.erase(key);
      if (r == 0) {
        // removing a non-existent key fails
        return -ENOENT;
      }
      break;
    default:
      return -EINVAL;
  }

  // Re-encode and overwrite the whole object.
  bufferlist out;
  encode(tmap_header, out);
  encode(tmap, out);
  r = write_full(oid, out, m_snapc);
  return r;
}
// Synchronously tear down a watch by handle.
int TestIoCtxImpl::unwatch(uint64_t handle) {
  if (m_client->is_blocklisted()) {
    return -EBLOCKLISTED;
  }

  return m_client->get_watch_notify()->unwatch(m_client, handle);
}

// Synchronously establish a watch; exactly one of ctx/ctx2 is typically used.
int TestIoCtxImpl::watch(const std::string& o, uint64_t *handle,
                         librados::WatchCtx *ctx, librados::WatchCtx2 *ctx2) {
  if (m_client->is_blocklisted()) {
    return -EBLOCKLISTED;
  }

  return m_client->get_watch_notify()->watch(m_client, m_pool_id,
                                             get_namespace(), o,
                                             get_instance_id(), handle, ctx,
                                             ctx2);
}
// Run an arbitrary callable against 'oid' under the client's per-object
// transaction guard (serializes concurrent access to the same object).
int TestIoCtxImpl::execute_operation(const std::string& oid,
                                     const Operation &operation) {
  if (m_client->is_blocklisted()) {
    return -EBLOCKLISTED;
  }

  TestRadosClient::Transaction transaction(m_client, get_namespace(), oid);
  return operation(this, oid);
}
// Execute every queued sub-op of a compound operation in order, stopping at
// the first failure.  Runs inside the per-object transaction guard.  Always
// releases the pending-op slot and the ops reference taken by the caller.
int TestIoCtxImpl::execute_aio_operations(const std::string& oid,
                                          TestObjectOperationImpl *ops,
                                          bufferlist *pbl, uint64_t snap_id,
                                          const SnapContext &snapc,
                                          uint64_t* objver) {
  int ret = 0;
  if (m_client->is_blocklisted()) {
    ret = -EBLOCKLISTED;
  } else {
    TestRadosClient::Transaction transaction(m_client, get_namespace(), oid);
    for (ObjectOperations::iterator it = ops->ops.begin();
         it != ops->ops.end(); ++it) {
      ret = (*it)(this, oid, pbl, snap_id, snapc, objver);
      if (ret < 0) {
        break;
      }
    }
  }
  m_pending_ops--;
  ops->put();
  return ret;
}
// Completion callback for aio_notify/aio_watch/aio_unwatch: releases the
// pending-op slot and finishes (and unrefs) the user's completion.
void TestIoCtxImpl::handle_aio_notify_complete(AioCompletionImpl *c, int r) {
  m_pending_ops--;
  m_client->finish_aio_completion(c, r);
}
} // namespace librados
| 11,565 | 28.281013 | 84 | cc |
null | ceph-main/src/test/librados_test_stub/TestIoCtxImpl.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_IO_CTX_IMPL_H
#define CEPH_TEST_IO_CTX_IMPL_H
#include <list>
#include <atomic>
#include <boost/function.hpp>
#include "include/rados/librados.hpp"
#include "include/Context.h"
#include "common/snap_types.h"
namespace librados {
class TestClassHandler;
class TestIoCtxImpl;
class TestRadosClient;
// Signature of one queued sub-operation: invoked with the io-ctx, object
// name, optional output bufferlist, snap id to read from, write snap
// context, and an optional object-version out-param.
typedef boost::function<int(TestIoCtxImpl*,
                            const std::string&,
                            bufferlist *,
                            uint64_t,
                            const SnapContext &,
                            uint64_t*)> ObjectOperationTestImpl;
typedef std::list<ObjectOperationTestImpl> ObjectOperations;
// Reference-counted list of sub-operations forming one compound object
// operation; put() deletes it when the count drops to zero.
struct TestObjectOperationImpl {
public:
  void get();
  void put();

  ObjectOperations ops;

private:
  std::atomic<uint64_t> m_refcount = { 0 };
};
// Reference-counted in-memory stand-in for librados' IoCtxImpl.  Holds the
// pool/namespace/snapshot state for a client and routes object operations
// through TestRadosClient; concrete storage backends implement the pure
// virtual data-path methods.
class TestIoCtxImpl {
public:
  // Callable executed under the per-object transaction guard.
  typedef boost::function<int(TestIoCtxImpl *, const std::string &)> Operation;

  TestIoCtxImpl();
  explicit TestIoCtxImpl(TestRadosClient *client, int64_t m_pool_id,
                         const std::string& pool_name);

  TestRadosClient *get_rados_client() {
    return m_client;
  }

  // Reference counting; put() deletes this at zero.
  void get();
  void put();

  inline int64_t get_pool_id() const {
    return m_pool_id;
  }

  // Create a new io-ctx sharing this one's client and state.
  virtual TestIoCtxImpl *clone() = 0;

  virtual uint64_t get_instance_id() const;
  virtual int64_t get_id();
  virtual uint64_t get_last_version();
  virtual std::string get_pool_name();

  inline void set_namespace(const std::string& namespace_name) {
    m_namespace_name = namespace_name;
  }
  inline std::string get_namespace() const {
    return m_namespace_name;
  }

  // Snapshot used for reads (CEPH_NOSNAP = HEAD).
  snap_t get_snap_read() const {
    return m_snap_seq;
  }

  // Snap context applied to writes.
  inline void set_snap_context(const SnapContext& snapc) {
    m_snapc = snapc;
  }
  const SnapContext &get_snap_context() const {
    return m_snapc;
  }

  // --- async queueing / watch-notify (implemented in this base) ---
  virtual int aio_flush();
  virtual void aio_flush_async(AioCompletionImpl *c);
  virtual void aio_notify(const std::string& oid, AioCompletionImpl *c,
                          bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl);
  virtual int aio_operate(const std::string& oid, TestObjectOperationImpl &ops,
                          AioCompletionImpl *c, SnapContext *snap_context,
                          const ceph::real_time *pmtime, int flags);
  virtual int aio_operate_read(const std::string& oid, TestObjectOperationImpl &ops,
                               AioCompletionImpl *c, int flags,
                               bufferlist *pbl, uint64_t snap_id,
                               uint64_t* objver);
  virtual int aio_remove(const std::string& oid, AioCompletionImpl *c,
                         int flags = 0) = 0;
  virtual int aio_watch(const std::string& o, AioCompletionImpl *c,
                        uint64_t *handle, librados::WatchCtx2 *ctx);
  virtual int aio_unwatch(uint64_t handle, AioCompletionImpl *c);

  // --- data path (pure virtuals implemented by storage backends) ---
  virtual int append(const std::string& oid, const bufferlist &bl,
                     const SnapContext &snapc) = 0;
  virtual int assert_exists(const std::string &oid, uint64_t snap_id) = 0;
  virtual int assert_version(const std::string &oid, uint64_t ver) = 0;
  virtual int create(const std::string& oid, bool exclusive,
                     const SnapContext &snapc) = 0;
  virtual int exec(const std::string& oid, TestClassHandler *handler,
                   const char *cls, const char *method,
                   bufferlist& inbl, bufferlist* outbl,
                   uint64_t snap_id, const SnapContext &snapc);
  virtual int list_snaps(const std::string& o, snap_set_t *out_snaps) = 0;
  virtual int list_watchers(const std::string& o,
                            std::list<obj_watch_t> *out_watchers);
  virtual int notify(const std::string& o, bufferlist& bl,
                     uint64_t timeout_ms, bufferlist *pbl);
  virtual void notify_ack(const std::string& o, uint64_t notify_id,
                          uint64_t handle, bufferlist& bl);
  virtual int omap_get_vals(const std::string& oid,
                            const std::string& start_after,
                            const std::string &filter_prefix,
                            uint64_t max_return,
                            std::map<std::string, bufferlist> *out_vals) = 0;
  virtual int omap_get_vals2(const std::string& oid,
                             const std::string& start_after,
                             const std::string &filter_prefix,
                             uint64_t max_return,
                             std::map<std::string, bufferlist> *out_vals,
                             bool *pmore) = 0;
  virtual int omap_rm_keys(const std::string& oid,
                           const std::set<std::string>& keys) = 0;
  virtual int omap_set(const std::string& oid,
                       const std::map<std::string, bufferlist> &map) = 0;
  virtual int operate(const std::string& oid, TestObjectOperationImpl &ops);
  virtual int operate_read(const std::string& oid, TestObjectOperationImpl &ops,
                           bufferlist *pbl);
  virtual int read(const std::string& oid, size_t len, uint64_t off,
                   bufferlist *bl, uint64_t snap_id, uint64_t* objver) = 0;
  virtual int remove(const std::string& oid, const SnapContext &snapc) = 0;
  virtual int selfmanaged_snap_create(uint64_t *snapid) = 0;
  virtual void aio_selfmanaged_snap_create(uint64_t *snapid,
                                           AioCompletionImpl *c);
  virtual int selfmanaged_snap_remove(uint64_t snapid) = 0;
  virtual void aio_selfmanaged_snap_remove(uint64_t snapid,
                                           AioCompletionImpl *c);
  virtual int selfmanaged_snap_rollback(const std::string& oid,
                                        uint64_t snapid) = 0;
  virtual int selfmanaged_snap_set_write_ctx(snap_t seq,
                                             std::vector<snap_t>& snaps);
  virtual int set_alloc_hint(const std::string& oid,
                             uint64_t expected_object_size,
                             uint64_t expected_write_size,
                             uint32_t flags,
                             const SnapContext &snapc);
  virtual void set_snap_read(snap_t seq);
  virtual int sparse_read(const std::string& oid, uint64_t off, uint64_t len,
                          std::map<uint64_t,uint64_t> *m,
                          bufferlist *data_bl, uint64_t snap_id) = 0;
  virtual int stat(const std::string& oid, uint64_t *psize, time_t *pmtime) = 0;
  virtual int truncate(const std::string& oid, uint64_t size,
                       const SnapContext &snapc) = 0;
  virtual int tmap_update(const std::string& oid, bufferlist& cmdbl);
  virtual int unwatch(uint64_t handle);
  virtual int watch(const std::string& o, uint64_t *handle,
                    librados::WatchCtx *ctx, librados::WatchCtx2 *ctx2);
  virtual int write(const std::string& oid, bufferlist& bl, size_t len,
                    uint64_t off, const SnapContext &snapc) = 0;
  virtual int write_full(const std::string& oid, bufferlist& bl,
                         const SnapContext &snapc) = 0;
  virtual int writesame(const std::string& oid, bufferlist& bl, size_t len,
                        uint64_t off, const SnapContext &snapc) = 0;
  virtual int cmpext(const std::string& oid, uint64_t off, bufferlist& cmp_bl,
                     uint64_t snap_id) = 0;
  virtual int xattr_get(const std::string& oid,
                        std::map<std::string, bufferlist>* attrset) = 0;
  virtual int xattr_set(const std::string& oid, const std::string &name,
                        bufferlist& bl) = 0;
  virtual int zero(const std::string& oid, uint64_t off, uint64_t len,
                   const SnapContext &snapc) = 0;

  int execute_operation(const std::string& oid,
                        const Operation &operation);

protected:
  TestIoCtxImpl(const TestIoCtxImpl& rhs);
  virtual ~TestIoCtxImpl();

  // Runs every sub-op in 'ops'; drops the pending-op slot and ops reference.
  int execute_aio_operations(const std::string& oid,
                             TestObjectOperationImpl *ops,
                             bufferlist *pbl, uint64_t,
                             const SnapContext &snapc,
                             uint64_t* objver);

private:
  // Context bridging watch/notify completions back into this io-ctx.
  struct C_AioNotify : public Context {
    TestIoCtxImpl *io_ctx;
    AioCompletionImpl *aio_comp;
    C_AioNotify(TestIoCtxImpl *_io_ctx, AioCompletionImpl *_aio_comp)
      : io_ctx(_io_ctx), aio_comp(_aio_comp) {
    }
    void finish(int r) override {
      io_ctx->handle_aio_notify_complete(aio_comp, r);
    }
  };

  TestRadosClient *m_client;
  int64_t m_pool_id = 0;
  std::string m_pool_name;
  std::string m_namespace_name;

  snap_t m_snap_seq = 0;       // snapshot used for reads
  SnapContext m_snapc;         // snap context applied to writes

  std::atomic<uint64_t> m_refcount = { 0 };
  std::atomic<uint64_t> m_pending_ops = { 0 };

  void handle_aio_notify_complete(AioCompletionImpl *aio_comp, int r);
};
} // namespace librados
#endif // CEPH_TEST_IO_CTX_IMPL_H
| 8,907 | 39.126126 | 84 | h |
null | ceph-main/src/test/librados_test_stub/TestMemCluster.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/librados_test_stub/TestMemCluster.h"
#include "test/librados_test_stub/TestMemRadosClient.h"
namespace librados {
// Default-construct an object version at HEAD.  'mtime' is now explicitly
// zero-initialized: it is a POD time_t and was previously left indeterminate
// until the first write, so stat() on a fresh object read garbage.
TestMemCluster::File::File()
  : mtime(0), objver(0), snap_id(), exists(true) {
}
// Copy an object version (used when cloning a file for a snapshot).
// NOTE(review): 'snaps' and 'snap_overlap' are not copied — the new version
// appears to start with empty snapshot bookkeeping; confirm this is the
// intended clone semantic.  The lock is freshly constructed, not copied.
TestMemCluster::File::File(const File &rhs)
  : data(rhs.data),
    mtime(rhs.mtime),
    objver(rhs.objver),
    snap_id(rhs.snap_id),
    exists(rhs.exists) {
}
TestMemCluster::Pool::Pool() = default;

// Seed the client nonce counter from this object's address so distinct
// clusters in one process hand out distinct nonce ranges.
TestMemCluster::TestMemCluster()
  : m_next_nonce(static_cast<uint32_t>(reinterpret_cast<uint64_t>(this))) {
}

// Release the reference held on every pool created via pool_create().
TestMemCluster::~TestMemCluster() {
  for (auto pool_pair : m_pools) {
    pool_pair.second->put();
  }
}

// Hand out a new in-memory client bound to this cluster.
TestRadosClient *TestMemCluster::create_rados_client(CephContext *cct) {
  return new TestMemRadosClient(cct, this);
}
// Register a removal handler for an existing object.  Fails with -ENOENT if
// either the pool or the object does not exist; asserts the same handler is
// not registered twice for one object.
int TestMemCluster::register_object_handler(int64_t pool_id,
                                            const ObjectLocator& locator,
                                            ObjectHandler* object_handler) {
  std::lock_guard locker{m_lock};
  auto pool = get_pool(m_lock, pool_id);
  if (pool == nullptr) {
    return -ENOENT;
  }

  std::unique_lock pool_locker{pool->file_lock};
  auto file_it = pool->files.find(locator);
  if (file_it == pool->files.end()) {
    return -ENOENT;
  }

  auto& object_handlers = pool->file_handlers[locator];
  auto it = object_handlers.find(object_handler);
  ceph_assert(it == object_handlers.end());

  object_handlers.insert(object_handler);
  return 0;
}
// Remove a previously registered removal handler; silently ignores unknown
// pools, objects, or handlers.
void TestMemCluster::unregister_object_handler(int64_t pool_id,
                                               const ObjectLocator& locator,
                                               ObjectHandler* object_handler) {
  std::lock_guard locker{m_lock};
  auto pool = get_pool(m_lock, pool_id);
  if (pool == nullptr) {
    return;
  }

  std::unique_lock pool_locker{pool->file_lock};
  auto handlers_it = pool->file_handlers.find(locator);
  if (handlers_it == pool->file_handlers.end()) {
    return;
  }

  auto& object_handlers = handlers_it->second;
  object_handlers.erase(object_handler);
}
// Create a new pool with the next available id; -EEXIST if the name is taken.
int TestMemCluster::pool_create(const std::string &pool_name) {
  std::lock_guard locker{m_lock};
  if (m_pools.count(pool_name) != 0) {
    return -EEXIST;
  }

  auto new_pool = new Pool();
  new_pool->pool_id = ++m_pool_id;
  m_pools[pool_name] = new_pool;
  return 0;
}
// Delete a pool by name, dropping the cluster's reference on it.
int TestMemCluster::pool_delete(const std::string &pool_name) {
  std::lock_guard locker{m_lock};
  auto pool_it = m_pools.find(pool_name);
  if (pool_it == m_pools.end()) {
    return -ENOENT;
  }

  pool_it->second->put();
  m_pools.erase(pool_it);
  return 0;
}
// Cache tiering is not modelled; every pool is its own base tier.
int TestMemCluster::pool_get_base_tier(int64_t pool_id, int64_t* base_tier) {
  // TODO
  *base_tier = pool_id;
  return 0;
}
int TestMemCluster::pool_list(std::list<std::pair<int64_t, std::string> >& v) {
std::lock_guard locker{m_lock};
v.clear();
for (Pools::iterator iter = m_pools.begin(); iter != m_pools.end(); ++iter) {
v.push_back(std::make_pair(iter->second->pool_id, iter->first));
}
return 0;
}
// Resolve a pool name to its id, or -ENOENT when unknown.
int64_t TestMemCluster::pool_lookup(const std::string &pool_name) {
  std::lock_guard locker{m_lock};
  auto pool_it = m_pools.find(pool_name);
  return pool_it == m_pools.end() ? -ENOENT : pool_it->second->pool_id;
}
int TestMemCluster::pool_reverse_lookup(int64_t id, std::string *name) {
std::lock_guard locker{m_lock};
for (Pools::iterator iter = m_pools.begin(); iter != m_pools.end(); ++iter) {
if (iter->second->pool_id == id) {
*name = iter->first;
return 0;
}
}
return -ENOENT;
}
// Locking wrapper over the lock-held id lookup below.
TestMemCluster::Pool *TestMemCluster::get_pool(int64_t pool_id) {
  std::lock_guard locker{m_lock};
  return get_pool(m_lock, pool_id);
}

// Lookup by id with m_lock already held by the caller (linear scan).
TestMemCluster::Pool *TestMemCluster::get_pool(const ceph::mutex& lock,
                                               int64_t pool_id) {
  for (const auto& [pool_name, pool] : m_pools) {
    if (pool->pool_id == pool_id) {
      return pool;
    }
  }
  return nullptr;
}

// Lookup by name (keyed map access).
TestMemCluster::Pool *TestMemCluster::get_pool(const std::string &pool_name) {
  std::lock_guard locker{m_lock};
  auto pool_it = m_pools.find(pool_name);
  return pool_it == m_pools.end() ? nullptr : pool_it->second;
}
// Hand out a fresh (nonce, global_id) pair for a connecting client.
void TestMemCluster::allocate_client(uint32_t *nonce, uint64_t *global_id) {
  std::lock_guard locker{m_lock};
  *nonce = m_next_nonce++;
  *global_id = m_next_global_id++;
}

// Forget a client's blocklist entry when it disconnects.
void TestMemCluster::deallocate_client(uint32_t nonce) {
  std::lock_guard locker{m_lock};
  m_blocklist.erase(nonce);
}

bool TestMemCluster::is_blocklisted(uint32_t nonce) const {
  std::lock_guard locker{m_lock};
  return (m_blocklist.find(nonce) != m_blocklist.end());
}
// Blocklist a client by nonce, then drop its watches.  The watch/notify
// teardown happens outside m_lock to avoid lock-order issues with the hub.
void TestMemCluster::blocklist(uint32_t nonce) {
  {
    std::lock_guard locker{m_lock};
    m_blocklist.insert(nonce);
  }

  // after blocklisting the client, disconnect and drop its watches
  m_watch_notify.blocklist(nonce);
}
// Serialize access to one object: block until no other transaction holds
// 'locator', then claim it.
void TestMemCluster::transaction_start(const ObjectLocator& locator) {
  std::unique_lock locker{m_lock};
  m_transaction_cond.wait(locker, [&locator, this] {
      return m_transactions.count(locator) == 0;
    });
  auto result = m_transactions.insert(locator);
  ceph_assert(result.second);
}

// Release the claim on 'locator' and wake any waiters.
void TestMemCluster::transaction_finish(const ObjectLocator& locator) {
  std::lock_guard locker{m_lock};

  size_t count = m_transactions.erase(locator);
  ceph_assert(count == 1);
  m_transaction_cond.notify_all();
}
} // namespace librados
| 5,538 | 26.151961 | 79 | cc |
null | ceph-main/src/test/librados_test_stub/TestMemCluster.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_MEM_CLUSTER_H
#define CEPH_TEST_MEM_CLUSTER_H
#include "test/librados_test_stub/TestCluster.h"
#include "include/buffer.h"
#include "include/interval_set.h"
#include "include/int_types.h"
#include "common/ceph_mutex.h"
#include "common/RefCountedObj.h"
#include <boost/shared_ptr.hpp>
#include <list>
#include <map>
#include <set>
#include <string>
namespace librados {
// Fully in-memory TestCluster implementation: pools, object data, omaps,
// tmaps, xattrs and snapshots all live in process-local maps.
class TestMemCluster : public TestCluster {
public:
  typedef std::map<std::string, bufferlist> OMap;
  typedef std::map<ObjectLocator, OMap> FileOMaps;
  typedef std::map<ObjectLocator, bufferlist> FileTMaps;
  typedef std::map<std::string, bufferlist> XAttrs;
  typedef std::map<ObjectLocator, XAttrs> FileXAttrs;
  typedef std::set<ObjectHandler*> ObjectHandlers;
  typedef std::map<ObjectLocator, ObjectHandlers> FileHandlers;

  // One version of an object's contents (HEAD or a snapshot clone).
  struct File {
    File();
    File(const File &rhs);

    bufferlist data;
    time_t mtime;
    uint64_t objver;

    uint64_t snap_id;
    std::vector<uint64_t> snaps;
    interval_set<uint64_t> snap_overlap;  // extents shared with next version

    bool exists;
    ceph::shared_mutex lock =
      ceph::make_shared_mutex("TestMemCluster::File::lock");
  };
  typedef boost::shared_ptr<File> SharedFile;

  // Ordered version history of an object: oldest snapshot first, HEAD last.
  typedef std::list<SharedFile> FileSnapshots;
  typedef std::map<ObjectLocator, FileSnapshots> Files;

  typedef std::set<uint64_t> SnapSeqs;

  // Per-pool state; reference counted because io-ctxs hold onto pools.
  struct Pool : public RefCountedObject {
    Pool();

    int64_t pool_id = 0;

    SnapSeqs snap_seqs;
    uint64_t snap_id = 1;  // next self-managed snapshot id

    ceph::shared_mutex file_lock =
      ceph::make_shared_mutex("TestMemCluster::Pool::file_lock");
    Files files;
    FileOMaps file_omaps;
    FileTMaps file_tmaps;
    FileXAttrs file_xattrs;
    FileHandlers file_handlers;
  };

  TestMemCluster();
  ~TestMemCluster() override;

  TestRadosClient *create_rados_client(CephContext *cct) override;

  int register_object_handler(int64_t pool_id, const ObjectLocator& locator,
                              ObjectHandler* object_handler) override;
  void unregister_object_handler(int64_t pool_id, const ObjectLocator& locator,
                                 ObjectHandler* object_handler) override;

  // Pool management / lookups.
  int pool_create(const std::string &pool_name);
  int pool_delete(const std::string &pool_name);
  int pool_get_base_tier(int64_t pool_id, int64_t* base_tier);
  int pool_list(std::list<std::pair<int64_t, std::string> >& v);
  int64_t pool_lookup(const std::string &name);
  int pool_reverse_lookup(int64_t id, std::string *name);

  Pool *get_pool(int64_t pool_id);
  Pool *get_pool(const std::string &pool_name);

  // Client identity and blocklisting.
  void allocate_client(uint32_t *nonce, uint64_t *global_id);
  void deallocate_client(uint32_t nonce);

  bool is_blocklisted(uint32_t nonce) const;
  void blocklist(uint32_t nonce);

  // Per-object mutual exclusion used by TestRadosClient::Transaction.
  void transaction_start(const ObjectLocator& locator);
  void transaction_finish(const ObjectLocator& locator);

private:
  typedef std::map<std::string, Pool*> Pools;
  typedef std::set<uint32_t> Blocklist;

  mutable ceph::mutex m_lock =
    ceph::make_mutex("TestMemCluster::m_lock");

  Pools m_pools;
  int64_t m_pool_id = 0;

  uint32_t m_next_nonce;            // seeded from object address in ctor
  uint64_t m_next_global_id = 1234;

  Blocklist m_blocklist;

  ceph::condition_variable m_transaction_cond;
  std::set<ObjectLocator> m_transactions;  // objects with an open transaction

  Pool *get_pool(const ceph::mutex& lock, int64_t pool_id);
};
#endif // CEPH_TEST_MEM_CLUSTER_H
| 3,468 | 26.752 | 79 | h |
null | ceph-main/src/test/librados_test_stub/TestMemIoCtxImpl.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/librados_test_stub/TestMemIoCtxImpl.h"
#include "test/librados_test_stub/TestMemRadosClient.h"
#include "common/Clock.h"
#include "include/err.h"
#include <functional>
#include <boost/algorithm/string/predicate.hpp>
#include <errno.h>
#include <include/compat.h>
#define dout_subsys ceph_subsys_rados
#undef dout_prefix
#define dout_prefix *_dout << "TestMemIoCtxImpl: " << this << " " << __func__ \
<< ": " << oid << " "
// Flatten an interval set into a vector of (offset, length) extents.
static void to_vector(const interval_set<uint64_t> &set,
                      std::vector<std::pair<uint64_t, uint64_t> > *vec) {
  vec->clear();
  for (const auto& extent : set) {
    vec->push_back(extent);
  }
}
// see PrimaryLogPG::finish_extent_cmp()
static int cmpext_compare(const bufferlist &bl, const bufferlist &read_bl) {
for (uint64_t idx = 0; idx < bl.length(); ++idx) {
char read_byte = (idx < read_bl.length() ? read_bl[idx] : 0);
if (bl[idx] != read_byte) {
return -MAX_ERRNO - idx;
}
}
return 0;
}
namespace librados {
TestMemIoCtxImpl::TestMemIoCtxImpl() {
}

// Copy constructor shares the backing pool, taking a reference on it.
TestMemIoCtxImpl::TestMemIoCtxImpl(const TestMemIoCtxImpl& rhs)
    : TestIoCtxImpl(rhs), m_client(rhs.m_client), m_pool(rhs.m_pool) {
  m_pool->get();
}

// Primary constructor: bind to a client and an in-memory pool (referenced).
TestMemIoCtxImpl::TestMemIoCtxImpl(TestMemRadosClient *client, int64_t pool_id,
                                   const std::string& pool_name,
                                   TestMemCluster::Pool *pool)
    : TestIoCtxImpl(client, pool_id, pool_name), m_client(client),
      m_pool(pool) {
  m_pool->get();
}

TestMemIoCtxImpl::~TestMemIoCtxImpl() {
  m_pool->put();
}

TestIoCtxImpl *TestMemIoCtxImpl::clone() {
  return new TestMemIoCtxImpl(*this);
}
// Queue an async object removal (runs the synchronous remove() on the
// client's AIO queue; 'flags' is unused).
int TestMemIoCtxImpl::aio_remove(const std::string& oid, AioCompletionImpl *c, int flags) {
  m_client->add_aio_operation(oid, true,
                              std::bind(&TestMemIoCtxImpl::remove, this, oid,
                                        get_snap_context()),
                              c);
  return 0;
}
// Append 'bl' to the end of the object, creating it (and any snapshot clone
// implied by 'snapc') on demand.  Writes are rejected while reading from a
// snapshot (-EROFS) or while blocklisted.
int TestMemIoCtxImpl::append(const std::string& oid, const bufferlist &bl,
                             const SnapContext &snapc) {
  if (get_snap_read() != CEPH_NOSNAP) {
    return -EROFS;
  } else if (m_client->is_blocklisted()) {
    return -EBLOCKLISTED;
  }

  auto cct = m_client->cct();
  ldout(cct, 20) << "length=" << bl.length() << ", snapc=" << snapc << dendl;

  TestMemCluster::SharedFile file;
  {
    // pool lock only guards the file lookup/creation ...
    std::unique_lock l{m_pool->file_lock};
    file = get_file(oid, true, CEPH_NOSNAP, snapc);
  }

  // ... the file's own lock guards the data mutation.
  std::unique_lock l{file->lock};
  auto off = file->data.length();
  ensure_minimum_length(off + bl.length(), &file->data);
  file->data.begin(off).copy_in(bl.length(), bl);
  return 0;
}
int TestMemIoCtxImpl::assert_exists(const std::string &oid, uint64_t snap_id) {
if (m_client->is_blocklisted()) {
return -EBLOCKLISTED;
}
std::shared_lock l{m_pool->file_lock};
TestMemCluster::SharedFile file = get_file(oid, false, snap_id, {});
if (file == NULL) {
return -ENOENT;
}
return 0;
}
// Compare the caller's expected object version against the stored one:
// -ERANGE if the caller's is older, -EOVERFLOW if newer, 0 on exact match.
int TestMemIoCtxImpl::assert_version(const std::string &oid, uint64_t ver) {
  if (m_client->is_blocklisted()) {
    return -EBLOCKLISTED;
  }

  std::shared_lock l{m_pool->file_lock};
  TestMemCluster::SharedFile file = get_file(oid, false, CEPH_NOSNAP, {});
  if (file == NULL || !file->exists) {
    return -ENOENT;
  }
  if (ver < file->objver) {
    return -ERANGE;
  }
  if (ver > file->objver) {
    return -EOVERFLOW;
  }
  return 0;
}
// Create the object (no-op if it already exists unless 'exclusive', which
// returns -EEXIST).  Rejected in snapshot-read mode or while blocklisted.
int TestMemIoCtxImpl::create(const std::string& oid, bool exclusive,
                             const SnapContext &snapc) {
  if (get_snap_read() != CEPH_NOSNAP) {
    return -EROFS;
  } else if (m_client->is_blocklisted()) {
    return -EBLOCKLISTED;
  }

  auto cct = m_client->cct();
  ldout(cct, 20) << "snapc=" << snapc << dendl;

  std::unique_lock l{m_pool->file_lock};
  if (exclusive) {
    TestMemCluster::SharedFile file = get_file(oid, false, CEPH_NOSNAP, {});
    if (file != NULL && file->exists) {
      return -EEXIST;
    }
  }

  // get_file() with write=true creates the object when missing.
  get_file(oid, true, CEPH_NOSNAP, snapc);
  return 0;
}
int TestMemIoCtxImpl::list_snaps(const std::string& oid, snap_set_t *out_snaps) {
auto cct = m_client->cct();
ldout(cct, 20) << dendl;
if (m_client->is_blocklisted()) {
return -EBLOCKLISTED;
}
out_snaps->seq = 0;
out_snaps->clones.clear();
std::shared_lock l{m_pool->file_lock};
TestMemCluster::Files::iterator it = m_pool->files.find(
{get_namespace(), oid});
if (it == m_pool->files.end()) {
return -ENOENT;
}
bool include_head = false;
TestMemCluster::FileSnapshots &file_snaps = it->second;
for (TestMemCluster::FileSnapshots::iterator s_it = file_snaps.begin();
s_it != file_snaps.end(); ++s_it) {
TestMemCluster::File &file = *s_it->get();
if (file_snaps.size() > 1) {
out_snaps->seq = file.snap_id;
TestMemCluster::FileSnapshots::iterator next_it(s_it);
++next_it;
if (next_it == file_snaps.end()) {
include_head = true;
break;
}
++out_snaps->seq;
if (!file.exists) {
continue;
}
// update the overlap with the next version's overlap metadata
TestMemCluster::File &next_file = *next_it->get();
interval_set<uint64_t> overlap;
if (next_file.exists) {
overlap = next_file.snap_overlap;
}
clone_info_t clone;
clone.cloneid = file.snap_id;
clone.snaps = file.snaps;
to_vector(overlap, &clone.overlap);
clone.size = file.data.length();
out_snaps->clones.push_back(clone);
}
}
if ((file_snaps.size() == 1 && file_snaps.back()->data.length() > 0) ||
include_head)
{
// Include the SNAP_HEAD
TestMemCluster::File &file = *file_snaps.back();
if (file.exists) {
std::shared_lock l2{file.lock};
if (out_snaps->seq == 0 && !include_head) {
out_snaps->seq = file.snap_id;
}
clone_info_t head_clone;
head_clone.cloneid = librados::SNAP_HEAD;
head_clone.size = file.data.length();
out_snaps->clones.push_back(head_clone);
}
}
ldout(cct, 20) << "seq=" << out_snaps->seq << ", "
<< "clones=[";
bool first_clone = true;
for (auto& clone : out_snaps->clones) {
*_dout << "{"
<< "cloneid=" << clone.cloneid << ", "
<< "snaps=" << clone.snaps << ", "
<< "overlap=" << clone.overlap << ", "
<< "size=" << clone.size << "}";
if (!first_clone) {
*_dout << ", ";
} else {
first_clone = false;
}
}
*_dout << "]" << dendl;
return 0;
}
// Fetch up to 'max_return' omap key/value pairs after 'start_after',
// optionally restricted to keys beginning with 'filter_prefix'.  Sets
// '*pmore' (when non-null) if additional keys remain.
int TestMemIoCtxImpl::omap_get_vals2(const std::string& oid,
                                     const std::string& start_after,
                                     const std::string &filter_prefix,
                                     uint64_t max_return,
                                     std::map<std::string, bufferlist> *out_vals,
                                     bool *pmore) {
  if (out_vals == NULL) {
    return -EINVAL;
  } else if (m_client->is_blocklisted()) {
    return -EBLOCKLISTED;
  }

  TestMemCluster::SharedFile file;
  {
    std::shared_lock l{m_pool->file_lock};
    file = get_file(oid, false, CEPH_NOSNAP, {});
    if (file == NULL) {
      return -ENOENT;
    }
  }

  out_vals->clear();

  // NOTE(review): file->lock is held while reading the pool-level omap map;
  // confirm this is the intended guard for m_pool->file_omaps.
  std::shared_lock l{file->lock};
  TestMemCluster::FileOMaps::iterator o_it = m_pool->file_omaps.find(
    {get_namespace(), oid});
  if (o_it == m_pool->file_omaps.end()) {
    if (pmore) {
      *pmore = false;
    }
    return 0;
  }

  TestMemCluster::OMap &omap = o_it->second;
  TestMemCluster::OMap::iterator it = omap.begin();
  if (!start_after.empty()) {
    // resume strictly after the caller's last-seen key
    it = omap.upper_bound(start_after);
  }

  while (it != omap.end() && max_return > 0) {
    if (filter_prefix.empty() ||
        boost::algorithm::starts_with(it->first, filter_prefix)) {
      (*out_vals)[it->first] = it->second;
      --max_return;
    }
    ++it;
  }
  if (pmore) {
    *pmore = (it != omap.end());
  }
  return 0;
}
// Legacy variant of omap_get_vals2() without the 'more' out-param.
int TestMemIoCtxImpl::omap_get_vals(const std::string& oid,
                                    const std::string& start_after,
                                    const std::string &filter_prefix,
                                    uint64_t max_return,
                                    std::map<std::string, bufferlist> *out_vals) {
  return omap_get_vals2(oid, start_after, filter_prefix, max_return, out_vals, nullptr);
}
// Remove the given omap keys from the object (missing keys are ignored).
// Rejected in snapshot-read mode or while blocklisted.
int TestMemIoCtxImpl::omap_rm_keys(const std::string& oid,
                                   const std::set<std::string>& keys) {
  if (get_snap_read() != CEPH_NOSNAP) {
    return -EROFS;
  } else if (m_client->is_blocklisted()) {
    return -EBLOCKLISTED;
  }

  TestMemCluster::SharedFile file;
  {
    std::unique_lock l{m_pool->file_lock};
    file = get_file(oid, true, CEPH_NOSNAP, get_snap_context());
    if (file == NULL) {
      return -ENOENT;
    }
  }

  std::unique_lock l{file->lock};
  for (const auto& key : keys) {
    m_pool->file_omaps[{get_namespace(), oid}].erase(key);
  }
  return 0;
}
int TestMemIoCtxImpl::omap_set(const std::string& oid,
const std::map<std::string, bufferlist> &map) {
if (get_snap_read() != CEPH_NOSNAP) {
return -EROFS;
} else if (m_client->is_blocklisted()) {
return -EBLOCKLISTED;
}
TestMemCluster::SharedFile file;
{
std::unique_lock l{m_pool->file_lock};
file = get_file(oid, true, CEPH_NOSNAP, get_snap_context());
if (file == NULL) {
return -ENOENT;
}
}
std::unique_lock l{file->lock};
for (std::map<std::string, bufferlist>::const_iterator it = map.begin();
it != map.end(); ++it) {
bufferlist bl;
bl.append(it->second);
m_pool->file_omaps[{get_namespace(), oid}][it->first] = bl;
}
return 0;
}
// Read up to 'len' bytes at offset 'off' from the object at snapshot
// 'snap_id' into *bl.  len == 0 means "read the whole object".  Returns the
// number of bytes read (possibly 0), or a negative error code.
int TestMemIoCtxImpl::read(const std::string& oid, size_t len, uint64_t off,
                           bufferlist *bl, uint64_t snap_id,
                           uint64_t* objver) {
  if (m_client->is_blocklisted()) {
    return -EBLOCKLISTED;
  }

  TestMemCluster::SharedFile file;
  {
    std::shared_lock l{m_pool->file_lock};
    file = get_file(oid, false, snap_id, {});
    if (file == NULL) {
      return -ENOENT;
    }
  }

  std::shared_lock l{file->lock};
  if (len == 0) {
    // zero-length request means the full object
    len = file->data.length();
  }
  // clamp the extent to the object's current size
  len = clip_io(off, len, file->data.length());
  if (bl != NULL && len > 0) {
    bufferlist bit;
    bit.substr_of(file->data, off, len);
    // deep-copy so the caller cannot mutate the stored object data
    append_clone(bit, bl);
  }
  if (objver != nullptr) {
    *objver = file->objver;
  }
  return len;
}
// Delete the HEAD version of an object.  If no snapshot clones remain the
// object (and its omap) is erased entirely; registered object handlers
// (e.g. watch/notify) are notified when HEAD goes away.
int TestMemIoCtxImpl::remove(const std::string& oid, const SnapContext &snapc) {
  if (get_snap_read() != CEPH_NOSNAP) {
    return -EROFS;
  } else if (m_client->is_blocklisted()) {
    return -EBLOCKLISTED;
  }
  auto cct = m_client->cct();
  ldout(cct, 20) << "snapc=" << snapc << dendl;

  std::unique_lock l{m_pool->file_lock};
  TestMemCluster::SharedFile file = get_file(oid, false, CEPH_NOSNAP, snapc);
  if (file == NULL) {
    return -ENOENT;
  }
  // re-fetch for write: may clone a new HEAD if snapshots were taken since
  file = get_file(oid, true, CEPH_NOSNAP, snapc);

  {
    std::unique_lock l2{file->lock};
    file->exists = false;
  }

  TestCluster::ObjectLocator locator(get_namespace(), oid);
  TestMemCluster::Files::iterator it = m_pool->files.find(locator);
  ceph_assert(it != m_pool->files.end());

  if (*it->second.rbegin() == file) {
    // removing the newest version: fire (and drop) all object handlers
    TestMemCluster::ObjectHandlers object_handlers;
    std::swap(object_handlers, m_pool->file_handlers[locator]);
    m_pool->file_handlers.erase(locator);

    for (auto object_handler : object_handlers) {
      object_handler->handle_removed(m_client);
    }
  }

  if (it->second.size() == 1) {
    // no snapshot clones remain -- drop the object and its omap entirely
    m_pool->files.erase(it);
    m_pool->file_omaps.erase(locator);
  }
  return 0;
}
int TestMemIoCtxImpl::selfmanaged_snap_create(uint64_t *snapid) {
  // Allocate the next self-managed snapshot id and record it in the pool.
  if (m_client->is_blocklisted()) {
    return -EBLOCKLISTED;
  }

  std::unique_lock locker{m_pool->file_lock};
  uint64_t new_snap_id = ++m_pool->snap_id;
  m_pool->snap_seqs.insert(new_snap_id);
  *snapid = new_snap_id;
  return 0;
}
int TestMemIoCtxImpl::selfmanaged_snap_remove(uint64_t snapid) {
  // Forget a self-managed snapshot id; -ENOENT if it was never created.
  if (m_client->is_blocklisted()) {
    return -EBLOCKLISTED;
  }

  std::unique_lock locker{m_pool->file_lock};
  // TODO clean up all file snapshots
  if (m_pool->snap_seqs.erase(snapid) == 0) {
    return -ENOENT;
  }
  return 0;
}
// Roll an object back to an earlier snapshot version.
// NOTE(review): the 'snapid' parameter is never read -- the rollback target
// comes from get_snap_read() instead; confirm that is the intended contract.
int TestMemIoCtxImpl::selfmanaged_snap_rollback(const std::string& oid,
                                                uint64_t snapid) {
  if (m_client->is_blocklisted()) {
    return -EBLOCKLISTED;
  }

  std::unique_lock l{m_pool->file_lock};

  TestMemCluster::SharedFile file;
  TestMemCluster::Files::iterator f_it = m_pool->files.find(
    {get_namespace(), oid});
  if (f_it == m_pool->files.end()) {
    // object never existed -- nothing to roll back
    return 0;
  }

  TestMemCluster::FileSnapshots &snaps = f_it->second;
  file = snaps.back();

  // Walk versions newest-to-oldest, counting how many are newer than the
  // rollback point ('versions').
  size_t versions = 0;
  for (TestMemCluster::FileSnapshots::reverse_iterator it = snaps.rbegin();
      it != snaps.rend(); ++it) {
    TestMemCluster::SharedFile file = *it;  // NOTE(review): shadows outer 'file'
    if (file->snap_id < get_snap_read()) {
      if (versions == 0) {
        // already at the snapshot version
        return 0;
      } else if (file->snap_id == CEPH_NOSNAP) {
        if (versions == 1) {
          // delete it current HEAD, next one is correct version
          // (it.base() points at the element after *it, i.e. the HEAD)
          snaps.erase(it.base());
        } else {
          // overwrite contents of current HEAD
          file = TestMemCluster::SharedFile (new TestMemCluster::File(**it));
          file->snap_id = CEPH_NOSNAP;
          *it = file;
        }
      } else {
        // create new head version
        // NOTE(review): new head gets snap_id = m_pool->snap_id rather than
        // CEPH_NOSNAP -- presumably so it reads as the latest clone; verify.
        file = TestMemCluster::SharedFile (new TestMemCluster::File(**it));
        file->snap_id = m_pool->snap_id;
        snaps.push_back(file);
      }
      return 0;
    }
    ++versions;
  }
  return 0;
}
int TestMemIoCtxImpl::set_alloc_hint(const std::string& oid,
                                     uint64_t expected_object_size,
                                     uint64_t expected_write_size,
                                     uint32_t flags,
                                     const SnapContext &snapc) {
  // Allocation hints are advisory: the in-memory cluster only ensures the
  // object exists (creating it on demand); the hint values are ignored.
  if (get_snap_read() != CEPH_NOSNAP) {
    return -EROFS;
  }
  if (m_client->is_blocklisted()) {
    return -EBLOCKLISTED;
  }

  std::unique_lock locker{m_pool->file_lock};
  get_file(oid, true, CEPH_NOSNAP, snapc);
  return 0;
}
// Simplified sparse read: the whole clipped extent is reported as one
// contiguous run in *m (no hole detection).  Returns 1 if any data was
// read, 0 for an empty result, or a negative error code.
int TestMemIoCtxImpl::sparse_read(const std::string& oid, uint64_t off,
                                  uint64_t len,
                                  std::map<uint64_t,uint64_t> *m,
                                  bufferlist *data_bl, uint64_t snap_id) {
  if (m_client->is_blocklisted()) {
    return -EBLOCKLISTED;
  }

  // TODO verify correctness
  TestMemCluster::SharedFile file;
  {
    std::shared_lock l{m_pool->file_lock};
    file = get_file(oid, false, snap_id, {});
    if (file == NULL) {
      return -ENOENT;
    }
  }

  std::shared_lock l{file->lock};
  len = clip_io(off, len, file->data.length());
  // TODO support sparse read
  if (m != NULL) {
    m->clear();
    if (len > 0) {
      // single extent covering the full clipped range
      (*m)[off] = len;
    }
  }
  if (data_bl != NULL && len > 0) {
    bufferlist bit;
    bit.substr_of(file->data, off, len);
    // deep-copy so the caller cannot mutate the stored object data
    append_clone(bit, data_bl);
  }
  return len > 0 ? 1 : 0;
}
int TestMemIoCtxImpl::stat(const std::string& oid, uint64_t *psize,
                           time_t *pmtime) {
  // Report the object's current size and mtime; either out-param may be null.
  if (m_client->is_blocklisted()) {
    return -EBLOCKLISTED;
  }

  TestMemCluster::SharedFile file;
  {
    std::shared_lock pool_locker{m_pool->file_lock};
    file = get_file(oid, false, CEPH_NOSNAP, {});
  }
  if (file == NULL) {
    return -ENOENT;
  }

  std::shared_lock file_locker{file->lock};
  if (psize != NULL) {
    *psize = file->data.length();
  }
  if (pmtime != NULL) {
    *pmtime = file->mtime;
  }
  return 0;
}
// Resize the object to 'size' bytes: shrink discards the tail, grow pads
// with zeros.  The modified region is subtracted from snap_overlap so it is
// no longer considered shared with the most recent snapshot clone.
int TestMemIoCtxImpl::truncate(const std::string& oid, uint64_t size,
                               const SnapContext &snapc) {
  if (get_snap_read() != CEPH_NOSNAP) {
    return -EROFS;
  } else if (m_client->is_blocklisted()) {
    return -EBLOCKLISTED;
  }
  auto cct = m_client->cct();
  ldout(cct, 20) << "size=" << size << ", snapc=" << snapc << dendl;

  TestMemCluster::SharedFile file;
  {
    std::unique_lock l{m_pool->file_lock};
    file = get_file(oid, true, CEPH_NOSNAP, snapc);
  }

  std::unique_lock l{file->lock};
  bufferlist bl(size);

  interval_set<uint64_t> is;
  if (file->data.length() > size) {
    // shrink: keep only the first 'size' bytes; [size, old_len) changed
    is.insert(size, file->data.length() - size);

    bl.substr_of(file->data, 0, size);
    file->data.swap(bl);
  } else if (file->data.length() != size) {
    // grow: zero-pad up to 'size'
    if (size == 0) {
      bl.clear();
    } else {
      // NOTE(review): the changed interval is recorded as [0, size) here,
      // not just the newly appended tail -- confirm this is intentional.
      is.insert(0, size);

      bl.append_zero(size - file->data.length());
      file->data.append(bl);
    }
  }
  // drop the changed region from the snapshot-overlap tracking
  is.intersection_of(file->snap_overlap);
  file->snap_overlap.subtract(is);
  return 0;
}
int TestMemIoCtxImpl::write(const std::string& oid, bufferlist& bl, size_t len,
uint64_t off, const SnapContext &snapc) {
if (get_snap_read() != CEPH_NOSNAP) {
return -EROFS;
} else if (m_client->is_blocklisted()) {
return -EBLOCKLISTED;
}
auto cct = m_client->cct();
ldout(cct, 20) << "extent=" << off << "~" << len << ", snapc=" << snapc
<< dendl;
TestMemCluster::SharedFile file;
{
std::unique_lock l{m_pool->file_lock};
file = get_file(oid, true, CEPH_NOSNAP, snapc);
}
std::unique_lock l{file->lock};
if (len > 0) {
interval_set<uint64_t> is;
is.insert(off, len);
is.intersection_of(file->snap_overlap);
file->snap_overlap.subtract(is);
}
ensure_minimum_length(off + len, &file->data);
file->data.begin(off).copy_in(len, bl);
return 0;
}
int TestMemIoCtxImpl::write_full(const std::string& oid, bufferlist& bl,
const SnapContext &snapc) {
if (get_snap_read() != CEPH_NOSNAP) {
return -EROFS;
} else if (m_client->is_blocklisted()) {
return -EBLOCKLISTED;
}
auto cct = m_client->cct();
ldout(cct, 20) << "length=" << bl.length() << ", snapc=" << snapc << dendl;
TestMemCluster::SharedFile file;
{
std::unique_lock l{m_pool->file_lock};
file = get_file(oid, true, CEPH_NOSNAP, snapc);
if (file == NULL) {
return -ENOENT;
}
}
std::unique_lock l{file->lock};
if (bl.length() > 0) {
interval_set<uint64_t> is;
is.insert(0, bl.length());
is.intersection_of(file->snap_overlap);
file->snap_overlap.subtract(is);
}
file->data.clear();
ensure_minimum_length(bl.length(), &file->data);
file->data.begin().copy_in(bl.length(), bl);
return 0;
}
// Repeatedly write the pattern 'bl' across the extent [off, off+len).
// 'len' must be a non-zero multiple of the pattern length.
// Returns 0 on success, -EINVAL for an invalid extent/pattern, -EROFS when
// reading from a snapshot context, -EBLOCKLISTED when blocklisted.
int TestMemIoCtxImpl::writesame(const std::string& oid, bufferlist& bl,
                                size_t len, uint64_t off,
                                const SnapContext &snapc) {
  if (get_snap_read() != CEPH_NOSNAP) {
    return -EROFS;
  } else if (m_client->is_blocklisted()) {
    return -EBLOCKLISTED;
  }

  // Guard bl.length() == 0 explicitly: the original evaluated
  // 'len % bl.length()' first, which is undefined behavior (modulo by
  // zero) for an empty pattern.  An empty pattern is invalid input.
  if (len == 0 || bl.length() == 0 || (len % bl.length() != 0)) {
    return -EINVAL;
  }

  TestMemCluster::SharedFile file;
  {
    std::unique_lock l{m_pool->file_lock};
    file = get_file(oid, true, CEPH_NOSNAP, snapc);
  }

  std::unique_lock l{file->lock};
  if (len > 0) {
    // the overwritten extent no longer overlaps the latest snapshot clone
    interval_set<uint64_t> is;
    is.insert(off, len);
    is.intersection_of(file->snap_overlap);
    file->snap_overlap.subtract(is);
  }

  ensure_minimum_length(off + len, &file->data);
  // stamp the pattern across the extent, one pattern-length at a time
  while (len > 0) {
    file->data.begin(off).copy_in(bl.length(), bl);
    off += bl.length();
    len -= bl.length();
  }
  return 0;
}
// Compare 'cmp_bl' against the object data at offset 'off' (snapshot
// 'snap_id').  A missing object compares as empty data.  The result comes
// from cmpext_compare() (0 on match, mismatch encoding otherwise).
int TestMemIoCtxImpl::cmpext(const std::string& oid, uint64_t off,
                             bufferlist& cmp_bl, uint64_t snap_id) {
  if (m_client->is_blocklisted()) {
    return -EBLOCKLISTED;
  }

  bufferlist read_bl;
  uint64_t len = cmp_bl.length();

  TestMemCluster::SharedFile file;
  {
    std::shared_lock l{m_pool->file_lock};
    file = get_file(oid, false, snap_id, {});
    if (file == NULL) {
      // nonexistent object: compare against an empty buffer
      return cmpext_compare(cmp_bl, read_bl);
    }
  }

  std::shared_lock l{file->lock};
  // clamp the read extent to the object's size
  if (off >= file->data.length()) {
    len = 0;
  } else if (off + len > file->data.length()) {
    len = file->data.length() - off;
  }
  read_bl.substr_of(file->data, off, len);
  return cmpext_compare(cmp_bl, read_bl);
}
int TestMemIoCtxImpl::xattr_get(const std::string& oid,
std::map<std::string, bufferlist>* attrset) {
if (m_client->is_blocklisted()) {
return -EBLOCKLISTED;
}
TestMemCluster::SharedFile file;
std::shared_lock l{m_pool->file_lock};
TestMemCluster::FileXAttrs::iterator it = m_pool->file_xattrs.find(
{get_namespace(), oid});
if (it == m_pool->file_xattrs.end()) {
return -ENODATA;
}
*attrset = it->second;
return 0;
}
int TestMemIoCtxImpl::xattr_set(const std::string& oid, const std::string &name,
                                bufferlist& bl) {
  // Store (or overwrite) a single xattr for the object.
  if (m_client->is_blocklisted()) {
    return -EBLOCKLISTED;
  }

  std::unique_lock locker{m_pool->file_lock};
  auto& xattrs = m_pool->file_xattrs[{get_namespace(), oid}];
  xattrs[name] = bl;
  return 0;
}
// Zero the extent [off, off+len).  Mirrors the OSD behavior of turning a
// zero that reaches (or passes) the end of the object into a truncate;
// otherwise the extent is overwritten with zero bytes.
int TestMemIoCtxImpl::zero(const std::string& oid, uint64_t off, uint64_t len,
                           const SnapContext &snapc) {
  if (m_client->is_blocklisted()) {
    return -EBLOCKLISTED;
  }

  auto cct = m_client->cct();
  ldout(cct, 20) << "extent=" << off << "~" << len << ", snapc=" << snapc
                 << dendl;

  bool truncate_redirect = false;
  TestMemCluster::SharedFile file;
  {
    std::unique_lock l{m_pool->file_lock};
    file = get_file(oid, false, CEPH_NOSNAP, snapc);
    if (!file) {
      // zeroing a nonexistent object is a no-op
      return 0;
    }
    file = get_file(oid, true, CEPH_NOSNAP, snapc);

    std::shared_lock l2{file->lock};
    if (len > 0 && off + len >= file->data.length()) {
      // Zero -> Truncate logic embedded in OSD
      truncate_redirect = true;
    }
  }
  if (truncate_redirect) {
    return truncate(oid, off, snapc);
  }

  // interior extent: overwrite with zero bytes
  bufferlist bl;
  bl.append_zero(len);
  return write(oid, bl, len, off, snapc);
}
void TestMemIoCtxImpl::append_clone(bufferlist& src, bufferlist* dest) {
  // Deep-copy 'src' onto 'dest' so callers can never mutate the mock
  // cluster's backing buffers through shared bufferptrs.
  if (src.length() == 0) {
    return;
  }
  buffer::ptr deep_copy;
  bufferlist::iterator src_it = src.begin();
  src_it.copy_deep(src.length(), deep_copy);
  dest->append(deep_copy);
}
size_t TestMemIoCtxImpl::clip_io(size_t off, size_t len, size_t bl_len) {
  // Clamp the extent [off, off+len) to an object of size 'bl_len',
  // returning the number of readable/writable bytes.
  if (off >= bl_len) {
    return 0;
  }
  size_t remaining = bl_len - off;
  return (len > remaining) ? remaining : len;
}
void TestMemIoCtxImpl::ensure_minimum_length(size_t len, bufferlist *bl) {
if (len > bl->length()) {
bufferptr ptr(buffer::create(len - bl->length()));
ptr.zero();
bl->append(ptr);
}
}
// Resolve the in-memory object for 'oid' at snapshot 'snap_id'.
// For writes (write == true) the object is created on demand, and if
// snapshots were taken since the current HEAD was written the HEAD is
// cloned so the snapshot's data stays intact; the caller must hold the
// pool's file_lock write-locked.  For reads, returns the matching version
// or an empty SharedFile when not found/deleted.
TestMemCluster::SharedFile TestMemIoCtxImpl::get_file(
    const std::string &oid, bool write, uint64_t snap_id,
    const SnapContext &snapc) {
  ceph_assert(ceph_mutex_is_locked(m_pool->file_lock) ||
              ceph_mutex_is_wlocked(m_pool->file_lock));
  ceph_assert(!write || ceph_mutex_is_wlocked(m_pool->file_lock));

  TestMemCluster::SharedFile file;
  TestMemCluster::Files::iterator it = m_pool->files.find(
    {get_namespace(), oid});
  if (it != m_pool->files.end()) {
    // the newest version (HEAD or latest clone) is at the back
    file = it->second.back();
  } else if (!write) {
    return TestMemCluster::SharedFile();
  }

  if (write) {
    bool new_version = false;
    if (!file || !file->exists) {
      // first write (or write after delete): create a fresh version
      file = TestMemCluster::SharedFile(new TestMemCluster::File());
      new_version = true;
    } else {
      if (!snapc.snaps.empty() && file->snap_id < snapc.seq) {
        // snapshots were taken since this version was written: record which
        // snap ids it belongs to, then clone it into a new writable HEAD
        for (std::vector<snapid_t>::const_reverse_iterator seq_it =
              snapc.snaps.rbegin();
            seq_it != snapc.snaps.rend(); ++seq_it) {
          if (*seq_it > file->snap_id && *seq_it <= snapc.seq) {
            file->snaps.push_back(*seq_it);
          }
        }

        bufferlist prev_data = file->data;
        file = TestMemCluster::SharedFile(
          new TestMemCluster::File(*file));
        file->data.clear();
        // deep-copy so writes to the new HEAD cannot touch the snapshot
        append_clone(prev_data, &file->data);
        if (prev_data.length() > 0) {
          // initially the whole clone overlaps the snapshot version
          file->snap_overlap.insert(0, prev_data.length());
        }
        new_version = true;
      }
    }

    if (new_version) {
      file->snap_id = snapc.seq;
      file->mtime = ceph_clock_now().sec();
      m_pool->files[{get_namespace(), oid}].push_back(file);
    }

    file->objver++;
    return file;
  }

  if (snap_id == CEPH_NOSNAP) {
    if (!file->exists) {
      // a deleted HEAD can only linger while older clones remain
      ceph_assert(it->second.size() > 1);
      return TestMemCluster::SharedFile();
    }
    return file;
  }

  // snapshot read: pick the newest version strictly older than 'snap_id'
  TestMemCluster::FileSnapshots &snaps = it->second;
  for (TestMemCluster::FileSnapshots::reverse_iterator it = snaps.rbegin();
      it != snaps.rend(); ++it) {
    TestMemCluster::SharedFile file = *it;
    if (file->snap_id < snap_id) {
      if (!file->exists) {
        return TestMemCluster::SharedFile();
      }
      return file;
    }
  }
  return TestMemCluster::SharedFile();
}
} // namespace librados
| 25,213 | 26.258378 | 91 | cc |
null | ceph-main/src/test/librados_test_stub/TestMemIoCtxImpl.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_MEM_IO_CTX_IMPL_H
#define CEPH_TEST_MEM_IO_CTX_IMPL_H
#include "test/librados_test_stub/TestIoCtxImpl.h"
#include "test/librados_test_stub/TestMemCluster.h"
namespace librados {
class TestMemRadosClient;
// In-memory IoCtx implementation for the librados test stub: all object
// data, omaps, and xattrs live in a TestMemCluster::Pool instead of a real
// RADOS cluster.  See TestIoCtxImpl for the operation contracts.
class TestMemIoCtxImpl : public TestIoCtxImpl {
public:
  TestMemIoCtxImpl();
  TestMemIoCtxImpl(TestMemRadosClient *client, int64_t m_pool_id,
                   const std::string& pool_name,
                   TestMemCluster::Pool *pool);
  ~TestMemIoCtxImpl() override;

  TestIoCtxImpl *clone() override;

  int aio_remove(const std::string& oid, AioCompletionImpl *c, int flags = 0) override;

  int append(const std::string& oid, const bufferlist &bl,
             const SnapContext &snapc) override;

  int assert_exists(const std::string &oid, uint64_t snap_id) override;
  int assert_version(const std::string &oid, uint64_t ver) override;

  int create(const std::string& oid, bool exclusive,
             const SnapContext &snapc) override;
  int list_snaps(const std::string& o, snap_set_t *out_snaps) override;
  // omap operations
  int omap_get_vals(const std::string& oid,
                    const std::string& start_after,
                    const std::string &filter_prefix,
                    uint64_t max_return,
                    std::map<std::string, bufferlist> *out_vals) override;
  int omap_get_vals2(const std::string& oid,
                    const std::string& start_after,
                    const std::string &filter_prefix,
                    uint64_t max_return,
                    std::map<std::string, bufferlist> *out_vals,
                    bool *pmore) override;
  int omap_rm_keys(const std::string& oid,
                   const std::set<std::string>& keys) override;
  int omap_set(const std::string& oid, const std::map<std::string,
               bufferlist> &map) override;
  // data operations
  int read(const std::string& oid, size_t len, uint64_t off,
           bufferlist *bl, uint64_t snap_id, uint64_t* objver) override;
  int remove(const std::string& oid, const SnapContext &snapc) override;
  // self-managed snapshot operations
  int selfmanaged_snap_create(uint64_t *snapid) override;
  int selfmanaged_snap_remove(uint64_t snapid) override;
  int selfmanaged_snap_rollback(const std::string& oid,
                                uint64_t snapid) override;
  int set_alloc_hint(const std::string& oid, uint64_t expected_object_size,
                     uint64_t expected_write_size, uint32_t flags,
                     const SnapContext &snapc) override;
  int sparse_read(const std::string& oid, uint64_t off, uint64_t len,
                  std::map<uint64_t,uint64_t> *m, bufferlist *data_bl,
                  uint64_t snap_id) override;
  int stat(const std::string& oid, uint64_t *psize, time_t *pmtime) override;
  int truncate(const std::string& oid, uint64_t size,
               const SnapContext &snapc) override;
  int write(const std::string& oid, bufferlist& bl, size_t len,
            uint64_t off, const SnapContext &snapc) override;
  int write_full(const std::string& oid, bufferlist& bl,
                 const SnapContext &snapc) override;
  int writesame(const std::string& oid, bufferlist& bl, size_t len,
                uint64_t off, const SnapContext &snapc) override;
  int cmpext(const std::string& oid, uint64_t off, bufferlist& cmp_bl,
             uint64_t snap_id) override;
  // xattr operations
  int xattr_get(const std::string& oid,
                std::map<std::string, bufferlist>* attrset) override;
  int xattr_set(const std::string& oid, const std::string &name,
                bufferlist& bl) override;
  int zero(const std::string& oid, uint64_t off, uint64_t len,
           const SnapContext &snapc) override;

protected:
  TestMemCluster::Pool *get_pool() {
    return m_pool;
  }

private:
  TestMemIoCtxImpl(const TestMemIoCtxImpl&);

  TestMemRadosClient *m_client = nullptr;
  TestMemCluster::Pool *m_pool = nullptr;

  // internal helpers (see the .cc for their contracts)
  void append_clone(bufferlist& src, bufferlist* dest);
  size_t clip_io(size_t off, size_t len, size_t bl_len);
  void ensure_minimum_length(size_t len, bufferlist *bl);

  TestMemCluster::SharedFile get_file(const std::string &oid, bool write,
                                      uint64_t snap_id,
                                      const SnapContext &snapc);

};
} // namespace librados
#endif // CEPH_TEST_MEM_IO_CTX_IMPL_H
| 4,396 | 40.87619 | 87 | h |
null | ceph-main/src/test/librados_test_stub/TestMemRadosClient.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/librados_test_stub/TestMemRadosClient.h"
#include "test/librados_test_stub/TestMemCluster.h"
#include "test/librados_test_stub/TestMemIoCtxImpl.h"
#include <errno.h>
#include <sstream>
namespace librados {
// Register this client with the mock cluster, obtaining a unique nonce
// (used as the blocklisting key) and a global instance id.
TestMemRadosClient::TestMemRadosClient(CephContext *cct,
                                       TestMemCluster *test_mem_cluster)
  : TestRadosClient(cct, test_mem_cluster->get_watch_notify()),
    m_mem_cluster(test_mem_cluster) {
  m_mem_cluster->allocate_client(&m_nonce, &m_global_id);
}
// Unregister this client's nonce from the mock cluster.
TestMemRadosClient::~TestMemRadosClient() {
  m_mem_cluster->deallocate_client(m_nonce);
}
// Create an IoCtx bound to the named pool's in-memory state.  The caller
// owns the returned object.
TestIoCtxImpl *TestMemRadosClient::create_ioctx(int64_t pool_id,
                                                const std::string &pool_name) {
  return new TestMemIoCtxImpl(this, pool_id, pool_name,
                              m_mem_cluster->get_pool(pool_name));
}
// List the oids of every object in the pool (locator/nspace fields are left
// empty).  An unknown pool id yields an empty list.
void TestMemRadosClient::object_list(int64_t pool_id,
 				     std::list<librados::TestRadosClient::Object> *list) {
  list->clear();

  auto pool = m_mem_cluster->get_pool(pool_id);
  if (pool != nullptr) {
    std::shared_lock file_locker{pool->file_lock};
    for (auto &file_pair : pool->files) {
      Object obj;
      obj.oid = file_pair.first.name;
      list->push_back(obj);
    }
  }
}
// Create a pool in the mock cluster (refused while blocklisted).
int TestMemRadosClient::pool_create(const std::string &pool_name) {
  if (is_blocklisted()) {
    return -EBLOCKLISTED;
  }
  return m_mem_cluster->pool_create(pool_name);
}
// Delete a pool from the mock cluster (refused while blocklisted).
int TestMemRadosClient::pool_delete(const std::string &pool_name) {
  if (is_blocklisted()) {
    return -EBLOCKLISTED;
  }
  return m_mem_cluster->pool_delete(pool_name);
}
// Cache tiering is not modeled: every pool is treated as its own base tier.
int TestMemRadosClient::pool_get_base_tier(int64_t pool_id, int64_t* base_tier) {
  // TODO
  *base_tier = pool_id;
  return 0;
}
// Enumerate all pools as (id, name) pairs.
int TestMemRadosClient::pool_list(std::list<std::pair<int64_t, std::string> >& v) {
  return m_mem_cluster->pool_list(v);
}
// Resolve a pool name to its id.
int64_t TestMemRadosClient::pool_lookup(const std::string &pool_name) {
  return m_mem_cluster->pool_lookup(pool_name);
}
// Resolve a pool id back to its name.
int TestMemRadosClient::pool_reverse_lookup(int64_t id, std::string *name) {
  return m_mem_cluster->pool_reverse_lookup(id, name);
}
// Synchronously drain all pending watch/notify callbacks for this client.
int TestMemRadosClient::watch_flush() {
  get_watch_notify()->flush(this);
  return 0;
}
// True if this client's nonce has been blocklisted in the mock cluster.
bool TestMemRadosClient::is_blocklisted() const {
  return m_mem_cluster->is_blocklisted(m_nonce);
}
// Simulate "osd blocklist add".  The client address is expected to contain
// "<addr>/<nonce>"; the nonce uniquely identifies the client to blocklist.
// Note: 'expire_seconds' is not forwarded to the mock cluster.
int TestMemRadosClient::blocklist_add(const std::string& client_address,
				      uint32_t expire_seconds) {
  if (is_blocklisted()) {
    return -EBLOCKLISTED;
  }

  // extract the nonce to use as a unique key to the client
  auto idx = client_address.find("/");
  if (idx == std::string::npos || idx + 1 >= client_address.size()) {
    return -EINVAL;
  }

  std::stringstream nonce_ss(client_address.substr(idx + 1));
  uint32_t nonce;
  nonce_ss >> nonce;
  if (!nonce_ss) {
    // nonce portion was not a parseable unsigned integer
    return -EINVAL;
  }

  m_mem_cluster->blocklist(nonce);
  return 0;
}
// Begin an exclusive per-object transaction in the mock cluster (blocks
// concurrent transactions on the same {namespace, oid}).
void TestMemRadosClient::transaction_start(const std::string& nspace,
                                           const std::string &oid) {
  m_mem_cluster->transaction_start({nspace, oid});
}
// End the per-object transaction started by transaction_start().
void TestMemRadosClient::transaction_finish(const std::string& nspace,
                                            const std::string &oid) {
  m_mem_cluster->transaction_finish({nspace, oid});
}
} // namespace librados
| 3,360 | 27.243697 | 83 | cc |
null | ceph-main/src/test/librados_test_stub/TestMemRadosClient.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_MEM_RADOS_CLIENT_H
#define CEPH_TEST_MEM_RADOS_CLIENT_H
#include "test/librados_test_stub/TestRadosClient.h"
#include "include/ceph_assert.h"
#include <list>
#include <string>
namespace librados {
class AioCompletionImpl;
class TestMemCluster;
// RadosClient implementation backed entirely by an in-memory
// TestMemCluster: pools, objects, blocklisting, and watch/notify are all
// simulated in-process.
class TestMemRadosClient : public TestRadosClient {
public:
  TestMemRadosClient(CephContext *cct, TestMemCluster *test_mem_cluster);
  ~TestMemRadosClient() override;

  TestIoCtxImpl *create_ioctx(int64_t pool_id,
			      const std::string &pool_name) override;

  uint32_t get_nonce() override {
    return m_nonce;
  }
  uint64_t get_instance_id() override {
    return m_global_id;
  }

  // fixed compatibility versions reported for the simulated cluster
  int get_min_compatible_osd(int8_t* require_osd_release) override {
    *require_osd_release = CEPH_RELEASE_OCTOPUS;
    return 0;
  }

  int get_min_compatible_client(int8_t* min_compat_client,
                                int8_t* require_min_compat_client) override {
    *min_compat_client = CEPH_RELEASE_MIMIC;
    *require_min_compat_client = CEPH_RELEASE_MIMIC;
    return 0;
  }

  void object_list(int64_t pool_id,
                   std::list<librados::TestRadosClient::Object> *list) override;

  // service daemon registration is a no-op for the in-memory cluster
  int service_daemon_register(const std::string& service,
                              const std::string& name,
                              const std::map<std::string,std::string>& metadata) override {
    return 0;
  }
  int service_daemon_update_status(std::map<std::string,std::string>&& status) override {
    return 0;
  }

  // pool management
  int pool_create(const std::string &pool_name) override;
  int pool_delete(const std::string &pool_name) override;
  int pool_get_base_tier(int64_t pool_id, int64_t* base_tier) override;
  int pool_list(std::list<std::pair<int64_t, std::string> >& v) override;
  int64_t pool_lookup(const std::string &name) override;
  int pool_reverse_lookup(int64_t id, std::string *name) override;

  int watch_flush() override;

  // blocklisting (keyed by the client nonce)
  bool is_blocklisted() const override;
  int blocklist_add(const std::string& client_address,
                    uint32_t expire_seconds) override;
protected:
  TestMemCluster *get_mem_cluster() {
    return m_mem_cluster;
  }

protected:
  void transaction_start(const std::string& nspace,
                         const std::string &oid) override;
  void transaction_finish(const std::string& nspace,
                          const std::string &oid) override;

private:
  TestMemCluster *m_mem_cluster;
  uint32_t m_nonce;      // unique per-client id, also the blocklist key
  uint64_t m_global_id;  // simulated global instance id

};
} // namespace librados
#endif // CEPH_TEST_MEM_RADOS_CLIENT_H
| 2,667 | 28.977528 | 91 | h |
null | ceph-main/src/test/librados_test_stub/TestRadosClient.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/librados_test_stub/TestRadosClient.h"
#include "test/librados_test_stub/TestIoCtxImpl.h"
#include "librados/AioCompletionImpl.h"
#include "include/ceph_assert.h"
#include "common/ceph_json.h"
#include "common/Finisher.h"
#include "common/async/context_pool.h"
#include <boost/lexical_cast.hpp>
#include <boost/thread.hpp>
#include <errno.h>
#include <atomic>
#include <functional>
#include <sstream>
static int get_concurrency() {
  // Number of simulated OSD finisher threads: LIBRADOS_CONCURRENCY
  // overrides, then hardware concurrency, with a floor of one thread.
  const char *env = getenv("LIBRADOS_CONCURRENCY");
  int concurrency = (env != NULL) ? atoi(env) : 0;
  if (concurrency == 0) {
    concurrency = boost::thread::thread::hardware_concurrency();
  }
  if (concurrency == 0) {
    concurrency = 1;
  }
  return concurrency;
}
using namespace std::placeholders;
namespace librados {
namespace {
const char *config_keys[] = {
"librados_thread_count",
NULL
};
} // anonymous namespace
// Complete an AioCompletionImpl with result 'r': set the result under the
// lock, invoke the user's complete/safe callbacks without the lock held,
// then clear the callbacks, wake waiters, and drop the reference taken
// when the operation was queued.
static void finish_aio_completion(AioCompletionImpl *c, int r) {
  c->lock.lock();
  c->complete = true;
  c->rval = r;
  c->lock.unlock();

  // callbacks run unlocked so they may safely call back into 'c'
  rados_callback_t cb_complete = c->callback_complete;
  void *cb_complete_arg = c->callback_complete_arg;
  if (cb_complete) {
    cb_complete(c, cb_complete_arg);
  }

  rados_callback_t cb_safe = c->callback_safe;
  void *cb_safe_arg = c->callback_safe_arg;
  if (cb_safe) {
    cb_safe(c, cb_safe_arg);
  }

  c->lock.lock();
  c->callback_complete = NULL;
  c->callback_safe = NULL;
  c->cond.notify_all();
  // put_unlock() releases both the reference and the lock
  c->put_unlock();
}
// Context that executes one queued AIO operation on a finisher thread.
// If an AioCompletionImpl is supplied, a reference is held for the
// operation's lifetime and the completion fires either inline or via the
// dedicated AIO callback finisher.
class AioFunctionContext : public Context {
public:
  AioFunctionContext(const TestRadosClient::AioFunction &callback,
                     Finisher *finisher, AioCompletionImpl *c)
    : m_callback(callback), m_finisher(finisher), m_comp(c)
  {
    if (m_comp != NULL) {
      m_comp->get();
    }
  }

  void finish(int r) override {
    // run the operation; its return value becomes the completion result
    int ret = m_callback();
    if (m_comp != NULL) {
      if (m_finisher != NULL) {
        // hand completion off to the AIO finisher (async callback model)
        m_finisher->queue(new LambdaContext(std::bind(
          &finish_aio_completion, m_comp, ret)));
      } else {
        finish_aio_completion(m_comp, ret);
      }
    }
  }
private:
  TestRadosClient::AioFunction m_callback;
  Finisher *m_finisher;
  AioCompletionImpl *m_comp;
};
// Build a simulated cluster client: one finisher thread per simulated OSD,
// a dedicated finisher for AIO completion callbacks, and an io_context
// pool (sized by the "librados_thread_count" config option, observed for
// runtime changes).
TestRadosClient::TestRadosClient(CephContext *cct,
                                 TestWatchNotify *watch_notify)
  : m_cct(cct->get()), m_watch_notify(watch_notify),
    m_aio_finisher(new Finisher(m_cct)),
    m_io_context_pool(std::make_unique<ceph::async::io_context_pool>())
{
  get();

  // simulate multiple OSDs
  int concurrency = get_concurrency();
  for (int i = 0; i < concurrency; ++i) {
    m_finishers.push_back(new Finisher(m_cct));
    m_finishers.back()->start();
  }

  // replicate AIO callback processing
  m_aio_finisher->start();

  // replicate neorados callback processing
  m_cct->_conf.add_observer(this);
  m_io_context_pool->start(m_cct->_conf.get_val<uint64_t>(
    "librados_thread_count"));
}
// Drain in-flight operations, stop and free all finisher threads and the
// io_context pool, then release the CephContext reference taken in the
// constructor.
TestRadosClient::~TestRadosClient() {
  flush_aio_operations();

  for (size_t i = 0; i < m_finishers.size(); ++i) {
    m_finishers[i]->stop();
    delete m_finishers[i];
  }
  m_aio_finisher->stop();
  delete m_aio_finisher;

  m_cct->_conf.remove_observer(this);
  m_io_context_pool->stop();

  m_cct->put();
  m_cct = NULL;
}
// Expose an io_context from the internal pool (neorados-style callbacks).
boost::asio::io_context& TestRadosClient::get_io_context() {
  return m_io_context_pool->get_io_context();
}
// Config options this client observes (see file-scope config_keys).
const char** TestRadosClient::get_tracked_conf_keys() const {
  return config_keys;
}
// Resize the io_context pool when "librados_thread_count" changes at
// runtime (stop-then-start with the new thread count).
void TestRadosClient::handle_conf_change(
    const ConfigProxy& conf, const std::set<std::string> &changed) {
  if (changed.count("librados_thread_count")) {
    m_io_context_pool->stop();
    m_io_context_pool->start(conf.get_val<std::uint64_t>(
      "librados_thread_count"));
  }
}
// Take a reference on this client.
void TestRadosClient::get() {
  m_refcount++;
}
// Drop a reference; the last reference shuts the client down and deletes it.
void TestRadosClient::put() {
  if (--m_refcount == 0) {
    shutdown();
    delete this;
  }
}
// CephContext accessor (reference held for the client's lifetime).
CephContext *TestRadosClient::cct() {
  return m_cct;
}
// No-op: there is no real cluster to connect to.
int TestRadosClient::connect() {
  return 0;
}
// No-op: cleanup happens in the destructor.
void TestRadosClient::shutdown() {
}
// No-op: the simulated cluster has no OSD map to refresh.
int TestRadosClient::wait_for_latest_osdmap() {
  return 0;
}
// Minimal mock of the monitor command interface: only the commands the
// test suites issue are recognized.  Unrecognized commands (or a
// malformed/prefix-less JSON payload) return an error.
int TestRadosClient::mon_command(const std::vector<std::string>& cmd,
                                 const bufferlist &inbl,
                                 bufferlist *outbl, std::string *outs) {
  for (std::vector<std::string>::const_iterator it = cmd.begin();
       it != cmd.end(); ++it) {
    JSONParser parser;
    if (!parser.parse(it->c_str(), it->length())) {
      return -EINVAL;
    }

    JSONObjIter j_it = parser.find("prefix");
    if (j_it.end()) {
      return -EINVAL;
    }

    // tiering and config commands are accepted as no-ops
    if ((*j_it)->get_data() == "osd tier add") {
      return 0;
    } else if ((*j_it)->get_data() == "osd tier cache-mode") {
      return 0;
    } else if ((*j_it)->get_data() == "osd tier set-overlay") {
      return 0;
    } else if ((*j_it)->get_data() == "osd tier remove-overlay") {
      return 0;
    } else if ((*j_it)->get_data() == "osd tier remove") {
      return 0;
    } else if ((*j_it)->get_data() == "config-key rm") {
      return 0;
    } else if ((*j_it)->get_data() == "config set") {
      return 0;
    } else if ((*j_it)->get_data() == "df") {
      // fabricate a df report listing every pool with fixed stats
      std::stringstream str;
      str << R"({"pools": [)";

      std::list<std::pair<int64_t, std::string>> pools;
      pool_list(pools);

      for (auto& pool : pools) {
        if (pools.begin()->first != pool.first) {
          str << ",";
        }
        str << R"({"name": ")" << pool.second << R"(", "stats": )"
            << R"({"percent_used": 1.0, "bytes_used": 0, "max_avail": 0}})";
      }

      str << "]}";
      outbl->append(str.str());
      return 0;
    } else if ((*j_it)->get_data() == "osd blocklist") {
      auto op_it = parser.find("blocklistop");
      if (!op_it.end() && (*op_it)->get_data() == "add") {
        uint32_t expire = 0;
        auto expire_it = parser.find("expire");
        if (!expire_it.end()) {
          expire = boost::lexical_cast<uint32_t>((*expire_it)->get_data());
        }

        auto addr_it = parser.find("addr");
        return blocklist_add((*addr_it)->get_data(), expire);
      }
    }
  }
  return -ENOSYS;
}
// Queue an AIO operation on the finisher that owns 'oid' (hash-sharded to
// simulate per-OSD ordering).  If queue_callback is set, the completion
// callback is dispatched via the dedicated AIO finisher instead of inline.
void TestRadosClient::add_aio_operation(const std::string& oid,
                                        bool queue_callback,
				        const AioFunction &aio_function,
                                        AioCompletionImpl *c) {
  AioFunctionContext *ctx = new AioFunctionContext(
    aio_function, queue_callback ? m_aio_finisher : NULL, c);
  get_finisher(oid)->queue(ctx);
}
// Rendezvous helper for flush_aio_operations(): each simulated OSD
// finisher calls flushed(); the last one to arrive queues the completion
// on the AIO finisher and the helper deletes itself.
struct WaitForFlush {
  int flushed() {
    if (--count == 0) {
      aio_finisher->queue(new LambdaContext(std::bind(
        &finish_aio_completion, c, 0)));
      delete this;  // self-owned: freed by the final flusher
    }
    return 0;
  }

  std::atomic<int64_t> count = { 0 };  // finishers still pending
  Finisher *aio_finisher;
  AioCompletionImpl *c;
};
// Synchronously wait for all queued AIO operations to drain.
void TestRadosClient::flush_aio_operations() {
  AioCompletionImpl *comp = new AioCompletionImpl();
  flush_aio_operations(comp);
  comp->wait_for_complete();
  comp->put();
}
// Asynchronously flush: queue a sentinel on every simulated OSD finisher;
// 'c' completes once all of them have drained their queues.
void TestRadosClient::flush_aio_operations(AioCompletionImpl *c) {
  c->get();

  WaitForFlush *wait_for_flush = new WaitForFlush();
  wait_for_flush->count = m_finishers.size();
  wait_for_flush->aio_finisher = m_aio_finisher;
  wait_for_flush->c = c;

  for (size_t i = 0; i < m_finishers.size(); ++i) {
    AioFunctionContext *ctx = new AioFunctionContext(
      std::bind(&WaitForFlush::flushed, wait_for_flush),
      nullptr, nullptr);
    m_finishers[i]->queue(ctx);
  }
}
// Asynchronously flush pending watch/notify callbacks; completes 'c' when
// done.
int TestRadosClient::aio_watch_flush(AioCompletionImpl *c) {
  c->get();
  Context *ctx = new LambdaContext(std::bind(
    &TestRadosClient::finish_aio_completion, this, c, std::placeholders::_1));
  get_watch_notify()->aio_flush(this, ctx);
  return 0;
}
// Member-function shim over the file-scope completion helper (gives
// callers a bindable member to target).
void TestRadosClient::finish_aio_completion(AioCompletionImpl *c, int r) {
  librados::finish_aio_completion(c, r);
}
Finisher *TestRadosClient::get_finisher(const std::string &oid) {
  // Deterministically shard objects across the simulated "OSD" finishers
  // so operations on the same oid stay ordered.
  return m_finishers[m_hash(oid) % m_finishers.size()];
}
} // namespace librados
| 8,123 | 25.038462 | 78 | cc |
null | ceph-main/src/test/librados_test_stub/TestRadosClient.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_RADOS_CLIENT_H
#define CEPH_TEST_RADOS_CLIENT_H
#include <map>
#include <memory>
#include <list>
#include <string>
#include <vector>
#include <atomic>
#include <boost/function.hpp>
#include <boost/functional/hash.hpp>
#include "include/rados/librados.hpp"
#include "common/config.h"
#include "common/config_obs.h"
#include "include/buffer_fwd.h"
#include "test/librados_test_stub/TestWatchNotify.h"
class Finisher;
namespace boost { namespace asio { struct io_context; }}
namespace ceph { namespace async { struct io_context_pool; }}
namespace librados {
class TestIoCtxImpl;
// In-memory stand-in for a librados cluster connection used by the
// librados test stub.  Concrete subclasses supply the storage backend by
// implementing the pure-virtual pool/object/watch operations.  Instances
// are intrusively reference counted via get()/put().
class TestRadosClient : public md_config_obs_t {
public:

  // Deleter hook for smart pointers: drops a reference instead of
  // calling delete directly (the refcount owns the lifetime).
  static void Deallocate(librados::TestRadosClient* client)
  {
    client->put();
  }

  // Deferred operation yielding a result code; queued through
  // add_aio_operation() and executed on a per-object Finisher.
  typedef boost::function<int()> AioFunction;

  // Fully-qualified identity of an object within a pool.
  struct Object {
    std::string oid;
    std::string locator;
    std::string nspace;
  };

  // RAII guard bracketing a per-object transaction: transaction_start()
  // on construction, transaction_finish() on destruction.
  class Transaction {
  public:
    Transaction(TestRadosClient *rados_client, const std::string& nspace,
                const std::string &oid)
      : rados_client(rados_client), nspace(nspace), oid(oid) {
      rados_client->transaction_start(nspace, oid);
    }
    ~Transaction() {
      rados_client->transaction_finish(nspace, oid);
    }
  private:
    TestRadosClient *rados_client;
    std::string nspace;
    std::string oid;
  };

  TestRadosClient(CephContext *cct, TestWatchNotify *watch_notify);

  // intrusive reference counting
  void get();
  void put();

  virtual CephContext *cct();

  // identity of this simulated client instance
  virtual uint32_t get_nonce() = 0;
  virtual uint64_t get_instance_id() = 0;

  // simulated cluster/client compatibility queries
  virtual int get_min_compatible_osd(int8_t* require_osd_release) = 0;
  virtual int get_min_compatible_client(int8_t* min_compat_client,
                                        int8_t* require_min_compat_client) = 0;

  virtual int connect();
  virtual void shutdown();
  virtual int wait_for_latest_osdmap();

  // create an I/O context bound to the given pool (backend-specific)
  virtual TestIoCtxImpl *create_ioctx(int64_t pool_id,
                                      const std::string &pool_name) = 0;

  virtual int mon_command(const std::vector<std::string>& cmd,
                          const bufferlist &inbl,
                          bufferlist *outbl, std::string *outs);

  // enumerate all objects in a pool
  virtual void object_list(int64_t pool_id,
                           std::list<librados::TestRadosClient::Object> *list) = 0;

  virtual int service_daemon_register(const std::string& service,
                                      const std::string& name,
                                      const std::map<std::string,std::string>& metadata) = 0;
  virtual int service_daemon_update_status(std::map<std::string,std::string>&& status) = 0;

  // pool management operations (backend-specific)
  virtual int pool_create(const std::string &pool_name) = 0;
  virtual int pool_delete(const std::string &pool_name) = 0;
  virtual int pool_get_base_tier(int64_t pool_id, int64_t* base_tier) = 0;
  virtual int pool_list(std::list<std::pair<int64_t, std::string> >& v) = 0;
  virtual int64_t pool_lookup(const std::string &name) = 0;
  virtual int pool_reverse_lookup(int64_t id, std::string *name) = 0;

  virtual int aio_watch_flush(AioCompletionImpl *c);
  virtual int watch_flush() = 0;

  virtual bool is_blocklisted() const = 0;
  virtual int blocklist_add(const std::string& client_address,
			    uint32_t expire_seconds) = 0;

  // no-op in the stub: there is no real OSD map to wait for
  virtual int wait_for_latest_osd_map() {
    return 0;
  }

  // Finisher used for completing AIO callbacks.
  Finisher *get_aio_finisher() {
    return m_aio_finisher;
  }
  TestWatchNotify *get_watch_notify() {
    return m_watch_notify;
  }

  // Queue aio_function on the Finisher associated with oid; the
  // completion c (if provided) is fired with the function's result.
  void add_aio_operation(const std::string& oid, bool queue_callback,
                         const AioFunction &aio_function, AioCompletionImpl *c);
  void flush_aio_operations();
  void flush_aio_operations(AioCompletionImpl *c);

  void finish_aio_completion(AioCompletionImpl *c, int r);

  boost::asio::io_context& get_io_context();

protected:
  virtual ~TestRadosClient();

  // hooks invoked by the Transaction RAII guard (backend-specific)
  virtual void transaction_start(const std::string& nspace,
                                 const std::string &oid) = 0;
  virtual void transaction_finish(const std::string& nspace,
                                  const std::string &oid) = 0;

  // md_config_obs_t interface: config-option change notifications
  const char** get_tracked_conf_keys() const override;
  void handle_conf_change(const ConfigProxy& conf,
                          const std::set<std::string> &changed) override;

private:
  struct IOContextPool;

  CephContext *m_cct;
  std::atomic<uint64_t> m_refcount = { 0 };

  TestWatchNotify *m_watch_notify;

  // hashes the oid to select one of m_finishers, serializing operations
  // on the same object while allowing parallelism across objects
  Finisher *get_finisher(const std::string& oid);

  Finisher *m_aio_finisher;
  std::vector<Finisher *> m_finishers;
  boost::hash<std::string> m_hash;

  std::unique_ptr<ceph::async::io_context_pool> m_io_context_pool;
};
} // namespace librados
#endif // CEPH_TEST_RADOS_CLIENT_H
| 4,728 | 28.01227 | 93 | h |
null | ceph-main/src/test/librados_test_stub/TestWatchNotify.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/librados_test_stub/TestWatchNotify.h"
#include "include/Context.h"
#include "common/Cond.h"
#include "include/stringify.h"
#include "common/Finisher.h"
#include "test/librados_test_stub/TestCluster.h"
#include "test/librados_test_stub/TestRadosClient.h"
#include <boost/bind/bind.hpp>
#include <boost/function.hpp>
#include "include/ceph_assert.h"
#define dout_subsys ceph_subsys_rados
#undef dout_prefix
#define dout_prefix *_dout << "TestWatchNotify::" << __func__ << ": "
namespace librados {
// Render a WatcherID as "(gid,handle)" for debug log output.
std::ostream& operator<<(std::ostream& out,
                         const TestWatchNotify::WatcherID &watcher_id) {
  return out << "(" << watcher_id.first << "," << watcher_id.second << ")";
}
// Per-object callback registered with the TestCluster.  When the backing
// object is removed, it forwards the event to
// TestWatchNotify::handle_object_removed() on the removing client's AIO
// finisher thread.
struct TestWatchNotify::ObjectHandler : public TestCluster::ObjectHandler {
  TestWatchNotify* test_watch_notify;
  int64_t pool_id;
  std::string nspace;
  std::string oid;

  ObjectHandler(TestWatchNotify* test_watch_notify, int64_t pool_id,
                const std::string& nspace, const std::string& oid)
    : test_watch_notify(test_watch_notify), pool_id(pool_id),
      nspace(nspace), oid(oid) {
  }

  void handle_removed(TestRadosClient* test_rados_client) override {
    // copy member variables since this object might be deleted
    auto _test_watch_notify = test_watch_notify;
    auto _pool_id = pool_id;
    auto _nspace = nspace;
    auto _oid = oid;
    // defer to the finisher thread so the cluster lock isn't held while
    // watch/notify state is torn down
    auto ctx = new LambdaContext([_test_watch_notify, _pool_id, _nspace, _oid](int r) {
        _test_watch_notify->handle_object_removed(_pool_id, _nspace, _oid);
      });
    test_rados_client->get_aio_finisher()->queue(ctx);
  }
};
TestWatchNotify::TestWatchNotify(TestCluster* test_cluster)
  : m_test_cluster(test_cluster) {
}

// Drain all in-flight async notify callbacks before returning.
void TestWatchNotify::flush(TestRadosClient *rados_client) {
  CephContext *cct = rados_client->cct();

  ldout(cct, 20) << "enter" << dendl;
  // block until we know no additional async notify callbacks will occur
  C_SaferCond ctx;
  m_async_op_tracker.wait_for_ops(&ctx);
  ctx.wait();
}
// Fill *out_watchers with one obj_watch_t per registered watch on the
// object; returns -ENOENT when the object does not exist.
int TestWatchNotify::list_watchers(int64_t pool_id, const std::string& nspace,
                                   const std::string& o,
                                   std::list<obj_watch_t> *out_watchers) {
  std::lock_guard lock{m_lock};
  SharedWatcher watcher = get_watcher(pool_id, nspace, o);
  if (!watcher) {
    return -ENOENT;
  }

  out_watchers->clear();
  for (const auto& watch_handle_pair : watcher->watch_handles) {
    const auto& watch_handle = watch_handle_pair.second;

    obj_watch_t obj;
    strncpy(obj.addr, watch_handle.addr.c_str(), sizeof(obj.addr) - 1);
    obj.addr[sizeof(obj.addr) - 1] = '\0';
    obj.watcher_id = static_cast<int64_t>(watch_handle.gid);
    obj.cookie = watch_handle.handle;
    obj.timeout_seconds = 30;
    out_watchers->push_back(obj);
  }
  return 0;
}
// Async flush: completion is simply queued behind any pending work on the
// client's AIO finisher.
void TestWatchNotify::aio_flush(TestRadosClient *rados_client,
                                Context *on_finish) {
  rados_client->get_aio_finisher()->queue(on_finish);
}

// Synchronous watch registration: wraps aio_watch() and blocks on its
// completion.
int TestWatchNotify::watch(TestRadosClient *rados_client, int64_t pool_id,
                           const std::string& nspace, const std::string& o,
                           uint64_t gid, uint64_t *handle,
                           librados::WatchCtx *ctx, librados::WatchCtx2 *ctx2) {
  C_SaferCond cond;
  aio_watch(rados_client, pool_id, nspace, o, gid, handle, ctx, ctx2, &cond);
  return cond.wait();
}
// Async watch registration: hop to the client's AIO finisher thread and
// perform the actual registration there via execute_watch().
void TestWatchNotify::aio_watch(TestRadosClient *rados_client, int64_t pool_id,
                                const std::string& nspace, const std::string& o,
                                uint64_t gid, uint64_t *handle,
                                librados::WatchCtx *watch_ctx,
                                librados::WatchCtx2 *watch_ctx2,
                                Context *on_finish) {
  auto ctx = new LambdaContext(
    [this, rados_client, pool_id, nspace, o, gid, handle, watch_ctx,
     watch_ctx2, on_finish](int) {
      execute_watch(rados_client, pool_id, nspace, o, gid, handle, watch_ctx,
                    watch_ctx2, on_finish);
    });
  rados_client->get_aio_finisher()->queue(ctx);
}
// Synchronous unwatch: wraps aio_unwatch() and blocks on its completion.
int TestWatchNotify::unwatch(TestRadosClient *rados_client,
                             uint64_t handle) {
  C_SaferCond ctx;
  aio_unwatch(rados_client, handle, &ctx);
  return ctx.wait();
}

// Async unwatch: the removal runs on the client's AIO finisher thread.
void TestWatchNotify::aio_unwatch(TestRadosClient *rados_client,
                                  uint64_t handle, Context *on_finish) {
  auto ctx = new LambdaContext([this, rados_client, handle, on_finish](int) {
      execute_unwatch(rados_client, handle, on_finish);
    });
  rados_client->get_aio_finisher()->queue(ctx);
}
// Async notify: delivery to watchers happens on the AIO finisher thread.
// NOTE(review): timeout_ms is accepted for API parity but is not passed to
// execute_notify(), so the stub never times a notify out.
void TestWatchNotify::aio_notify(TestRadosClient *rados_client, int64_t pool_id,
                                 const std::string& nspace,
                                 const std::string& oid, const bufferlist& bl,
                                 uint64_t timeout_ms, bufferlist *pbl,
                                 Context *on_notify) {
  auto ctx = new LambdaContext([=, this](int) {
      execute_notify(rados_client, pool_id, nspace, oid, bl, pbl, on_notify);
    });
  rados_client->get_aio_finisher()->queue(ctx);
}

// Synchronous notify: wraps aio_notify() and blocks on its completion.
int TestWatchNotify::notify(TestRadosClient *rados_client, int64_t pool_id,
                            const std::string& nspace, const std::string& oid,
                            bufferlist& bl, uint64_t timeout_ms,
                            bufferlist *pbl) {
  C_SaferCond cond;
  aio_notify(rados_client, pool_id, nspace, oid, bl, timeout_ms, pbl, &cond);
  return cond.wait();
}
// Record a watcher's response to a notify and complete the notify if this
// was the last outstanding ack.
void TestWatchNotify::notify_ack(TestRadosClient *rados_client, int64_t pool_id,
                                 const std::string& nspace,
                                 const std::string& o, uint64_t notify_id,
                                 uint64_t handle, uint64_t gid,
                                 bufferlist& bl) {
  CephContext *cct = rados_client->cct();
  ldout(cct, 20) << "notify_id=" << notify_id << ", handle=" << handle
		 << ", gid=" << gid << dendl;
  std::lock_guard lock{m_lock};
  WatcherID watcher_id = std::make_pair(gid, handle);
  ack_notify(rados_client, pool_id, nspace, o, notify_id, watcher_id, bl);
  finish_notify(rados_client, pool_id, nspace, o, notify_id);
}
// Register a watch on the given object (runs on the AIO finisher thread).
// Assigns a new process-unique handle, records the WatchHandle on the
// object's Watcher, and completes on_finish with 0, or with -ENOENT when
// the object does not exist.
//
// Previously this used manual m_lock.lock()/unlock(), which would leak the
// lock if anything between the calls threw; std::unique_lock provides the
// same unlock-before-callback ordering with RAII safety.
void TestWatchNotify::execute_watch(TestRadosClient *rados_client,
                                    int64_t pool_id, const std::string& nspace,
                                    const std::string& o, uint64_t gid,
                                    uint64_t *handle, librados::WatchCtx *ctx,
                                    librados::WatchCtx2 *ctx2,
                                    Context* on_finish) {
  CephContext *cct = rados_client->cct();

  std::unique_lock locker{m_lock};
  SharedWatcher watcher = get_watcher(pool_id, nspace, o);
  if (!watcher) {
    // object does not exist
    locker.unlock();
    on_finish->complete(-ENOENT);
    return;
  }

  WatchHandle watch_handle;
  watch_handle.rados_client = rados_client;
  watch_handle.addr = "127.0.0.1:0/" + stringify(rados_client->get_nonce());
  watch_handle.nonce = rados_client->get_nonce();
  watch_handle.gid = gid;
  watch_handle.handle = ++m_handle;
  watch_handle.watch_ctx = ctx;
  watch_handle.watch_ctx2 = ctx2;
  watcher->watch_handles[watch_handle.handle] = watch_handle;

  *handle = watch_handle.handle;

  ldout(cct, 20) << "oid=" << o << ", gid=" << gid << ": handle=" << *handle
                 << dendl;

  // complete the callback without holding the lock (matches the original
  // ordering; avoids re-entrancy into this class while locked)
  locker.unlock();
  on_finish->complete(0);
}
// Remove the watch identified by handle (runs on the AIO finisher thread).
// Handles are process-unique (see execute_watch), so the first match found
// while scanning the watchers is the only match.
void TestWatchNotify::execute_unwatch(TestRadosClient *rados_client,
                                      uint64_t handle, Context* on_finish) {
  CephContext *cct = rados_client->cct();

  ldout(cct, 20) << "handle=" << handle << dendl;
  {
    std::lock_guard locker{m_lock};
    for (auto& file_watcher : m_file_watchers) {
      SharedWatcher watcher = file_watcher.second;

      auto handle_it = watcher->watch_handles.find(handle);
      if (handle_it == watcher->watch_handles.end()) {
        continue;
      }

      watcher->watch_handles.erase(handle_it);
      maybe_remove_watcher(watcher);
      break;
    }
  }
  on_finish->complete(0);
}
// Look up (creating on first use) the Watcher for (pool, namespace, oid).
// Creation registers an ObjectHandler with the TestCluster so object
// removal can tear the watcher down; if registration fails the object does
// not exist and an empty pointer is returned.  Caller must hold m_lock.
TestWatchNotify::SharedWatcher TestWatchNotify::get_watcher(
    int64_t pool_id, const std::string& nspace, const std::string& oid) {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  auto it = m_file_watchers.find({pool_id, nspace, oid});
  if (it == m_file_watchers.end()) {
    SharedWatcher watcher(new Watcher(pool_id, nspace, oid));
    watcher->object_handler.reset(new ObjectHandler(
      this, pool_id, nspace, oid));
    int r = m_test_cluster->register_object_handler(
      pool_id, {nspace, oid}, watcher->object_handler.get());
    if (r < 0) {
      // object doesn't exist
      return SharedWatcher();
    }
    m_file_watchers[{pool_id, nspace, oid}] = watcher;
    return watcher;
  }

  return it->second;
}
// Tear down a Watcher once it has no remaining watch handles and no
// in-flight notifies: unregister its object handler from the cluster and
// drop it from m_file_watchers.  Caller must hold m_lock.
void TestWatchNotify::maybe_remove_watcher(SharedWatcher watcher) {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  // TODO
  if (watcher->watch_handles.empty() && watcher->notify_handles.empty()) {
    auto pool_id = watcher->pool_id;
    auto& nspace = watcher->nspace;
    auto& oid = watcher->oid;
    if (watcher->object_handler) {
      m_test_cluster->unregister_object_handler(pool_id, {nspace, oid},
                                                watcher->object_handler.get());
      watcher->object_handler.reset();
    }

    m_file_watchers.erase({pool_id, nspace, oid});
  }
}
// Deliver a notify to every current watcher of the object (runs on the
// AIO finisher thread).  A NotifyHandle tracks which watchers still owe an
// ack; each watcher's callback is queued on *that watcher's* client
// finisher.  Old-style (WatchCtx) watchers are auto-acked since they have
// no notify_ack path.  finish_notify() is called immediately afterwards so
// a notify with zero watchers completes right away.
void TestWatchNotify::execute_notify(TestRadosClient *rados_client,
                                     int64_t pool_id, const std::string& nspace,
                                     const std::string &oid,
                                     const bufferlist &bl, bufferlist *pbl,
                                     Context *on_notify) {
  CephContext *cct = rados_client->cct();

  m_lock.lock();
  uint64_t notify_id = ++m_notify_id;

  SharedWatcher watcher = get_watcher(pool_id, nspace, oid);
  if (!watcher) {
    ldout(cct, 1) << "oid=" << oid << ": not found" << dendl;
    m_lock.unlock();
    on_notify->complete(-ENOENT);
    return;
  }

  ldout(cct, 20) << "oid=" << oid << ": notify_id=" << notify_id << dendl;

  SharedNotifyHandle notify_handle(new NotifyHandle());
  notify_handle->rados_client = rados_client;
  notify_handle->pbl = pbl;
  notify_handle->on_notify = on_notify;

  WatchHandles &watch_handles = watcher->watch_handles;
  for (auto &watch_handle_pair : watch_handles) {
    WatchHandle &watch_handle = watch_handle_pair.second;
    notify_handle->pending_watcher_ids.insert(std::make_pair(
      watch_handle.gid, watch_handle.handle));

    m_async_op_tracker.start_op();
    uint64_t notifier_id = rados_client->get_instance_id();
    // watch_handle is captured by copy: the watcher may be removed before
    // the queued callback runs
    watch_handle.rados_client->get_aio_finisher()->queue(new LambdaContext(
      [this, pool_id, nspace, oid, bl, notify_id, watch_handle, notifier_id](int r) {
        bufferlist notify_bl;
        notify_bl.append(bl);

        if (watch_handle.watch_ctx2 != NULL) {
          watch_handle.watch_ctx2->handle_notify(notify_id,
                                                 watch_handle.handle,
                                                 notifier_id, notify_bl);
        } else if (watch_handle.watch_ctx != NULL) {
          watch_handle.watch_ctx->notify(0, 0, notify_bl);

          // auto ack old-style watch/notify clients
          // NOTE(review): ack_notify() asserts m_lock is held but this runs
          // on the finisher thread without taking it -- harmless when
          // ceph_mutex_is_locked() compiles to true, verify for
          // mutex-debug builds
          ack_notify(watch_handle.rados_client, pool_id, nspace, oid, notify_id,
                     {watch_handle.gid, watch_handle.handle}, bufferlist());
        }

        m_async_op_tracker.finish_op();
      }));
  }
  watcher->notify_handles[notify_id] = notify_handle;

  finish_notify(rados_client, pool_id, nspace, oid, notify_id);
  m_lock.unlock();
}
// Record one watcher's response to an in-flight notify: store the reply
// payload under its WatcherID and clear it from the pending set.  Caller
// must hold m_lock.  Unknown objects or notify ids are logged and ignored.
void TestWatchNotify::ack_notify(TestRadosClient *rados_client, int64_t pool_id,
                                 const std::string& nspace,
                                 const std::string &oid, uint64_t notify_id,
                                 const WatcherID &watcher_id,
                                 const bufferlist &bl) {
  CephContext *cct = rados_client->cct();

  ceph_assert(ceph_mutex_is_locked(m_lock));
  SharedWatcher watcher = get_watcher(pool_id, nspace, oid);
  if (!watcher) {
    ldout(cct, 1) << "oid=" << oid << ": not found" << dendl;
    return;
  }

  auto handle_it = watcher->notify_handles.find(notify_id);
  if (handle_it == watcher->notify_handles.end()) {
    ldout(cct, 1) << "oid=" << oid << ", notify_id=" << notify_id
                  << ", WatcherID=" << watcher_id << ": not found" << dendl;
    return;
  }

  ldout(cct, 20) << "oid=" << oid << ", notify_id=" << notify_id
                 << ", WatcherID=" << watcher_id << dendl;

  bufferlist reply;
  reply.append(bl);

  SharedNotifyHandle notify_handle = handle_it->second;
  notify_handle->notify_responses[watcher_id] = reply;
  notify_handle->pending_watcher_ids.erase(watcher_id);
}
// Complete a notify once every watcher has acked.  If watchers are still
// pending this is a no-op; otherwise the collected responses and the
// (now empty) pending-watcher set are encoded into the caller's response
// bufferlist and on_notify is queued on the notifier's finisher.  Caller
// must hold m_lock.
void TestWatchNotify::finish_notify(TestRadosClient *rados_client,
                                    int64_t pool_id, const std::string& nspace,
                                    const std::string &oid,
                                    uint64_t notify_id) {
  CephContext *cct = rados_client->cct();

  ldout(cct, 20) << "oid=" << oid << ", notify_id=" << notify_id << dendl;

  ceph_assert(ceph_mutex_is_locked(m_lock));
  SharedWatcher watcher = get_watcher(pool_id, nspace, oid);
  if (!watcher) {
    ldout(cct, 1) << "oid=" << oid << ": not found" << dendl;
    return;
  }

  NotifyHandles::iterator it = watcher->notify_handles.find(notify_id);
  if (it == watcher->notify_handles.end()) {
    ldout(cct, 1) << "oid=" << oid << ", notify_id=" << notify_id
                  << ": not found" << dendl;
    return;
  }

  SharedNotifyHandle notify_handle = it->second;
  if (!notify_handle->pending_watcher_ids.empty()) {
    ldout(cct, 10) << "oid=" << oid << ", notify_id=" << notify_id
                   << ": pending watchers, returning" << dendl;
    return;
  }

  ldout(cct, 20) << "oid=" << oid << ", notify_id=" << notify_id
                 << ": completing" << dendl;

  if (notify_handle->pbl != NULL) {
    // responses map followed by the set of non-responding watchers,
    // mirroring the layout librados notify callers decode
    encode(notify_handle->notify_responses, *notify_handle->pbl);
    encode(notify_handle->pending_watcher_ids, *notify_handle->pbl);
  }

  notify_handle->rados_client->get_aio_finisher()->queue(
    notify_handle->on_notify, 0);
  watcher->notify_handles.erase(notify_id);
  maybe_remove_watcher(watcher);
}
// Simulate blocklisting a client: remove every watch handle registered
// with the given client nonce and deliver -ENOTCONN to v2 watch contexts
// (v1 contexts have no error callback).
void TestWatchNotify::blocklist(uint32_t nonce) {
  std::lock_guard locker{m_lock};

  // file_it is advanced before maybe_remove_watcher() so erasing the
  // current watcher from m_file_watchers cannot invalidate it
  for (auto file_it = m_file_watchers.begin();
       file_it != m_file_watchers.end(); ) {
    auto &watcher = file_it->second;
    for (auto w_it = watcher->watch_handles.begin();
         w_it != watcher->watch_handles.end();) {
      auto& watch_handle = w_it->second;
      if (watch_handle.nonce == nonce) {
        auto handle = watch_handle.handle;
        auto watch_ctx2 = watch_handle.watch_ctx2;
        if (watch_ctx2 != nullptr) {
          auto ctx = new LambdaContext([handle, watch_ctx2](int) {
              watch_ctx2->handle_error(handle, -ENOTCONN);
            });
          watch_handle.rados_client->get_aio_finisher()->queue(ctx);
        }
        w_it = watcher->watch_handles.erase(w_it);
      } else {
        ++w_it;
      }
    }

    ++file_it;
    maybe_remove_watcher(watcher);
  }
}
// Invoked (via ObjectHandler) when the backing object is deleted: fail all
// in-flight notifies with -ENOENT, signal -ENOTCONN to every v2 watch
// context, and drop the watcher entirely.
void TestWatchNotify::handle_object_removed(int64_t pool_id,
                                            const std::string& nspace,
                                            const std::string& oid) {
  std::lock_guard locker{m_lock};
  auto it = m_file_watchers.find({pool_id, nspace, oid});
  if (it == m_file_watchers.end()) {
    return;
  }

  auto watcher = it->second;

  // cancel all in-flight notifications
  for (auto& notify_handle_pair : watcher->notify_handles) {
    auto notify_handle = notify_handle_pair.second;
    notify_handle->rados_client->get_aio_finisher()->queue(
      notify_handle->on_notify, -ENOENT);
  }

  // alert all watchers of the loss of connection
  for (auto& watch_handle_pair : watcher->watch_handles) {
    auto& watch_handle = watch_handle_pair.second;
    auto handle = watch_handle.handle;
    auto watch_ctx2 = watch_handle.watch_ctx2;
    if (watch_ctx2 != nullptr) {
      auto ctx = new LambdaContext([handle, watch_ctx2](int) {
          watch_ctx2->handle_error(handle, -ENOTCONN);
        });
      watch_handle.rados_client->get_aio_finisher()->queue(ctx);
    }
  }
  m_file_watchers.erase(it);
}
} // namespace librados
| 16,552 | 34.984783 | 87 | cc |
null | ceph-main/src/test/librados_test_stub/TestWatchNotify.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_WATCH_NOTIFY_H
#define CEPH_TEST_WATCH_NOTIFY_H
#include "include/rados/librados.hpp"
#include "common/AsyncOpTracker.h"
#include "common/ceph_mutex.h"
#include <boost/noncopyable.hpp>
#include <boost/shared_ptr.hpp>
#include <list>
#include <map>
class Finisher;
namespace librados {
class TestCluster;
class TestRadosClient;
class TestWatchNotify : boost::noncopyable {
public:
typedef std::pair<uint64_t, uint64_t> WatcherID;
typedef std::set<WatcherID> WatcherIDs;
typedef std::map<std::pair<uint64_t, uint64_t>, bufferlist> NotifyResponses;
struct NotifyHandle {
TestRadosClient *rados_client = nullptr;
WatcherIDs pending_watcher_ids;
NotifyResponses notify_responses;
bufferlist *pbl = nullptr;
Context *on_notify = nullptr;
};
typedef boost::shared_ptr<NotifyHandle> SharedNotifyHandle;
typedef std::map<uint64_t, SharedNotifyHandle> NotifyHandles;
struct WatchHandle {
TestRadosClient *rados_client = nullptr;
std::string addr;
uint32_t nonce;
uint64_t gid;
uint64_t handle;
librados::WatchCtx* watch_ctx;
librados::WatchCtx2* watch_ctx2;
};
typedef std::map<uint64_t, WatchHandle> WatchHandles;
struct ObjectHandler;
typedef boost::shared_ptr<ObjectHandler> SharedObjectHandler;
struct Watcher {
Watcher(int64_t pool_id, const std::string& nspace, const std::string& oid)
: pool_id(pool_id), nspace(nspace), oid(oid) {
}
int64_t pool_id;
std::string nspace;
std::string oid;
SharedObjectHandler object_handler;
WatchHandles watch_handles;
NotifyHandles notify_handles;
};
typedef boost::shared_ptr<Watcher> SharedWatcher;
TestWatchNotify(TestCluster* test_cluster);
int list_watchers(int64_t pool_id, const std::string& nspace,
const std::string& o, std::list<obj_watch_t> *out_watchers);
void aio_flush(TestRadosClient *rados_client, Context *on_finish);
void aio_watch(TestRadosClient *rados_client, int64_t pool_id,
const std::string& nspace, const std::string& o, uint64_t gid,
uint64_t *handle, librados::WatchCtx *watch_ctx,
librados::WatchCtx2 *watch_ctx2, Context *on_finish);
void aio_unwatch(TestRadosClient *rados_client, uint64_t handle,
Context *on_finish);
void aio_notify(TestRadosClient *rados_client, int64_t pool_id,
const std::string& nspace, const std::string& oid,
const bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl,
Context *on_notify);
void flush(TestRadosClient *rados_client);
int notify(TestRadosClient *rados_client, int64_t pool_id,
const std::string& nspace, const std::string& o, bufferlist& bl,
uint64_t timeout_ms, bufferlist *pbl);
void notify_ack(TestRadosClient *rados_client, int64_t pool_id,
const std::string& nspace, const std::string& o,
uint64_t notify_id, uint64_t handle, uint64_t gid,
bufferlist& bl);
int watch(TestRadosClient *rados_client, int64_t pool_id,
const std::string& nspace, const std::string& o, uint64_t gid,
uint64_t *handle, librados::WatchCtx *ctx,
librados::WatchCtx2 *ctx2);
int unwatch(TestRadosClient *rados_client, uint64_t handle);
void blocklist(uint32_t nonce);
private:
typedef std::tuple<int64_t, std::string, std::string> PoolFile;
typedef std::map<PoolFile, SharedWatcher> FileWatchers;
TestCluster *m_test_cluster;
uint64_t m_handle = 0;
uint64_t m_notify_id = 0;
ceph::mutex m_lock =
ceph::make_mutex("librados::TestWatchNotify::m_lock");
AsyncOpTracker m_async_op_tracker;
FileWatchers m_file_watchers;
SharedWatcher get_watcher(int64_t pool_id, const std::string& nspace,
const std::string& oid);
void maybe_remove_watcher(SharedWatcher shared_watcher);
void execute_watch(TestRadosClient *rados_client, int64_t pool_id,
const std::string& nspace, const std::string& o,
uint64_t gid, uint64_t *handle,
librados::WatchCtx *watch_ctx,
librados::WatchCtx2 *watch_ctx2,
Context *on_finish);
void execute_unwatch(TestRadosClient *rados_client, uint64_t handle,
Context *on_finish);
void execute_notify(TestRadosClient *rados_client, int64_t pool_id,
const std::string& nspace, const std::string &oid,
const bufferlist &bl, bufferlist *pbl,
Context *on_notify);
void ack_notify(TestRadosClient *rados_client, int64_t pool_id,
const std::string& nspace, const std::string &oid,
uint64_t notify_id, const WatcherID &watcher_id,
const bufferlist &bl);
void finish_notify(TestRadosClient *rados_client, int64_t pool_id,
const std::string& nspace, const std::string &oid,
uint64_t notify_id);
void handle_object_removed(int64_t pool_id, const std::string& nspace,
const std::string& oid);
};
} // namespace librados
#endif // CEPH_TEST_WATCH_NOTIFY_H
| 5,364 | 35.006711 | 80 | h |
null | ceph-main/src/test/libradosstriper/TestCase.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <errno.h>
#include "test/librados/test.h"
#include "test/librados/test_cxx.h"
#include "test/libradosstriper/TestCase.h"
using namespace libradosstriper;
// Case-wide shared state: one temporary pool and one cluster handle are
// created once and reused by every test in the case.
std::string StriperTest::pool_name;
rados_t StriperTest::s_cluster = NULL;

// Create the uniquely-named temporary pool for the whole test case.
void StriperTest::SetUpTestCase()
{
  pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &s_cluster));
}

// Destroy the shared pool once all tests in the case have run.
void StriperTest::TearDownTestCase()
{
  ASSERT_EQ(0, destroy_one_pool(pool_name, &s_cluster));
}

// Per-test setup: fresh ioctx and striper handles on the shared pool.
void StriperTest::SetUp()
{
  cluster = StriperTest::s_cluster;
  ASSERT_EQ(0, rados_ioctx_create(cluster, pool_name.c_str(), &ioctx));
  ASSERT_EQ(0, rados_striper_create(ioctx, &striper));
}

// Per-test teardown: release the C-API handles created in SetUp().
void StriperTest::TearDown()
{
  rados_striper_destroy(striper);
  rados_ioctx_destroy(ioctx);
}
// Case-wide shared state for the C++-API fixture.
std::string StriperTestPP::pool_name;
librados::Rados StriperTestPP::s_cluster;

// Create the uniquely-named temporary pool for the whole test case.
void StriperTestPP::SetUpTestCase()
{
  pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool_pp(pool_name, s_cluster));
}

// Destroy the shared pool once all tests in the case have run.
void StriperTestPP::TearDownTestCase()
{
  ASSERT_EQ(0, destroy_one_pool_pp(pool_name, s_cluster));
}

// Per-test setup: fresh IoCtx and RadosStriper on the shared pool
// (both are C++ objects, so teardown is handled by their destructors).
void StriperTestPP::SetUp()
{
  ASSERT_EQ(0, cluster.ioctx_create(pool_name.c_str(), ioctx));
  ASSERT_EQ(0, RadosStriper::striper_create(ioctx, &striper));
}
// this is pure copy and paste from the previous class,
// needed only because of the inheritance from TestWithParam;
// with gtest >= 1.6 we could avoid the duplication by using
// inheritance from WithParamInterface
std::string StriperTestParam::pool_name;
librados::Rados StriperTestParam::s_cluster;

// Create the uniquely-named temporary pool for the whole test case.
void StriperTestParam::SetUpTestCase()
{
  pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool_pp(pool_name, s_cluster));
}

// Destroy the shared pool once all tests in the case have run.
void StriperTestParam::TearDownTestCase()
{
  ASSERT_EQ(0, destroy_one_pool_pp(pool_name, s_cluster));
}

// Per-test setup: fresh IoCtx and RadosStriper on the shared pool.
void StriperTestParam::SetUp()
{
  ASSERT_EQ(0, cluster.ioctx_create(pool_name.c_str(), ioctx));
  ASSERT_EQ(0, RadosStriper::striper_create(ioctx, &striper));
}
| 2,025 | 24.012346 | 71 | cc |
null | ceph-main/src/test/libradosstriper/TestCase.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_RADOS_TESTCASE_H
#define CEPH_TEST_RADOS_TESTCASE_H
#include "include/rados/librados.h"
#include "include/rados/librados.hpp"
#include "include/radosstriper/libradosstriper.h"
#include "include/radosstriper/libradosstriper.hpp"
#include "gtest/gtest.h"
#include <string>
/**
 * These test cases create a temporary pool that lives as long as the
 * test case.  Each test within a test case gets fresh ioctx and striper
 * handles on that shared pool.
 *
 * Since pool creation and deletion are slow, this allows many tests to
 * run faster.
 */
// C-API fixture: each test gets fresh rados_ioctx_t and rados_striper_t
// handles on the case-wide temporary pool (see SetUp()/TearDown() in the
// .cc file).
class StriperTest : public ::testing::Test {
public:
  StriperTest() {}
  ~StriperTest() override {}
protected:
  // case-wide pool creation/destruction
  static void SetUpTestCase();
  static void TearDownTestCase();

  // cluster handle shared by all tests in the case
  static rados_t s_cluster;
  static std::string pool_name;

  void SetUp() override;
  void TearDown() override;

  rados_t cluster = NULL;
  rados_ioctx_t ioctx = NULL;
  rados_striper_t striper = NULL;
};
// C++-API fixture: each test gets a fresh IoCtx and RadosStriper on the
// case-wide temporary pool; cleanup is handled by the member destructors.
class StriperTestPP : public ::testing::Test {
public:
  StriperTestPP() : cluster(s_cluster) {}
  ~StriperTestPP() override {}
  // case-wide pool creation/destruction
  static void SetUpTestCase();
  static void TearDownTestCase();
protected:
  static librados::Rados s_cluster;
  static std::string pool_name;
  void SetUp() override;

  librados::Rados &cluster;
  librados::IoCtx ioctx;
  libradosstriper::RadosStriper striper;
};
// Parameter set for a parameterized striper test run: the striping layout
// plus the amount of data the test exercises.
struct TestData {
  uint32_t stripe_unit;
  uint32_t stripe_count;
  uint32_t object_size;
  size_t size;
};
// this is pure copy and paste from the previous class,
// needed only because of the inheritance from TestWithParam;
// with gtest >= 1.6 we could avoid the duplication by using
// inheritance from WithParamInterface
class StriperTestParam : public ::testing::TestWithParam<TestData> {
public:
  StriperTestParam() : cluster(s_cluster) {}
  ~StriperTestParam() override {}
  // case-wide pool creation/destruction
  static void SetUpTestCase();
  static void TearDownTestCase();
protected:
  static librados::Rados s_cluster;
  static std::string pool_name;
  void SetUp() override;

  librados::Rados &cluster;
  librados::IoCtx ioctx;
  libradosstriper::RadosStriper striper;
};
#endif
| 2,186 | 25.349398 | 72 | h |