Search is not available for this dataset
repo
stringlengths
2
152
file
stringlengths
15
239
code
stringlengths
0
58.4M
file_length
int64
0
58.4M
avg_line_length
float64
0
1.81M
max_line_length
int64
0
12.7M
extension_type
stringclasses
364 values
null
ceph-main/src/test/librbd/mock/MockImageCtx.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "include/neorados/RADOS.hpp"
#include "test/librbd/mock/MockImageCtx.h"
#include "test/librbd/mock/MockSafeTimer.h"
#include "test/librbd/mock/crypto/MockEncryptionFormat.h"
#include "librbd/io/AsyncOperation.h"

// Process-wide timer pair injected by tests via set_timer_instance() and
// handed back by get_timer_instance().
static MockSafeTimer *s_timer;
static ceph::mutex *s_timer_lock;

namespace librbd {

MockImageCtx* MockImageCtx::s_instance = nullptr;

// Snapshot the state of a real ImageCtx into the mock.  Most scalar fields
// are copied by value; the locks, asio engine, rados_api and event_socket
// members alias the real ImageCtx's objects (reference members), so the
// source ImageCtx must outlive this mock.  Collaborator objects
// (dispatchers, work queue, plugin registry, operations, state) are fresh
// mocks owned by this instance and released in the destructor.
MockImageCtx::MockImageCtx(librbd::ImageCtx &image_ctx)
  : image_ctx(&image_ctx),
    cct(image_ctx.cct),
    perfcounter(image_ctx.perfcounter),
    snap_namespace(image_ctx.snap_namespace),
    snap_name(image_ctx.snap_name),
    snap_id(image_ctx.snap_id),
    snap_exists(image_ctx.snap_exists),
    snapc(image_ctx.snapc),
    snaps(image_ctx.snaps),
    snap_info(image_ctx.snap_info),
    snap_ids(image_ctx.snap_ids),
    old_format(image_ctx.old_format),
    read_only(image_ctx.read_only),
    read_only_flags(image_ctx.read_only_flags),
    read_only_mask(image_ctx.read_only_mask),
    clone_copy_on_read(image_ctx.clone_copy_on_read),
    lockers(image_ctx.lockers),
    exclusive_locked(image_ctx.exclusive_locked),
    lock_tag(image_ctx.lock_tag),
    asio_engine(image_ctx.asio_engine),
    rados_api(image_ctx.rados_api),
    owner_lock(image_ctx.owner_lock),
    image_lock(image_ctx.image_lock),
    timestamp_lock(image_ctx.timestamp_lock),
    async_ops_lock(image_ctx.async_ops_lock),
    copyup_list_lock(image_ctx.copyup_list_lock),
    order(image_ctx.order),
    size(image_ctx.size),
    features(image_ctx.features),
    flags(image_ctx.flags),
    op_features(image_ctx.op_features),
    operations_disabled(image_ctx.operations_disabled),
    stripe_unit(image_ctx.stripe_unit),
    stripe_count(image_ctx.stripe_count),
    object_prefix(image_ctx.object_prefix),
    header_oid(image_ctx.header_oid),
    id(image_ctx.id),
    name(image_ctx.name),
    parent_md(image_ctx.parent_md),
    format_string(image_ctx.format_string),
    group_spec(image_ctx.group_spec),
    layout(image_ctx.layout),
    // owned mock collaborators -- deleted in ~MockImageCtx()
    io_image_dispatcher(new io::MockImageDispatcher()),
    io_object_dispatcher(new io::MockObjectDispatcher()),
    op_work_queue(new MockContextWQ()),
    plugin_registry(new MockPluginRegistry()),
    readahead_max_bytes(image_ctx.readahead_max_bytes),
    event_socket(image_ctx.event_socket),
    parent(NULL),
    operations(new MockOperations()),
    state(new MockImageState()),
    image_watcher(NULL), object_map(NULL),
    exclusive_lock(NULL), journal(NULL),
    trace_endpoint(image_ctx.trace_endpoint),
    sparse_read_threshold_bytes(image_ctx.sparse_read_threshold_bytes),
    discard_granularity_bytes(image_ctx.discard_granularity_bytes),
    mirroring_replay_delay(image_ctx.mirroring_replay_delay),
    non_blocking_aio(image_ctx.non_blocking_aio),
    blkin_trace_all(image_ctx.blkin_trace_all),
    enable_alloc_hint(image_ctx.enable_alloc_hint),
    alloc_hint_flags(image_ctx.alloc_hint_flags),
    read_flags(image_ctx.read_flags),
    ignore_migrating(image_ctx.ignore_migrating),
    enable_sparse_copyup(image_ctx.enable_sparse_copyup),
    mtime_update_interval(image_ctx.mtime_update_interval),
    atime_update_interval(image_ctx.atime_update_interval),
    cache(image_ctx.cache),
    config(image_ctx.config)
{
  // IoCtxs cannot be copied via the init list; dup() clones them here.
  md_ctx.dup(image_ctx.md_ctx);
  data_ctx.dup(image_ctx.data_ctx);

  // Only create a mock watcher if the real image actually has one.
  if (image_ctx.image_watcher != NULL) {
    image_watcher = new MockImageWatcher();
  }
}

// Drain all in-flight work against the *real* ImageCtx before tearing down
// the owned mock collaborators; the flush/drain order matters.
MockImageCtx::~MockImageCtx() {
  wait_for_async_requests();
  wait_for_async_ops();
  image_ctx->md_ctx.aio_flush();
  image_ctx->data_ctx.aio_flush();
  image_ctx->op_work_queue->drain();
  delete state;
  delete operations;
  delete image_watcher;
  delete op_work_queue;
  delete plugin_registry;
  delete io_image_dispatcher;
  delete io_object_dispatcher;
}

void MockImageCtx::set_timer_instance(MockSafeTimer *timer,
                                      ceph::mutex *timer_lock) {
  s_timer = timer;
  s_timer_lock = timer_lock;
}

// NOTE: cct is unused; the signature mirrors the real ImageCtx API.
void MockImageCtx::get_timer_instance(CephContext *cct, MockSafeTimer **timer,
                                      ceph::mutex **timer_lock) {
  *timer = s_timer;
  *timer_lock = s_timer_lock;
}

// Block until every async op on the real ImageCtx has flushed, by starting
// a sentinel AsyncOperation and waiting for its flush callback.
void MockImageCtx::wait_for_async_ops() {
  io::AsyncOperation async_op;
  async_op.start_op(*image_ctx);

  C_SaferCond ctx;
  async_op.flush(&ctx);
  ctx.wait();

  async_op.finish_op();
}

// Build a neorados IOContext reflecting the current snap selection:
// read snapshot (when set to other than CEPH_NOSNAP) and write snap context.
IOContext MockImageCtx::get_data_io_context() {
  auto ctx = std::make_shared<neorados::IOContext>(
      data_ctx.get_id(), data_ctx.get_namespace());
  if (snap_id != CEPH_NOSNAP) {
    ctx->read_snap(snap_id);
  }
  if (!snapc.snaps.empty()) {
    ctx->write_snap_context(
        {{snapc.seq, {snapc.snaps.begin(), snapc.snaps.end()}}});
  }
  return ctx;
}

// Deep-copy of the context returned by get_data_io_context().
IOContext MockImageCtx::duplicate_data_io_context() {
  return std::make_shared<neorados::IOContext>(*get_data_io_context());
}

} // namespace librbd
4,932
31.886667
78
cc
null
ceph-main/src/test/librbd/mock/MockImageCtx.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_TEST_LIBRBD_MOCK_IMAGE_CTX_H
#define CEPH_TEST_LIBRBD_MOCK_IMAGE_CTX_H

#include "include/rados/librados.hpp"
#include "test/librbd/mock/MockContextWQ.h"
#include "test/librbd/mock/MockExclusiveLock.h"
#include "test/librbd/mock/MockImageState.h"
#include "test/librbd/mock/MockImageWatcher.h"
#include "test/librbd/mock/MockJournal.h"
#include "test/librbd/mock/MockObjectMap.h"
#include "test/librbd/mock/MockOperations.h"
#include "test/librbd/mock/MockPluginRegistry.h"
#include "test/librbd/mock/MockReadahead.h"
#include "test/librbd/mock/io/MockImageDispatcher.h"
#include "test/librbd/mock/io/MockObjectDispatcher.h"
#include "common/WorkQueue.h"
#include "common/zipkin_trace.h"
#include "librbd/ImageCtx.h"
#include "gmock/gmock.h"
#include <string>

class MockSafeTimer;

namespace librbd {

namespace operation {
template <typename> class ResizeRequest;
}

namespace crypto {
class MockEncryptionFormat;
}

// gmock double for librbd::ImageCtx.  Mirrors ImageCtx's public surface:
// the MOCK_* methods stub behavior, while the data members below replicate
// ImageCtx's fields (copies of scalars, references to the real locks) so
// templated librbd code can be instantiated against this type in tests.
struct MockImageCtx {
  static MockImageCtx *s_instance;

  // Factory hook used by templated code; tests must have constructed an
  // instance (which registers nothing here -- s_instance is set externally)
  // before create() is called.
  static MockImageCtx *create(const std::string &image_name,
                              const std::string &image_id,
                              const char *snap, librados::IoCtx& p,
                              bool read_only) {
    ceph_assert(s_instance != nullptr);
    return s_instance;
  }

  MockImageCtx(librbd::ImageCtx &image_ctx);
  virtual ~MockImageCtx();

  void wait_for_async_ops();

  // Block until all queued async requests have completed; returns
  // immediately when none are pending.  Registers a waiter under
  // async_ops_lock and waits outside the lock.
  void wait_for_async_requests() {
    async_ops_lock.lock();
    if (async_requests.empty()) {
      async_ops_lock.unlock();
      return;
    }

    C_SaferCond ctx;
    async_requests_waiters.push_back(&ctx);
    async_ops_lock.unlock();

    ctx.wait();
  }

  MOCK_METHOD1(init_layout, void(int64_t));

  MOCK_CONST_METHOD1(get_object_name, std::string(uint64_t));
  MOCK_CONST_METHOD0(get_object_size, uint64_t());
  MOCK_CONST_METHOD0(get_current_size, uint64_t());
  MOCK_CONST_METHOD1(get_image_size, uint64_t(librados::snap_t));
  MOCK_CONST_METHOD1(get_area_size, uint64_t(io::ImageArea));
  MOCK_CONST_METHOD1(get_object_count, uint64_t(librados::snap_t));
  MOCK_CONST_METHOD1(get_read_flags, int(librados::snap_t));
  MOCK_CONST_METHOD2(get_flags, int(librados::snap_t in_snap_id,
                                    uint64_t *flags));
  MOCK_CONST_METHOD2(get_snap_id,
                     librados::snap_t(cls::rbd::SnapshotNamespace snap_namespace,
                                      std::string in_snap_name));
  MOCK_CONST_METHOD1(get_snap_info, const SnapInfo*(librados::snap_t));
  MOCK_CONST_METHOD2(get_snap_name, int(librados::snap_t, std::string *));
  MOCK_CONST_METHOD2(get_snap_namespace,
                     int(librados::snap_t,
                         cls::rbd::SnapshotNamespace *out_snap_namespace));
  MOCK_CONST_METHOD2(get_parent_spec, int(librados::snap_t in_snap_id,
                                          cls::rbd::ParentImageSpec *pspec));
  MOCK_CONST_METHOD1(get_parent_info,
                     const ParentImageInfo*(librados::snap_t));
  MOCK_CONST_METHOD2(get_parent_overlap, int(librados::snap_t in_snap_id,
                                             uint64_t *raw_overlap));
  MOCK_CONST_METHOD2(reduce_parent_overlap,
                     std::pair<uint64_t, io::ImageArea>(uint64_t, bool));
  MOCK_CONST_METHOD4(prune_parent_extents,
                     uint64_t(std::vector<std::pair<uint64_t, uint64_t>>&,
                              io::ImageArea, uint64_t, bool));

  MOCK_CONST_METHOD2(is_snap_protected, int(librados::snap_t in_snap_id,
                                            bool *is_protected));
  MOCK_CONST_METHOD2(is_snap_unprotected, int(librados::snap_t in_snap_id,
                                              bool *is_unprotected));

  MOCK_CONST_METHOD0(get_create_timestamp, utime_t());
  MOCK_CONST_METHOD0(get_access_timestamp, utime_t());
  MOCK_CONST_METHOD0(get_modify_timestamp, utime_t());

  MOCK_METHOD1(set_access_timestamp, void(const utime_t at));
  MOCK_METHOD1(set_modify_timestamp, void(const utime_t at));

  MOCK_METHOD8(add_snap, void(cls::rbd::SnapshotNamespace in_snap_namespace,
                              std::string in_snap_name,
                              librados::snap_t id,
                              uint64_t in_size,
                              const ParentImageInfo &parent,
                              uint8_t protection_status,
                              uint64_t flags, utime_t timestamp));
  MOCK_METHOD3(rm_snap, void(cls::rbd::SnapshotNamespace in_snap_namespace,
                             std::string in_snap_name,
                             librados::snap_t id));

  MOCK_METHOD0(user_flushed, void());
  MOCK_METHOD1(flush_copyup, void(Context *));

  MOCK_CONST_METHOD1(test_features, bool(uint64_t test_features));
  MOCK_CONST_METHOD2(test_features, bool(uint64_t test_features,
                                         const ceph::shared_mutex &in_image_lock));

  MOCK_CONST_METHOD1(test_op_features, bool(uint64_t op_features));

  MOCK_METHOD1(cancel_async_requests, void(Context*));

  MOCK_METHOD0(create_exclusive_lock, MockExclusiveLock*());
  MOCK_METHOD1(create_object_map, MockObjectMap*(uint64_t));
  MOCK_METHOD0(create_journal, MockJournal*());

  MOCK_METHOD0(notify_update, void());
  MOCK_METHOD1(notify_update, void(Context *));

  MOCK_CONST_METHOD0(get_exclusive_lock_policy, exclusive_lock::Policy*());
  MOCK_METHOD1(set_exclusive_lock_policy, void(exclusive_lock::Policy*));

  MOCK_CONST_METHOD0(get_journal_policy, journal::Policy*());
  MOCK_METHOD1(set_journal_policy, void(journal::Policy*));

  MOCK_METHOD2(apply_metadata, int(const std::map<std::string, bufferlist> &,
                                   bool));

  MOCK_CONST_METHOD0(get_stripe_count, uint64_t());
  MOCK_CONST_METHOD0(get_stripe_period, uint64_t());

  MOCK_METHOD0(rebuild_data_io_context, void());
  IOContext get_data_io_context();
  IOContext duplicate_data_io_context();

  // Test-global timer hooks (see MockImageCtx.cc statics).
  static void set_timer_instance(MockSafeTimer *timer, ceph::mutex *timer_lock);
  static void get_timer_instance(CephContext *cct, MockSafeTimer **timer,
                                 ceph::mutex **timer_lock);

  // --- state mirrored from the wrapped ImageCtx -------------------------
  ImageCtx *image_ctx;
  CephContext *cct;
  PerfCounters *perfcounter;

  cls::rbd::SnapshotNamespace snap_namespace;
  std::string snap_name;
  uint64_t snap_id;
  bool snap_exists;

  ::SnapContext snapc;
  std::vector<librados::snap_t> snaps;
  std::map<librados::snap_t, SnapInfo> snap_info;
  std::map<ImageCtx::SnapKey, librados::snap_t,
           ImageCtx::SnapKeyComparator> snap_ids;

  bool old_format;
  bool read_only;
  uint32_t read_only_flags;
  uint32_t read_only_mask;

  bool clone_copy_on_read;

  std::map<rados::cls::lock::locker_id_t,
           rados::cls::lock::locker_info_t> lockers;
  bool exclusive_locked;
  std::string lock_tag;

  std::shared_ptr<AsioEngine> asio_engine;
  neorados::RADOS& rados_api;

  librados::IoCtx md_ctx;
  librados::IoCtx data_ctx;

  // References to the real ImageCtx's locks -- not owned.
  ceph::shared_mutex &owner_lock;
  ceph::shared_mutex &image_lock;
  ceph::shared_mutex &timestamp_lock;
  ceph::mutex &async_ops_lock;
  ceph::mutex &copyup_list_lock;

  uint8_t order;
  uint64_t size;
  uint64_t features;
  uint64_t flags;
  uint64_t op_features;
  bool operations_disabled;
  uint64_t stripe_unit;
  uint64_t stripe_count;
  std::string object_prefix;
  std::string header_oid;
  std::string id;
  std::string name;
  ParentImageInfo parent_md;
  MigrationInfo migration_info;
  char *format_string;
  cls::rbd::GroupSpec group_spec;

  file_layout_t layout;

  xlist<operation::ResizeRequest<MockImageCtx>*> resize_reqs;
  xlist<AsyncRequest<MockImageCtx>*> async_requests;
  std::list<Context*> async_requests_waiters;

  std::map<uint64_t, io::CopyupRequest<MockImageCtx>*> copyup_list;

  // Owned mock collaborators (allocated in the ctor, deleted in the dtor).
  io::MockImageDispatcher *io_image_dispatcher;
  io::MockObjectDispatcher *io_object_dispatcher;
  MockContextWQ *op_work_queue;

  MockPluginRegistry* plugin_registry;

  MockReadahead readahead;
  uint64_t readahead_max_bytes;
  EventSocket &event_socket;

  MockImageCtx *child = nullptr;
  MockImageCtx *parent;
  MockOperations *operations;
  MockImageState *state;

  MockImageWatcher *image_watcher;
  MockObjectMap *object_map;
  MockExclusiveLock *exclusive_lock;
  MockJournal *journal;

  ZTracer::Endpoint trace_endpoint;

  std::unique_ptr<crypto::MockEncryptionFormat> encryption_format;

  uint64_t sparse_read_threshold_bytes;
  uint32_t discard_granularity_bytes;
  int mirroring_replay_delay;
  bool non_blocking_aio;
  bool blkin_trace_all;
  bool enable_alloc_hint;
  uint32_t alloc_hint_flags;
  uint32_t read_flags;
  bool ignore_migrating;
  bool enable_sparse_copyup;
  uint64_t mtime_update_interval;
  uint64_t atime_update_interval;
  bool cache;

  ConfigProxy config;
  std::set<std::string> config_overrides;
};

} // namespace librbd

#endif // CEPH_TEST_LIBRBD_MOCK_IMAGE_CTX_H
8,644
31.996183
86
h
null
ceph-main/src/test/librbd/mock/MockImageState.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_TEST_LIBRBD_MOCK_IMAGE_STATE_H #define CEPH_TEST_LIBRBD_MOCK_IMAGE_STATE_H #include <gmock/gmock.h> #include "cls/rbd/cls_rbd_types.h" class Context; namespace librbd { class UpdateWatchCtx; struct MockImageState { MOCK_CONST_METHOD0(is_refresh_required, bool()); MOCK_METHOD1(refresh, void(Context*)); MOCK_METHOD2(open, void(bool, Context*)); MOCK_METHOD0(close, int()); MOCK_METHOD1(close, void(Context*)); MOCK_METHOD2(snap_set, void(uint64_t snap_id, Context*)); MOCK_METHOD1(prepare_lock, void(Context*)); MOCK_METHOD0(handle_prepare_lock_complete, void()); MOCK_METHOD2(register_update_watcher, int(UpdateWatchCtx *, uint64_t *)); MOCK_METHOD2(unregister_update_watcher, void(uint64_t, Context *)); MOCK_METHOD0(handle_update_notification, void()); }; } // namespace librbd #endif // CEPH_TEST_LIBRBD_MOCK_IMAGE_STATE_H
975
23.4
75
h
null
ceph-main/src/test/librbd/mock/MockImageWatcher.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_TEST_LIBRBD_MOCK_IMAGE_WATCHER_H #define CEPH_TEST_LIBRBD_MOCK_IMAGE_WATCHER_H #include "gmock/gmock.h" class Context; namespace librbd { class ProgressContext; struct MockImageWatcher { MOCK_METHOD0(is_registered, bool()); MOCK_METHOD0(is_unregistered, bool()); MOCK_METHOD0(is_blocklisted, bool()); MOCK_METHOD0(unregister_watch, void()); MOCK_METHOD1(flush, void(Context *)); MOCK_CONST_METHOD0(get_watch_handle, uint64_t()); MOCK_METHOD0(notify_acquired_lock, void()); MOCK_METHOD0(notify_released_lock, void()); MOCK_METHOD0(notify_request_lock, void()); MOCK_METHOD3(notify_quiesce, void(uint64_t *, ProgressContext &, Context *)); MOCK_METHOD2(notify_unquiesce, void(uint64_t, Context *)); }; } // namespace librbd #endif // CEPH_TEST_LIBRBD_MOCK_IMAGE_WATCHER_H
914
25.142857
79
h
null
ceph-main/src/test/librbd/mock/MockJournal.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/librbd/mock/MockJournal.h" namespace librbd { MockJournal *MockJournal::s_instance = nullptr; } // namespace librbd
233
20.272727
70
cc
null
ceph-main/src/test/librbd/mock/MockJournal.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_TEST_LIBRBD_MOCK_JOURNAL_H
#define CEPH_TEST_LIBRBD_MOCK_JOURNAL_H

#include "common/RefCountedObj.h"
#include "gmock/gmock.h"
#include "include/rados/librados_fwd.hpp"
#include "librbd/Journal.h"
#include "librbd/journal/Types.h"
#include <list>

struct Context;
struct ContextWQ;

namespace librbd {

struct ImageCtx;

// gmock double for librbd::Journal.  The real Journal exposes static
// entry points, so this mock routes them through a process-wide
// singleton registered by the most recently constructed instance.
struct MockJournal {
  static MockJournal *s_instance;
  static MockJournal *get_instance() {
    ceph_assert(s_instance != nullptr);
    return s_instance;
  }

  // Static shim mirroring Journal<I>::is_tag_owner(); forwards to the
  // singleton's mocked instance method.
  template <typename ImageCtxT>
  static int is_tag_owner(ImageCtxT *image_ctx, bool *is_tag_owner) {
    return get_instance()->is_tag_owner(is_tag_owner);
  }

  // Static shim mirroring Journal<I>::get_tag_owner().
  static void get_tag_owner(librados::IoCtx &,
                            const std::string &global_image_id,
                            std::string *tag_owner, ContextWQ *work_queue,
                            Context *on_finish) {
    get_instance()->get_tag_owner(global_image_id, tag_owner, work_queue,
                                  on_finish);
  }

  // Every construction re-points the singleton at this instance.
  MockJournal() {
    s_instance = this;
  }

  // Ref-counting no-ops to satisfy intrusive-pointer style callers.
  void get() {}
  void put() {}

  MOCK_CONST_METHOD0(is_journal_ready, bool());
  MOCK_CONST_METHOD0(is_journal_replaying, bool());
  MOCK_CONST_METHOD0(is_journal_appending, bool());

  MOCK_METHOD1(wait_for_journal_ready, void(Context *));

  MOCK_METHOD4(get_tag_owner, void(const std::string &,
                                   std::string *, ContextWQ *,
                                   Context *));

  MOCK_CONST_METHOD0(is_tag_owner, bool());
  MOCK_CONST_METHOD1(is_tag_owner, int(bool *));
  MOCK_METHOD3(allocate_tag, void(const std::string &mirror_uuid,
                                  const journal::TagPredecessor &predecessor,
                                  Context *on_finish));

  MOCK_METHOD1(open, void(Context *));
  MOCK_METHOD1(close, void(Context *));

  MOCK_CONST_METHOD0(get_tag_tid, uint64_t());
  MOCK_CONST_METHOD0(get_tag_data, journal::TagData());

  MOCK_METHOD0(allocate_op_tid, uint64_t());

  MOCK_METHOD0(user_flushed, void());

  MOCK_METHOD3(append_op_event_mock, void(uint64_t, const journal::EventEntry&,
                                          Context *));
  void append_op_event(uint64_t op_tid, journal::EventEntry &&event_entry,
                       Context *on_safe) {
    // googlemock doesn't support move semantics
    append_op_event_mock(op_tid, event_entry, on_safe);
  }

  MOCK_METHOD2(flush_event, void(uint64_t, Context *));
  MOCK_METHOD2(wait_event, void(uint64_t, Context *));

  MOCK_METHOD3(commit_op_event, void(uint64_t, int, Context *));
  MOCK_METHOD2(replay_op_ready, void(uint64_t, Context *));

  MOCK_METHOD1(add_listener, void(journal::Listener *));
  MOCK_METHOD1(remove_listener, void(journal::Listener *));

  MOCK_METHOD1(is_resync_requested, int(bool *));
};

} // namespace librbd

#endif // CEPH_TEST_LIBRBD_MOCK_JOURNAL_H
2,977
29.701031
79
h
null
ceph-main/src/test/librbd/mock/MockJournalPolicy.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_TEST_LIBRBD_MOCK_JOURNAL_POLICY_H #define CEPH_TEST_LIBRBD_MOCK_JOURNAL_POLICY_H #include "librbd/journal/Policy.h" #include "gmock/gmock.h" namespace librbd { struct MockJournalPolicy : public journal::Policy { MOCK_CONST_METHOD0(append_disabled, bool()); MOCK_CONST_METHOD0(journal_disabled, bool()); MOCK_METHOD1(allocate_tag_on_lock, void(Context*)); }; } // namespace librbd #endif // CEPH_TEST_LIBRBD_MOCK_JOURNAL_POLICY_H
554
23.130435
70
h
null
ceph-main/src/test/librbd/mock/MockObjectMap.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_TEST_LIBRBD_MOCK_OBJECT_MAP_H
#define CEPH_TEST_LIBRBD_MOCK_OBJECT_MAP_H

#include "librbd/Utils.h"
#include "gmock/gmock.h"

namespace librbd {

// gmock double for the object map.  The templated aio_update() helpers
// below mirror the real ObjectMap API and funnel every variant into the
// single mocked 8-argument aio_update() overload.
// NOTE(review): boost::optional and ZTracer come in transitively via
// librbd/Utils.h includes -- confirm if this header is ever used standalone.
struct MockObjectMap {
  MOCK_METHOD1(at, uint8_t(uint64_t));
  // operator[] delegates to the mocked at() so tests can set expectations.
  uint8_t operator[](uint64_t object_no) {
    return at(object_no);
  }

  MOCK_CONST_METHOD0(size, uint64_t());

  MOCK_METHOD1(open, void(Context *on_finish));
  MOCK_METHOD1(close, void(Context *on_finish));

  MOCK_METHOD3(aio_resize, void(uint64_t new_size,
                                uint8_t default_object_state,
                                Context *on_finish));

  // Ref-counting no-ops to satisfy intrusive-pointer style callers.
  void get() {}
  void put() {}

  // Single-object convenience overload: updates [start_object_no,
  // start_object_no + 1).
  template <typename T, void(T::*MF)(int) = &T::complete>
  bool aio_update(uint64_t snap_id, uint64_t start_object_no,
                  uint8_t new_state,
                  const boost::optional<uint8_t> &current_state,
                  const ZTracer::Trace &parent_trace,
                  bool ignore_enoent, T *callback_object) {
    return aio_update<T, MF>(snap_id, start_object_no, start_object_no + 1,
                             new_state, current_state, parent_trace,
                             ignore_enoent, callback_object);
  }

  // Range overload: wraps the callback object in a Context and forwards
  // to the mocked aio_update().  The Context is freed here when the mock
  // reports no update was queued (returns false), matching the real
  // ObjectMap contract that the callback only fires on a queued update.
  template <typename T, void(T::*MF)(int) = &T::complete>
  bool aio_update(uint64_t snap_id, uint64_t start_object_no,
                  uint64_t end_object_no, uint8_t new_state,
                  const boost::optional<uint8_t> &current_state,
                  const ZTracer::Trace &parent_trace,
                  bool ignore_enoent, T *callback_object) {
    auto ctx = util::create_context_callback<T, MF>(callback_object);
    bool updated = aio_update(snap_id, start_object_no, end_object_no,
                              new_state, current_state, parent_trace,
                              ignore_enoent, ctx);
    if (!updated) {
      delete ctx;
    }
    return updated;
  }
  MOCK_METHOD8(aio_update, bool(uint64_t snap_id, uint64_t start_object_no,
                                uint64_t end_object_no, uint8_t new_state,
                                const boost::optional<uint8_t> &current_state,
                                const ZTracer::Trace &parent_trace,
                                bool ignore_enoent, Context *on_finish));

  MOCK_METHOD2(snapshot_add, void(uint64_t snap_id, Context *on_finish));
  MOCK_METHOD2(snapshot_remove, void(uint64_t snap_id, Context *on_finish));

  MOCK_METHOD2(rollback, void(uint64_t snap_id, Context *on_finish));

  MOCK_CONST_METHOD1(object_may_exist, bool(uint64_t));
};

} // namespace librbd

#endif // CEPH_TEST_LIBRBD_MOCK_OBJECT_MAP_H
2,646
36.28169
80
h
null
ceph-main/src/test/librbd/mock/MockOperations.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_TEST_LIBRBD_MOCK_OPERATIONS_H
#define CEPH_TEST_LIBRBD_MOCK_OPERATIONS_H

#include "cls/rbd/cls_rbd_types.h"
#include "include/int_types.h"
#include "include/rbd/librbd.hpp"
#include "gmock/gmock.h"
#include <string>

class Context;

namespace librbd {

// gmock double for the image maintenance-operations facade: resize,
// flatten, rename, snapshot lifecycle, feature and metadata updates.
// All methods complete via the supplied Context.
struct MockOperations {
  MOCK_METHOD2(execute_flatten, void(ProgressContext &prog_ctx,
                                     Context *on_finish));
  MOCK_METHOD2(execute_rebuild_object_map, void(ProgressContext &prog_ctx,
                                                Context *on_finish));
  MOCK_METHOD2(execute_rename, void(const std::string &dstname,
                                    Context *on_finish));
  MOCK_METHOD5(execute_resize, void(uint64_t size, bool allow_shrink,
                                    ProgressContext &prog_ctx,
                                    Context *on_finish,
                                    uint64_t journal_op_tid));

  // snap_create/snap_remove are the public entry points; the
  // execute_snap_* variants model the journal-replay paths.
  MOCK_METHOD5(snap_create, void(const cls::rbd::SnapshotNamespace &snapshot_namespace,
                                 const std::string &snap_name,
                                 uint64_t flags,
                                 ProgressContext &prog_ctx,
                                 Context *on_finish));
  MOCK_METHOD6(execute_snap_create,
               void(const cls::rbd::SnapshotNamespace &snapshot_namespace,
                    const std::string &snap_name,
                    Context *on_finish,
                    uint64_t journal_op_tid,
                    uint64_t flags,
                    ProgressContext &prog_ctx));
  MOCK_METHOD3(snap_remove, void(const cls::rbd::SnapshotNamespace &snap_namespace,
                                 const std::string &snap_name,
                                 Context *on_finish));
  MOCK_METHOD3(execute_snap_remove,
               void(const cls::rbd::SnapshotNamespace &snap_namespace,
                    const std::string &snap_name,
                    Context *on_finish));
  MOCK_METHOD3(execute_snap_rename, void(uint64_t src_snap_id,
                                         const std::string &snap_name,
                                         Context *on_finish));
  MOCK_METHOD4(execute_snap_rollback,
               void(const cls::rbd::SnapshotNamespace &snap_namespace,
                    const std::string &snap_name,
                    ProgressContext &prog_ctx,
                    Context *on_finish));
  MOCK_METHOD3(execute_snap_protect,
               void(const cls::rbd::SnapshotNamespace &snap_namespace,
                    const std::string &snap_name,
                    Context *on_finish));
  MOCK_METHOD3(execute_snap_unprotect,
               void(const cls::rbd::SnapshotNamespace &snap_namespace,
                    const std::string &snap_name,
                    Context *on_finish));
  MOCK_METHOD2(execute_snap_set_limit, void(uint64_t limit,
                                            Context *on_finish));
  MOCK_METHOD4(execute_update_features, void(uint64_t features, bool enabled,
                                             Context *on_finish,
                                             uint64_t journal_op_tid));
  MOCK_METHOD3(execute_metadata_set, void(const std::string &key,
                                          const std::string &value,
                                          Context *on_finish));
  MOCK_METHOD2(execute_metadata_remove, void(const std::string &key,
                                             Context *on_finish));
};

} // namespace librbd

#endif // CEPH_TEST_LIBRBD_MOCK_OPERATIONS_H
3,640
48.876712
95
h
null
ceph-main/src/test/librbd/mock/MockPluginRegistry.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_TEST_LIBRBD_MOCK_PLUGIN_REGISTRY_H #define CEPH_TEST_LIBRBD_MOCK_PLUGIN_REGISTRY_H #include <gmock/gmock.h> class Context; namespace librbd { struct MockPluginRegistry{ MOCK_METHOD2(init, void(const std::string&, Context*)); MOCK_METHOD1(acquired_exclusive_lock, void(Context*)); MOCK_METHOD1(prerelease_exclusive_lock, void(Context*)); }; } // namespace librbd #endif // CEPH_TEST_LIBRBD_MOCK_PLUGIN_REGISTRY_H
536
23.409091
70
h
null
ceph-main/src/test/librbd/mock/MockReadahead.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_TEST_LIBRBD_MOCK_READAHEAD_H #define CEPH_TEST_LIBRBD_MOCK_READAHEAD_H #include "include/int_types.h" #include "gmock/gmock.h" class Context; namespace librbd { struct MockReadahead { MOCK_METHOD1(set_max_readahead_size, void(uint64_t)); MOCK_METHOD1(wait_for_pending, void(Context *)); }; } // namespace librbd #endif // CEPH_TEST_LIBRBD_MOCK_READAHEAD_H
478
20.772727
70
h
null
ceph-main/src/test/librbd/mock/MockSafeTimer.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_MOCK_SAFE_TIMER_H #define CEPH_MOCK_SAFE_TIMER_H #include <gmock/gmock.h> struct Context; struct MockSafeTimer { virtual ~MockSafeTimer() { } MOCK_METHOD2(add_event_after, Context*(double, Context *)); MOCK_METHOD2(add_event_at, Context*(ceph::real_clock::time_point, Context *)); MOCK_METHOD1(cancel_event, bool(Context *)); }; #endif // CEPH_MOCK_SAFE_TIMER_H
489
22.333333
80
h
null
ceph-main/src/test/librbd/mock/cache/MockImageCache.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_TEST_LIBRBD_CACHE_MOCK_IMAGE_CACHE_H #define CEPH_TEST_LIBRBD_CACHE_MOCK_IMAGE_CACHE_H #include "gmock/gmock.h" #include "librbd/io/Types.h" #include <vector> namespace librbd { namespace cache { struct MockImageCache { typedef std::vector<std::pair<uint64_t,uint64_t> > Extents; MOCK_METHOD4(aio_read_mock, void(const Extents &, ceph::bufferlist*, int, Context *)); void aio_read(Extents&& image_extents, ceph::bufferlist* bl, int fadvise_flags, Context *on_finish) { aio_read_mock(image_extents, bl, fadvise_flags, on_finish); } MOCK_METHOD4(aio_write_mock, void(const Extents &, const ceph::bufferlist &, int, Context *)); void aio_write(Extents&& image_extents, ceph::bufferlist&& bl, int fadvise_flags, Context *on_finish) { aio_write_mock(image_extents, bl, fadvise_flags, on_finish); } MOCK_METHOD4(aio_discard, void(uint64_t, uint64_t, uint32_t, Context *)); MOCK_METHOD1(aio_flush, void(Context *)); MOCK_METHOD2(aio_flush, void(librbd::io::FlushSource, Context *)); MOCK_METHOD5(aio_writesame_mock, void(uint64_t, uint64_t, ceph::bufferlist& bl, int, Context *)); void aio_writesame(uint64_t off, uint64_t len, ceph::bufferlist&& bl, int fadvise_flags, Context *on_finish) { aio_writesame_mock(off, len, bl, fadvise_flags, on_finish); } MOCK_METHOD6(aio_compare_and_write_mock, void(const Extents &, const ceph::bufferlist &, const ceph::bufferlist &, uint64_t *, int, Context *)); void aio_compare_and_write(Extents&& image_extents, ceph::bufferlist&& cmp_bl, ceph::bufferlist&& bl, uint64_t *mismatch_offset, int fadvise_flags, Context *on_finish) { aio_compare_and_write_mock(image_extents, cmp_bl, bl, mismatch_offset, fadvise_flags, on_finish); } }; } // namespace cache } // namespace librbd #endif // CEPH_TEST_LIBRBD_CACHE_MOCK_IMAGE_CACHE_H
2,329
38.491525
81
h
null
ceph-main/src/test/librbd/mock/crypto/MockCryptoInterface.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_TEST_LIBRBD_MOCK_CRYPTO_MOCK_CRYPTO_INTERFACE_H #define CEPH_TEST_LIBRBD_MOCK_CRYPTO_MOCK_CRYPTO_INTERFACE_H #include "include/buffer.h" #include "gmock/gmock.h" #include "librbd/crypto/CryptoInterface.h" namespace librbd { namespace crypto { struct MockCryptoInterface : CryptoInterface { static const uint64_t BLOCK_SIZE = 4096; static const uint64_t DATA_OFFSET = 4 * 1024 * 1024; MOCK_METHOD2(encrypt, int(ceph::bufferlist*, uint64_t)); MOCK_METHOD2(decrypt, int(ceph::bufferlist*, uint64_t)); MOCK_CONST_METHOD0(get_key, const unsigned char*()); MOCK_CONST_METHOD0(get_key_length, int()); uint64_t get_block_size() const override { return BLOCK_SIZE; } uint64_t get_data_offset() const override { return DATA_OFFSET; } }; } // namespace crypto } // namespace librbd #endif // CEPH_TEST_LIBRBD_MOCK_CRYPTO_MOCK_CRYPTO_INTERFACE_H
983
25.594595
70
h
null
ceph-main/src/test/librbd/mock/crypto/MockDataCryptor.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_TEST_LIBRBD_MOCK_CRYPTO_MOCK_DATA_CRYPTOR_H #define CEPH_TEST_LIBRBD_MOCK_CRYPTO_MOCK_DATA_CRYPTOR_H #include "gmock/gmock.h" #include "librbd/crypto/DataCryptor.h" namespace librbd { namespace crypto { struct MockCryptoContext {}; class MockDataCryptor : public DataCryptor<MockCryptoContext> { public: uint32_t block_size = 16; uint32_t iv_size = 16; uint32_t get_block_size() const override { return block_size; } uint32_t get_iv_size() const override { return iv_size; } MOCK_METHOD1(get_context, MockCryptoContext*(CipherMode)); MOCK_METHOD2(return_context, void(MockCryptoContext*, CipherMode)); MOCK_CONST_METHOD3(init_context, int(MockCryptoContext*, const unsigned char*, uint32_t)); MOCK_CONST_METHOD4(update_context, int(MockCryptoContext*, const unsigned char*, unsigned char*, uint32_t)); MOCK_CONST_METHOD0(get_key, const unsigned char*()); MOCK_CONST_METHOD0(get_key_length, int()); }; } // namespace crypto } // namespace librbd #endif // CEPH_TEST_LIBRBD_MOCK_CRYPTO_MOCK_DATA_CRYPTOR_H
1,279
28.090909
78
h
null
ceph-main/src/test/librbd/mock/crypto/MockEncryptionFormat.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_TEST_LIBRBD_MOCK_CRYPTO_MOCK_ENCRYPTION_FORMAT_H #define CEPH_TEST_LIBRBD_MOCK_CRYPTO_MOCK_ENCRYPTION_FORMAT_H #include "gmock/gmock.h" #include "librbd/crypto/EncryptionFormat.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/librbd/mock/crypto/MockCryptoInterface.h" namespace librbd { namespace crypto { struct MockEncryptionFormat { MOCK_CONST_METHOD0(clone, std::unique_ptr<MockEncryptionFormat>()); MOCK_METHOD2(format, void(MockImageCtx*, Context*)); MOCK_METHOD3(load, void(MockImageCtx*, std::string*, Context*)); MOCK_METHOD2(flatten, void(MockImageCtx*, Context*)); MOCK_METHOD0(get_crypto, MockCryptoInterface*()); }; } // namespace crypto } // namespace librbd #endif // CEPH_TEST_LIBRBD_MOCK_CRYPTO_MOCK_ENCRYPTION_FORMAT_H
876
31.481481
70
h
null
ceph-main/src/test/librbd/mock/exclusive_lock/MockPolicy.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_TEST_LIBRBD_MOCK_EXCLUSIVE_LOCK_POLICY_H #define CEPH_TEST_LIBRBD_MOCK_EXCLUSIVE_LOCK_POLICY_H #include "librbd/exclusive_lock/Policy.h" #include <gmock/gmock.h> namespace librbd { namespace exclusive_lock { struct MockPolicy : public Policy { MOCK_METHOD0(may_auto_request_lock, bool()); MOCK_METHOD1(lock_requested, int(bool)); MOCK_METHOD1(accept_blocked_request, bool(OperationRequestType)); }; } // namespace exclusive_lock } // librbd #endif
572
22.875
70
h
null
ceph-main/src/test/librbd/mock/io/MockImageDispatch.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_TEST_LIBRBD_MOCK_IO_IMAGE_DISPATCH_H #define CEPH_TEST_LIBRBD_MOCK_IO_IMAGE_DISPATCH_H #include "gmock/gmock.h" #include "include/Context.h" #include "librbd/io/ImageDispatchInterface.h" #include "librbd/io/Types.h" class Context; namespace librbd { namespace io { struct MockImageDispatch : public ImageDispatchInterface { public: MOCK_CONST_METHOD0(get_dispatch_layer, ImageDispatchLayer()); MOCK_METHOD1(shut_down, void(Context*)); bool read( AioCompletion* aio_comp, Extents &&image_extents, ReadResult &&read_result, IOContext io_context, int op_flags, int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override { return false; } bool write( AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override { return false; } bool discard( AioCompletion* aio_comp, Extents &&image_extents, uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override { return false; } bool write_same( AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override { return false; } bool compare_and_write( AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset, int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid, 
std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override { return false; } bool flush( AioCompletion* aio_comp, FlushSource flush_source, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override { return false; } bool list_snaps( AioCompletion* aio_comp, Extents&& image_extents, SnapIds&& snap_ids, int list_snaps_flags, SnapshotDelta* snapshot_delta, const ZTracer::Trace &parent_trace, uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override { return false; } bool invalidate_cache(Context* on_finish) override { return false; } }; } // namespace io } // namespace librbd #endif // CEPH_TEST_LIBRBD_MOCK_IO_IMAGE_DISPATCH_H
3,209
31.424242
77
h
null
ceph-main/src/test/librbd/mock/io/MockImageDispatcher.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_TEST_LIBRBD_MOCK_IO_IMAGE_DISPATCHER_H #define CEPH_TEST_LIBRBD_MOCK_IO_IMAGE_DISPATCHER_H #include "gmock/gmock.h" #include "include/Context.h" #include "librbd/io/ImageDispatcher.h" #include "librbd/io/ImageDispatchSpec.h" #include "librbd/io/Types.h" class Context; namespace librbd { namespace io { struct ImageDispatchInterface; struct MockImageDispatcher : public ImageDispatcherInterface { public: MOCK_METHOD1(shut_down, void(Context*)); MOCK_METHOD1(register_dispatch, void(ImageDispatchInterface*)); MOCK_METHOD1(exists, bool(ImageDispatchLayer)); MOCK_METHOD2(shut_down_dispatch, void(ImageDispatchLayer, Context*)); MOCK_METHOD1(invalidate_cache, void(Context *)); MOCK_METHOD1(send, void(ImageDispatchSpec*)); MOCK_METHOD3(finish, void(int r, ImageDispatchLayer, uint64_t)); MOCK_METHOD1(apply_qos_schedule_tick_min, void(uint64_t)); MOCK_METHOD4(apply_qos_limit, void(uint64_t, uint64_t, uint64_t, uint64_t)); MOCK_METHOD1(apply_qos_exclude_ops, void(uint64_t)); MOCK_CONST_METHOD0(writes_blocked, bool()); MOCK_METHOD0(block_writes, int()); MOCK_METHOD1(block_writes, void(Context*)); MOCK_METHOD0(unblock_writes, void()); MOCK_METHOD1(wait_on_writes_unblocked, void(Context*)); MOCK_METHOD2(remap_to_physical, void(Extents&, ImageArea)); MOCK_METHOD1(remap_to_logical, ImageArea(Extents&)); }; } // namespace io } // namespace librbd #endif // CEPH_TEST_LIBRBD_MOCK_IO_IMAGE_DISPATCHER_H
1,563
29.666667
78
h
null
ceph-main/src/test/librbd/mock/io/MockObjectDispatch.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_TEST_LIBRBD_MOCK_IO_OBJECT_DISPATCH_H #define CEPH_TEST_LIBRBD_MOCK_IO_OBJECT_DISPATCH_H #include "gmock/gmock.h" #include "common/ceph_mutex.h" #include "librbd/io/ObjectDispatchInterface.h" #include "librbd/io/Types.h" class Context; namespace librbd { namespace io { struct MockObjectDispatch : public ObjectDispatchInterface { public: ceph::shared_mutex lock = ceph::make_shared_mutex("MockObjectDispatch::lock"); MockObjectDispatch() {} MOCK_CONST_METHOD0(get_dispatch_layer, ObjectDispatchLayer()); MOCK_METHOD1(shut_down, void(Context*)); MOCK_METHOD6(execute_read, bool(uint64_t, ReadExtents*, IOContext io_context, uint64_t*, DispatchResult*, Context*)); bool read( uint64_t object_no, ReadExtents* extents, IOContext io_context, int op_flags, int read_flags, const ZTracer::Trace& parent_trace, uint64_t* version, int* dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { return execute_read(object_no, extents, io_context, version, dispatch_result, on_dispatched); } MOCK_METHOD9(execute_discard, bool(uint64_t, uint64_t, uint64_t, IOContext, int, int*, uint64_t*, DispatchResult*, Context*)); bool discard( uint64_t object_no, uint64_t object_off, uint64_t object_len, IOContext io_context, int discard_flags, const ZTracer::Trace &parent_trace, int* dispatch_flags, uint64_t* journal_tid, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) { return execute_discard(object_no, object_off, object_len, io_context, discard_flags, dispatch_flags, journal_tid, dispatch_result, on_dispatched); } MOCK_METHOD10(execute_write, bool(uint64_t, uint64_t, const ceph::bufferlist&, IOContext, int, std::optional<uint64_t>, int*, uint64_t*, DispatchResult*, Context *)); bool write( uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data, IOContext io_context, int op_flags, int write_flags, 
std::optional<uint64_t> assert_version, const ZTracer::Trace &parent_trace, int* dispatch_flags, uint64_t* journal_tid, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override { return execute_write(object_no, object_off, data, io_context, write_flags, assert_version, dispatch_flags, journal_tid, dispatch_result, on_dispatched); } MOCK_METHOD10(execute_write_same, bool(uint64_t, uint64_t, uint64_t, const LightweightBufferExtents&, const ceph::bufferlist&, IOContext, int*, uint64_t*, DispatchResult*, Context *)); bool write_same( uint64_t object_no, uint64_t object_off, uint64_t object_len, LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data, IOContext io_context, int op_flags, const ZTracer::Trace &parent_trace, int* dispatch_flags, uint64_t* journal_tid, DispatchResult* dispatch_result, Context* *on_finish, Context* on_dispatched) override { return execute_write_same(object_no, object_off, object_len, buffer_extents, data, io_context, dispatch_flags, journal_tid, dispatch_result, on_dispatched); } MOCK_METHOD9(execute_compare_and_write, bool(uint64_t, uint64_t, const ceph::bufferlist&, const ceph::bufferlist&, uint64_t*, int*, uint64_t*, DispatchResult*, Context *)); bool compare_and_write( uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data, ceph::bufferlist&& write_data, IOContext io_context, int op_flags, const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset, int* dispatch_flags, uint64_t* journal_tid, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override { return execute_compare_and_write(object_no, object_off, cmp_data, write_data, mismatch_offset, dispatch_flags, journal_tid, dispatch_result, on_dispatched); } MOCK_METHOD4(execute_flush, bool(FlushSource, uint64_t*, DispatchResult*, Context*)); bool flush(FlushSource flush_source, const ZTracer::Trace &parent_trace, uint64_t* journal_tid, DispatchResult* dispatch_result, Context** on_finish, Context* 
on_dispatched) { return execute_flush(flush_source, journal_tid, dispatch_result, on_dispatched); } MOCK_METHOD7(execute_list_snaps, bool(uint64_t, const Extents&, const SnapIds&, int, SnapshotDelta*, DispatchResult*, Context*)); bool list_snaps( uint64_t object_no, io::Extents&& extents, SnapIds&& snap_ids, int list_snaps_flags, const ZTracer::Trace &parent_trace, SnapshotDelta* snapshot_delta, int* object_dispatch_flags, DispatchResult* dispatch_result, Context** on_finish, Context* on_dispatched) override { return execute_list_snaps(object_no, extents, snap_ids, list_snaps_flags, snapshot_delta, dispatch_result, on_dispatched); } MOCK_METHOD1(invalidate_cache, bool(Context*)); MOCK_METHOD1(reset_existence_cache, bool(Context*)); MOCK_METHOD5(extent_overwritten, void(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t)); MOCK_METHOD2(prepare_copyup, int(uint64_t, SnapshotSparseBufferlist*)); }; } // namespace io } // namespace librbd #endif // CEPH_TEST_LIBRBD_MOCK_IO_OBJECT_DISPATCH_H
6,074
43.021739
80
h
null
ceph-main/src/test/librbd/mock/io/MockObjectDispatcher.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_TEST_LIBRBD_MOCK_IO_OBJECT_DISPATCHER_H #define CEPH_TEST_LIBRBD_MOCK_IO_OBJECT_DISPATCHER_H #include "gmock/gmock.h" #include "include/Context.h" #include "librbd/io/ObjectDispatcher.h" #include "librbd/io/ObjectDispatchSpec.h" #include "librbd/io/Types.h" class Context; namespace librbd { namespace io { struct ObjectDispatchInterface; struct MockObjectDispatcher : public ObjectDispatcherInterface { public: MOCK_METHOD1(shut_down, void(Context*)); MOCK_METHOD1(register_dispatch, void(ObjectDispatchInterface*)); MOCK_METHOD1(exists, bool(ObjectDispatchLayer)); MOCK_METHOD2(shut_down_dispatch, void(ObjectDispatchLayer, Context*)); MOCK_METHOD2(flush, void(FlushSource, Context*)); MOCK_METHOD1(invalidate_cache, void(Context*)); MOCK_METHOD1(reset_existence_cache, void(Context*)); MOCK_METHOD5(extent_overwritten, void(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t)); MOCK_METHOD2(prepare_copyup, int(uint64_t, SnapshotSparseBufferlist*)); MOCK_METHOD1(send, void(ObjectDispatchSpec*)); }; } // namespace io } // namespace librbd #endif // CEPH_TEST_LIBRBD_MOCK_IO_OBJECT_DISPATCHER_H
1,281
27.488889
79
h
null
ceph-main/src/test/librbd/mock/io/MockQosImageDispatch.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_TEST_LIBRBD_MOCK_IO_QOS_IMAGE_DISPATCH_H #define CEPH_TEST_LIBRBD_MOCK_IO_QOS_IMAGE_DISPATCH_H #include "gmock/gmock.h" #include "librbd/io/Types.h" #include <atomic> struct Context; namespace librbd { namespace io { struct MockQosImageDispatch { MOCK_METHOD4(needs_throttle, bool(bool, const Extents&, std::atomic<uint32_t>*, Context*)); }; } // namespace io } // namespace librbd #endif // CEPH_TEST_LIBRBD_MOCK_IO_QOS_IMAGE_DISPATCH_H
594
22.8
71
h
null
ceph-main/src/test/librbd/mock/migration/MockSnapshotInterface.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_TEST_LIBRBD_MOCK_MIGRATION_MOCK_SNAPSHOT_INTERFACE_H #define CEPH_TEST_LIBRBD_MOCK_MIGRATION_MOCK_SNAPSHOT_INTERFACE_H #include "include/buffer.h" #include "gmock/gmock.h" #include "librbd/io/AioCompletion.h" #include "librbd/io/ReadResult.h" #include "librbd/io/Types.h" #include "librbd/migration/SnapshotInterface.h" namespace librbd { namespace migration { struct MockSnapshotInterface : public SnapshotInterface { MOCK_METHOD2(open, void(SnapshotInterface*, Context*)); MOCK_METHOD1(close, void(Context*)); MOCK_CONST_METHOD0(get_snap_info, const SnapInfo&()); MOCK_METHOD3(read, void(io::AioCompletion*, const io::Extents&, io::ReadResult&)); void read(io::AioCompletion* aio_comp, io::Extents&& image_extents, io::ReadResult&& read_result, int op_flags, int read_flags, const ZTracer::Trace &parent_trace) override { read(aio_comp, image_extents, read_result); } MOCK_METHOD3(list_snap, void(const io::Extents&, io::SparseExtents*, Context*)); void list_snap(io::Extents&& image_extents, int list_snaps_flags, io::SparseExtents* sparse_extents, const ZTracer::Trace &parent_trace, Context* on_finish) override { list_snap(image_extents, sparse_extents, on_finish); } }; } // namespace migration } // namespace librbd #endif // CEPH_TEST_LIBRBD_MOCK_MIGRATION_MOCK_SNAPSHOT_INTERFACE_H
1,570
33.911111
71
h
null
ceph-main/src/test/librbd/mock/migration/MockStreamInterface.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_TEST_LIBRBD_MOCK_MIGRATION_MOCK_STREAM_INTERFACE_H #define CEPH_TEST_LIBRBD_MOCK_MIGRATION_MOCK_STREAM_INTERFACE_H #include "include/buffer.h" #include "gmock/gmock.h" #include "librbd/migration/StreamInterface.h" namespace librbd { namespace migration { struct MockStreamInterface : public StreamInterface { MOCK_METHOD1(open, void(Context*)); MOCK_METHOD1(close, void(Context*)); MOCK_METHOD2(get_size, void(uint64_t*, Context*)); MOCK_METHOD3(read, void(const io::Extents&, bufferlist*, Context*)); void read(io::Extents&& byte_extents, bufferlist* bl, Context* on_finish) { read(byte_extents, bl, on_finish); } }; } // namespace migration } // namespace librbd #endif // CEPH_TEST_LIBRBD_MOCK_MIGRATION_MOCK_STREAM_INTERFACE_H
866
27.9
77
h
null
ceph-main/src/test/librbd/object_map/test_mock_DiffRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/librbd/test_mock_fixture.h" #include "test/librbd/test_support.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "include/rbd_types.h" #include "common/ceph_mutex.h" #include "librbd/object_map/DiffRequest.h" #include "gtest/gtest.h" #include "gmock/gmock.h" namespace librbd { namespace { struct MockTestImageCtx : public MockImageCtx { MockTestImageCtx(ImageCtx &image_ctx) : MockImageCtx(image_ctx) { } }; } // anonymous namespace } // namespace librbd #include "librbd/object_map/DiffRequest.cc" using ::testing::_; using ::testing::Invoke; using ::testing::InSequence; using ::testing::StrEq; using ::testing::WithArg; namespace librbd { namespace object_map { class TestMockObjectMapDiffRequest : public TestMockFixture { public: typedef DiffRequest<MockTestImageCtx> MockDiffRequest; void SetUp() override { TestMockFixture::SetUp(); ASSERT_EQ(0, open_image(m_image_name, &m_image_ctx)); } void expect_get_flags(MockTestImageCtx& mock_image_ctx, uint64_t snap_id, int32_t flags, int r) { EXPECT_CALL(mock_image_ctx, get_flags(snap_id, _)) .WillOnce(WithArg<1>(Invoke([flags, r](uint64_t *out_flags) { *out_flags = flags; return r; }))); } template <typename Lambda> void expect_load_map(MockTestImageCtx& mock_image_ctx, uint64_t snap_id, const BitVector<2>& object_map, int r, Lambda&& lambda) { std::string snap_oid(ObjectMap<>::object_map_name(mock_image_ctx.id, snap_id)); EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), exec(snap_oid, _, StrEq("rbd"), StrEq("object_map_load"), _, _, _, _)) .WillOnce(WithArg<5>(Invoke([object_map, r, lambda=std::move(lambda)] (bufferlist* out_bl) { lambda(); auto out_object_map{object_map}; out_object_map.set_crc_enabled(false); encode(out_object_map, *out_bl); return r; }))); } void expect_load_map(MockTestImageCtx& mock_image_ctx, uint64_t snap_id, const BitVector<2>& object_map, int r) { 
expect_load_map(mock_image_ctx, snap_id, object_map, r, [](){}); } librbd::ImageCtx* m_image_ctx = nullptr; BitVector<2> m_object_diff_state; }; TEST_F(TestMockObjectMapDiffRequest, InvalidStartSnap) { MockTestImageCtx mock_image_ctx(*m_image_ctx); InSequence seq; C_SaferCond ctx; auto req = new MockDiffRequest(&mock_image_ctx, CEPH_NOSNAP, 0, &m_object_diff_state, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockObjectMapDiffRequest, StartEndSnapEqual) { MockTestImageCtx mock_image_ctx(*m_image_ctx); InSequence seq; C_SaferCond ctx; auto req = new MockDiffRequest(&mock_image_ctx, 1, 1, &m_object_diff_state, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_EQ(0U, m_object_diff_state.size()); } TEST_F(TestMockObjectMapDiffRequest, FastDiffDisabled) { // negative test -- object-map implicitly enables fast-diff REQUIRE(!is_feature_enabled(RBD_FEATURE_OBJECT_MAP)); MockTestImageCtx mock_image_ctx(*m_image_ctx); InSequence seq; C_SaferCond ctx; auto req = new MockDiffRequest(&mock_image_ctx, 0, CEPH_NOSNAP, &m_object_diff_state, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockObjectMapDiffRequest, FastDiffInvalid) { REQUIRE_FEATURE(RBD_FEATURE_FAST_DIFF); MockTestImageCtx mock_image_ctx(*m_image_ctx); mock_image_ctx.snap_info = { {1U, {"snap1", {cls::rbd::UserSnapshotNamespace{}}, {}, {}, {}, {}, {}}} }; InSequence seq; expect_get_flags(mock_image_ctx, 1U, RBD_FLAG_FAST_DIFF_INVALID, 0); C_SaferCond ctx; auto req = new MockDiffRequest(&mock_image_ctx, 0, CEPH_NOSNAP, &m_object_diff_state, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockObjectMapDiffRequest, FullDelta) { REQUIRE_FEATURE(RBD_FEATURE_FAST_DIFF); uint32_t object_count = 5; m_image_ctx->size = object_count * (1 << m_image_ctx->order); MockTestImageCtx mock_image_ctx(*m_image_ctx); mock_image_ctx.snap_info = { {1U, {"snap1", {cls::rbd::UserSnapshotNamespace{}}, mock_image_ctx.size, {}, {}, {}, {}}}, {2U, {"snap2", 
{cls::rbd::UserSnapshotNamespace{}}, mock_image_ctx.size, {}, {}, {}, {}}} }; InSequence seq; expect_get_flags(mock_image_ctx, 1U, 0, 0); BitVector<2> object_map_1; object_map_1.resize(object_count); object_map_1[1] = OBJECT_EXISTS_CLEAN; expect_load_map(mock_image_ctx, 1U, object_map_1, 0); expect_get_flags(mock_image_ctx, 2U, 0, 0); BitVector<2> object_map_2; object_map_2.resize(object_count); object_map_2[1] = OBJECT_EXISTS_CLEAN; object_map_2[2] = OBJECT_EXISTS; object_map_2[3] = OBJECT_EXISTS; expect_load_map(mock_image_ctx, 2U, object_map_2, 0); expect_get_flags(mock_image_ctx, CEPH_NOSNAP, 0, 0); BitVector<2> object_map_head; object_map_head.resize(object_count); object_map_head[1] = OBJECT_EXISTS_CLEAN; object_map_head[2] = OBJECT_EXISTS_CLEAN; expect_load_map(mock_image_ctx, CEPH_NOSNAP, object_map_head, 0); C_SaferCond ctx; auto req = new MockDiffRequest(&mock_image_ctx, 0, CEPH_NOSNAP, &m_object_diff_state, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); BitVector<2> expected_diff_state; expected_diff_state.resize(object_count); expected_diff_state[1] = DIFF_STATE_DATA_UPDATED; expected_diff_state[2] = DIFF_STATE_DATA_UPDATED; expected_diff_state[3] = DIFF_STATE_HOLE_UPDATED; ASSERT_EQ(expected_diff_state, m_object_diff_state); } TEST_F(TestMockObjectMapDiffRequest, IntermediateDelta) { REQUIRE_FEATURE(RBD_FEATURE_FAST_DIFF); uint32_t object_count = 5; m_image_ctx->size = object_count * (1 << m_image_ctx->order); MockTestImageCtx mock_image_ctx(*m_image_ctx); mock_image_ctx.snap_info = { {1U, {"snap1", {cls::rbd::UserSnapshotNamespace{}}, mock_image_ctx.size, {}, {}, {}, {}}}, {2U, {"snap2", {cls::rbd::UserSnapshotNamespace{}}, mock_image_ctx.size, {}, {}, {}, {}}} }; InSequence seq; expect_get_flags(mock_image_ctx, 1U, 0, 0); BitVector<2> object_map_1; object_map_1.resize(object_count); object_map_1[1] = OBJECT_EXISTS; object_map_1[2] = OBJECT_EXISTS_CLEAN; expect_load_map(mock_image_ctx, 1U, object_map_1, 0); expect_get_flags(mock_image_ctx, 2U, 0, 0); 
BitVector<2> object_map_2; object_map_2.resize(object_count); object_map_2[1] = OBJECT_EXISTS_CLEAN; object_map_2[2] = OBJECT_EXISTS; object_map_2[3] = OBJECT_EXISTS; expect_load_map(mock_image_ctx, 2U, object_map_2, 0); C_SaferCond ctx; auto req = new MockDiffRequest(&mock_image_ctx, 1, 2, &m_object_diff_state, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); BitVector<2> expected_diff_state; expected_diff_state.resize(object_count); expected_diff_state[1] = DIFF_STATE_DATA; expected_diff_state[2] = DIFF_STATE_DATA_UPDATED; expected_diff_state[3] = DIFF_STATE_DATA_UPDATED; ASSERT_EQ(expected_diff_state, m_object_diff_state); } TEST_F(TestMockObjectMapDiffRequest, EndDelta) { REQUIRE_FEATURE(RBD_FEATURE_FAST_DIFF); uint32_t object_count = 5; m_image_ctx->size = object_count * (1 << m_image_ctx->order); MockTestImageCtx mock_image_ctx(*m_image_ctx); mock_image_ctx.snap_info = { {1U, {"snap1", {cls::rbd::UserSnapshotNamespace{}}, mock_image_ctx.size, {}, {}, {}, {}}}, {2U, {"snap2", {cls::rbd::UserSnapshotNamespace{}}, mock_image_ctx.size, {}, {}, {}, {}}} }; InSequence seq; expect_get_flags(mock_image_ctx, 2U, 0, 0); BitVector<2> object_map_2; object_map_2.resize(object_count); object_map_2[1] = OBJECT_EXISTS_CLEAN; object_map_2[2] = OBJECT_EXISTS; object_map_2[3] = OBJECT_EXISTS; expect_load_map(mock_image_ctx, 2U, object_map_2, 0); expect_get_flags(mock_image_ctx, CEPH_NOSNAP, 0, 0); BitVector<2> object_map_head; object_map_head.resize(object_count); object_map_head[1] = OBJECT_EXISTS_CLEAN; object_map_head[2] = OBJECT_EXISTS_CLEAN; expect_load_map(mock_image_ctx, CEPH_NOSNAP, object_map_head, 0); C_SaferCond ctx; auto req = new MockDiffRequest(&mock_image_ctx, 2, CEPH_NOSNAP, &m_object_diff_state, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); BitVector<2> expected_diff_state; expected_diff_state.resize(object_count); expected_diff_state[1] = DIFF_STATE_DATA; expected_diff_state[2] = DIFF_STATE_DATA; expected_diff_state[3] = DIFF_STATE_HOLE_UPDATED; 
ASSERT_EQ(expected_diff_state, m_object_diff_state); } TEST_F(TestMockObjectMapDiffRequest, StartSnapDNE) { REQUIRE_FEATURE(RBD_FEATURE_FAST_DIFF); uint32_t object_count = 5; m_image_ctx->size = object_count * (1 << m_image_ctx->order); MockTestImageCtx mock_image_ctx(*m_image_ctx); mock_image_ctx.snap_info = { {2U, {"snap2", {cls::rbd::UserSnapshotNamespace{}}, mock_image_ctx.size, {}, {}, {}, {}}} }; InSequence seq; C_SaferCond ctx; auto req = new MockDiffRequest(&mock_image_ctx, 1, CEPH_NOSNAP, &m_object_diff_state, &ctx); req->send(); ASSERT_EQ(-ENOENT, ctx.wait()); } TEST_F(TestMockObjectMapDiffRequest, EndSnapDNE) { REQUIRE_FEATURE(RBD_FEATURE_FAST_DIFF); uint32_t object_count = 5; m_image_ctx->size = object_count * (1 << m_image_ctx->order); MockTestImageCtx mock_image_ctx(*m_image_ctx); mock_image_ctx.snap_info = { {1U, {"snap1", {cls::rbd::UserSnapshotNamespace{}}, mock_image_ctx.size, {}, {}, {}, {}}} }; InSequence seq; expect_get_flags(mock_image_ctx, 1U, 0, 0); BitVector<2> object_map_1; object_map_1.resize(object_count); expect_load_map(mock_image_ctx, 1U, object_map_1, 0); C_SaferCond ctx; auto req = new MockDiffRequest(&mock_image_ctx, 1, 2, &m_object_diff_state, &ctx); req->send(); ASSERT_EQ(-ENOENT, ctx.wait()); } TEST_F(TestMockObjectMapDiffRequest, IntermediateSnapDNE) { REQUIRE_FEATURE(RBD_FEATURE_FAST_DIFF); uint32_t object_count = 5; m_image_ctx->size = object_count * (1 << m_image_ctx->order); MockTestImageCtx mock_image_ctx(*m_image_ctx); mock_image_ctx.snap_info = { {1U, {"snap1", {cls::rbd::UserSnapshotNamespace{}}, mock_image_ctx.size, {}, {}, {}, {}}}, {2U, {"snap2", {cls::rbd::UserSnapshotNamespace{}}, mock_image_ctx.size, {}, {}, {}, {}}} }; InSequence seq; expect_get_flags(mock_image_ctx, 1U, 0, 0); BitVector<2> object_map_1; object_map_1.resize(object_count); object_map_1[1] = OBJECT_EXISTS_CLEAN; expect_load_map(mock_image_ctx, 1U, object_map_1, 0, [&mock_image_ctx]() { mock_image_ctx.snap_info.erase(2); }); 
expect_get_flags(mock_image_ctx, CEPH_NOSNAP, 0, 0); BitVector<2> object_map_head; object_map_head.resize(object_count); object_map_head[1] = OBJECT_EXISTS_CLEAN; expect_load_map(mock_image_ctx, CEPH_NOSNAP, object_map_head, 0); C_SaferCond ctx; auto req = new MockDiffRequest(&mock_image_ctx, 0, CEPH_NOSNAP, &m_object_diff_state, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); BitVector<2> expected_diff_state; expected_diff_state.resize(object_count); expected_diff_state[1] = DIFF_STATE_DATA_UPDATED; ASSERT_EQ(expected_diff_state, m_object_diff_state); } TEST_F(TestMockObjectMapDiffRequest, LoadObjectMapDNE) { REQUIRE_FEATURE(RBD_FEATURE_FAST_DIFF); uint32_t object_count = 5; m_image_ctx->size = object_count * (1 << m_image_ctx->order); MockTestImageCtx mock_image_ctx(*m_image_ctx); InSequence seq; expect_get_flags(mock_image_ctx, CEPH_NOSNAP, 0, 0); BitVector<2> object_map_head; expect_load_map(mock_image_ctx, CEPH_NOSNAP, object_map_head, -ENOENT); C_SaferCond ctx; auto req = new MockDiffRequest(&mock_image_ctx, 0, CEPH_NOSNAP, &m_object_diff_state, &ctx); req->send(); ASSERT_EQ(-ENOENT, ctx.wait()); } TEST_F(TestMockObjectMapDiffRequest, LoadIntermediateObjectMapDNE) { REQUIRE_FEATURE(RBD_FEATURE_FAST_DIFF); uint32_t object_count = 5; m_image_ctx->size = object_count * (1 << m_image_ctx->order); MockTestImageCtx mock_image_ctx(*m_image_ctx); mock_image_ctx.snap_info = { {1U, {"snap1", {cls::rbd::UserSnapshotNamespace{}}, mock_image_ctx.size, {}, {}, {}, {}}} }; InSequence seq; expect_get_flags(mock_image_ctx, 1U, 0, 0); BitVector<2> object_map_1; expect_load_map(mock_image_ctx, 1U, object_map_1, -ENOENT); expect_get_flags(mock_image_ctx, CEPH_NOSNAP, 0, 0); BitVector<2> object_map_head; object_map_head.resize(object_count); object_map_head[1] = OBJECT_EXISTS_CLEAN; expect_load_map(mock_image_ctx, CEPH_NOSNAP, object_map_head, 0); C_SaferCond ctx; auto req = new MockDiffRequest(&mock_image_ctx, 0, CEPH_NOSNAP, &m_object_diff_state, &ctx); req->send(); ASSERT_EQ(0, 
ctx.wait()); BitVector<2> expected_diff_state; expected_diff_state.resize(object_count); expected_diff_state[1] = DIFF_STATE_DATA_UPDATED; ASSERT_EQ(expected_diff_state, m_object_diff_state); } TEST_F(TestMockObjectMapDiffRequest, LoadObjectMapError) { REQUIRE_FEATURE(RBD_FEATURE_FAST_DIFF); uint32_t object_count = 5; m_image_ctx->size = object_count * (1 << m_image_ctx->order); MockTestImageCtx mock_image_ctx(*m_image_ctx); mock_image_ctx.snap_info = { {1U, {"snap1", {cls::rbd::UserSnapshotNamespace{}}, mock_image_ctx.size, {}, {}, {}, {}}} }; InSequence seq; expect_get_flags(mock_image_ctx, 1U, 0, 0); BitVector<2> object_map_1; expect_load_map(mock_image_ctx, 1U, object_map_1, -EPERM); C_SaferCond ctx; auto req = new MockDiffRequest(&mock_image_ctx, 0, CEPH_NOSNAP, &m_object_diff_state, &ctx); req->send(); ASSERT_EQ(-EPERM, ctx.wait()); } TEST_F(TestMockObjectMapDiffRequest, ObjectMapTooSmall) { REQUIRE_FEATURE(RBD_FEATURE_FAST_DIFF); uint32_t object_count = 5; m_image_ctx->size = object_count * (1 << m_image_ctx->order); MockTestImageCtx mock_image_ctx(*m_image_ctx); mock_image_ctx.snap_info = { {1U, {"snap1", {cls::rbd::UserSnapshotNamespace{}}, mock_image_ctx.size, {}, {}, {}, {}}} }; InSequence seq; expect_get_flags(mock_image_ctx, 1U, 0, 0); BitVector<2> object_map_1; expect_load_map(mock_image_ctx, 1U, object_map_1, 0); C_SaferCond ctx; auto req = new MockDiffRequest(&mock_image_ctx, 0, CEPH_NOSNAP, &m_object_diff_state, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } } // namespace object_map } // librbd
15,059
29.48583
80
cc
null
ceph-main/src/test/librbd/object_map/test_mock_InvalidateRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/librbd/test_mock_fixture.h" #include "test/librbd/test_support.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "librbd/internal.h" #include "librbd/api/Image.h" #include "librbd/object_map/InvalidateRequest.h" #include "gmock/gmock.h" #include "gtest/gtest.h" namespace librbd { namespace object_map { using ::testing::_; using ::testing::DoDefault; using ::testing::Return; using ::testing::StrEq; class TestMockObjectMapInvalidateRequest : public TestMockFixture { public: }; TEST_F(TestMockObjectMapInvalidateRequest, UpdatesInMemoryFlag) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); bool flags_set; ASSERT_EQ(0, ictx->test_flags(CEPH_NOSNAP, RBD_FLAG_OBJECT_MAP_INVALID, &flags_set)); ASSERT_FALSE(flags_set); C_SaferCond cond_ctx; AsyncRequest<> *request = new InvalidateRequest<>(*ictx, CEPH_NOSNAP, true, &cond_ctx); EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), exec(ictx->header_oid, _, StrEq("rbd"), StrEq("set_flags"), _, _, _, _)) .WillOnce(DoDefault()); { std::shared_lock owner_locker{ictx->owner_lock}; std::unique_lock image_locker{ictx->image_lock}; request->send(); } ASSERT_EQ(0, cond_ctx.wait()); ASSERT_EQ(0, ictx->test_flags(CEPH_NOSNAP, RBD_FLAG_OBJECT_MAP_INVALID, &flags_set)); ASSERT_TRUE(flags_set); } TEST_F(TestMockObjectMapInvalidateRequest, UpdatesHeadOnDiskFlag) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, acquire_exclusive_lock(*ictx)); C_SaferCond cond_ctx; AsyncRequest<> *request = new InvalidateRequest<>(*ictx, CEPH_NOSNAP, false, &cond_ctx); EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), exec(ictx->header_oid, _, StrEq("rbd"), StrEq("set_flags"), _, _, _, _)) .WillOnce(DoDefault()); { std::shared_lock owner_locker{ictx->owner_lock}; std::unique_lock 
image_locker{ictx->image_lock}; request->send(); } ASSERT_EQ(0, cond_ctx.wait()); expect_unlock_exclusive_lock(*ictx); } TEST_F(TestMockObjectMapInvalidateRequest, UpdatesSnapOnDiskFlag) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, librbd::api::Image<>::snap_set(ictx, cls::rbd::UserSnapshotNamespace(), "snap1")); C_SaferCond cond_ctx; AsyncRequest<> *request = new InvalidateRequest<>(*ictx, ictx->snap_id, false, &cond_ctx); EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), exec(ictx->header_oid, _, StrEq("rbd"), StrEq("set_flags"), _, _, _, _)) .WillOnce(DoDefault()); { std::shared_lock owner_locker{ictx->owner_lock}; std::unique_lock image_locker{ictx->image_lock}; request->send(); } ASSERT_EQ(0, cond_ctx.wait()); } TEST_F(TestMockObjectMapInvalidateRequest, ErrorOnDiskUpdateWithoutLock) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); C_SaferCond cond_ctx; AsyncRequest<> *request = new InvalidateRequest<>(*ictx, CEPH_NOSNAP, false, &cond_ctx); EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), exec(ictx->header_oid, _, StrEq("rbd"), StrEq("set_flags"), _, _, _, _)) .Times(0); { std::shared_lock owner_locker{ictx->owner_lock}; std::unique_lock image_locker{ictx->image_lock}; request->send(); } ASSERT_EQ(-EROFS, cond_ctx.wait()); expect_unlock_exclusive_lock(*ictx); } TEST_F(TestMockObjectMapInvalidateRequest, ErrorOnDiskUpdateFailure) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, acquire_exclusive_lock(*ictx)); C_SaferCond cond_ctx; AsyncRequest<> *request = new InvalidateRequest<>(*ictx, CEPH_NOSNAP, false, &cond_ctx); EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), exec(ictx->header_oid, _, StrEq("rbd"), StrEq("set_flags"), _, _, _, _)) .WillOnce(Return(-EINVAL)); { std::shared_lock owner_locker{ictx->owner_lock}; 
std::unique_lock image_locker{ictx->image_lock}; request->send(); } ASSERT_EQ(-EINVAL, cond_ctx.wait()); expect_unlock_exclusive_lock(*ictx); } } // namespace object_map } // namespace librbd
4,766
28.981132
90
cc
null
ceph-main/src/test/librbd/object_map/test_mock_LockRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/librbd/test_mock_fixture.h" #include "test/librbd/test_support.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "cls/lock/cls_lock_ops.h" #include "librbd/ObjectMap.h" #include "librbd/object_map/LockRequest.h" // template definitions #include "librbd/object_map/LockRequest.cc" namespace librbd { namespace object_map { using ::testing::_; using ::testing::DoAll; using ::testing::InSequence; using ::testing::Return; using ::testing::StrEq; using ::testing::WithArg; class TestMockObjectMapLockRequest : public TestMockFixture { public: typedef LockRequest<MockImageCtx> MockLockRequest; void expect_lock(MockImageCtx &mock_image_ctx, int r) { std::string oid(ObjectMap<>::object_map_name(mock_image_ctx.id, CEPH_NOSNAP)); EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), exec(oid, _, StrEq("lock"), StrEq("lock"), _, _, _, _)) .WillOnce(Return(r)); } void expect_get_lock_info(MockImageCtx &mock_image_ctx, int r) { std::string oid(ObjectMap<>::object_map_name(mock_image_ctx.id, CEPH_NOSNAP)); auto &expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), exec(oid, _, StrEq("lock"), StrEq("get_info"), _, _, _, _)); if (r < 0) { expect.WillOnce(Return(r)); } else { entity_name_t entity1(entity_name_t::CLIENT(1)); entity_name_t entity2(entity_name_t::CLIENT(2)); cls_lock_get_info_reply reply; reply.lockers = decltype(reply.lockers){ {rados::cls::lock::locker_id_t(entity1, "cookie1"), rados::cls::lock::locker_info_t()}, {rados::cls::lock::locker_id_t(entity2, "cookie2"), rados::cls::lock::locker_info_t()}}; bufferlist bl; encode(reply, bl, CEPH_FEATURES_SUPPORTED_DEFAULT); std::string str(bl.c_str(), bl.length()); expect.WillOnce(DoAll(WithArg<5>(CopyInBufferlist(str)), Return(r))); } } void expect_break_lock(MockImageCtx &mock_image_ctx, int r) { std::string 
oid(ObjectMap<>::object_map_name(mock_image_ctx.id, CEPH_NOSNAP)); auto &expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), exec(oid, _, StrEq("lock"), StrEq("break_lock"), _, _, _, _)); if (r < 0) { expect.WillOnce(Return(r)); } else { expect.Times(2).WillRepeatedly(Return(0)); } } }; TEST_F(TestMockObjectMapLockRequest, Success) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); C_SaferCond ctx; MockLockRequest *req = new MockLockRequest(mock_image_ctx, &ctx); InSequence seq; expect_lock(mock_image_ctx, 0); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockObjectMapLockRequest, LockBusy) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); C_SaferCond ctx; MockLockRequest *req = new MockLockRequest(mock_image_ctx, &ctx); InSequence seq; expect_lock(mock_image_ctx, -EBUSY); expect_get_lock_info(mock_image_ctx, 0); expect_break_lock(mock_image_ctx, 0); expect_lock(mock_image_ctx, 0); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockObjectMapLockRequest, LockError) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); C_SaferCond ctx; MockLockRequest *req = new MockLockRequest(mock_image_ctx, &ctx); InSequence seq; expect_lock(mock_image_ctx, -ENOENT); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockObjectMapLockRequest, GetLockInfoMissing) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); C_SaferCond ctx; MockLockRequest *req = new MockLockRequest(mock_image_ctx, &ctx); InSequence seq; expect_lock(mock_image_ctx, -EBUSY); expect_get_lock_info(mock_image_ctx, -ENOENT); expect_lock(mock_image_ctx, 0); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockObjectMapLockRequest, GetLockInfoError) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); 
C_SaferCond ctx; MockLockRequest *req = new MockLockRequest(mock_image_ctx, &ctx); InSequence seq; expect_lock(mock_image_ctx, -EBUSY); expect_get_lock_info(mock_image_ctx, -EINVAL); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockObjectMapLockRequest, BreakLockMissing) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); C_SaferCond ctx; MockLockRequest *req = new MockLockRequest(mock_image_ctx, &ctx); InSequence seq; expect_lock(mock_image_ctx, -EBUSY); expect_get_lock_info(mock_image_ctx, 0); expect_break_lock(mock_image_ctx, -ENOENT); expect_lock(mock_image_ctx, 0); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockObjectMapLockRequest, BreakLockError) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); C_SaferCond ctx; MockLockRequest *req = new MockLockRequest(mock_image_ctx, &ctx); InSequence seq; expect_lock(mock_image_ctx, -EBUSY); expect_get_lock_info(mock_image_ctx, 0); expect_break_lock(mock_image_ctx, -EINVAL); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockObjectMapLockRequest, LockErrorAfterBrokeLock) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); C_SaferCond ctx; MockLockRequest *req = new MockLockRequest(mock_image_ctx, &ctx); InSequence seq; expect_lock(mock_image_ctx, -EBUSY); expect_get_lock_info(mock_image_ctx, 0); expect_break_lock(mock_image_ctx, 0); expect_lock(mock_image_ctx, -EBUSY); req->send(); ASSERT_EQ(0, ctx.wait()); } } // namespace object_map } // namespace librbd
6,268
27.238739
80
cc
null
ceph-main/src/test/librbd/object_map/test_mock_RefreshRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/librbd/test_mock_fixture.h" #include "test/librbd/test_support.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/librbd/object_map/mock/MockInvalidateRequest.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "common/bit_vector.hpp" #include "librbd/ObjectMap.h" #include "librbd/object_map/RefreshRequest.h" #include "librbd/object_map/LockRequest.h" namespace librbd { namespace { struct MockObjectMapImageCtx : public MockImageCtx { MockObjectMapImageCtx(ImageCtx &image_ctx) : MockImageCtx(image_ctx) { } }; } // anonymous namespace namespace object_map { template <> class LockRequest<MockObjectMapImageCtx> { public: static LockRequest *s_instance; static LockRequest *create(MockObjectMapImageCtx &image_ctx, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->on_finish = on_finish; return s_instance; } Context *on_finish = nullptr; LockRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; template<> struct InvalidateRequest<MockObjectMapImageCtx> : public MockInvalidateRequestBase<MockObjectMapImageCtx> { }; LockRequest<MockObjectMapImageCtx> *LockRequest<MockObjectMapImageCtx>::s_instance = nullptr; } // namespace object_map } // namespace librbd // template definitions #include "librbd/object_map/RefreshRequest.cc" #include "librbd/object_map/LockRequest.cc" namespace librbd { namespace object_map { using ::testing::_; using ::testing::DoAll; using ::testing::DoDefault; using ::testing::InSequence; using ::testing::Return; using ::testing::StrEq; using ::testing::WithArg; class TestMockObjectMapRefreshRequest : public TestMockFixture { public: static const uint64_t TEST_SNAP_ID = 123; typedef RefreshRequest<MockObjectMapImageCtx> MockRefreshRequest; typedef LockRequest<MockObjectMapImageCtx> MockLockRequest; typedef InvalidateRequest<MockObjectMapImageCtx> MockInvalidateRequest; void 
expect_object_map_lock(MockObjectMapImageCtx &mock_image_ctx, MockLockRequest &mock_lock_request) { EXPECT_CALL(mock_lock_request, send()) .WillOnce(FinishRequest(&mock_lock_request, 0, &mock_image_ctx)); } void expect_object_map_load(MockObjectMapImageCtx &mock_image_ctx, ceph::BitVector<2> *object_map, uint64_t snap_id, int r) { std::string oid(ObjectMap<>::object_map_name(mock_image_ctx.id, snap_id)); auto &expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), exec(oid, _, StrEq("rbd"), StrEq("object_map_load"), _, _, _, _)); if (r < 0) { expect.WillOnce(Return(r)); } else { ceph_assert(object_map); object_map->set_crc_enabled(false); bufferlist bl; encode(*object_map, bl); std::string str(bl.c_str(), bl.length()); expect.WillOnce(DoAll(WithArg<5>(CopyInBufferlist(str)), Return(0))); } } void expect_get_image_size(MockObjectMapImageCtx &mock_image_ctx, uint64_t snap_id, uint64_t size) { EXPECT_CALL(mock_image_ctx, get_image_size(snap_id)) .WillOnce(Return(size)); } void expect_invalidate_request(MockObjectMapImageCtx &mock_image_ctx, MockInvalidateRequest &invalidate_request, int r) { EXPECT_CALL(invalidate_request, send()) .WillOnce(FinishRequest(&invalidate_request, r, &mock_image_ctx)); } void expect_truncate_request(MockObjectMapImageCtx &mock_image_ctx) { std::string oid(ObjectMap<>::object_map_name(mock_image_ctx.id, TEST_SNAP_ID)); EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), truncate(oid, 0, _)) .WillOnce(Return(0)); } void expect_object_map_resize(MockObjectMapImageCtx &mock_image_ctx, uint64_t num_objects, int r) { std::string oid(ObjectMap<>::object_map_name(mock_image_ctx.id, TEST_SNAP_ID)); auto &expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), exec(oid, _, StrEq("rbd"), StrEq("object_map_resize"), _, _, _, _)); expect.WillOnce(Return(r)); } void init_object_map(MockObjectMapImageCtx &mock_image_ctx, ceph::BitVector<2> *object_map) { uint64_t num_objs = Striper::get_num_objects( mock_image_ctx.layout, 
mock_image_ctx.image_ctx->size); object_map->resize(num_objs); for (uint64_t i = 0; i < num_objs; ++i) { (*object_map)[i] = rand() % 3; } } }; TEST_F(TestMockObjectMapRefreshRequest, SuccessHead) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockObjectMapImageCtx mock_image_ctx(*ictx); ceph::BitVector<2> on_disk_object_map; init_object_map(mock_image_ctx, &on_disk_object_map); C_SaferCond ctx; ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; MockLockRequest mock_lock_request; MockRefreshRequest *req = new MockRefreshRequest( mock_image_ctx, &object_map_lock, &object_map, CEPH_NOSNAP, &ctx); InSequence seq; expect_get_image_size(mock_image_ctx, CEPH_NOSNAP, mock_image_ctx.image_ctx->size); expect_object_map_lock(mock_image_ctx, mock_lock_request); expect_object_map_load(mock_image_ctx, &on_disk_object_map, CEPH_NOSNAP, 0); expect_get_image_size(mock_image_ctx, CEPH_NOSNAP, mock_image_ctx.image_ctx->size); req->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_EQ(on_disk_object_map, object_map); } TEST_F(TestMockObjectMapRefreshRequest, SuccessSnapshot) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockObjectMapImageCtx mock_image_ctx(*ictx); ceph::BitVector<2> on_disk_object_map; init_object_map(mock_image_ctx, &on_disk_object_map); C_SaferCond ctx; ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; MockRefreshRequest *req = new MockRefreshRequest( mock_image_ctx, &object_map_lock, &object_map, TEST_SNAP_ID, &ctx); InSequence seq; expect_get_image_size(mock_image_ctx, TEST_SNAP_ID, mock_image_ctx.image_ctx->size); expect_object_map_load(mock_image_ctx, &on_disk_object_map, TEST_SNAP_ID, 0); expect_get_image_size(mock_image_ctx, TEST_SNAP_ID, mock_image_ctx.image_ctx->size); req->send(); ASSERT_EQ(0, ctx.wait()); ASSERT_EQ(on_disk_object_map, 
object_map); } TEST_F(TestMockObjectMapRefreshRequest, LoadError) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockObjectMapImageCtx mock_image_ctx(*ictx); ceph::BitVector<2> on_disk_object_map; init_object_map(mock_image_ctx, &on_disk_object_map); C_SaferCond ctx; ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; MockRefreshRequest *req = new MockRefreshRequest( mock_image_ctx, &object_map_lock, &object_map, TEST_SNAP_ID, &ctx); InSequence seq; expect_get_image_size(mock_image_ctx, TEST_SNAP_ID, mock_image_ctx.image_ctx->size); expect_object_map_load(mock_image_ctx, nullptr, TEST_SNAP_ID, -ENOENT); MockInvalidateRequest invalidate_request; expect_invalidate_request(mock_image_ctx, invalidate_request, 0); expect_get_image_size(mock_image_ctx, TEST_SNAP_ID, mock_image_ctx.image_ctx->size); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockObjectMapRefreshRequest, LoadInvalidateError) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockObjectMapImageCtx mock_image_ctx(*ictx); ceph::BitVector<2> on_disk_object_map; init_object_map(mock_image_ctx, &on_disk_object_map); C_SaferCond ctx; ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; MockRefreshRequest *req = new MockRefreshRequest( mock_image_ctx, &object_map_lock, &object_map, TEST_SNAP_ID, &ctx); InSequence seq; expect_get_image_size(mock_image_ctx, TEST_SNAP_ID, mock_image_ctx.image_ctx->size); expect_object_map_load(mock_image_ctx, nullptr, TEST_SNAP_ID, -ENOENT); MockInvalidateRequest invalidate_request; expect_invalidate_request(mock_image_ctx, invalidate_request, -EPERM); expect_get_image_size(mock_image_ctx, TEST_SNAP_ID, mock_image_ctx.image_ctx->size); req->send(); ASSERT_EQ(-EPERM, ctx.wait()); } TEST_F(TestMockObjectMapRefreshRequest, LoadCorrupt) { 
REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockObjectMapImageCtx mock_image_ctx(*ictx); ceph::BitVector<2> on_disk_object_map; init_object_map(mock_image_ctx, &on_disk_object_map); C_SaferCond ctx; ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; MockRefreshRequest *req = new MockRefreshRequest( mock_image_ctx, &object_map_lock, &object_map, TEST_SNAP_ID, &ctx); InSequence seq; expect_get_image_size(mock_image_ctx, TEST_SNAP_ID, mock_image_ctx.image_ctx->size); expect_object_map_load(mock_image_ctx, nullptr, TEST_SNAP_ID, -EINVAL); MockInvalidateRequest invalidate_request; expect_invalidate_request(mock_image_ctx, invalidate_request, 0); expect_truncate_request(mock_image_ctx); expect_object_map_resize(mock_image_ctx, on_disk_object_map.size(), 0); expect_get_image_size(mock_image_ctx, TEST_SNAP_ID, mock_image_ctx.image_ctx->size); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockObjectMapRefreshRequest, TooSmall) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockObjectMapImageCtx mock_image_ctx(*ictx); ceph::BitVector<2> on_disk_object_map; init_object_map(mock_image_ctx, &on_disk_object_map); ceph::BitVector<2> small_object_map; C_SaferCond ctx; ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; MockRefreshRequest *req = new MockRefreshRequest( mock_image_ctx, &object_map_lock, &object_map, TEST_SNAP_ID, &ctx); InSequence seq; expect_get_image_size(mock_image_ctx, TEST_SNAP_ID, mock_image_ctx.image_ctx->size); expect_object_map_load(mock_image_ctx, &small_object_map, TEST_SNAP_ID, 0); MockInvalidateRequest invalidate_request; expect_invalidate_request(mock_image_ctx, invalidate_request, 0); expect_object_map_resize(mock_image_ctx, on_disk_object_map.size(), 0); expect_get_image_size(mock_image_ctx, TEST_SNAP_ID, 
mock_image_ctx.image_ctx->size); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockObjectMapRefreshRequest, TooSmallInvalidateError) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockObjectMapImageCtx mock_image_ctx(*ictx); ceph::BitVector<2> on_disk_object_map; init_object_map(mock_image_ctx, &on_disk_object_map); ceph::BitVector<2> small_object_map; C_SaferCond ctx; ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; MockRefreshRequest *req = new MockRefreshRequest( mock_image_ctx, &object_map_lock, &object_map, TEST_SNAP_ID, &ctx); InSequence seq; expect_get_image_size(mock_image_ctx, TEST_SNAP_ID, mock_image_ctx.image_ctx->size); expect_object_map_load(mock_image_ctx, &small_object_map, TEST_SNAP_ID, 0); MockInvalidateRequest invalidate_request; expect_invalidate_request(mock_image_ctx, invalidate_request, -EPERM); expect_get_image_size(mock_image_ctx, TEST_SNAP_ID, mock_image_ctx.image_ctx->size); req->send(); ASSERT_EQ(-EPERM, ctx.wait()); } TEST_F(TestMockObjectMapRefreshRequest, TooLarge) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockObjectMapImageCtx mock_image_ctx(*ictx); ceph::BitVector<2> on_disk_object_map; init_object_map(mock_image_ctx, &on_disk_object_map); ceph::BitVector<2> large_object_map; large_object_map.resize(on_disk_object_map.size() * 2); C_SaferCond ctx; ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; MockRefreshRequest *req = new MockRefreshRequest( mock_image_ctx, &object_map_lock, &object_map, TEST_SNAP_ID, &ctx); InSequence seq; expect_get_image_size(mock_image_ctx, TEST_SNAP_ID, mock_image_ctx.image_ctx->size); expect_object_map_load(mock_image_ctx, &large_object_map, TEST_SNAP_ID, 0); expect_get_image_size(mock_image_ctx, TEST_SNAP_ID, mock_image_ctx.image_ctx->size); req->send(); 
ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockObjectMapRefreshRequest, ResizeError) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockObjectMapImageCtx mock_image_ctx(*ictx); ceph::BitVector<2> on_disk_object_map; init_object_map(mock_image_ctx, &on_disk_object_map); ceph::BitVector<2> small_object_map; C_SaferCond ctx; ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; MockRefreshRequest *req = new MockRefreshRequest( mock_image_ctx, &object_map_lock, &object_map, TEST_SNAP_ID, &ctx); InSequence seq; expect_get_image_size(mock_image_ctx, TEST_SNAP_ID, mock_image_ctx.image_ctx->size); expect_object_map_load(mock_image_ctx, &small_object_map, TEST_SNAP_ID, 0); MockInvalidateRequest invalidate_request; expect_invalidate_request(mock_image_ctx, invalidate_request, 0); expect_object_map_resize(mock_image_ctx, on_disk_object_map.size(), -ESTALE); expect_get_image_size(mock_image_ctx, TEST_SNAP_ID, mock_image_ctx.image_ctx->size); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockObjectMapRefreshRequest, LargeImageError) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockObjectMapImageCtx mock_image_ctx(*ictx); ceph::BitVector<2> on_disk_object_map; init_object_map(mock_image_ctx, &on_disk_object_map); C_SaferCond ctx; ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; MockRefreshRequest *req = new MockRefreshRequest( mock_image_ctx, &object_map_lock, &object_map, TEST_SNAP_ID, &ctx); InSequence seq; expect_get_image_size(mock_image_ctx, TEST_SNAP_ID, std::numeric_limits<int64_t>::max()); MockInvalidateRequest invalidate_request; expect_invalidate_request(mock_image_ctx, invalidate_request, 0); req->send(); ASSERT_EQ(-EFBIG, ctx.wait()); } } // namespace object_map } // namespace librbd
15,673
32.635193
93
cc
null
ceph-main/src/test/librbd/object_map/test_mock_ResizeRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/librbd/test_mock_fixture.h" #include "test/librbd/test_support.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "common/bit_vector.hpp" #include "librbd/internal.h" #include "librbd/ObjectMap.h" #include "librbd/api/Image.h" #include "librbd/object_map/ResizeRequest.h" #include "gmock/gmock.h" #include "gtest/gtest.h" namespace librbd { namespace object_map { using ::testing::_; using ::testing::DoDefault; using ::testing::Return; using ::testing::StrEq; class TestMockObjectMapResizeRequest : public TestMockFixture { public: void expect_resize(librbd::ImageCtx *ictx, uint64_t snap_id, int r) { std::string oid(ObjectMap<>::object_map_name(ictx->id, snap_id)); if (snap_id == CEPH_NOSNAP) { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), exec(oid, _, StrEq("lock"), StrEq("assert_locked"), _, _, _, _)) .WillOnce(DoDefault()); } if (r < 0) { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), exec(oid, _, StrEq("rbd"), StrEq("object_map_resize"), _, _, _, _)) .WillOnce(Return(r)); } else { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), exec(oid, _, StrEq("rbd"), StrEq("object_map_resize"), _, _, _, _)) .WillOnce(DoDefault()); } } void expect_invalidate(librbd::ImageCtx *ictx) { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), exec(ictx->header_oid, _, StrEq("rbd"), StrEq("set_flags"), _, _, _, _)) .WillOnce(DoDefault()); } }; TEST_F(TestMockObjectMapResizeRequest, UpdateInMemory) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, acquire_exclusive_lock(*ictx)); ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; object_map.resize(1); C_SaferCond cond_ctx; AsyncRequest<> *req = new ResizeRequest( *ictx, &object_map_lock, &object_map, CEPH_NOSNAP, object_map.size(), OBJECT_EXISTS, &cond_ctx); req->send(); ASSERT_EQ(0, cond_ctx.wait()); for 
(uint64_t i = 0; i < object_map.size(); ++i) { ASSERT_EQ(i == 0 ? OBJECT_NONEXISTENT : OBJECT_EXISTS, object_map[i]); } } TEST_F(TestMockObjectMapResizeRequest, UpdateHeadOnDisk) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, acquire_exclusive_lock(*ictx)); expect_resize(ictx, CEPH_NOSNAP, 0); ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; object_map.resize(1); C_SaferCond cond_ctx; AsyncRequest<> *req = new ResizeRequest( *ictx, &object_map_lock, &object_map, CEPH_NOSNAP, object_map.size(), OBJECT_EXISTS, &cond_ctx); req->send(); ASSERT_EQ(0, cond_ctx.wait()); expect_unlock_exclusive_lock(*ictx); } TEST_F(TestMockObjectMapResizeRequest, UpdateSnapOnDisk) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, librbd::api::Image<>::snap_set(ictx, cls::rbd::UserSnapshotNamespace(), "snap1")); uint64_t snap_id = ictx->snap_id; expect_resize(ictx, snap_id, 0); ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; object_map.resize(1); C_SaferCond cond_ctx; AsyncRequest<> *req = new ResizeRequest( *ictx, &object_map_lock, &object_map, snap_id, object_map.size(), OBJECT_EXISTS, &cond_ctx); req->send(); ASSERT_EQ(0, cond_ctx.wait()); expect_unlock_exclusive_lock(*ictx); } TEST_F(TestMockObjectMapResizeRequest, UpdateOnDiskError) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, acquire_exclusive_lock(*ictx)); expect_resize(ictx, CEPH_NOSNAP, -EINVAL); expect_invalidate(ictx); ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; object_map.resize(1); C_SaferCond cond_ctx; AsyncRequest<> *req = new ResizeRequest( *ictx, &object_map_lock, &object_map, 
CEPH_NOSNAP, object_map.size(), OBJECT_EXISTS, &cond_ctx); req->send(); ASSERT_EQ(0, cond_ctx.wait()); expect_unlock_exclusive_lock(*ictx); } } // namespace object_map } // namespace librbd
4,695
29.296774
78
cc
null
ceph-main/src/test/librbd/object_map/test_mock_SnapshotCreateRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/librbd/test_mock_fixture.h" #include "test/librbd/test_support.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "common/bit_vector.hpp" #include "cls/rbd/cls_rbd_types.h" #include "librbd/internal.h" #include "librbd/ObjectMap.h" #include "librbd/object_map/SnapshotCreateRequest.h" #include "gmock/gmock.h" #include "gtest/gtest.h" namespace librbd { namespace object_map { using ::testing::_; using ::testing::DoDefault; using ::testing::Return; using ::testing::StrEq; class TestMockObjectMapSnapshotCreateRequest : public TestMockFixture { public: void inject_snap_info(librbd::ImageCtx *ictx, uint64_t snap_id) { std::unique_lock image_locker{ictx->image_lock}; ictx->add_snap(cls::rbd::UserSnapshotNamespace(), "snap name", snap_id, ictx->size, ictx->parent_md, RBD_PROTECTION_STATUS_UNPROTECTED, 0, utime_t()); } void expect_read_map(librbd::ImageCtx *ictx, int r) { if (r < 0) { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), read(ObjectMap<>::object_map_name(ictx->id, CEPH_NOSNAP), 0, 0, _, _, _)).WillOnce(Return(r)); } else { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), read(ObjectMap<>::object_map_name(ictx->id, CEPH_NOSNAP), 0, 0, _, _, _)).WillOnce(DoDefault()); } } void expect_write_map(librbd::ImageCtx *ictx, uint64_t snap_id, int r) { if (r < 0) { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), write_full( ObjectMap<>::object_map_name(ictx->id, snap_id), _, _)) .WillOnce(Return(r)); } else { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), write_full( ObjectMap<>::object_map_name(ictx->id, snap_id), _, _)) .WillOnce(DoDefault()); } } void expect_add_snapshot(librbd::ImageCtx *ictx, int r) { std::string oid(ObjectMap<>::object_map_name(ictx->id, CEPH_NOSNAP)); if (r < 0) { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), exec(oid, _, StrEq("lock"), StrEq("assert_locked"), _, _, _, _)) .WillOnce(Return(r)); } else { 
EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), exec(oid, _, StrEq("lock"), StrEq("assert_locked"), _, _, _, _)) .WillOnce(DoDefault()); EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), exec(oid, _, StrEq("rbd"), StrEq("object_map_snap_add"), _, _, _, _)) .WillOnce(DoDefault()); } } void expect_invalidate(librbd::ImageCtx *ictx) { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), exec(ictx->header_oid, _, StrEq("rbd"), StrEq("set_flags"), _, _, _, _)) .WillOnce(DoDefault()); } }; TEST_F(TestMockObjectMapSnapshotCreateRequest, Success) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, acquire_exclusive_lock(*ictx)); ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; uint64_t snap_id = 1; inject_snap_info(ictx, snap_id); expect_read_map(ictx, 0); expect_write_map(ictx, snap_id, 0); if (ictx->test_features(RBD_FEATURE_FAST_DIFF)) { expect_add_snapshot(ictx, 0); } C_SaferCond cond_ctx; AsyncRequest<> *request = new SnapshotCreateRequest( *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx); { std::shared_lock image_locker{ictx->image_lock}; request->send(); } ASSERT_EQ(0, cond_ctx.wait()); expect_unlock_exclusive_lock(*ictx); } TEST_F(TestMockObjectMapSnapshotCreateRequest, ReadMapError) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, acquire_exclusive_lock(*ictx)); ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; uint64_t snap_id = 1; inject_snap_info(ictx, snap_id); expect_read_map(ictx, -ENOENT); expect_invalidate(ictx); C_SaferCond cond_ctx; AsyncRequest<> *request = new SnapshotCreateRequest( *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx); { std::shared_lock image_locker{ictx->image_lock}; request->send(); } ASSERT_EQ(-ENOENT, cond_ctx.wait()); expect_unlock_exclusive_lock(*ictx); } 
TEST_F(TestMockObjectMapSnapshotCreateRequest, WriteMapError) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, acquire_exclusive_lock(*ictx)); ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; uint64_t snap_id = 1; inject_snap_info(ictx, snap_id); expect_read_map(ictx, 0); expect_write_map(ictx, snap_id, -EINVAL); expect_invalidate(ictx); C_SaferCond cond_ctx; AsyncRequest<> *request = new SnapshotCreateRequest( *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx); { std::shared_lock image_locker{ictx->image_lock}; request->send(); } ASSERT_EQ(-ENOENT, cond_ctx.wait()); expect_unlock_exclusive_lock(*ictx); } TEST_F(TestMockObjectMapSnapshotCreateRequest, AddSnapshotError) { REQUIRE_FEATURE(RBD_FEATURE_FAST_DIFF); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, acquire_exclusive_lock(*ictx)); ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; uint64_t snap_id = 1; inject_snap_info(ictx, snap_id); expect_read_map(ictx, 0); expect_write_map(ictx, snap_id, 0); expect_add_snapshot(ictx, -EINVAL); expect_invalidate(ictx); C_SaferCond cond_ctx; AsyncRequest<> *request = new SnapshotCreateRequest( *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx); { std::shared_lock image_locker{ictx->image_lock}; request->send(); } ASSERT_EQ(-ENOENT, cond_ctx.wait()); expect_unlock_exclusive_lock(*ictx); } TEST_F(TestMockObjectMapSnapshotCreateRequest, FlagCleanObjects) { REQUIRE_FEATURE(RBD_FEATURE_FAST_DIFF); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, acquire_exclusive_lock(*ictx)); ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; object_map.resize(1024); for (uint64_t i = 0; i < object_map.size(); ++i) { object_map[i] = i % 2 == 0 ? 
OBJECT_EXISTS : OBJECT_NONEXISTENT; } uint64_t snap_id = 1; inject_snap_info(ictx, snap_id); C_SaferCond cond_ctx; AsyncRequest<> *request = new SnapshotCreateRequest( *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx); { std::shared_lock image_locker{ictx->image_lock}; request->send(); } ASSERT_EQ(0, cond_ctx.wait()); for (uint64_t i = 0; i < object_map.size(); ++i) { ASSERT_EQ(i % 2 == 0 ? OBJECT_EXISTS_CLEAN : OBJECT_NONEXISTENT, object_map[i]); } } } // namespace object_map } // namespace librbd
7,270
30.206009
80
cc
null
ceph-main/src/test/librbd/object_map/test_mock_SnapshotRemoveRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/librbd/test_mock_fixture.h" #include "test/librbd/test_support.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "common/bit_vector.hpp" #include "librbd/ImageState.h" #include "librbd/internal.h" #include "librbd/ObjectMap.h" #include "librbd/object_map/SnapshotRemoveRequest.h" #include "gmock/gmock.h" #include "gtest/gtest.h" namespace librbd { namespace object_map { using ::testing::_; using ::testing::DoDefault; using ::testing::Return; using ::testing::StrEq; class TestMockObjectMapSnapshotRemoveRequest : public TestMockFixture { public: void expect_load_map(librbd::ImageCtx *ictx, uint64_t snap_id, int r) { std::string snap_oid(ObjectMap<>::object_map_name(ictx->id, snap_id)); if (r < 0) { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), exec(snap_oid, _, StrEq("rbd"), StrEq("object_map_load"), _, _, _, _)) .WillOnce(Return(r)); } else { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), exec(snap_oid, _, StrEq("rbd"), StrEq("object_map_load"), _, _, _, _)) .WillOnce(DoDefault()); } } void expect_remove_snapshot(librbd::ImageCtx *ictx, int r) { std::string oid(ObjectMap<>::object_map_name(ictx->id, CEPH_NOSNAP)); if (r < 0) { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), exec(oid, _, StrEq("lock"), StrEq("assert_locked"), _, _, _, _)) .WillOnce(Return(r)); } else { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), exec(oid, _, StrEq("lock"), StrEq("assert_locked"), _, _, _, _)) .WillOnce(DoDefault()); EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), exec(oid, _, StrEq("rbd"), StrEq("object_map_snap_remove"), _, _, _, _)) .WillOnce(DoDefault()); } } void expect_remove_map(librbd::ImageCtx *ictx, uint64_t snap_id, int r) { std::string snap_oid(ObjectMap<>::object_map_name(ictx->id, snap_id)); if (r < 0) { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), remove(snap_oid, _)) .WillOnce(Return(r)); } else { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), remove(snap_oid, 
_)) .WillOnce(DoDefault()); } } void expect_invalidate(librbd::ImageCtx *ictx) { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), exec(ictx->header_oid, _, StrEq("rbd"), StrEq("set_flags"), _, _, _, _)) .WillOnce(DoDefault()); } }; TEST_F(TestMockObjectMapSnapshotRemoveRequest, Success) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); uint64_t snap_id = ictx->snap_info.rbegin()->first; if (ictx->test_features(RBD_FEATURE_FAST_DIFF)) { expect_load_map(ictx, snap_id, 0); expect_remove_snapshot(ictx, 0); } expect_remove_map(ictx, snap_id, 0); ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; C_SaferCond cond_ctx; AsyncRequest<> *request = new SnapshotRemoveRequest( *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx); { std::shared_lock owner_locker{ictx->owner_lock}; std::unique_lock image_locker{ictx->image_lock}; request->send(); } ASSERT_EQ(0, cond_ctx.wait()); expect_unlock_exclusive_lock(*ictx); } TEST_F(TestMockObjectMapSnapshotRemoveRequest, LoadMapMissing) { REQUIRE_FEATURE(RBD_FEATURE_FAST_DIFF); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); uint64_t snap_id = ictx->snap_info.rbegin()->first; auto snap_it = ictx->snap_info.find(snap_id); ASSERT_NE(ictx->snap_info.end(), snap_it); snap_it->second.flags |= RBD_FLAG_OBJECT_MAP_INVALID; expect_load_map(ictx, snap_id, -ENOENT); ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; C_SaferCond cond_ctx; AsyncRequest<> *request = new SnapshotRemoveRequest( *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx); { std::shared_lock owner_locker{ictx->owner_lock}; std::unique_lock image_locker{ictx->image_lock}; request->send(); } ASSERT_EQ(0, 
cond_ctx.wait()); { // shouldn't invalidate the HEAD revision when we fail to load // the already deleted snapshot std::shared_lock image_locker{ictx->image_lock}; uint64_t flags; ASSERT_EQ(0, ictx->get_flags(CEPH_NOSNAP, &flags)); ASSERT_EQ(0U, flags & RBD_FLAG_OBJECT_MAP_INVALID); } expect_unlock_exclusive_lock(*ictx); } TEST_F(TestMockObjectMapSnapshotRemoveRequest, LoadMapError) { REQUIRE_FEATURE(RBD_FEATURE_FAST_DIFF); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); uint64_t snap_id = ictx->snap_info.rbegin()->first; expect_load_map(ictx, snap_id, -EINVAL); expect_invalidate(ictx); expect_remove_map(ictx, snap_id, 0); ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; C_SaferCond cond_ctx; AsyncRequest<> *request = new SnapshotRemoveRequest( *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx); { std::shared_lock owner_locker{ictx->owner_lock}; std::unique_lock image_locker{ictx->image_lock}; request->send(); } ASSERT_EQ(0, cond_ctx.wait()); expect_unlock_exclusive_lock(*ictx); } TEST_F(TestMockObjectMapSnapshotRemoveRequest, RemoveSnapshotMissing) { REQUIRE_FEATURE(RBD_FEATURE_FAST_DIFF); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); uint64_t snap_id = ictx->snap_info.rbegin()->first; expect_load_map(ictx, snap_id, 0); expect_remove_snapshot(ictx, -ENOENT); expect_remove_map(ictx, snap_id, 0); ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; C_SaferCond cond_ctx; AsyncRequest<> *request = new SnapshotRemoveRequest( *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx); { std::shared_lock owner_locker{ictx->owner_lock}; std::unique_lock image_locker{ictx->image_lock}; request->send(); } ASSERT_EQ(0, cond_ctx.wait()); 
expect_unlock_exclusive_lock(*ictx); } TEST_F(TestMockObjectMapSnapshotRemoveRequest, RemoveSnapshotError) { REQUIRE_FEATURE(RBD_FEATURE_FAST_DIFF); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); uint64_t snap_id = ictx->snap_info.rbegin()->first; expect_load_map(ictx, snap_id, 0); expect_remove_snapshot(ictx, -EINVAL); expect_invalidate(ictx); expect_remove_map(ictx, snap_id, 0); ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; C_SaferCond cond_ctx; AsyncRequest<> *request = new SnapshotRemoveRequest( *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx); { std::shared_lock owner_locker{ictx->owner_lock}; std::unique_lock image_locker{ictx->image_lock}; request->send(); } ASSERT_EQ(0, cond_ctx.wait()); expect_unlock_exclusive_lock(*ictx); } TEST_F(TestMockObjectMapSnapshotRemoveRequest, RemoveMapMissing) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); uint64_t snap_id = ictx->snap_info.rbegin()->first; if (ictx->test_features(RBD_FEATURE_FAST_DIFF)) { expect_load_map(ictx, snap_id, 0); expect_remove_snapshot(ictx, 0); } expect_remove_map(ictx, snap_id, -ENOENT); ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; C_SaferCond cond_ctx; AsyncRequest<> *request = new SnapshotRemoveRequest( *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx); { std::shared_lock owner_locker{ictx->owner_lock}; std::unique_lock image_locker{ictx->image_lock}; request->send(); } ASSERT_EQ(0, cond_ctx.wait()); expect_unlock_exclusive_lock(*ictx); } TEST_F(TestMockObjectMapSnapshotRemoveRequest, RemoveMapError) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, 
open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); uint64_t snap_id = ictx->snap_info.rbegin()->first; if (ictx->test_features(RBD_FEATURE_FAST_DIFF)) { expect_load_map(ictx, snap_id, 0); expect_remove_snapshot(ictx, 0); } expect_remove_map(ictx, snap_id, -EINVAL); ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; C_SaferCond cond_ctx; AsyncRequest<> *request = new SnapshotRemoveRequest( *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx); { std::shared_lock owner_locker{ictx->owner_lock}; std::unique_lock image_locker{ictx->image_lock}; request->send(); } ASSERT_EQ(-EINVAL, cond_ctx.wait()); expect_unlock_exclusive_lock(*ictx); } TEST_F(TestMockObjectMapSnapshotRemoveRequest, ScrubCleanObjects) { REQUIRE_FEATURE(RBD_FEATURE_FAST_DIFF); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); librbd::NoOpProgressContext prog_ctx; uint64_t size = 4294967296; // 4GB = 1024 * 4MB ASSERT_EQ(0, resize(ictx, size)); // update image objectmap for snap inherit ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; object_map.resize(1024); for (uint64_t i = 512; i < object_map.size(); ++i) { object_map[i] = i % 2 == 0 ? OBJECT_EXISTS : OBJECT_NONEXISTENT; } C_SaferCond cond_ctx1; { librbd::ObjectMap<> *om = new librbd::ObjectMap<>(*ictx, ictx->snap_id); std::shared_lock owner_locker{ictx->owner_lock}; std::unique_lock image_locker{ictx->image_lock}; om->set_object_map(object_map); om->aio_save(&cond_ctx1); om->put(); } ASSERT_EQ(0, cond_ctx1.wait()); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); // simutate the image objectmap state after creating snap for (uint64_t i = 512; i < object_map.size(); ++i) { object_map[i] = i % 2 == 0 ? 
OBJECT_EXISTS_CLEAN : OBJECT_NONEXISTENT; } C_SaferCond cond_ctx2; uint64_t snap_id = ictx->snap_info.rbegin()->first; AsyncRequest<> *request = new SnapshotRemoveRequest( *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx2); { std::shared_lock owner_locker{ictx->owner_lock}; std::unique_lock image_locker{ictx->image_lock}; request->send(); } ASSERT_EQ(0, cond_ctx2.wait()); for (uint64_t i = 512; i < object_map.size(); ++i) { ASSERT_EQ(i % 2 == 0 ? OBJECT_EXISTS : OBJECT_NONEXISTENT, object_map[i]); } } } // namespace object_map } // namespace librbd
11,512
32.274566
80
cc
null
ceph-main/src/test/librbd/object_map/test_mock_SnapshotRollbackRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/librbd/test_mock_fixture.h" #include "test/librbd/test_support.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "librbd/ImageState.h" #include "librbd/internal.h" #include "librbd/ObjectMap.h" #include "librbd/object_map/SnapshotRollbackRequest.h" #include "gmock/gmock.h" #include "gtest/gtest.h" namespace librbd { namespace object_map { using ::testing::_; using ::testing::DoDefault; using ::testing::Return; using ::testing::StrEq; class TestMockObjectMapSnapshotRollbackRequest : public TestMockFixture { public: void expect_read_map(librbd::ImageCtx *ictx, uint64_t snap_id, int r) { if (r < 0) { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), read(ObjectMap<>::object_map_name(ictx->id, snap_id), 0, 0, _, _, _)).WillOnce(Return(r)); } else { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), read(ObjectMap<>::object_map_name(ictx->id, snap_id), 0, 0, _, _, _)).WillOnce(DoDefault()); } } void expect_write_map(librbd::ImageCtx *ictx, int r) { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), exec(ObjectMap<>::object_map_name(ictx->id, CEPH_NOSNAP), _, StrEq("lock"), StrEq("assert_locked"), _, _, _, _)) .WillOnce(DoDefault()); if (r < 0) { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), write_full( ObjectMap<>::object_map_name(ictx->id, CEPH_NOSNAP), _, _)) .WillOnce(Return(r)); } else { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), write_full( ObjectMap<>::object_map_name(ictx->id, CEPH_NOSNAP), _, _)) .WillOnce(DoDefault()); } } void expect_invalidate(librbd::ImageCtx *ictx, uint32_t times) { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), exec(ictx->header_oid, _, StrEq("rbd"), StrEq("set_flags"), _, _, _, _)) .Times(times) .WillRepeatedly(DoDefault()); } }; TEST_F(TestMockObjectMapSnapshotRollbackRequest, Success) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, 
"snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); uint64_t snap_id = ictx->snap_info.rbegin()->first; expect_read_map(ictx, snap_id, 0); expect_write_map(ictx, 0); C_SaferCond cond_ctx; AsyncRequest<> *request = new SnapshotRollbackRequest( *ictx, snap_id, &cond_ctx); request->send(); ASSERT_EQ(0, cond_ctx.wait()); expect_unlock_exclusive_lock(*ictx); } TEST_F(TestMockObjectMapSnapshotRollbackRequest, ReadMapError) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); uint64_t snap_id = ictx->snap_info.rbegin()->first; expect_read_map(ictx, snap_id, -ENOENT); expect_invalidate(ictx, 2); C_SaferCond cond_ctx; AsyncRequest<> *request = new SnapshotRollbackRequest( *ictx, snap_id, &cond_ctx); request->send(); ASSERT_EQ(0, cond_ctx.wait()); { std::shared_lock image_locker{ictx->image_lock}; uint64_t flags; ASSERT_EQ(0, ictx->get_flags(snap_id, &flags)); ASSERT_NE(0U, flags & RBD_FLAG_OBJECT_MAP_INVALID); } bool flags_set; ASSERT_EQ(0, ictx->test_flags(CEPH_NOSNAP, RBD_FLAG_OBJECT_MAP_INVALID, &flags_set)); ASSERT_TRUE(flags_set); expect_unlock_exclusive_lock(*ictx); } TEST_F(TestMockObjectMapSnapshotRollbackRequest, WriteMapError) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); uint64_t snap_id = ictx->snap_info.rbegin()->first; expect_read_map(ictx, snap_id, 0); expect_write_map(ictx, -EINVAL); expect_invalidate(ictx, 1); C_SaferCond cond_ctx; AsyncRequest<> *request = new SnapshotRollbackRequest( *ictx, snap_id, &cond_ctx); request->send(); ASSERT_EQ(0, cond_ctx.wait()); { std::shared_lock image_locker{ictx->image_lock}; uint64_t flags; ASSERT_EQ(0, ictx->get_flags(snap_id, &flags)); ASSERT_EQ(0U, flags & RBD_FLAG_OBJECT_MAP_INVALID); } bool 
flags_set; ASSERT_EQ(0, ictx->test_flags(CEPH_NOSNAP, RBD_FLAG_OBJECT_MAP_INVALID, &flags_set)); ASSERT_TRUE(flags_set); expect_unlock_exclusive_lock(*ictx); } } // namespace object_map } // namespace librbd
4,797
31.201342
79
cc
null
ceph-main/src/test/librbd/object_map/test_mock_UnlockRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/librbd/test_mock_fixture.h" #include "test/librbd/test_support.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "cls/lock/cls_lock_ops.h" #include "librbd/ObjectMap.h" #include "librbd/object_map/UnlockRequest.h" // template definitions #include "librbd/object_map/UnlockRequest.cc" namespace librbd { namespace object_map { using ::testing::_; using ::testing::InSequence; using ::testing::Return; using ::testing::StrEq; class TestMockObjectMapUnlockRequest : public TestMockFixture { public: typedef UnlockRequest<MockImageCtx> MockUnlockRequest; void expect_unlock(MockImageCtx &mock_image_ctx, int r) { std::string oid(ObjectMap<>::object_map_name(mock_image_ctx.id, CEPH_NOSNAP)); EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), exec(oid, _, StrEq("lock"), StrEq("unlock"), _, _, _, _)) .WillOnce(Return(r)); } }; TEST_F(TestMockObjectMapUnlockRequest, Success) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); C_SaferCond ctx; MockUnlockRequest *req = new MockUnlockRequest(mock_image_ctx, &ctx); InSequence seq; expect_unlock(mock_image_ctx, 0); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockObjectMapUnlockRequest, UnlockError) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); C_SaferCond ctx; MockUnlockRequest *req = new MockUnlockRequest(mock_image_ctx, &ctx); InSequence seq; expect_unlock(mock_image_ctx, -ENOENT); req->send(); ASSERT_EQ(0, ctx.wait()); } } // namespace object_map } // namespace librbd
1,879
25.857143
73
cc
null
ceph-main/src/test/librbd/object_map/test_mock_UpdateRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/librbd/test_mock_fixture.h" #include "test/librbd/test_support.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "common/bit_vector.hpp" #include "librbd/ImageState.h" #include "librbd/internal.h" #include "librbd/ObjectMap.h" #include "librbd/Operations.h" #include "librbd/api/Image.h" #include "librbd/object_map/UpdateRequest.h" #include "gmock/gmock.h" #include "gtest/gtest.h" namespace librbd { namespace object_map { using ::testing::_; using ::testing::DoDefault; using ::testing::InSequence; using ::testing::Return; using ::testing::StrEq; class TestMockObjectMapUpdateRequest : public TestMockFixture { public: void expect_update(librbd::ImageCtx *ictx, uint64_t snap_id, uint64_t start_object_no, uint64_t end_object_no, uint8_t new_state, const boost::optional<uint8_t>& current_state, int r) { bufferlist bl; encode(start_object_no, bl); encode(end_object_no, bl); encode(new_state, bl); encode(current_state, bl); std::string oid(ObjectMap<>::object_map_name(ictx->id, snap_id)); if (snap_id == CEPH_NOSNAP) { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), exec(oid, _, StrEq("lock"), StrEq("assert_locked"), _, _, _, _)) .WillOnce(DoDefault()); } if (r < 0) { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), exec(oid, _, StrEq("rbd"), StrEq("object_map_update"), ContentsEqual(bl), _, _, _)) .WillOnce(Return(r)); } else { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), exec(oid, _, StrEq("rbd"), StrEq("object_map_update"), ContentsEqual(bl), _, _, _)) .WillOnce(DoDefault()); } } void expect_invalidate(librbd::ImageCtx *ictx) { EXPECT_CALL(get_mock_io_ctx(ictx->md_ctx), exec(ictx->header_oid, _, StrEq("rbd"), StrEq("set_flags"), _, _, _, _)) .WillOnce(DoDefault()); } }; TEST_F(TestMockObjectMapUpdateRequest, UpdateInMemory) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); 
librbd::NoOpProgressContext no_progress; ASSERT_EQ(0, ictx->operations->resize(4 << ictx->order, true, no_progress)); ASSERT_EQ(0, acquire_exclusive_lock(*ictx)); ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; object_map.resize(4); for (uint64_t i = 0; i < object_map.size(); ++i) { object_map[i] = i % 4; } C_SaferCond cond_ctx; AsyncRequest<> *req = new UpdateRequest<>( *ictx, &object_map_lock, &object_map, CEPH_NOSNAP, 0, object_map.size(), OBJECT_NONEXISTENT, OBJECT_EXISTS, {}, false, &cond_ctx); { std::shared_lock image_locker{ictx->image_lock}; std::unique_lock object_map_locker{object_map_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); for (uint64_t i = 0; i < object_map.size(); ++i) { if (i % 4 == OBJECT_EXISTS || i % 4 == OBJECT_EXISTS_CLEAN) { ASSERT_EQ(OBJECT_NONEXISTENT, object_map[i]); } else { ASSERT_EQ(i % 4, object_map[i]); } } } TEST_F(TestMockObjectMapUpdateRequest, UpdateHeadOnDisk) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, acquire_exclusive_lock(*ictx)); expect_update(ictx, CEPH_NOSNAP, 0, 1, OBJECT_NONEXISTENT, OBJECT_EXISTS, 0); ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; object_map.resize(1); C_SaferCond cond_ctx; AsyncRequest<> *req = new UpdateRequest<>( *ictx, &object_map_lock, &object_map, CEPH_NOSNAP, 0, object_map.size(), OBJECT_NONEXISTENT, OBJECT_EXISTS, {}, false, &cond_ctx); { std::shared_lock image_locker{ictx->image_lock}; std::unique_lock object_map_locker{object_map_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); expect_unlock_exclusive_lock(*ictx); } TEST_F(TestMockObjectMapUpdateRequest, UpdateSnapOnDisk) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, librbd::api::Image<>::snap_set(ictx, 
cls::rbd::UserSnapshotNamespace(), "snap1")); uint64_t snap_id = ictx->snap_id; expect_update(ictx, snap_id, 0, 1, OBJECT_NONEXISTENT, OBJECT_EXISTS, 0); ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; object_map.resize(1); C_SaferCond cond_ctx; AsyncRequest<> *req = new UpdateRequest<>( *ictx, &object_map_lock, &object_map, snap_id, 0, object_map.size(), OBJECT_NONEXISTENT, OBJECT_EXISTS, {}, false, &cond_ctx); { std::shared_lock image_locker{ictx->image_lock}; std::unique_lock object_map_locker{object_map_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); expect_unlock_exclusive_lock(*ictx); } TEST_F(TestMockObjectMapUpdateRequest, UpdateOnDiskError) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, acquire_exclusive_lock(*ictx)); expect_update(ictx, CEPH_NOSNAP, 0, 1, OBJECT_NONEXISTENT, OBJECT_EXISTS, -EINVAL); expect_invalidate(ictx); ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; object_map.resize(1); C_SaferCond cond_ctx; AsyncRequest<> *req = new UpdateRequest<>( *ictx, &object_map_lock, &object_map, CEPH_NOSNAP, 0, object_map.size(), OBJECT_NONEXISTENT, OBJECT_EXISTS, {}, false, &cond_ctx); { std::shared_lock image_locker{ictx->image_lock}; std::unique_lock object_map_locker{object_map_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); expect_unlock_exclusive_lock(*ictx); } TEST_F(TestMockObjectMapUpdateRequest, RebuildSnapOnDisk) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); ASSERT_EQ(CEPH_NOSNAP, ictx->snap_id); uint64_t snap_id = ictx->snap_info.rbegin()->first; expect_update(ictx, snap_id, 0, 1, OBJECT_EXISTS_CLEAN, boost::optional<uint8_t>(), 0); expect_unlock_exclusive_lock(*ictx); ceph::shared_mutex 
object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; object_map.resize(1); C_SaferCond cond_ctx; AsyncRequest<> *req = new UpdateRequest<>( *ictx, &object_map_lock, &object_map, snap_id, 0, object_map.size(), OBJECT_EXISTS_CLEAN, boost::optional<uint8_t>(), {}, false, &cond_ctx); { std::shared_lock image_locker{ictx->image_lock}; std::unique_lock object_map_locker{object_map_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); // do not update the in-memory map if rebuilding a snapshot ASSERT_NE(OBJECT_EXISTS_CLEAN, object_map[0]); } TEST_F(TestMockObjectMapUpdateRequest, BatchUpdate) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); librbd::NoOpProgressContext no_progress; ASSERT_EQ(0, ictx->operations->resize(712312 * ictx->get_object_size(), false, no_progress)); ASSERT_EQ(0, acquire_exclusive_lock(*ictx)); expect_unlock_exclusive_lock(*ictx); InSequence seq; expect_update(ictx, CEPH_NOSNAP, 0, 262144, OBJECT_NONEXISTENT, OBJECT_EXISTS, 0); expect_update(ictx, CEPH_NOSNAP, 262144, 524288, OBJECT_NONEXISTENT, OBJECT_EXISTS, 0); expect_update(ictx, CEPH_NOSNAP, 524288, 712312, OBJECT_NONEXISTENT, OBJECT_EXISTS, 0); ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; object_map.resize(712312); C_SaferCond cond_ctx; AsyncRequest<> *req = new UpdateRequest<>( *ictx, &object_map_lock, &object_map, CEPH_NOSNAP, 0, object_map.size(), OBJECT_NONEXISTENT, OBJECT_EXISTS, {}, false, &cond_ctx); { std::shared_lock image_locker{ictx->image_lock}; std::unique_lock object_map_locker{object_map_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); } TEST_F(TestMockObjectMapUpdateRequest, IgnoreMissingObjectMap) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, acquire_exclusive_lock(*ictx)); expect_update(ictx, CEPH_NOSNAP, 0, 1, OBJECT_NONEXISTENT, OBJECT_EXISTS, 
-ENOENT); ceph::shared_mutex object_map_lock = ceph::make_shared_mutex("lock"); ceph::BitVector<2> object_map; object_map.resize(1); C_SaferCond cond_ctx; AsyncRequest<> *req = new UpdateRequest<>( *ictx, &object_map_lock, &object_map, CEPH_NOSNAP, 0, object_map.size(), OBJECT_NONEXISTENT, OBJECT_EXISTS, {}, true, &cond_ctx); { std::shared_lock image_locker{ictx->image_lock}; std::unique_lock object_map_locker{object_map_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); expect_unlock_exclusive_lock(*ictx); } } // namespace object_map } // namespace librbd
9,522
31.613014
80
cc
null
ceph-main/src/test/librbd/object_map/mock/MockInvalidateRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "librbd/object_map/InvalidateRequest.h" // template definitions #include "librbd/object_map/InvalidateRequest.cc" namespace librbd { namespace object_map { template <typename I> struct MockInvalidateRequestBase { static std::list<InvalidateRequest<I>*> s_requests; uint64_t snap_id = 0; bool force = false; Context *on_finish = nullptr; static InvalidateRequest<I>* create(I &image_ctx, uint64_t snap_id, bool force, Context *on_finish) { ceph_assert(!s_requests.empty()); InvalidateRequest<I>* req = s_requests.front(); req->snap_id = snap_id; req->force = force; req->on_finish = on_finish; s_requests.pop_front(); return req; } MockInvalidateRequestBase() { s_requests.push_back(static_cast<InvalidateRequest<I>*>(this)); } MOCK_METHOD0(send, void()); }; template <typename I> std::list<InvalidateRequest<I>*> MockInvalidateRequestBase<I>::s_requests; } // namespace object_map } // namespace librbd
1,107
25.380952
74
h
null
ceph-main/src/test/librbd/operation/test_mock_DisableFeaturesRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/librbd/test_mock_fixture.h" #include "test/librbd/test_support.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/librbd/mock/MockJournalPolicy.h" #include "cls/rbd/cls_rbd_client.h" #include "librbd/internal.h" #include "librbd/Journal.h" #include "librbd/image/SetFlagsRequest.h" #include "librbd/io/AioCompletion.h" #include "librbd/mirror/DisableRequest.h" #include "librbd/journal/RemoveRequest.h" #include "librbd/journal/StandardPolicy.h" #include "librbd/journal/Types.h" #include "librbd/journal/TypeTraits.h" #include "librbd/object_map/RemoveRequest.h" #include "librbd/operation/DisableFeaturesRequest.h" #include "gmock/gmock.h" #include "gtest/gtest.h" namespace librbd { namespace { struct MockOperationImageCtx : public MockImageCtx { MockOperationImageCtx(librbd::ImageCtx& image_ctx) : MockImageCtx(image_ctx) { } }; } // anonymous namespace template<> struct Journal<MockOperationImageCtx> { static void get_work_queue(CephContext*, MockContextWQ**) { } }; namespace image { template<> class SetFlagsRequest<MockOperationImageCtx> { public: static SetFlagsRequest *s_instance; Context *on_finish = nullptr; static SetFlagsRequest *create(MockOperationImageCtx *image_ctx, uint64_t flags, uint64_t mask, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->on_finish = on_finish; return s_instance; } SetFlagsRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; SetFlagsRequest<MockOperationImageCtx> *SetFlagsRequest<MockOperationImageCtx>::s_instance; } // namespace image namespace journal { template<> class RemoveRequest<MockOperationImageCtx> { public: static RemoveRequest *s_instance; Context *on_finish = nullptr; static RemoveRequest *create(IoCtx &ioctx, const std::string &imageid, const std::string &client_id, MockContextWQ *op_work_queue, Context *on_finish) { ceph_assert(s_instance != nullptr); 
s_instance->on_finish = on_finish; return s_instance; } RemoveRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; RemoveRequest<MockOperationImageCtx> *RemoveRequest<MockOperationImageCtx>::s_instance; template<> class StandardPolicy<MockOperationImageCtx> : public MockJournalPolicy { public: StandardPolicy(MockOperationImageCtx* image_ctx) { } }; template <> struct TypeTraits<MockOperationImageCtx> { typedef librbd::MockContextWQ ContextWQ; }; } // namespace journal namespace mirror { template<> class DisableRequest<MockOperationImageCtx> { public: static DisableRequest *s_instance; Context *on_finish = nullptr; static DisableRequest *create(MockOperationImageCtx *image_ctx, bool force, bool remove, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->on_finish = on_finish; return s_instance; } DisableRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; DisableRequest<MockOperationImageCtx> *DisableRequest<MockOperationImageCtx>::s_instance; } // namespace mirror namespace object_map { template<> class RemoveRequest<MockOperationImageCtx> { public: static RemoveRequest *s_instance; Context *on_finish = nullptr; static RemoveRequest *create(MockOperationImageCtx *image_ctx, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->on_finish = on_finish; return s_instance; } RemoveRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; RemoveRequest<MockOperationImageCtx> *RemoveRequest<MockOperationImageCtx>::s_instance; } // namespace object_map template <> struct AsyncRequest<MockOperationImageCtx> : public AsyncRequest<MockImageCtx> { MockOperationImageCtx &m_image_ctx; AsyncRequest(MockOperationImageCtx &image_ctx, Context *on_finish) : AsyncRequest<MockImageCtx>(image_ctx, on_finish), m_image_ctx(image_ctx) { } }; } // namespace librbd // template definitions #include "librbd/AsyncRequest.cc" #include "librbd/AsyncObjectThrottle.cc" #include "librbd/operation/Request.cc" #include 
"librbd/operation/DisableFeaturesRequest.cc" namespace librbd { namespace operation { using ::testing::Invoke; using ::testing::Return; using ::testing::WithArg; using ::testing::_; class TestMockOperationDisableFeaturesRequest : public TestMockFixture { public: typedef librbd::image::SetFlagsRequest<MockOperationImageCtx> MockSetFlagsRequest; typedef librbd::journal::RemoveRequest<MockOperationImageCtx> MockRemoveJournalRequest; typedef librbd::mirror::DisableRequest<MockOperationImageCtx> MockDisableMirrorRequest; typedef librbd::object_map::RemoveRequest<MockOperationImageCtx> MockRemoveObjectMapRequest; typedef DisableFeaturesRequest<MockOperationImageCtx> MockDisableFeaturesRequest; class MirrorModeEnabler { public: MirrorModeEnabler(librados::IoCtx &ioctx, cls::rbd::MirrorMode mirror_mode) : m_ioctx(ioctx), m_mirror_mode(mirror_mode) { EXPECT_EQ(0, librbd::cls_client::mirror_uuid_set(&m_ioctx, "test-uuid")); EXPECT_EQ(0, librbd::cls_client::mirror_mode_set(&m_ioctx, m_mirror_mode)); } ~MirrorModeEnabler() { EXPECT_EQ(0, librbd::cls_client::mirror_mode_set( &m_ioctx, cls::rbd::MIRROR_MODE_DISABLED)); } private: librados::IoCtx &m_ioctx; cls::rbd::MirrorMode m_mirror_mode; }; void expect_prepare_lock(MockOperationImageCtx &mock_image_ctx) { EXPECT_CALL(*mock_image_ctx.state, prepare_lock(_)) .WillOnce(Invoke([](Context *on_ready) { on_ready->complete(0); })); expect_op_work_queue(mock_image_ctx); } void expect_handle_prepare_lock_complete(MockOperationImageCtx &mock_image_ctx) { EXPECT_CALL(*mock_image_ctx.state, handle_prepare_lock_complete()); } void expect_block_writes(MockOperationImageCtx &mock_image_ctx) { EXPECT_CALL(*mock_image_ctx.io_image_dispatcher, block_writes(_)) .WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue)); } void expect_unblock_writes(MockOperationImageCtx &mock_image_ctx) { EXPECT_CALL(*mock_image_ctx.io_image_dispatcher, unblock_writes()).Times(1); } void expect_verify_lock_ownership(MockOperationImageCtx 
&mock_image_ctx) { if (mock_image_ctx.exclusive_lock != nullptr) { EXPECT_CALL(*mock_image_ctx.exclusive_lock, is_lock_owner()) .WillRepeatedly(Return(true)); } } void expect_block_requests(MockOperationImageCtx &mock_image_ctx) { if (mock_image_ctx.exclusive_lock != nullptr) { EXPECT_CALL(*mock_image_ctx.exclusive_lock, block_requests(0)).Times(1); } } void expect_unblock_requests(MockOperationImageCtx &mock_image_ctx) { if (mock_image_ctx.exclusive_lock != nullptr) { EXPECT_CALL(*mock_image_ctx.exclusive_lock, unblock_requests()).Times(1); } } void expect_set_flags_request_send( MockOperationImageCtx &mock_image_ctx, MockSetFlagsRequest &mock_set_flags_request, int r) { EXPECT_CALL(mock_set_flags_request, send()) .WillOnce(FinishRequest(&mock_set_flags_request, r, &mock_image_ctx)); } void expect_disable_mirror_request_send( MockOperationImageCtx &mock_image_ctx, MockDisableMirrorRequest &mock_disable_mirror_request, int r) { EXPECT_CALL(mock_disable_mirror_request, send()) .WillOnce(FinishRequest(&mock_disable_mirror_request, r, &mock_image_ctx)); } void expect_close_journal(MockOperationImageCtx &mock_image_ctx, int r) { EXPECT_CALL(*mock_image_ctx.journal, close(_)) .WillOnce(Invoke([&mock_image_ctx, r](Context *on_finish) { mock_image_ctx.journal = nullptr; mock_image_ctx.image_ctx->op_work_queue->queue(on_finish, r); })); } void expect_remove_journal_request_send( MockOperationImageCtx &mock_image_ctx, MockRemoveJournalRequest &mock_remove_journal_request, int r) { EXPECT_CALL(mock_remove_journal_request, send()) .WillOnce(FinishRequest(&mock_remove_journal_request, r, &mock_image_ctx)); } void expect_remove_object_map_request_send( MockOperationImageCtx &mock_image_ctx, MockRemoveObjectMapRequest &mock_remove_object_map_request, int r) { EXPECT_CALL(mock_remove_object_map_request, send()) .WillOnce(FinishRequest(&mock_remove_object_map_request, r, &mock_image_ctx)); } void expect_notify_update(MockOperationImageCtx &mock_image_ctx) { 
EXPECT_CALL(mock_image_ctx, notify_update(_)) .WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue)); } }; TEST_F(TestMockOperationDisableFeaturesRequest, All) { REQUIRE_FORMAT_V2(); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); uint64_t features; ASSERT_EQ(0, librbd::get_features(ictx, &features)); uint64_t features_to_disable = RBD_FEATURES_MUTABLE & features; REQUIRE(features_to_disable); MockOperationImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); expect_verify_lock_ownership(mock_image_ctx); MockSetFlagsRequest mock_set_flags_request; MockRemoveJournalRequest mock_remove_journal_request; MockDisableMirrorRequest mock_disable_mirror_request; MockRemoveObjectMapRequest mock_remove_object_map_request; ::testing::InSequence seq; expect_prepare_lock(mock_image_ctx); expect_block_writes(mock_image_ctx); if (mock_image_ctx.journal != nullptr) { expect_is_journal_replaying(*mock_image_ctx.journal); } expect_block_requests(mock_image_ctx); if (features_to_disable & RBD_FEATURE_JOURNALING) { expect_disable_mirror_request_send(mock_image_ctx, mock_disable_mirror_request, 0); expect_close_journal(mock_image_ctx, 0); expect_remove_journal_request_send(mock_image_ctx, mock_remove_journal_request, 0); } if (features_to_disable & RBD_FEATURE_OBJECT_MAP) { expect_remove_object_map_request_send(mock_image_ctx, mock_remove_object_map_request, 0); } if (features_to_disable & (RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF)) { expect_set_flags_request_send(mock_image_ctx, mock_set_flags_request, 0); } expect_notify_update(mock_image_ctx); expect_unblock_requests(mock_image_ctx); expect_unblock_writes(mock_image_ctx); expect_handle_prepare_lock_complete(mock_image_ctx); C_SaferCond cond_ctx; MockDisableFeaturesRequest *req = new MockDisableFeaturesRequest( 
mock_image_ctx, &cond_ctx, 0, features_to_disable, false); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); } TEST_F(TestMockOperationDisableFeaturesRequest, ObjectMap) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockOperationImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); expect_verify_lock_ownership(mock_image_ctx); MockSetFlagsRequest mock_set_flags_request; MockRemoveObjectMapRequest mock_remove_object_map_request; ::testing::InSequence seq; expect_prepare_lock(mock_image_ctx); expect_block_writes(mock_image_ctx); if (mock_image_ctx.journal != nullptr) { expect_is_journal_replaying(*mock_image_ctx.journal); } expect_block_requests(mock_image_ctx); expect_append_op_event(mock_image_ctx, true, 0); expect_remove_object_map_request_send(mock_image_ctx, mock_remove_object_map_request, 0); expect_set_flags_request_send(mock_image_ctx, mock_set_flags_request, 0); expect_notify_update(mock_image_ctx); expect_unblock_requests(mock_image_ctx); expect_unblock_writes(mock_image_ctx); expect_handle_prepare_lock_complete(mock_image_ctx); expect_commit_op_event(mock_image_ctx, 0); C_SaferCond cond_ctx; MockDisableFeaturesRequest *req = new MockDisableFeaturesRequest( mock_image_ctx, &cond_ctx, 0, RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF, false); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); } TEST_F(TestMockOperationDisableFeaturesRequest, ObjectMapError) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockOperationImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; 
initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); expect_verify_lock_ownership(mock_image_ctx); MockSetFlagsRequest mock_set_flags_request; MockRemoveObjectMapRequest mock_remove_object_map_request; ::testing::InSequence seq; expect_prepare_lock(mock_image_ctx); expect_block_writes(mock_image_ctx); if (mock_image_ctx.journal != nullptr) { expect_is_journal_replaying(*mock_image_ctx.journal); } expect_block_requests(mock_image_ctx); expect_append_op_event(mock_image_ctx, true, 0); expect_remove_object_map_request_send(mock_image_ctx, mock_remove_object_map_request, -EINVAL); expect_unblock_requests(mock_image_ctx); expect_unblock_writes(mock_image_ctx); expect_handle_prepare_lock_complete(mock_image_ctx); expect_commit_op_event(mock_image_ctx, -EINVAL); C_SaferCond cond_ctx; MockDisableFeaturesRequest *req = new MockDisableFeaturesRequest( mock_image_ctx, &cond_ctx, 0, RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF, false); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-EINVAL, cond_ctx.wait()); } TEST_F(TestMockOperationDisableFeaturesRequest, Mirroring) { REQUIRE_FEATURE(RBD_FEATURE_JOURNALING); MirrorModeEnabler mirror_mode_enabler(m_ioctx, cls::rbd::MIRROR_MODE_POOL); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockOperationImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); expect_verify_lock_ownership(mock_image_ctx); MockRemoveJournalRequest mock_remove_journal_request; MockDisableMirrorRequest mock_disable_mirror_request; ::testing::InSequence seq; expect_prepare_lock(mock_image_ctx); expect_block_writes(mock_image_ctx); expect_is_journal_replaying(*mock_image_ctx.journal); expect_block_requests(mock_image_ctx); expect_disable_mirror_request_send(mock_image_ctx, 
mock_disable_mirror_request, 0); expect_close_journal(mock_image_ctx, 0); expect_remove_journal_request_send(mock_image_ctx, mock_remove_journal_request, 0); expect_notify_update(mock_image_ctx); expect_unblock_requests(mock_image_ctx); expect_unblock_writes(mock_image_ctx); expect_handle_prepare_lock_complete(mock_image_ctx); C_SaferCond cond_ctx; MockDisableFeaturesRequest *req = new MockDisableFeaturesRequest( mock_image_ctx, &cond_ctx, 0, RBD_FEATURE_JOURNALING, false); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); } TEST_F(TestMockOperationDisableFeaturesRequest, MirroringError) { REQUIRE_FEATURE(RBD_FEATURE_JOURNALING); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockOperationImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); expect_verify_lock_ownership(mock_image_ctx); MockRemoveJournalRequest mock_remove_journal_request; MockDisableMirrorRequest mock_disable_mirror_request; ::testing::InSequence seq; expect_prepare_lock(mock_image_ctx); expect_block_writes(mock_image_ctx); expect_is_journal_replaying(*mock_image_ctx.journal); expect_block_requests(mock_image_ctx); expect_disable_mirror_request_send(mock_image_ctx, mock_disable_mirror_request, -EINVAL); expect_close_journal(mock_image_ctx, 0); expect_remove_journal_request_send(mock_image_ctx, mock_remove_journal_request, 0); expect_notify_update(mock_image_ctx); expect_unblock_requests(mock_image_ctx); expect_unblock_writes(mock_image_ctx); expect_handle_prepare_lock_complete(mock_image_ctx); C_SaferCond cond_ctx; MockDisableFeaturesRequest *req = new MockDisableFeaturesRequest( mock_image_ctx, &cond_ctx, 0, RBD_FEATURE_JOURNALING, false); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); } } // 
namespace operation } // namespace librbd
17,530
31.525046
94
cc
null
ceph-main/src/test/librbd/operation/test_mock_EnableFeaturesRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/librbd/test_mock_fixture.h" #include "test/librbd/test_support.h" #include "test/librbd/mock/MockImageCtx.h" #include "cls/rbd/cls_rbd_client.h" #include "librbd/Operations.h" #include "librbd/internal.h" #include "librbd/Journal.h" #include "librbd/image/SetFlagsRequest.h" #include "librbd/io/AioCompletion.h" #include "librbd/mirror/EnableRequest.h" #include "librbd/journal/CreateRequest.h" #include "librbd/journal/Types.h" #include "librbd/journal/TypeTraits.h" #include "librbd/object_map/CreateRequest.h" #include "librbd/operation/EnableFeaturesRequest.h" #include "gmock/gmock.h" #include "gtest/gtest.h" namespace librbd { namespace { struct MockOperationImageCtx : public MockImageCtx { MockOperationImageCtx(librbd::ImageCtx& image_ctx) : MockImageCtx(image_ctx) { } }; } // anonymous namespace template<> struct Journal<MockOperationImageCtx> { static void get_work_queue(CephContext*, MockContextWQ**) { } }; namespace image { template<> class SetFlagsRequest<MockOperationImageCtx> { public: static SetFlagsRequest *s_instance; Context *on_finish = nullptr; static SetFlagsRequest *create(MockOperationImageCtx *image_ctx, uint64_t flags, uint64_t mask, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->on_finish = on_finish; return s_instance; } SetFlagsRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; SetFlagsRequest<MockOperationImageCtx> *SetFlagsRequest<MockOperationImageCtx>::s_instance; } // namespace image namespace journal { template<> class CreateRequest<MockOperationImageCtx> { public: static CreateRequest *s_instance; Context *on_finish = nullptr; static CreateRequest *create(IoCtx &ioctx, const std::string &imageid, uint8_t order, uint8_t splay_width, const std::string &object_pool, uint64_t tag_class, TagData &tag_data, const std::string &client_id, MockContextWQ *op_work_queue, Context *on_finish) { 
ceph_assert(s_instance != nullptr); s_instance->on_finish = on_finish; return s_instance; } CreateRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; CreateRequest<MockOperationImageCtx> *CreateRequest<MockOperationImageCtx>::s_instance = nullptr; template <> struct TypeTraits<MockOperationImageCtx> { typedef librbd::MockContextWQ ContextWQ; }; } // namespace journal namespace mirror { template<> class EnableRequest<MockOperationImageCtx> { public: static EnableRequest *s_instance; Context *on_finish = nullptr; static EnableRequest *create(MockOperationImageCtx *image_ctx, cls::rbd::MirrorImageMode mirror_image_mode, const std::string& non_primary_global_image_id, bool image_clean, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->on_finish = on_finish; return s_instance; } EnableRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; EnableRequest<MockOperationImageCtx> *EnableRequest<MockOperationImageCtx>::s_instance = nullptr; } // namespace mirror namespace object_map { template<> class CreateRequest<MockOperationImageCtx> { public: static CreateRequest *s_instance; Context *on_finish = nullptr; static CreateRequest *create(MockOperationImageCtx *image_ctx, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->on_finish = on_finish; return s_instance; } CreateRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; CreateRequest<MockOperationImageCtx> *CreateRequest<MockOperationImageCtx>::s_instance = nullptr; } // namespace object_map template <> struct AsyncRequest<MockOperationImageCtx> : public AsyncRequest<MockImageCtx> { MockOperationImageCtx &m_image_ctx; AsyncRequest(MockOperationImageCtx &image_ctx, Context *on_finish) : AsyncRequest<MockImageCtx>(image_ctx, on_finish), m_image_ctx(image_ctx) { } }; } // namespace librbd // template definitions #include "librbd/AsyncRequest.cc" #include "librbd/AsyncObjectThrottle.cc" #include "librbd/operation/Request.cc" #include 
"librbd/operation/EnableFeaturesRequest.cc" namespace librbd { namespace operation { using ::testing::Invoke; using ::testing::Return; using ::testing::WithArg; using ::testing::_; class TestMockOperationEnableFeaturesRequest : public TestMockFixture { public: typedef librbd::image::SetFlagsRequest<MockOperationImageCtx> MockSetFlagsRequest; typedef librbd::journal::CreateRequest<MockOperationImageCtx> MockCreateJournalRequest; typedef librbd::mirror::EnableRequest<MockOperationImageCtx> MockEnableMirrorRequest; typedef librbd::object_map::CreateRequest<MockOperationImageCtx> MockCreateObjectMapRequest; typedef EnableFeaturesRequest<MockOperationImageCtx> MockEnableFeaturesRequest; class MirrorModeEnabler { public: MirrorModeEnabler(librados::IoCtx &ioctx, cls::rbd::MirrorMode mirror_mode) : m_ioctx(ioctx), m_mirror_mode(mirror_mode) { EXPECT_EQ(0, librbd::cls_client::mirror_uuid_set(&m_ioctx, "test-uuid")); EXPECT_EQ(0, librbd::cls_client::mirror_mode_set(&m_ioctx, m_mirror_mode)); } ~MirrorModeEnabler() { EXPECT_EQ(0, librbd::cls_client::mirror_mode_set( &m_ioctx, cls::rbd::MIRROR_MODE_DISABLED)); } private: librados::IoCtx &m_ioctx; cls::rbd::MirrorMode m_mirror_mode; }; void ensure_features_disabled(librbd::ImageCtx *ictx, uint64_t features_to_disable) { uint64_t features; ASSERT_EQ(0, librbd::get_features(ictx, &features)); features_to_disable &= features; if (!features_to_disable) { return; } ASSERT_EQ(0, ictx->operations->update_features(features_to_disable, false)); ASSERT_EQ(0, librbd::get_features(ictx, &features)); ASSERT_EQ(0U, features & features_to_disable); } void expect_prepare_lock(MockOperationImageCtx &mock_image_ctx) { EXPECT_CALL(*mock_image_ctx.state, prepare_lock(_)) .WillOnce(Invoke([](Context *on_ready) { on_ready->complete(0); })); expect_op_work_queue(mock_image_ctx); } void expect_handle_prepare_lock_complete(MockOperationImageCtx &mock_image_ctx) { EXPECT_CALL(*mock_image_ctx.state, handle_prepare_lock_complete()); } void 
expect_block_writes(MockOperationImageCtx &mock_image_ctx) { EXPECT_CALL(*mock_image_ctx.io_image_dispatcher, block_writes(_)) .WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue)); } void expect_unblock_writes(MockOperationImageCtx &mock_image_ctx) { EXPECT_CALL(*mock_image_ctx.io_image_dispatcher, unblock_writes()).Times(1); } void expect_verify_lock_ownership(MockOperationImageCtx &mock_image_ctx) { if (mock_image_ctx.exclusive_lock != nullptr) { EXPECT_CALL(*mock_image_ctx.exclusive_lock, is_lock_owner()) .WillRepeatedly(Return(true)); } } void expect_block_requests(MockOperationImageCtx &mock_image_ctx) { if (mock_image_ctx.exclusive_lock != nullptr) { EXPECT_CALL(*mock_image_ctx.exclusive_lock, block_requests(0)).Times(1); } } void expect_unblock_requests(MockOperationImageCtx &mock_image_ctx) { if (mock_image_ctx.exclusive_lock != nullptr) { EXPECT_CALL(*mock_image_ctx.exclusive_lock, unblock_requests()).Times(1); } } void expect_set_flags_request_send( MockOperationImageCtx &mock_image_ctx, MockSetFlagsRequest &mock_set_flags_request, int r) { EXPECT_CALL(mock_set_flags_request, send()) .WillOnce(FinishRequest(&mock_set_flags_request, r, &mock_image_ctx)); } void expect_create_journal_request_send( MockOperationImageCtx &mock_image_ctx, MockCreateJournalRequest &mock_create_journal_request, int r) { EXPECT_CALL(mock_create_journal_request, send()) .WillOnce(FinishRequest(&mock_create_journal_request, r, &mock_image_ctx)); } void expect_enable_mirror_request_send( MockOperationImageCtx &mock_image_ctx, MockEnableMirrorRequest &mock_enable_mirror_request, int r) { EXPECT_CALL(mock_enable_mirror_request, send()) .WillOnce(FinishRequest(&mock_enable_mirror_request, r, &mock_image_ctx)); } void expect_create_object_map_request_send( MockOperationImageCtx &mock_image_ctx, MockCreateObjectMapRequest &mock_create_object_map_request, int r) { EXPECT_CALL(mock_create_object_map_request, send()) .WillOnce(FinishRequest(&mock_create_object_map_request, 
r, &mock_image_ctx)); } void expect_notify_update(MockOperationImageCtx &mock_image_ctx) { EXPECT_CALL(mock_image_ctx, notify_update(_)) .WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue)); } }; TEST_F(TestMockOperationEnableFeaturesRequest, All) { REQUIRE_FORMAT_V2(); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); uint64_t features; ASSERT_EQ(0, librbd::get_features(ictx, &features)); uint64_t features_to_enable = RBD_FEATURES_MUTABLE & features; REQUIRE(features_to_enable); ensure_features_disabled(ictx, features_to_enable); MockOperationImageCtx mock_image_ctx(*ictx); MockSetFlagsRequest mock_set_flags_request; MockCreateJournalRequest mock_create_journal_request; MockCreateObjectMapRequest mock_create_object_map_request; ::testing::InSequence seq; expect_prepare_lock(mock_image_ctx); expect_block_writes(mock_image_ctx); expect_block_requests(mock_image_ctx); if (features_to_enable & RBD_FEATURE_JOURNALING) { expect_create_journal_request_send(mock_image_ctx, mock_create_journal_request, 0); } if (features_to_enable & (RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF)) { expect_set_flags_request_send(mock_image_ctx, mock_set_flags_request, 0); } if (features_to_enable & RBD_FEATURE_OBJECT_MAP) { expect_create_object_map_request_send(mock_image_ctx, mock_create_object_map_request, 0); } expect_notify_update(mock_image_ctx); expect_unblock_requests(mock_image_ctx); expect_unblock_writes(mock_image_ctx); expect_handle_prepare_lock_complete(mock_image_ctx); C_SaferCond cond_ctx; MockEnableFeaturesRequest *req = new MockEnableFeaturesRequest( mock_image_ctx, &cond_ctx, 0, features_to_enable); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); } TEST_F(TestMockOperationEnableFeaturesRequest, ObjectMap) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); uint64_t features; ASSERT_EQ(0, librbd::get_features(ictx, 
&features)); ensure_features_disabled( ictx, RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF); MockOperationImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); expect_verify_lock_ownership(mock_image_ctx); MockSetFlagsRequest mock_set_flags_request; MockCreateObjectMapRequest mock_create_object_map_request; ::testing::InSequence seq; expect_prepare_lock(mock_image_ctx); expect_block_writes(mock_image_ctx); if (mock_image_ctx.journal != nullptr) { expect_is_journal_replaying(*mock_image_ctx.journal); } expect_block_requests(mock_image_ctx); expect_append_op_event(mock_image_ctx, true, 0); expect_set_flags_request_send(mock_image_ctx, mock_set_flags_request, 0); expect_create_object_map_request_send(mock_image_ctx, mock_create_object_map_request, 0); expect_notify_update(mock_image_ctx); expect_unblock_requests(mock_image_ctx); expect_unblock_writes(mock_image_ctx); expect_handle_prepare_lock_complete(mock_image_ctx); expect_commit_op_event(mock_image_ctx, 0); C_SaferCond cond_ctx; MockEnableFeaturesRequest *req = new MockEnableFeaturesRequest( mock_image_ctx, &cond_ctx, 0, RBD_FEATURE_OBJECT_MAP); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); } TEST_F(TestMockOperationEnableFeaturesRequest, ObjectMapError) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); uint64_t features; ASSERT_EQ(0, librbd::get_features(ictx, &features)); ensure_features_disabled( ictx, RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF); MockOperationImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); 
expect_verify_lock_ownership(mock_image_ctx); MockSetFlagsRequest mock_set_flags_request; MockCreateObjectMapRequest mock_create_object_map_request; ::testing::InSequence seq; expect_prepare_lock(mock_image_ctx); expect_block_writes(mock_image_ctx); if (mock_image_ctx.journal != nullptr) { expect_is_journal_replaying(*mock_image_ctx.journal); } expect_block_requests(mock_image_ctx); expect_append_op_event(mock_image_ctx, true, 0); expect_set_flags_request_send(mock_image_ctx, mock_set_flags_request, 0); expect_create_object_map_request_send( mock_image_ctx, mock_create_object_map_request, -EINVAL); expect_unblock_requests(mock_image_ctx); expect_unblock_writes(mock_image_ctx); expect_handle_prepare_lock_complete(mock_image_ctx); expect_commit_op_event(mock_image_ctx, -EINVAL); C_SaferCond cond_ctx; MockEnableFeaturesRequest *req = new MockEnableFeaturesRequest( mock_image_ctx, &cond_ctx, 0, RBD_FEATURE_OBJECT_MAP); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-EINVAL, cond_ctx.wait()); } TEST_F(TestMockOperationEnableFeaturesRequest, SetFlagsError) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); uint64_t features; ASSERT_EQ(0, librbd::get_features(ictx, &features)); ensure_features_disabled( ictx, RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF); MockOperationImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); expect_verify_lock_ownership(mock_image_ctx); MockSetFlagsRequest mock_set_flags_request; MockCreateObjectMapRequest mock_create_object_map_request; ::testing::InSequence seq; expect_prepare_lock(mock_image_ctx); expect_block_writes(mock_image_ctx); if (mock_image_ctx.journal != nullptr) { expect_is_journal_replaying(*mock_image_ctx.journal); } expect_block_requests(mock_image_ctx); 
expect_append_op_event(mock_image_ctx, true, 0); expect_set_flags_request_send(mock_image_ctx, mock_set_flags_request, -EINVAL); expect_unblock_requests(mock_image_ctx); expect_unblock_writes(mock_image_ctx); expect_handle_prepare_lock_complete(mock_image_ctx); expect_commit_op_event(mock_image_ctx, -EINVAL); C_SaferCond cond_ctx; MockEnableFeaturesRequest *req = new MockEnableFeaturesRequest( mock_image_ctx, &cond_ctx, 0, RBD_FEATURE_OBJECT_MAP); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-EINVAL, cond_ctx.wait()); } TEST_F(TestMockOperationEnableFeaturesRequest, Mirroring) { REQUIRE_FEATURE(RBD_FEATURE_JOURNALING); MirrorModeEnabler mirror_mode_enabler(m_ioctx, cls::rbd::MIRROR_MODE_POOL); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); uint64_t features; ASSERT_EQ(0, librbd::get_features(ictx, &features)); ensure_features_disabled(ictx, RBD_FEATURE_JOURNALING); MockOperationImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); expect_verify_lock_ownership(mock_image_ctx); MockCreateJournalRequest mock_create_journal_request; MockEnableMirrorRequest mock_enable_mirror_request; ::testing::InSequence seq; expect_prepare_lock(mock_image_ctx); expect_block_writes(mock_image_ctx); expect_block_requests(mock_image_ctx); expect_create_journal_request_send(mock_image_ctx, mock_create_journal_request, 0); expect_enable_mirror_request_send(mock_image_ctx, mock_enable_mirror_request, 0); expect_notify_update(mock_image_ctx); expect_unblock_requests(mock_image_ctx); expect_unblock_writes(mock_image_ctx); expect_handle_prepare_lock_complete(mock_image_ctx); C_SaferCond cond_ctx; MockEnableFeaturesRequest *req = new MockEnableFeaturesRequest( mock_image_ctx, &cond_ctx, 0, RBD_FEATURE_JOURNALING); { std::shared_lock 
owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); } TEST_F(TestMockOperationEnableFeaturesRequest, JournalingError) { REQUIRE_FEATURE(RBD_FEATURE_JOURNALING); MirrorModeEnabler mirror_mode_enabler(m_ioctx, cls::rbd::MIRROR_MODE_POOL); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); uint64_t features; ASSERT_EQ(0, librbd::get_features(ictx, &features)); ensure_features_disabled(ictx, RBD_FEATURE_JOURNALING); MockOperationImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); expect_verify_lock_ownership(mock_image_ctx); MockCreateJournalRequest mock_create_journal_request; MockEnableMirrorRequest mock_enable_mirror_request; ::testing::InSequence seq; expect_prepare_lock(mock_image_ctx); expect_block_writes(mock_image_ctx); expect_block_requests(mock_image_ctx); expect_create_journal_request_send(mock_image_ctx, mock_create_journal_request, -EINVAL); expect_unblock_requests(mock_image_ctx); expect_unblock_writes(mock_image_ctx); expect_handle_prepare_lock_complete(mock_image_ctx); C_SaferCond cond_ctx; MockEnableFeaturesRequest *req = new MockEnableFeaturesRequest( mock_image_ctx, &cond_ctx, 0, RBD_FEATURE_JOURNALING); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-EINVAL, cond_ctx.wait()); } TEST_F(TestMockOperationEnableFeaturesRequest, MirroringError) { REQUIRE_FEATURE(RBD_FEATURE_JOURNALING); MirrorModeEnabler mirror_mode_enabler(m_ioctx, cls::rbd::MIRROR_MODE_POOL); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); uint64_t features; ASSERT_EQ(0, librbd::get_features(ictx, &features)); ensure_features_disabled(ictx, RBD_FEATURE_JOURNALING); MockOperationImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; 
initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); expect_verify_lock_ownership(mock_image_ctx); MockCreateJournalRequest mock_create_journal_request; MockEnableMirrorRequest mock_enable_mirror_request; ::testing::InSequence seq; expect_prepare_lock(mock_image_ctx); expect_block_writes(mock_image_ctx); expect_block_requests(mock_image_ctx); expect_create_journal_request_send(mock_image_ctx, mock_create_journal_request, 0); expect_enable_mirror_request_send(mock_image_ctx, mock_enable_mirror_request, -EINVAL); expect_notify_update(mock_image_ctx); expect_unblock_requests(mock_image_ctx); expect_unblock_writes(mock_image_ctx); expect_handle_prepare_lock_complete(mock_image_ctx); C_SaferCond cond_ctx; MockEnableFeaturesRequest *req = new MockEnableFeaturesRequest( mock_image_ctx, &cond_ctx, 0, RBD_FEATURE_JOURNALING); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); } } // namespace operation } // namespace librbd
21,064
31.658915
97
cc
null
ceph-main/src/test/librbd/operation/test_mock_Request.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/librbd/test_mock_fixture.h" #include "test/librbd/test_support.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/librbd/mock/MockJournal.h" #include "librbd/AsyncRequest.h" #include "librbd/operation/Request.h" namespace librbd { namespace { struct MockTestImageCtx : public MockImageCtx { MockTestImageCtx(ImageCtx &image_ctx) : MockImageCtx(image_ctx) { } }; } // anonymous namespace template <> struct AsyncRequest<librbd::MockTestImageCtx> { librbd::MockTestImageCtx &m_image_ctx; Context *m_on_finish; AsyncRequest(librbd::MockTestImageCtx &image_ctx, Context *on_finish) : m_image_ctx(image_ctx), m_on_finish(on_finish) { } virtual ~AsyncRequest() { } virtual void finish(int r) { m_on_finish->complete(r); } virtual void finish_and_destroy(int r) { finish(r); delete this; } }; } // namespace librbd #include "librbd/operation/Request.cc" namespace librbd { namespace journal { std::ostream& operator<<(std::ostream& os, const Event&) { return os; } } // namespace journal namespace operation { using ::testing::InSequence; using ::testing::Invoke; using ::testing::Return; struct MockRequest : public Request<librbd::MockTestImageCtx> { MockRequest(librbd::MockTestImageCtx &image_ctx, Context *on_finish, uint64_t journal_op_tid) : Request<librbd::MockTestImageCtx>(image_ctx, on_finish, journal_op_tid) { } void complete(int r) { finish_and_destroy(r); } void send_op_impl(int r) { bool appending = append_op_event< MockRequest, &MockRequest::handle_send>(this); if (!appending) { complete(r); } } MOCK_METHOD1(should_complete, bool(int)); MOCK_METHOD0(send_op, void()); MOCK_METHOD1(handle_send, Context*(int*)); MOCK_CONST_METHOD0(can_affect_io, bool()); MOCK_CONST_METHOD1(create_event, journal::Event(uint64_t)); }; struct TestMockOperationRequest : public TestMockFixture { void expect_can_affect_io(MockRequest &mock_request, bool can_affect) { 
EXPECT_CALL(mock_request, can_affect_io()) .WillOnce(Return(can_affect)); } void expect_is_journal_replaying(MockJournal &mock_journal, bool replaying) { EXPECT_CALL(mock_journal, is_journal_replaying()) .WillOnce(Return(replaying)); } void expect_is_journal_appending(MockJournal &mock_journal, bool appending) { EXPECT_CALL(mock_journal, is_journal_appending()) .WillOnce(Return(appending)); } void expect_send_op(MockRequest &mock_request, int r) { EXPECT_CALL(mock_request, send_op()) .WillOnce(Invoke([&mock_request, r]() { mock_request.complete(r); })); } void expect_send_op_affects_io(MockImageCtx &mock_image_ctx, MockRequest &mock_request, int r) { EXPECT_CALL(mock_request, send_op()) .WillOnce(Invoke([&mock_image_ctx, &mock_request, r]() { mock_image_ctx.image_ctx->op_work_queue->queue( new LambdaContext([&mock_request, r](int _) { mock_request.send_op_impl(r); }), 0); })); } }; TEST_F(TestMockOperationRequest, SendJournalDisabled) { REQUIRE_FEATURE(RBD_FEATURE_JOURNALING); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockTestImageCtx mock_image_ctx(*ictx); MockJournal mock_journal; mock_image_ctx.journal = &mock_journal; C_SaferCond ctx; MockRequest *mock_request = new MockRequest(mock_image_ctx, &ctx, 0); InSequence seq; expect_can_affect_io(*mock_request, false); expect_is_journal_appending(mock_journal, false); expect_send_op(*mock_request, 0); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; mock_request->send(); } ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockOperationRequest, SendAffectsIOJournalDisabled) { REQUIRE_FEATURE(RBD_FEATURE_JOURNALING); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockTestImageCtx mock_image_ctx(*ictx); MockJournal mock_journal; mock_image_ctx.journal = &mock_journal; C_SaferCond ctx; MockRequest *mock_request = new MockRequest(mock_image_ctx, &ctx, 0); InSequence seq; expect_can_affect_io(*mock_request, true); expect_send_op_affects_io(mock_image_ctx, *mock_request, 
0); expect_can_affect_io(*mock_request, true); expect_is_journal_replaying(mock_journal, false); expect_is_journal_appending(mock_journal, false); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; mock_request->send(); } ASSERT_EQ(0, ctx.wait()); } } // namespace operation } // namespace librbd
4,750
25.994318
79
cc
null
ceph-main/src/test/librbd/operation/test_mock_ResizeRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/librbd/test_mock_fixture.h" #include "test/librbd/test_support.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/librbd/mock/io/MockObjectDispatch.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "common/bit_vector.hpp" #include "librbd/internal.h" #include "librbd/ObjectMap.h" #include "librbd/operation/ResizeRequest.h" #include "librbd/operation/TrimRequest.h" #include "gmock/gmock.h" #include "gtest/gtest.h" namespace librbd { namespace util { inline ImageCtx* get_image_ctx(MockImageCtx* image_ctx) { return image_ctx->image_ctx; } } // namespace util namespace operation { template <> class TrimRequest<MockImageCtx> { public: static TrimRequest *s_instance; static TrimRequest *create(MockImageCtx &image_ctx, Context *on_finish, uint64_t original_size, uint64_t new_size, ProgressContext &prog_ctx) { ceph_assert(s_instance != nullptr); s_instance->on_finish = on_finish; return s_instance; } Context *on_finish = nullptr; TrimRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; TrimRequest<MockImageCtx> *TrimRequest<MockImageCtx>::s_instance = nullptr; } // namespace operation } // namespace librbd // template definitions #include "librbd/operation/ResizeRequest.cc" namespace librbd { namespace operation { using ::testing::_; using ::testing::DoAll; using ::testing::Invoke; using ::testing::InSequence; using ::testing::Return; using ::testing::StrEq; using ::testing::WithArg; class TestMockOperationResizeRequest : public TestMockFixture { public: typedef ResizeRequest<MockImageCtx> MockResizeRequest; typedef TrimRequest<MockImageCtx> MockTrimRequest; void expect_block_writes(MockImageCtx &mock_image_ctx, int r) { EXPECT_CALL(*mock_image_ctx.io_image_dispatcher, block_writes(_)) .WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue)); } void expect_unblock_writes(MockImageCtx &mock_image_ctx) { 
EXPECT_CALL(*mock_image_ctx.io_image_dispatcher, unblock_writes()) .Times(1); } void expect_is_lock_owner(MockImageCtx &mock_image_ctx) { if (mock_image_ctx.exclusive_lock != nullptr) { EXPECT_CALL(*mock_image_ctx.exclusive_lock, is_lock_owner()) .WillOnce(Return(true)); } } void expect_grow_object_map(MockImageCtx &mock_image_ctx) { if (mock_image_ctx.object_map != nullptr) { expect_is_lock_owner(mock_image_ctx); EXPECT_CALL(*mock_image_ctx.object_map, aio_resize(_, _, _)) .WillOnce(WithArg<2>(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue))); } } void expect_shrink_object_map(MockImageCtx &mock_image_ctx) { if (mock_image_ctx.object_map != nullptr) { expect_is_lock_owner(mock_image_ctx); EXPECT_CALL(*mock_image_ctx.object_map, aio_resize(_, _, _)) .WillOnce(WithArg<2>(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue))); } } void expect_update_header(MockImageCtx &mock_image_ctx, int r) { if (mock_image_ctx.old_format) { EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), write(mock_image_ctx.header_oid, _, _, _, _)) .WillOnce(Return(r)); } else { expect_is_lock_owner(mock_image_ctx); EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), exec(mock_image_ctx.header_oid, _, StrEq("rbd"), StrEq("set_size"), _, _, _, _)) .WillOnce(Return(r)); } } void expect_trim(MockImageCtx &mock_image_ctx, MockTrimRequest &mock_trim_request, int r) { EXPECT_CALL(mock_trim_request, send()) .WillOnce(FinishRequest(&mock_trim_request, r, &mock_image_ctx)); } void expect_flush_cache(MockImageCtx &mock_image_ctx, int r) { EXPECT_CALL(*mock_image_ctx.io_image_dispatcher, send(_)) .WillOnce(Invoke([&mock_image_ctx, r](io::ImageDispatchSpec* spec) { ASSERT_TRUE(boost::get<io::ImageDispatchSpec::Flush>( &spec->request) != nullptr); spec->dispatch_result = io::DISPATCH_RESULT_COMPLETE; auto aio_comp = spec->aio_comp; auto ctx = new LambdaContext([aio_comp](int r) { if (r < 0) { aio_comp->fail(r); } else { aio_comp->set_request_count(1); aio_comp->add_request(); 
aio_comp->complete_request(r); } }); mock_image_ctx.image_ctx->op_work_queue->queue(ctx, r); })); } void expect_invalidate_cache(MockImageCtx &mock_image_ctx, int r) { EXPECT_CALL(*mock_image_ctx.io_image_dispatcher, invalidate_cache(_)) .WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue)); expect_op_work_queue(mock_image_ctx); } void expect_resize_object_map(MockImageCtx &mock_image_ctx, uint64_t new_size) { EXPECT_CALL(*mock_image_ctx.object_map, aio_resize(new_size, _, _)) .WillOnce(WithArg<2>(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue))); } int when_resize(MockImageCtx &mock_image_ctx, uint64_t new_size, bool allow_shrink, uint64_t journal_op_tid, bool disable_journal) { C_SaferCond cond_ctx; librbd::NoOpProgressContext prog_ctx; MockResizeRequest *req = new MockResizeRequest( mock_image_ctx, &cond_ctx, new_size, allow_shrink, prog_ctx, journal_op_tid, disable_journal); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } return cond_ctx.wait(); } }; TEST_F(TestMockOperationResizeRequest, NoOpSuccess) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); InSequence seq; expect_block_writes(mock_image_ctx, 0); expect_append_op_event(mock_image_ctx, true, 0); expect_unblock_writes(mock_image_ctx); expect_commit_op_event(mock_image_ctx, 0); ASSERT_EQ(0, when_resize(mock_image_ctx, ictx->size, true, 0, false)); } TEST_F(TestMockOperationResizeRequest, GrowSuccess) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); InSequence 
seq; expect_block_writes(mock_image_ctx, 0); expect_append_op_event(mock_image_ctx, true, 0); expect_grow_object_map(mock_image_ctx); expect_update_header(mock_image_ctx, 0); expect_unblock_writes(mock_image_ctx); expect_commit_op_event(mock_image_ctx, 0); ASSERT_EQ(0, when_resize(mock_image_ctx, ictx->size * 2, true, 0, false)); } TEST_F(TestMockOperationResizeRequest, ShrinkSuccess) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); InSequence seq; expect_block_writes(mock_image_ctx, 0); expect_append_op_event(mock_image_ctx, true, 0); expect_unblock_writes(mock_image_ctx); MockTrimRequest mock_trim_request; expect_flush_cache(mock_image_ctx, 0); expect_invalidate_cache(mock_image_ctx, 0); expect_trim(mock_image_ctx, mock_trim_request, 0); expect_block_writes(mock_image_ctx, 0); expect_update_header(mock_image_ctx, 0); expect_shrink_object_map(mock_image_ctx); expect_unblock_writes(mock_image_ctx); expect_commit_op_event(mock_image_ctx, 0); ASSERT_EQ(0, when_resize(mock_image_ctx, ictx->size / 2, true, 0, false)); } TEST_F(TestMockOperationResizeRequest, ShrinkError) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); InSequence seq; expect_block_writes(mock_image_ctx, -EINVAL); expect_unblock_writes(mock_image_ctx); ASSERT_EQ(-EINVAL, when_resize(mock_image_ctx, ictx->size / 2, false, 0, false)); } TEST_F(TestMockOperationResizeRequest, PreBlockWritesError) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); 
MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); InSequence seq; expect_block_writes(mock_image_ctx, -EINVAL); expect_unblock_writes(mock_image_ctx); ASSERT_EQ(-EINVAL, when_resize(mock_image_ctx, ictx->size, true, 0, false)); } TEST_F(TestMockOperationResizeRequest, TrimError) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); InSequence seq; expect_block_writes(mock_image_ctx, 0); expect_append_op_event(mock_image_ctx, true, 0); expect_unblock_writes(mock_image_ctx); MockTrimRequest mock_trim_request; expect_flush_cache(mock_image_ctx, 0); expect_invalidate_cache(mock_image_ctx, -EBUSY); expect_trim(mock_image_ctx, mock_trim_request, -EINVAL); expect_commit_op_event(mock_image_ctx, -EINVAL); ASSERT_EQ(-EINVAL, when_resize(mock_image_ctx, ictx->size / 2, true, 0, false)); } TEST_F(TestMockOperationResizeRequest, FlushCacheError) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); REQUIRE(ictx->cache); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); InSequence seq; expect_block_writes(mock_image_ctx, 0); expect_append_op_event(mock_image_ctx, true, 0); expect_unblock_writes(mock_image_ctx); MockTrimRequest mock_trim_request; expect_flush_cache(mock_image_ctx, -EINVAL); expect_commit_op_event(mock_image_ctx, -EINVAL); ASSERT_EQ(-EINVAL, when_resize(mock_image_ctx, ictx->size / 2, true, 0, false)); } TEST_F(TestMockOperationResizeRequest, InvalidateCacheError) { librbd::ImageCtx *ictx; 
ASSERT_EQ(0, open_image(m_image_name, &ictx)); REQUIRE(ictx->cache); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); InSequence seq; expect_block_writes(mock_image_ctx, 0); expect_append_op_event(mock_image_ctx, true, 0); expect_unblock_writes(mock_image_ctx); MockTrimRequest mock_trim_request; expect_flush_cache(mock_image_ctx, 0); expect_invalidate_cache(mock_image_ctx, -EINVAL); expect_commit_op_event(mock_image_ctx, -EINVAL); ASSERT_EQ(-EINVAL, when_resize(mock_image_ctx, ictx->size / 2, true, 0, false)); } TEST_F(TestMockOperationResizeRequest, PostBlockWritesError) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); InSequence seq; expect_block_writes(mock_image_ctx, 0); expect_append_op_event(mock_image_ctx, true, 0); expect_unblock_writes(mock_image_ctx); MockTrimRequest mock_trim_request; expect_flush_cache(mock_image_ctx, 0); expect_invalidate_cache(mock_image_ctx, 0); expect_trim(mock_image_ctx, mock_trim_request, 0); expect_block_writes(mock_image_ctx, -EINVAL); expect_unblock_writes(mock_image_ctx); expect_commit_op_event(mock_image_ctx, -EINVAL); ASSERT_EQ(-EINVAL, when_resize(mock_image_ctx, ictx->size / 2, true, 0, false)); } TEST_F(TestMockOperationResizeRequest, UpdateHeaderError) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); InSequence seq; expect_block_writes(mock_image_ctx, 
0); expect_append_op_event(mock_image_ctx, true, 0); expect_grow_object_map(mock_image_ctx); expect_update_header(mock_image_ctx, -EINVAL); expect_unblock_writes(mock_image_ctx); expect_commit_op_event(mock_image_ctx, -EINVAL); ASSERT_EQ(-EINVAL, when_resize(mock_image_ctx, ictx->size * 2, true, 0, false)); } TEST_F(TestMockOperationResizeRequest, JournalAppendError) { REQUIRE_FEATURE(RBD_FEATURE_JOURNALING); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); InSequence seq; expect_block_writes(mock_image_ctx, 0); expect_append_op_event(mock_image_ctx, true, -EINVAL); expect_unblock_writes(mock_image_ctx); ASSERT_EQ(-EINVAL, when_resize(mock_image_ctx, ictx->size, true, 0, false)); } TEST_F(TestMockOperationResizeRequest, JournalDisabled) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); InSequence seq; expect_block_writes(mock_image_ctx, 0); expect_unblock_writes(mock_image_ctx); ASSERT_EQ(0, when_resize(mock_image_ctx, ictx->size, true, 0, true)); } } // namespace operation } // namespace librbd
15,166
33.786697
103
cc
null
ceph-main/src/test/librbd/operation/test_mock_SnapshotCreateRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/librbd/test_mock_fixture.h" #include "test/librbd/test_support.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "common/bit_vector.hpp" #include "librbd/internal.h" #include "librbd/ObjectMap.h" #include "librbd/mirror/snapshot/SetImageStateRequest.h" #include "librbd/operation/SnapshotCreateRequest.h" #include "gmock/gmock.h" #include "gtest/gtest.h" namespace librbd { namespace mirror { namespace snapshot { template<> class SetImageStateRequest<MockImageCtx> { public: static SetImageStateRequest *s_instance; Context *on_finish = nullptr; static SetImageStateRequest *create(MockImageCtx *image_ctx, uint64_t snap_id, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->on_finish = on_finish; return s_instance; } SetImageStateRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; SetImageStateRequest<MockImageCtx> *SetImageStateRequest<MockImageCtx>::s_instance; } // namespace snapshot } // namespace mirror } // namespace librbd // template definitions #include "librbd/operation/SnapshotCreateRequest.cc" namespace librbd { namespace operation { using ::testing::_; using ::testing::DoAll; using ::testing::DoDefault; using ::testing::Return; using ::testing::SetArgPointee; using ::testing::StrEq; using ::testing::WithArg; class TestMockOperationSnapshotCreateRequest : public TestMockFixture { public: typedef SnapshotCreateRequest<MockImageCtx> MockSnapshotCreateRequest; typedef mirror::snapshot::SetImageStateRequest<MockImageCtx> MockSetImageStateRequest; void expect_notify_quiesce(MockImageCtx &mock_image_ctx, int r) { EXPECT_CALL(*mock_image_ctx.image_watcher, notify_quiesce(_, _, _)) .WillOnce(WithArg<2>(CompleteContext( r, mock_image_ctx.image_ctx->op_work_queue))); } void expect_block_writes(MockImageCtx &mock_image_ctx) { 
EXPECT_CALL(*mock_image_ctx.io_image_dispatcher, block_writes(_)) .WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue)); } void expect_verify_lock_ownership(MockImageCtx &mock_image_ctx) { if (mock_image_ctx.exclusive_lock != nullptr) { EXPECT_CALL(*mock_image_ctx.exclusive_lock, is_lock_owner()) .WillRepeatedly(Return(true)); } } void expect_allocate_snap_id(MockImageCtx &mock_image_ctx, int r) { auto &expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.data_ctx), selfmanaged_snap_create(_)); if (r < 0 && r != -ESTALE) { expect.WillOnce(Return(r)); } else { expect.Times(r < 0 ? 2 : 1).WillRepeatedly(DoDefault()); } } void expect_release_snap_id(MockImageCtx &mock_image_ctx, int r) { auto &expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.data_ctx), selfmanaged_snap_remove(_)); if (r < 0) { expect.WillOnce(Return(r)); } else { expect.WillOnce(DoDefault()); } } void expect_snap_create(MockImageCtx &mock_image_ctx, int r) { auto &expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), exec(mock_image_ctx.header_oid, _, StrEq("rbd"), StrEq(mock_image_ctx.old_format ? 
"snap_add" : "snapshot_add"), _, _, _, _)); if (r == -ESTALE) { expect.WillOnce(Return(r)).WillOnce(DoDefault()); } else if (r < 0) { expect.WillOnce(Return(r)); } else { expect.WillOnce(DoDefault()); } } void expect_object_map_snap_create(MockImageCtx &mock_image_ctx) { if (mock_image_ctx.object_map != nullptr) { EXPECT_CALL(*mock_image_ctx.object_map, snapshot_add(_, _)) .WillOnce(WithArg<1>(CompleteContext( 0, mock_image_ctx.image_ctx->op_work_queue))); } } void expect_set_image_state( MockImageCtx &mock_image_ctx, MockSetImageStateRequest &mock_set_image_state_request, int r) { EXPECT_CALL(mock_set_image_state_request, send()) .WillOnce(FinishRequest(&mock_set_image_state_request, r, &mock_image_ctx)); } void expect_update_snap_context(MockImageCtx &mock_image_ctx) { // state machine checks to ensure a refresh hasn't already added the snap EXPECT_CALL(mock_image_ctx, get_snap_info(_)) .WillOnce(Return(static_cast<const librbd::SnapInfo*>(NULL))); EXPECT_CALL(mock_image_ctx, add_snap(_, "snap1", _, _, _, _, _, _)); } void expect_unblock_writes(MockImageCtx &mock_image_ctx) { EXPECT_CALL(*mock_image_ctx.io_image_dispatcher, unblock_writes()) .Times(1); } void expect_notify_unquiesce(MockImageCtx &mock_image_ctx, int r) { EXPECT_CALL(*mock_image_ctx.image_watcher, notify_unquiesce(_, _)) .WillOnce(WithArg<1>( CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue))); } }; TEST_F(TestMockOperationSnapshotCreateRequest, Success) { REQUIRE_FORMAT_V2(); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; if (ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) { mock_image_ctx.exclusive_lock = &mock_exclusive_lock; } MockObjectMap mock_object_map; if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) { mock_image_ctx.object_map = &mock_object_map; } expect_verify_lock_ownership(mock_image_ctx); expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; 
expect_notify_quiesce(mock_image_ctx, -EINVAL); expect_block_writes(mock_image_ctx); expect_allocate_snap_id(mock_image_ctx, 0); expect_snap_create(mock_image_ctx, 0); expect_object_map_snap_create(mock_image_ctx); expect_update_snap_context(mock_image_ctx); EXPECT_CALL(mock_image_ctx, rebuild_data_io_context()); expect_unblock_writes(mock_image_ctx); expect_notify_unquiesce(mock_image_ctx, -EINVAL); C_SaferCond cond_ctx; librbd::NoOpProgressContext prog_ctx; MockSnapshotCreateRequest *req = new MockSnapshotCreateRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1", 0, SNAP_CREATE_FLAG_IGNORE_NOTIFY_QUIESCE_ERROR, prog_ctx); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotCreateRequest, NotifyQuiesceError) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; expect_notify_quiesce(mock_image_ctx, -EINVAL); expect_notify_unquiesce(mock_image_ctx, 0); C_SaferCond cond_ctx; librbd::NoOpProgressContext prog_ctx; MockSnapshotCreateRequest *req = new MockSnapshotCreateRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1", 0, 0, prog_ctx); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-EINVAL, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotCreateRequest, AllocateSnapIdError) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; if (ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) { mock_image_ctx.exclusive_lock = &mock_exclusive_lock; } expect_verify_lock_ownership(mock_image_ctx); expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; expect_notify_quiesce(mock_image_ctx, 0); expect_block_writes(mock_image_ctx); expect_allocate_snap_id(mock_image_ctx, -EINVAL); 
expect_unblock_writes(mock_image_ctx); expect_notify_unquiesce(mock_image_ctx, 0); C_SaferCond cond_ctx; librbd::NoOpProgressContext prog_ctx; MockSnapshotCreateRequest *req = new MockSnapshotCreateRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1", 0, 0, prog_ctx); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-EINVAL, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotCreateRequest, CreateSnapStale) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; if (ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) { mock_image_ctx.exclusive_lock = &mock_exclusive_lock; } MockObjectMap mock_object_map; if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) { mock_image_ctx.object_map = &mock_object_map; } expect_verify_lock_ownership(mock_image_ctx); expect_op_work_queue(mock_image_ctx); expect_notify_quiesce(mock_image_ctx, 0); expect_block_writes(mock_image_ctx); expect_allocate_snap_id(mock_image_ctx, -ESTALE); expect_snap_create(mock_image_ctx, -ESTALE); expect_object_map_snap_create(mock_image_ctx); expect_update_snap_context(mock_image_ctx); EXPECT_CALL(mock_image_ctx, rebuild_data_io_context()); expect_unblock_writes(mock_image_ctx); expect_notify_unquiesce(mock_image_ctx, 0); C_SaferCond cond_ctx; librbd::NoOpProgressContext prog_ctx; MockSnapshotCreateRequest *req = new MockSnapshotCreateRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1", 0, 0, prog_ctx); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotCreateRequest, CreateSnapError) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; if (ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) { mock_image_ctx.exclusive_lock = &mock_exclusive_lock; } 
expect_verify_lock_ownership(mock_image_ctx); expect_op_work_queue(mock_image_ctx); expect_notify_quiesce(mock_image_ctx, 0); expect_block_writes(mock_image_ctx); expect_allocate_snap_id(mock_image_ctx, 0); expect_snap_create(mock_image_ctx, -EINVAL); expect_release_snap_id(mock_image_ctx, 0); expect_unblock_writes(mock_image_ctx); expect_notify_unquiesce(mock_image_ctx, 0); C_SaferCond cond_ctx; librbd::NoOpProgressContext prog_ctx; MockSnapshotCreateRequest *req = new MockSnapshotCreateRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1", 0, 0, prog_ctx); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-EINVAL, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotCreateRequest, ReleaseSnapIdError) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; if (ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) { mock_image_ctx.exclusive_lock = &mock_exclusive_lock; } expect_verify_lock_ownership(mock_image_ctx); expect_op_work_queue(mock_image_ctx); expect_notify_quiesce(mock_image_ctx, 0); expect_block_writes(mock_image_ctx); expect_allocate_snap_id(mock_image_ctx, 0); expect_snap_create(mock_image_ctx, -EINVAL); expect_release_snap_id(mock_image_ctx, -ESTALE); expect_unblock_writes(mock_image_ctx); expect_notify_unquiesce(mock_image_ctx, 0); C_SaferCond cond_ctx; librbd::NoOpProgressContext prog_ctx; MockSnapshotCreateRequest *req = new MockSnapshotCreateRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1", 0, 0, prog_ctx); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-EINVAL, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotCreateRequest, SkipObjectMap) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; if 
(ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) { mock_image_ctx.exclusive_lock = &mock_exclusive_lock; } MockObjectMap mock_object_map; mock_image_ctx.object_map = &mock_object_map; expect_verify_lock_ownership(mock_image_ctx); expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; expect_notify_quiesce(mock_image_ctx, 0); expect_block_writes(mock_image_ctx); expect_allocate_snap_id(mock_image_ctx, 0); expect_snap_create(mock_image_ctx, 0); expect_update_snap_context(mock_image_ctx); EXPECT_CALL(mock_image_ctx, rebuild_data_io_context()); expect_unblock_writes(mock_image_ctx); expect_notify_unquiesce(mock_image_ctx, 0); C_SaferCond cond_ctx; librbd::NoOpProgressContext prog_ctx; MockSnapshotCreateRequest *req = new MockSnapshotCreateRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1", 0, SNAP_CREATE_FLAG_SKIP_OBJECT_MAP, prog_ctx); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotCreateRequest, SkipNotifyQuiesce) { REQUIRE_FORMAT_V2(); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; if (ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) { mock_image_ctx.exclusive_lock = &mock_exclusive_lock; } MockObjectMap mock_object_map; if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) { mock_image_ctx.object_map = &mock_object_map; } expect_verify_lock_ownership(mock_image_ctx); expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; expect_block_writes(mock_image_ctx); expect_allocate_snap_id(mock_image_ctx, 0); expect_snap_create(mock_image_ctx, 0); expect_object_map_snap_create(mock_image_ctx); expect_update_snap_context(mock_image_ctx); EXPECT_CALL(mock_image_ctx, rebuild_data_io_context()); expect_unblock_writes(mock_image_ctx); C_SaferCond cond_ctx; librbd::NoOpProgressContext prog_ctx; MockSnapshotCreateRequest *req = new 
MockSnapshotCreateRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1", 0, SNAP_CREATE_FLAG_SKIP_NOTIFY_QUIESCE, prog_ctx); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotCreateRequest, SetImageState) { REQUIRE_FORMAT_V2(); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; if (ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) { mock_image_ctx.exclusive_lock = &mock_exclusive_lock; } MockObjectMap mock_object_map; if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) { mock_image_ctx.object_map = &mock_object_map; } expect_verify_lock_ownership(mock_image_ctx); expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; expect_notify_quiesce(mock_image_ctx, 0); expect_block_writes(mock_image_ctx); expect_allocate_snap_id(mock_image_ctx, 0); expect_snap_create(mock_image_ctx, 0); expect_object_map_snap_create(mock_image_ctx); MockSetImageStateRequest mock_set_image_state_request; expect_set_image_state(mock_image_ctx, mock_set_image_state_request, 0); expect_update_snap_context(mock_image_ctx); EXPECT_CALL(mock_image_ctx, rebuild_data_io_context()); expect_unblock_writes(mock_image_ctx); expect_notify_unquiesce(mock_image_ctx, 0); C_SaferCond cond_ctx; librbd::NoOpProgressContext prog_ctx; MockSnapshotCreateRequest *req = new MockSnapshotCreateRequest( mock_image_ctx, &cond_ctx, cls::rbd::MirrorSnapshotNamespace{ cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY, {}, "", CEPH_NOSNAP}, "snap1", 0, 0, prog_ctx); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); } } // namespace operation } // namespace librbd
16,216
31.695565
89
cc
null
ceph-main/src/test/librbd/operation/test_mock_SnapshotProtectRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/librbd/test_mock_fixture.h" #include "test/librbd/test_support.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "common/bit_vector.hpp" #include "librbd/ImageState.h" #include "librbd/internal.h" #include "librbd/operation/SnapshotProtectRequest.h" #include "gmock/gmock.h" #include "gtest/gtest.h" // template definitions #include "librbd/operation/SnapshotProtectRequest.cc" namespace librbd { namespace operation { using ::testing::_; using ::testing::DoAll; using ::testing::DoDefault; using ::testing::Return; using ::testing::SetArgPointee; using ::testing::StrEq; using ::testing::WithArg; class TestMockOperationSnapshotProtectRequest : public TestMockFixture { public: typedef SnapshotProtectRequest<MockImageCtx> MockSnapshotProtectRequest; void expect_get_snap_id(MockImageCtx &mock_image_ctx, uint64_t snap_id) { EXPECT_CALL(mock_image_ctx, get_snap_id(_, _)) .WillOnce(Return(snap_id)); } void expect_is_snap_protected(MockImageCtx &mock_image_ctx, bool is_protected, int r) { auto &expect = EXPECT_CALL(mock_image_ctx, is_snap_protected(_, _)); if (r < 0) { expect.WillOnce(Return(r)); } else { expect.WillOnce(DoAll(SetArgPointee<1>(is_protected), Return(0))); } } void expect_set_protection_status(MockImageCtx &mock_image_ctx, int r) { auto &expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), exec(mock_image_ctx.header_oid, _, StrEq("rbd"), StrEq("set_protection_status"), _, _, _, _)); if (r < 0) { expect.WillOnce(Return(r)); } else { expect.WillOnce(DoDefault()); } } }; TEST_F(TestMockOperationSnapshotProtectRequest, Success) { REQUIRE_FEATURE(RBD_FEATURE_LAYERING); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx mock_image_ctx(*ictx); 
expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; expect_get_snap_id(mock_image_ctx, ictx->snap_info.rbegin()->first); expect_is_snap_protected(mock_image_ctx, false, 0); expect_set_protection_status(mock_image_ctx, 0); C_SaferCond cond_ctx; MockSnapshotProtectRequest *req = new MockSnapshotProtectRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1"); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotProtectRequest, GetSnapIdMissing) { REQUIRE_FEATURE(RBD_FEATURE_LAYERING); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx mock_image_ctx(*ictx); expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; expect_get_snap_id(mock_image_ctx, CEPH_NOSNAP); C_SaferCond cond_ctx; MockSnapshotProtectRequest *req = new MockSnapshotProtectRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1"); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-ENOENT, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotProtectRequest, IsSnapProtectedError) { REQUIRE_FEATURE(RBD_FEATURE_LAYERING); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx mock_image_ctx(*ictx); expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; expect_get_snap_id(mock_image_ctx, ictx->snap_info.rbegin()->first); expect_is_snap_protected(mock_image_ctx, false, -EINVAL); C_SaferCond cond_ctx; MockSnapshotProtectRequest *req = new MockSnapshotProtectRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1"); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-EINVAL, cond_ctx.wait()); } 
TEST_F(TestMockOperationSnapshotProtectRequest, SnapAlreadyProtected) { REQUIRE_FEATURE(RBD_FEATURE_LAYERING); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx mock_image_ctx(*ictx); expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; expect_get_snap_id(mock_image_ctx, ictx->snap_info.rbegin()->first); expect_is_snap_protected(mock_image_ctx, true, 0); C_SaferCond cond_ctx; MockSnapshotProtectRequest *req = new MockSnapshotProtectRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1"); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-EBUSY, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotProtectRequest, SetProtectionStateError) { REQUIRE_FEATURE(RBD_FEATURE_LAYERING); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx mock_image_ctx(*ictx); expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; expect_get_snap_id(mock_image_ctx, ictx->snap_info.rbegin()->first); expect_is_snap_protected(mock_image_ctx, false, 0); expect_set_protection_status(mock_image_ctx, -EINVAL); C_SaferCond cond_ctx; MockSnapshotProtectRequest *req = new MockSnapshotProtectRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1"); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-EINVAL, cond_ctx.wait()); } } // namespace operation } // namespace librbd
6,075
30.319588
80
cc
null
ceph-main/src/test/librbd/operation/test_mock_SnapshotRemoveRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/librbd/test_mock_fixture.h" #include "test/librbd/test_support.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "common/bit_vector.hpp" #include "librbd/ImageState.h" #include "librbd/internal.h" #include "librbd/Operations.h" #include "librbd/image/DetachChildRequest.h" #include "librbd/mirror/snapshot/RemoveImageStateRequest.h" #include "librbd/operation/SnapshotRemoveRequest.h" #include "gmock/gmock.h" #include "gtest/gtest.h" namespace librbd { namespace image { template <> class DetachChildRequest<MockImageCtx> { public: static DetachChildRequest *s_instance; static DetachChildRequest *create(MockImageCtx &image_ctx, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->on_finish = on_finish; return s_instance; } Context *on_finish = nullptr; DetachChildRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; DetachChildRequest<MockImageCtx> *DetachChildRequest<MockImageCtx>::s_instance; } // namespace image namespace mirror { namespace snapshot { template<> class RemoveImageStateRequest<MockImageCtx> { public: static RemoveImageStateRequest *s_instance; Context *on_finish = nullptr; static RemoveImageStateRequest *create(MockImageCtx *image_ctx, uint64_t snap_id, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->on_finish = on_finish; return s_instance; } RemoveImageStateRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; RemoveImageStateRequest<MockImageCtx> *RemoveImageStateRequest<MockImageCtx>::s_instance; } // namespace snapshot } // namespace mirror } // namespace librbd // template definitions #include "librbd/operation/SnapshotRemoveRequest.cc" namespace librbd { namespace operation { using ::testing::_; using ::testing::DoAll; using ::testing::DoDefault; using ::testing::Invoke; using ::testing::Return; using 
::testing::SetArgPointee; using ::testing::StrEq; using ::testing::WithArg; class TestMockOperationSnapshotRemoveRequest : public TestMockFixture { public: typedef SnapshotRemoveRequest<MockImageCtx> MockSnapshotRemoveRequest; typedef image::DetachChildRequest<MockImageCtx> MockDetachChildRequest; typedef mirror::snapshot::RemoveImageStateRequest<MockImageCtx> MockRemoveImageStateRequest; int create_snapshot(const char *snap_name) { librbd::ImageCtx *ictx; int r = open_image(m_image_name, &ictx); if (r < 0) { return r; } r = snap_create(*ictx, snap_name); if (r < 0) { return r; } r = snap_protect(*ictx, snap_name); if (r < 0) { return r; } close_image(ictx); return 0; } void expect_snapshot_trash_add(MockImageCtx &mock_image_ctx, int r) { if (mock_image_ctx.old_format) { return; } auto &expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), exec(mock_image_ctx.header_oid, _, StrEq("rbd"), StrEq("snapshot_trash_add"), _, _, _, _)); if (r < 0) { expect.WillOnce(Return(r)); } else { expect.WillOnce(DoDefault()); } } void expect_snapshot_get(MockImageCtx &mock_image_ctx, const cls::rbd::SnapshotInfo& snap_info, int r) { if (mock_image_ctx.old_format) { return; } using ceph::encode; EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), exec(mock_image_ctx.header_oid, _, StrEq("rbd"), StrEq("snapshot_get"), _, _, _, _)) .WillOnce(WithArg<5>(Invoke([snap_info, r](bufferlist* bl) { encode(snap_info, *bl); return r; }))); } void expect_children_list(MockImageCtx &mock_image_ctx, const cls::rbd::ChildImageSpecs& child_images, int r) { if (mock_image_ctx.old_format) { return; } using ceph::encode; EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), exec(mock_image_ctx.header_oid, _, StrEq("rbd"), StrEq("children_list"), _, _, _, _)) .WillOnce(WithArg<5>(Invoke([child_images, r](bufferlist* bl) { encode(child_images, *bl); return r; }))); } void expect_detach_stale_child(MockImageCtx &mock_image_ctx, int r) { auto& parent_spec = mock_image_ctx.parent_md.spec; bufferlist 
bl; encode(parent_spec.snap_id, bl); encode(cls::rbd::ChildImageSpec{mock_image_ctx.md_ctx.get_id(), "", mock_image_ctx.id}, bl); EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), exec(util::header_name(parent_spec.image_id), _, StrEq("rbd"), StrEq("child_detach"), ContentsEqual(bl), _, _, _)) .WillOnce(Return(r)); } void expect_object_map_snap_remove(MockImageCtx &mock_image_ctx, int r) { if (mock_image_ctx.object_map != nullptr) { EXPECT_CALL(*mock_image_ctx.object_map, snapshot_remove(_, _)) .WillOnce(WithArg<1>(CompleteContext( r, mock_image_ctx.image_ctx->op_work_queue))); } } void expect_remove_image_state( MockImageCtx &mock_image_ctx, MockRemoveImageStateRequest &mock_remove_image_state_request, int r) { EXPECT_CALL(mock_remove_image_state_request, send()) .WillOnce(FinishRequest(&mock_remove_image_state_request, r, &mock_image_ctx)); } void expect_get_parent_spec(MockImageCtx &mock_image_ctx, int r) { if (mock_image_ctx.old_format) { return; } auto &expect = EXPECT_CALL(mock_image_ctx, get_parent_spec(_, _)); if (r < 0) { expect.WillOnce(Return(r)); } else { auto &parent_spec = mock_image_ctx.snap_info.rbegin()->second.parent.spec; expect.WillOnce(DoAll(SetArgPointee<1>(parent_spec), Return(0))); } } void expect_detach_child(MockImageCtx &mock_image_ctx, MockDetachChildRequest& mock_request, int r) { EXPECT_CALL(mock_request, send()) .WillOnce(FinishRequest(&mock_request, r, &mock_image_ctx)); } void expect_snap_remove(MockImageCtx &mock_image_ctx, int r) { auto &expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), exec(mock_image_ctx.header_oid, _, StrEq("rbd"), StrEq(mock_image_ctx.old_format ? 
"snap_remove" : "snapshot_remove"), _, _, _, _)); if (r < 0) { expect.WillOnce(Return(r)); } else { expect.WillOnce(DoDefault()); } } void expect_rm_snap(MockImageCtx &mock_image_ctx) { EXPECT_CALL(mock_image_ctx, rm_snap(_, _, _)).Times(1); } void expect_release_snap_id(MockImageCtx &mock_image_ctx) { EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.data_ctx), selfmanaged_snap_remove(_)) .WillOnce(DoDefault()); } }; TEST_F(TestMockOperationSnapshotRemoveRequest, Success) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; if (ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) { mock_image_ctx.exclusive_lock = &mock_exclusive_lock; } MockObjectMap mock_object_map; if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) { mock_image_ctx.object_map = &mock_object_map; } expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; expect_snapshot_trash_add(mock_image_ctx, 0); uint64_t snap_id = ictx->snap_info.rbegin()->first; expect_snapshot_get(mock_image_ctx, {snap_id, {cls::rbd::UserSnapshotNamespace{}}, "snap1", 123, {}, 0}, 0); expect_get_parent_spec(mock_image_ctx, 0); expect_object_map_snap_remove(mock_image_ctx, 0); expect_release_snap_id(mock_image_ctx); expect_snap_remove(mock_image_ctx, 0); expect_rm_snap(mock_image_ctx); C_SaferCond cond_ctx; MockSnapshotRemoveRequest *req = new MockSnapshotRemoveRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1", snap_id); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotRemoveRequest, SuccessCloneParent) { REQUIRE_FEATURE(RBD_FEATURE_LAYERING); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx 
mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; if (ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) { mock_image_ctx.exclusive_lock = &mock_exclusive_lock; } MockObjectMap mock_object_map; if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) { mock_image_ctx.object_map = &mock_object_map; } expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; expect_snapshot_trash_add(mock_image_ctx, 0); uint64_t snap_id = ictx->snap_info.rbegin()->first; expect_snapshot_get(mock_image_ctx, {snap_id, {cls::rbd::UserSnapshotNamespace{}}, "snap1", 123, {}, 1}, 0); const cls::rbd::ChildImageSpecs child_images; expect_children_list(mock_image_ctx, child_images, 0); expect_get_parent_spec(mock_image_ctx, 0); C_SaferCond cond_ctx; MockSnapshotRemoveRequest *req = new MockSnapshotRemoveRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1", snap_id); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotRemoveRequest, SuccessTrash) { REQUIRE_FEATURE(RBD_FEATURE_LAYERING); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; if (ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) { mock_image_ctx.exclusive_lock = &mock_exclusive_lock; } MockObjectMap mock_object_map; if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) { mock_image_ctx.object_map = &mock_object_map; } expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; expect_snapshot_trash_add(mock_image_ctx, 0); uint64_t snap_id = ictx->snap_info.rbegin()->first; expect_snapshot_get(mock_image_ctx, {snap_id, {cls::rbd::TrashSnapshotNamespace{ cls::rbd::SNAPSHOT_NAMESPACE_TYPE_USER, "snap1"}}, "snap1", 123, {}, 0}, 0); expect_get_parent_spec(mock_image_ctx, 0); expect_object_map_snap_remove(mock_image_ctx, 0); 
expect_release_snap_id(mock_image_ctx); expect_snap_remove(mock_image_ctx, 0); expect_rm_snap(mock_image_ctx); C_SaferCond cond_ctx; MockSnapshotRemoveRequest *req = new MockSnapshotRemoveRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1", snap_id); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotRemoveRequest, FlattenedCloneRemovesChild) { REQUIRE_FEATURE(RBD_FEATURE_LAYERING); REQUIRE(!is_feature_enabled(RBD_FEATURE_DEEP_FLATTEN)) ASSERT_EQ(0, create_snapshot("snap1")); int order = 22; uint64_t features; ASSERT_TRUE(::get_features(&features)); std::string clone_name = get_temp_image_name(); ASSERT_EQ(0, librbd::clone(m_ioctx, m_image_name.c_str(), "snap1", m_ioctx, clone_name.c_str(), features, &order, 0, 0)); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(clone_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); librbd::NoOpProgressContext prog_ctx; ASSERT_EQ(0, flatten(*ictx, prog_ctx)); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; if (ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) { mock_image_ctx.exclusive_lock = &mock_exclusive_lock; } MockObjectMap mock_object_map; if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) { mock_image_ctx.object_map = &mock_object_map; } expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; expect_snapshot_trash_add(mock_image_ctx, 0); uint64_t snap_id = ictx->snap_info.rbegin()->first; expect_snapshot_get(mock_image_ctx, {snap_id, {cls::rbd::UserSnapshotNamespace{}}, "snap1", 123, {}, 0}, 0); expect_get_parent_spec(mock_image_ctx, 0); MockDetachChildRequest mock_detach_child_request; expect_detach_child(mock_image_ctx, mock_detach_child_request, -ENOENT); expect_object_map_snap_remove(mock_image_ctx, 0); expect_release_snap_id(mock_image_ctx); expect_snap_remove(mock_image_ctx, 0); expect_rm_snap(mock_image_ctx); 
C_SaferCond cond_ctx; MockSnapshotRemoveRequest *req = new MockSnapshotRemoveRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1", snap_id); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotRemoveRequest, TrashCloneParent) { REQUIRE_FEATURE(RBD_FEATURE_LAYERING); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); NoOpProgressContext prog_ctx; ASSERT_EQ(0, ictx->operations->snap_create( {cls::rbd::TrashSnapshotNamespace{}}, "snap1", 0, prog_ctx)); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; if (ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) { mock_image_ctx.exclusive_lock = &mock_exclusive_lock; } MockObjectMap mock_object_map; if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) { mock_image_ctx.object_map = &mock_object_map; } expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; uint64_t snap_id = ictx->snap_info.rbegin()->first; expect_snapshot_get(mock_image_ctx, {snap_id, {cls::rbd::TrashSnapshotNamespace{}}, "snap1", 123, {}, 1}, 0); const cls::rbd::ChildImageSpecs child_images; expect_children_list(mock_image_ctx, child_images, 0); expect_get_parent_spec(mock_image_ctx, 0); C_SaferCond cond_ctx; MockSnapshotRemoveRequest *req = new MockSnapshotRemoveRequest( mock_image_ctx, &cond_ctx, cls::rbd::TrashSnapshotNamespace{}, "snap1", snap_id); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-EBUSY, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotRemoveRequest, MirrorSnapshot) { REQUIRE_FORMAT_V2(); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; if (ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) { 
mock_image_ctx.exclusive_lock = &mock_exclusive_lock; } MockObjectMap mock_object_map; if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) { mock_image_ctx.object_map = &mock_object_map; } expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; expect_snapshot_trash_add(mock_image_ctx, 0); uint64_t snap_id = ictx->snap_info.rbegin()->first; expect_snapshot_get(mock_image_ctx, {snap_id, {cls::rbd::MirrorSnapshotNamespace{}}, "mirror", 123, {}, 0}, 0); expect_get_parent_spec(mock_image_ctx, 0); expect_object_map_snap_remove(mock_image_ctx, 0); MockRemoveImageStateRequest mock_remove_image_state_request; expect_remove_image_state(mock_image_ctx, mock_remove_image_state_request, 0); expect_release_snap_id(mock_image_ctx); expect_snap_remove(mock_image_ctx, 0); expect_rm_snap(mock_image_ctx); C_SaferCond cond_ctx; MockSnapshotRemoveRequest *req = new MockSnapshotRemoveRequest( mock_image_ctx, &cond_ctx, cls::rbd::MirrorSnapshotNamespace(), "mirror", snap_id); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotRemoveRequest, SnapshotTrashAddNotSupported) { REQUIRE_FORMAT_V2(); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; if (ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) { mock_image_ctx.exclusive_lock = &mock_exclusive_lock; } MockObjectMap mock_object_map; if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) { mock_image_ctx.object_map = &mock_object_map; } expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; expect_snapshot_trash_add(mock_image_ctx, -EOPNOTSUPP); uint64_t snap_id = ictx->snap_info.rbegin()->first; expect_get_parent_spec(mock_image_ctx, 0); expect_object_map_snap_remove(mock_image_ctx, 0); expect_release_snap_id(mock_image_ctx); 
expect_snap_remove(mock_image_ctx, 0); expect_rm_snap(mock_image_ctx); C_SaferCond cond_ctx; MockSnapshotRemoveRequest *req = new MockSnapshotRemoveRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1", snap_id); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotRemoveRequest, SnapshotTrashAddError) { REQUIRE_FORMAT_V2(); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx mock_image_ctx(*ictx); expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; uint64_t snap_id = ictx->snap_info.rbegin()->first; expect_snapshot_trash_add(mock_image_ctx, -EINVAL); C_SaferCond cond_ctx; MockSnapshotRemoveRequest *req = new MockSnapshotRemoveRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1", snap_id); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-EINVAL, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotRemoveRequest, SnapshotGetError) { REQUIRE_FORMAT_V2(); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx mock_image_ctx(*ictx); expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; expect_snapshot_trash_add(mock_image_ctx, 0); uint64_t snap_id = ictx->snap_info.rbegin()->first; expect_snapshot_get(mock_image_ctx, {snap_id, {cls::rbd::UserSnapshotNamespace{}}, "snap1", 123, {}, 0}, -EOPNOTSUPP); C_SaferCond cond_ctx; MockSnapshotRemoveRequest *req = new MockSnapshotRemoveRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1", snap_id); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-EOPNOTSUPP, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotRemoveRequest, 
ObjectMapSnapRemoveError) { REQUIRE_FEATURE(RBD_FEATURE_OBJECT_MAP); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx mock_image_ctx(*ictx); MockObjectMap mock_object_map; mock_image_ctx.object_map = &mock_object_map; expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; expect_snapshot_trash_add(mock_image_ctx, 0); uint64_t snap_id = ictx->snap_info.rbegin()->first; expect_snapshot_get(mock_image_ctx, {snap_id, {cls::rbd::UserSnapshotNamespace{}}, "snap1", 123, {}, 0}, 0); expect_get_parent_spec(mock_image_ctx, 0); expect_object_map_snap_remove(mock_image_ctx, -EINVAL); C_SaferCond cond_ctx; MockSnapshotRemoveRequest *req = new MockSnapshotRemoveRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1", snap_id); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-EINVAL, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotRemoveRequest, RemoveChildParentError) { REQUIRE_FEATURE(RBD_FEATURE_LAYERING); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx mock_image_ctx(*ictx); MockObjectMap mock_object_map; if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) { mock_image_ctx.object_map = &mock_object_map; } expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; expect_snapshot_trash_add(mock_image_ctx, 0); uint64_t snap_id = ictx->snap_info.rbegin()->first; expect_snapshot_get(mock_image_ctx, {snap_id, {cls::rbd::UserSnapshotNamespace{}}, "snap1", 123, {}, 0}, 0); expect_get_parent_spec(mock_image_ctx, -ENOENT); C_SaferCond cond_ctx; MockSnapshotRemoveRequest *req = new MockSnapshotRemoveRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1", snap_id); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; 
req->send(); } ASSERT_EQ(-ENOENT, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotRemoveRequest, RemoveChildError) { REQUIRE_FEATURE(RBD_FEATURE_LAYERING); ASSERT_EQ(0, create_snapshot("snap1")); int order = 22; uint64_t features; ASSERT_TRUE(::get_features(&features)); std::string clone_name = get_temp_image_name(); ASSERT_EQ(0, librbd::clone(m_ioctx, m_image_name.c_str(), "snap1", m_ioctx, clone_name.c_str(), features, &order, 0, 0)); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(clone_name, &ictx)); if (ictx->test_features(RBD_FEATURE_DEEP_FLATTEN)) { GTEST_SKIP() << "Skipping due to enabled deep-flatten"; } ASSERT_EQ(0, snap_create(*ictx, "snap1")); librbd::NoOpProgressContext prog_ctx; ASSERT_EQ(0, flatten(*ictx, prog_ctx)); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx mock_image_ctx(*ictx); MockObjectMap mock_object_map; if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) { mock_image_ctx.object_map = &mock_object_map; } expect_op_work_queue(mock_image_ctx); uint64_t snap_id = ictx->snap_info.rbegin()->first; expect_get_parent_spec(mock_image_ctx, 0); MockDetachChildRequest mock_detach_child_request; expect_detach_child(mock_image_ctx, mock_detach_child_request, -EINVAL); C_SaferCond cond_ctx; MockSnapshotRemoveRequest *req = new MockSnapshotRemoveRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1", snap_id); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-EINVAL, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotRemoveRequest, RemoveSnapError) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; if (ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) { mock_image_ctx.exclusive_lock = &mock_exclusive_lock; } MockObjectMap mock_object_map; if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) { 
mock_image_ctx.object_map = &mock_object_map; } expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; expect_snapshot_trash_add(mock_image_ctx, 0); uint64_t snap_id = ictx->snap_info.rbegin()->first; expect_snapshot_get(mock_image_ctx, {snap_id, {cls::rbd::UserSnapshotNamespace{}}, "snap1", 123, {}, 0}, 0); expect_get_parent_spec(mock_image_ctx, 0); expect_object_map_snap_remove(mock_image_ctx, 0); expect_release_snap_id(mock_image_ctx); expect_snap_remove(mock_image_ctx, -ENOENT); C_SaferCond cond_ctx; MockSnapshotRemoveRequest *req = new MockSnapshotRemoveRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1", snap_id); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-ENOENT, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotRemoveRequest, MissingSnap) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; if (ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) { mock_image_ctx.exclusive_lock = &mock_exclusive_lock; } MockObjectMap mock_object_map; if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) { mock_image_ctx.object_map = &mock_object_map; } expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; uint64_t snap_id = 456; C_SaferCond cond_ctx; MockSnapshotRemoveRequest *req = new MockSnapshotRemoveRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1", snap_id); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-ENOENT, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotRemoveRequest, ListChildrenError) { REQUIRE_FEATURE(RBD_FEATURE_LAYERING); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; if (ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) { 
mock_image_ctx.exclusive_lock = &mock_exclusive_lock; } MockObjectMap mock_object_map; if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) { mock_image_ctx.object_map = &mock_object_map; } expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; expect_snapshot_trash_add(mock_image_ctx, 0); uint64_t snap_id = ictx->snap_info.rbegin()->first; expect_snapshot_get(mock_image_ctx, {snap_id, {cls::rbd::UserSnapshotNamespace{}}, "snap1", 123, {}, 1}, 0); const cls::rbd::ChildImageSpecs child_images; expect_children_list(mock_image_ctx, child_images, -EINVAL); C_SaferCond cond_ctx; MockSnapshotRemoveRequest *req = new MockSnapshotRemoveRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1", snap_id); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-EINVAL, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotRemoveRequest, DetachStaleChildError) { REQUIRE_FEATURE(RBD_FEATURE_LAYERING); ASSERT_EQ(0, create_snapshot("snap1")); int order = 22; uint64_t features; ASSERT_TRUE(::get_features(&features)); std::string clone_name = get_temp_image_name(); ASSERT_EQ(0, librbd::clone(m_ioctx, m_image_name.c_str(), "snap1", m_ioctx, clone_name.c_str(), features, &order, 0, 0)); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(clone_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx mock_image_ctx(*ictx); expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; expect_snapshot_trash_add(mock_image_ctx, 0); uint64_t snap_id = ictx->snap_info.rbegin()->first; expect_snapshot_get(mock_image_ctx, {snap_id, {cls::rbd::UserSnapshotNamespace{}}, "snap1", 123, {}, 1}, 0); const cls::rbd::ChildImageSpecs child_images; expect_children_list(mock_image_ctx, child_images, -EINVAL); C_SaferCond cond_ctx; MockSnapshotRemoveRequest *req = new MockSnapshotRemoveRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1", snap_id); { 
std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-EINVAL, cond_ctx.wait()); } } // namespace operation } // namespace librbd
29,070
30.462121
94
cc
null
ceph-main/src/test/librbd/operation/test_mock_SnapshotRollbackRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/librbd/test_mock_fixture.h" #include "test/librbd/test_support.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/librbd/mock/io/MockObjectDispatch.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "include/stringify.h" #include "common/bit_vector.hpp" #include "librbd/ImageState.h" #include "librbd/internal.h" #include "librbd/operation/SnapshotRollbackRequest.h" #include "gmock/gmock.h" #include "gtest/gtest.h" namespace librbd { namespace { struct MockOperationImageCtx : public MockImageCtx { MockOperationImageCtx(ImageCtx &image_ctx) : MockImageCtx(image_ctx) { } }; } // anonymous namespace namespace operation { template <> struct ResizeRequest<MockOperationImageCtx> { static ResizeRequest *s_instance; Context *on_finish = nullptr; static ResizeRequest* create(MockOperationImageCtx &image_ctx, Context *on_finish, uint64_t new_size, bool allow_shrink, ProgressContext &prog_ctx, uint64_t journal_op_tid, bool disable_journal) { ceph_assert(s_instance != nullptr); ceph_assert(journal_op_tid == 0); ceph_assert(disable_journal); s_instance->on_finish = on_finish; return s_instance; } ResizeRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; ResizeRequest<MockOperationImageCtx> *ResizeRequest<MockOperationImageCtx>::s_instance = nullptr; } // namespace operation template <> struct AsyncRequest<MockOperationImageCtx> : public AsyncRequest<MockImageCtx> { MockOperationImageCtx &m_image_ctx; AsyncRequest(MockOperationImageCtx &image_ctx, Context *on_finish) : AsyncRequest<MockImageCtx>(image_ctx, on_finish), m_image_ctx(image_ctx) { } }; } // namespace librbd // template definitions #include "librbd/AsyncRequest.cc" #include "librbd/AsyncObjectThrottle.cc" #include "librbd/operation/Request.cc" #include "librbd/operation/SnapshotRollbackRequest.cc" namespace librbd { namespace operation { using ::testing::_; using 
::testing::InSequence; using ::testing::Return; using ::testing::WithArg; class TestMockOperationSnapshotRollbackRequest : public TestMockFixture { public: typedef SnapshotRollbackRequest<MockOperationImageCtx> MockSnapshotRollbackRequest; typedef ResizeRequest<MockOperationImageCtx> MockResizeRequest; void expect_block_writes(MockOperationImageCtx &mock_image_ctx, int r) { EXPECT_CALL(*mock_image_ctx.io_image_dispatcher, block_writes(_)) .WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue)); } void expect_unblock_writes(MockOperationImageCtx &mock_image_ctx) { EXPECT_CALL(*mock_image_ctx.io_image_dispatcher, unblock_writes()) .Times(1); } void expect_get_image_size(MockOperationImageCtx &mock_image_ctx, uint64_t size) { EXPECT_CALL(mock_image_ctx, get_image_size(CEPH_NOSNAP)) .WillOnce(Return(size)); } void expect_resize(MockOperationImageCtx &mock_image_ctx, MockResizeRequest &mock_resize_request, int r) { expect_get_image_size(mock_image_ctx, 123); EXPECT_CALL(mock_resize_request, send()) .WillOnce(FinishRequest(&mock_resize_request, r, &mock_image_ctx)); } void expect_get_flags(MockOperationImageCtx &mock_image_ctx, uint64_t snap_id, int r) { EXPECT_CALL(mock_image_ctx, get_flags(snap_id, _)) .WillOnce(Return(r)); } void expect_object_may_exist(MockOperationImageCtx &mock_image_ctx, uint64_t object_no, bool exists) { if (mock_image_ctx.object_map != nullptr) { EXPECT_CALL(*mock_image_ctx.object_map, object_may_exist(object_no)) .WillOnce(Return(exists)); } } void expect_get_snap_object_map(MockOperationImageCtx &mock_image_ctx, MockObjectMap *mock_object_map, uint64_t snap_id) { if (mock_image_ctx.object_map != nullptr) { EXPECT_CALL(mock_image_ctx, create_object_map(snap_id)) .WillOnce(Return(mock_object_map)); EXPECT_CALL(*mock_object_map, open(_)) .WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue)); } } void expect_rollback_object_map(MockOperationImageCtx &mock_image_ctx, MockObjectMap &mock_object_map) { if 
(mock_image_ctx.object_map != nullptr) { EXPECT_CALL(mock_object_map, rollback(_, _)) .WillOnce(WithArg<1>(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue))); } } void expect_get_object_name(MockOperationImageCtx &mock_image_ctx, uint64_t object_num) { EXPECT_CALL(mock_image_ctx, get_object_name(object_num)) .WillOnce(Return("object-name-" + stringify(object_num))); } void expect_get_current_size(MockOperationImageCtx &mock_image_ctx, uint64_t size) { EXPECT_CALL(mock_image_ctx, get_current_size()) .WillOnce(Return(size)); } void expect_rollback_snap_id(MockOperationImageCtx &mock_image_ctx, const std::string &oid, int r) { EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.data_ctx), selfmanaged_snap_rollback(oid, _)) .WillOnce(Return(r)); } void expect_rollback(MockOperationImageCtx &mock_image_ctx, int r) { expect_get_current_size(mock_image_ctx, 1); expect_object_may_exist(mock_image_ctx, 0, true); expect_get_object_name(mock_image_ctx, 0); expect_rollback_snap_id(mock_image_ctx, "object-name-0", r); } void expect_create_object_map(MockOperationImageCtx &mock_image_ctx, MockObjectMap *mock_object_map) { EXPECT_CALL(mock_image_ctx, create_object_map(_)) .WillOnce(Return(mock_object_map)); } void expect_open_object_map(MockOperationImageCtx &mock_image_ctx, MockObjectMap &mock_object_map) { EXPECT_CALL(mock_object_map, open(_)) .WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue)); } void expect_refresh_object_map(MockOperationImageCtx &mock_image_ctx, MockObjectMap &mock_object_map) { if (mock_image_ctx.object_map != nullptr) { expect_create_object_map(mock_image_ctx, &mock_object_map); expect_open_object_map(mock_image_ctx, mock_object_map); } } void expect_invalidate_cache(MockOperationImageCtx &mock_image_ctx, int r) { EXPECT_CALL(*mock_image_ctx.io_image_dispatcher, invalidate_cache(_)) .WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue)); } int when_snap_rollback(MockOperationImageCtx &mock_image_ctx, const std::string 
&snap_name, uint64_t snap_id, uint64_t snap_size) { C_SaferCond cond_ctx; librbd::NoOpProgressContext prog_ctx; MockSnapshotRollbackRequest *req = new MockSnapshotRollbackRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), snap_name, snap_id, snap_size, prog_ctx); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } return cond_ctx.wait(); } }; TEST_F(TestMockOperationSnapshotRollbackRequest, Success) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockOperationImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; MockObjectMap mock_snap_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); expect_op_work_queue(mock_image_ctx); InSequence seq; MockResizeRequest mock_resize_request; expect_append_op_event(mock_image_ctx, false, 0); expect_block_writes(mock_image_ctx, 0); expect_resize(mock_image_ctx, mock_resize_request, 0); expect_get_flags(mock_image_ctx, 123, 0); expect_get_snap_object_map(mock_image_ctx, &mock_snap_object_map, 123); expect_rollback_object_map(mock_image_ctx, mock_object_map); expect_rollback(mock_image_ctx, 0); expect_refresh_object_map(mock_image_ctx, mock_object_map); expect_invalidate_cache(mock_image_ctx, 0); expect_commit_op_event(mock_image_ctx, 0); expect_unblock_writes(mock_image_ctx); ASSERT_EQ(0, when_snap_rollback(mock_image_ctx, "snap", 123, 0)); } TEST_F(TestMockOperationSnapshotRollbackRequest, BlockWritesError) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockOperationImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); expect_op_work_queue(mock_image_ctx); InSequence seq; expect_append_op_event(mock_image_ctx, false, 0); 
expect_block_writes(mock_image_ctx, -EINVAL); expect_commit_op_event(mock_image_ctx, -EINVAL); expect_unblock_writes(mock_image_ctx); ASSERT_EQ(-EINVAL, when_snap_rollback(mock_image_ctx, "snap", 123, 0)); } TEST_F(TestMockOperationSnapshotRollbackRequest, SkipResize) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockOperationImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; MockObjectMap mock_snap_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); expect_op_work_queue(mock_image_ctx); InSequence seq; expect_append_op_event(mock_image_ctx, false, 0); expect_block_writes(mock_image_ctx, 0); expect_get_image_size(mock_image_ctx, 345); expect_get_flags(mock_image_ctx, 123, 0); expect_get_snap_object_map(mock_image_ctx, &mock_snap_object_map, 123); expect_rollback_object_map(mock_image_ctx, mock_object_map); expect_rollback(mock_image_ctx, 0); expect_refresh_object_map(mock_image_ctx, mock_object_map); expect_invalidate_cache(mock_image_ctx, 0); expect_commit_op_event(mock_image_ctx, 0); expect_unblock_writes(mock_image_ctx); ASSERT_EQ(0, when_snap_rollback(mock_image_ctx, "snap", 123, 345)); } TEST_F(TestMockOperationSnapshotRollbackRequest, ResizeError) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockOperationImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); expect_op_work_queue(mock_image_ctx); InSequence seq; MockResizeRequest mock_resize_request; expect_append_op_event(mock_image_ctx, false, 0); expect_block_writes(mock_image_ctx, 0); expect_resize(mock_image_ctx, mock_resize_request, -EINVAL); expect_commit_op_event(mock_image_ctx, -EINVAL); expect_unblock_writes(mock_image_ctx); ASSERT_EQ(-EINVAL, 
when_snap_rollback(mock_image_ctx, "snap", 123, 0)); } TEST_F(TestMockOperationSnapshotRollbackRequest, RollbackObjectsError) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockOperationImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; MockObjectMap mock_snap_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); expect_op_work_queue(mock_image_ctx); InSequence seq; MockResizeRequest mock_resize_request; expect_append_op_event(mock_image_ctx, false, 0); expect_block_writes(mock_image_ctx, 0); expect_resize(mock_image_ctx, mock_resize_request, 0); expect_get_flags(mock_image_ctx, 123, 0); expect_get_snap_object_map(mock_image_ctx, &mock_snap_object_map, 123); expect_rollback_object_map(mock_image_ctx, mock_object_map); expect_rollback(mock_image_ctx, -EINVAL); expect_commit_op_event(mock_image_ctx, -EINVAL); expect_unblock_writes(mock_image_ctx); ASSERT_EQ(-EINVAL, when_snap_rollback(mock_image_ctx, "snap", 123, 0)); } TEST_F(TestMockOperationSnapshotRollbackRequest, InvalidateCacheError) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); REQUIRE(ictx->cache); MockOperationImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; MockObjectMap mock_snap_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); expect_op_work_queue(mock_image_ctx); InSequence seq; MockResizeRequest mock_resize_request; expect_append_op_event(mock_image_ctx, false, 0); expect_block_writes(mock_image_ctx, 0); expect_resize(mock_image_ctx, mock_resize_request, 0); expect_get_flags(mock_image_ctx, 123, 0); expect_get_snap_object_map(mock_image_ctx, &mock_snap_object_map, 123); expect_rollback_object_map(mock_image_ctx, mock_object_map); expect_rollback(mock_image_ctx, 0); 
expect_refresh_object_map(mock_image_ctx, mock_object_map); expect_invalidate_cache(mock_image_ctx, -EINVAL); expect_commit_op_event(mock_image_ctx, -EINVAL); expect_unblock_writes(mock_image_ctx); ASSERT_EQ(-EINVAL, when_snap_rollback(mock_image_ctx, "snap", 123, 0)); } } // namespace operation } // namespace librbd
13,800
36.502717
103
cc
null
ceph-main/src/test/librbd/operation/test_mock_SnapshotUnprotectRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/librbd/test_mock_fixture.h" #include "test/librbd/test_support.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "test/librados_test_stub/MockTestMemRadosClient.h" #include "include/rados/librados.hpp" #include "common/bit_vector.hpp" #include "librbd/ImageState.h" #include "librbd/internal.h" #include "librbd/operation/SnapshotUnprotectRequest.h" #include "gmock/gmock.h" #include "gtest/gtest.h" // template definitions #include "librbd/operation/SnapshotUnprotectRequest.cc" namespace librbd { namespace operation { using ::testing::_; using ::testing::DoAll; using ::testing::DoDefault; using ::testing::Return; using ::testing::SetArgReferee; using ::testing::SetArgPointee; using ::testing::StrEq; using ::testing::WithArg; class TestMockOperationSnapshotUnprotectRequest : public TestMockFixture { public: typedef SnapshotUnprotectRequest<MockImageCtx> MockSnapshotUnprotectRequest; void expect_get_snap_id(MockImageCtx &mock_image_ctx, uint64_t snap_id) { EXPECT_CALL(mock_image_ctx, get_snap_id(_, _)) .WillOnce(Return(snap_id)); } void expect_is_snap_unprotected(MockImageCtx &mock_image_ctx, bool is_unprotected, int r) { auto &expect = EXPECT_CALL(mock_image_ctx, is_snap_unprotected(_, _)); if (r < 0) { expect.WillOnce(Return(r)); } else { expect.WillOnce(DoAll(SetArgPointee<1>(is_unprotected), Return(0))); } } void expect_set_protection_status(MockImageCtx &mock_image_ctx, uint64_t snap_id, uint8_t status, int r) { bufferlist bl; encode(snap_id, bl); encode(status, bl); auto &expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), exec(mock_image_ctx.header_oid, _, StrEq("rbd"), StrEq("set_protection_status"), ContentsEqual(bl), _, _, _)); if (r < 0) { expect.WillOnce(Return(r)); } else { expect.WillOnce(DoDefault()); } } size_t expect_create_pool_io_contexts(MockImageCtx &mock_image_ctx) { 
librados::MockTestMemIoCtxImpl &io_ctx_impl = get_mock_io_ctx(mock_image_ctx.md_ctx); librados::MockTestMemRadosClient *rados_client = io_ctx_impl.get_mock_rados_client(); std::list<std::pair<int64_t, std::string> > pools; int r = rados_client->pool_list(pools); if (r < 0) { ADD_FAILURE() << "failed to list pools"; return 0; } EXPECT_CALL(*rados_client, create_ioctx(_, _)) .Times(pools.size()).WillRepeatedly(DoAll( GetReference(&io_ctx_impl), Return(&io_ctx_impl))); return pools.size(); } void expect_get_children(MockImageCtx &mock_image_ctx, size_t pools, int r) { bufferlist bl; std::set<std::string> children; encode(children, bl); auto &expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), exec(RBD_CHILDREN, _, StrEq("rbd"), StrEq("get_children"), _, _, _, _)); if (r < 0) { expect.WillRepeatedly(Return(r)); } else { expect.Times(pools).WillRepeatedly(DoAll( SetArgPointee<5>(bl), Return(0))); } } }; TEST_F(TestMockOperationSnapshotUnprotectRequest, Success) { REQUIRE_FEATURE(RBD_FEATURE_LAYERING); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx mock_image_ctx(*ictx); expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; uint64_t snap_id = ictx->snap_info.rbegin()->first; expect_get_snap_id(mock_image_ctx, snap_id); expect_is_snap_unprotected(mock_image_ctx, false, 0); expect_set_protection_status(mock_image_ctx, snap_id, RBD_PROTECTION_STATUS_UNPROTECTING, 0); size_t pools = expect_create_pool_io_contexts(mock_image_ctx); expect_get_children(mock_image_ctx, pools, -ENOENT); expect_set_protection_status(mock_image_ctx, snap_id, RBD_PROTECTION_STATUS_UNPROTECTED, 0); C_SaferCond cond_ctx; MockSnapshotUnprotectRequest *req = new MockSnapshotUnprotectRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1"); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(0, 
cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotUnprotectRequest, GetSnapIdMissing) { REQUIRE_FEATURE(RBD_FEATURE_LAYERING); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx mock_image_ctx(*ictx); expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; expect_get_snap_id(mock_image_ctx, CEPH_NOSNAP); C_SaferCond cond_ctx; MockSnapshotUnprotectRequest *req = new MockSnapshotUnprotectRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1"); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-ENOENT, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotUnprotectRequest, IsSnapUnprotectedError) { REQUIRE_FEATURE(RBD_FEATURE_LAYERING); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx mock_image_ctx(*ictx); expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; expect_get_snap_id(mock_image_ctx, ictx->snap_info.rbegin()->first); expect_is_snap_unprotected(mock_image_ctx, false, -EBADMSG); C_SaferCond cond_ctx; MockSnapshotUnprotectRequest *req = new MockSnapshotUnprotectRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1"); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-EBADMSG, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotUnprotectRequest, SnapAlreadyUnprotected) { REQUIRE_FEATURE(RBD_FEATURE_LAYERING); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx mock_image_ctx(*ictx); expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; expect_get_snap_id(mock_image_ctx, ictx->snap_info.rbegin()->first); 
expect_is_snap_unprotected(mock_image_ctx, true, 0); C_SaferCond cond_ctx; MockSnapshotUnprotectRequest *req = new MockSnapshotUnprotectRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1"); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-EINVAL, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotUnprotectRequest, SetProtectionStatusError) { REQUIRE_FEATURE(RBD_FEATURE_LAYERING); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx mock_image_ctx(*ictx); expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; uint64_t snap_id = ictx->snap_info.rbegin()->first; expect_get_snap_id(mock_image_ctx, snap_id); expect_is_snap_unprotected(mock_image_ctx, false, 0); expect_set_protection_status(mock_image_ctx, snap_id, RBD_PROTECTION_STATUS_UNPROTECTING, -EINVAL); C_SaferCond cond_ctx; MockSnapshotUnprotectRequest *req = new MockSnapshotUnprotectRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1"); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-EINVAL, cond_ctx.wait()); } TEST_F(TestMockOperationSnapshotUnprotectRequest, ChildrenExist) { REQUIRE_FEATURE(RBD_FEATURE_LAYERING); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap1")); ASSERT_EQ(0, ictx->state->refresh_if_required()); MockImageCtx mock_image_ctx(*ictx); expect_op_work_queue(mock_image_ctx); ::testing::InSequence seq; uint64_t snap_id = ictx->snap_info.rbegin()->first; expect_get_snap_id(mock_image_ctx, snap_id); expect_is_snap_unprotected(mock_image_ctx, false, 0); expect_set_protection_status(mock_image_ctx, snap_id, RBD_PROTECTION_STATUS_UNPROTECTING, 0); size_t pools = expect_create_pool_io_contexts(mock_image_ctx); expect_get_children(mock_image_ctx, pools, 0); 
expect_set_protection_status(mock_image_ctx, snap_id, RBD_PROTECTION_STATUS_PROTECTED, 0); C_SaferCond cond_ctx; MockSnapshotUnprotectRequest *req = new MockSnapshotUnprotectRequest( mock_image_ctx, &cond_ctx, cls::rbd::UserSnapshotNamespace(), "snap1"); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-EBUSY, cond_ctx.wait()); } } // namespace operation } // namespace librbd
9,303
32.467626
79
cc
null
ceph-main/src/test/librbd/operation/test_mock_TrimRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/librbd/test_mock_fixture.h" #include "test/librbd/test_support.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/librbd/mock/io/MockObjectDispatch.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "common/bit_vector.hpp" #include "librbd/AsyncRequest.h" #include "librbd/internal.h" #include "librbd/ObjectMap.h" #include "librbd/Utils.h" #include "librbd/operation/TrimRequest.h" #include "gmock/gmock.h" #include "gtest/gtest.h" #include <boost/variant.hpp> namespace librbd { namespace { struct MockTestImageCtx : public MockImageCtx { MockTestImageCtx(ImageCtx &image_ctx) : MockImageCtx(image_ctx) { } }; } // anonymous namespace template<> struct AsyncRequest<librbd::MockTestImageCtx> { librbd::MockTestImageCtx& m_image_ctx; Context *on_finish; AsyncRequest(librbd::MockTestImageCtx& image_ctx, Context* on_finish) : m_image_ctx(image_ctx), on_finish(on_finish) { } virtual ~AsyncRequest() { } Context* create_callback_context() { return util::create_context_callback(this); } Context* create_async_callback_context() { return util::create_context_callback<AsyncRequest, &AsyncRequest::async_complete>(this); } void complete(int r) { if (should_complete(r)) { async_complete(r); } } void async_complete(int r) { on_finish->complete(r); delete this; } bool is_canceled() const { return false; } virtual void send() = 0; virtual bool should_complete(int r) = 0; }; namespace io { struct DiscardVisitor : public boost::static_visitor<ObjectDispatchSpec::DiscardRequest*> { ObjectDispatchSpec::DiscardRequest* operator()(ObjectDispatchSpec::DiscardRequest& discard) const { return &discard; } template <typename T> ObjectDispatchSpec::DiscardRequest* operator()(T& t) const { return nullptr; } }; } // namespace io } // namespace librbd // template definitions #include "librbd/AsyncObjectThrottle.cc" #include "librbd/operation/TrimRequest.cc" 
namespace librbd { namespace operation { using ::testing::_; using ::testing::DoAll; using ::testing::InSequence; using ::testing::Invoke; using ::testing::Return; using ::testing::StrEq; using ::testing::WithArg; class TestMockOperationTrimRequest : public TestMockFixture { public: typedef TrimRequest<MockTestImageCtx> MockTrimRequest; int create_snapshot(const char *snap_name) { librbd::ImageCtx *ictx; int r = open_image(m_image_name, &ictx); if (r < 0) { return r; } r = snap_create(*ictx, snap_name); if (r < 0) { return r; } r = snap_protect(*ictx, snap_name); if (r < 0) { return r; } close_image(ictx); return 0; } void expect_is_lock_owner(MockTestImageCtx &mock_image_ctx) { if (mock_image_ctx.exclusive_lock != nullptr) { EXPECT_CALL(*mock_image_ctx.exclusive_lock, is_lock_owner()) .WillRepeatedly(Return(true)); } } void expect_object_map_update(MockTestImageCtx &mock_image_ctx, uint64_t start_object, uint64_t end_object, uint8_t state, uint8_t current_state, bool updated, int ret_val) { if (mock_image_ctx.object_map != nullptr) { EXPECT_CALL(*mock_image_ctx.object_map, aio_update(CEPH_NOSNAP, start_object, end_object, state, boost::optional<uint8_t>(current_state), _, false, _)) .WillOnce(WithArg<7>(Invoke([&mock_image_ctx, updated, ret_val](Context *ctx) { if (updated) { mock_image_ctx.op_work_queue->queue(ctx, ret_val); } return updated; }))); } } void expect_get_parent_overlap(MockTestImageCtx &mock_image_ctx, uint64_t overlap) { EXPECT_CALL(mock_image_ctx, get_parent_overlap(CEPH_NOSNAP, _)) .WillOnce(WithArg<1>(Invoke([overlap](uint64_t *o) { *o = overlap; return 0; }))); } void expect_reduce_parent_overlap(MockTestImageCtx& mock_image_ctx, uint64_t overlap) { EXPECT_CALL(mock_image_ctx, reduce_parent_overlap(overlap, false)) .WillOnce(Return(std::make_pair(overlap, io::ImageArea::DATA))); } void expect_get_area_size(MockTestImageCtx& mock_image_ctx) { EXPECT_CALL(mock_image_ctx, get_area_size(io::ImageArea::CRYPTO_HEADER)) .WillOnce(Return(0)); } void 
expect_object_may_exist(MockTestImageCtx &mock_image_ctx, uint64_t object_no, bool exists) { if (mock_image_ctx.object_map != nullptr) { EXPECT_CALL(*mock_image_ctx.object_map, object_may_exist(object_no)) .WillOnce(Return(exists)); } } void expect_get_object_name(MockTestImageCtx &mock_image_ctx, uint64_t object_no, const std::string& oid) { EXPECT_CALL(mock_image_ctx, get_object_name(object_no)) .WillOnce(Return(oid)); } void expect_aio_remove(MockTestImageCtx &mock_image_ctx, const std::string& oid, int ret_val) { EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.data_ctx), remove(oid, _)) .WillOnce(Return(ret_val)); } void expect_object_discard(MockImageCtx &mock_image_ctx, io::MockObjectDispatch& mock_io_object_dispatch, uint64_t offset, uint64_t length, bool update_object_map, int r) { EXPECT_CALL(*mock_image_ctx.io_object_dispatcher, send(_)) .WillOnce(Invoke([&mock_image_ctx, offset, length, update_object_map, r] (io::ObjectDispatchSpec* spec) { auto discard = boost::apply_visitor(io::DiscardVisitor(), spec->request); ASSERT_TRUE(discard != nullptr); ASSERT_EQ(offset, discard->object_off); ASSERT_EQ(length, discard->object_len); int flags = 0; if (!update_object_map) { flags = io::OBJECT_DISCARD_FLAG_DISABLE_OBJECT_MAP_UPDATE; } ASSERT_EQ(flags, discard->discard_flags); spec->dispatch_result = io::DISPATCH_RESULT_COMPLETE; mock_image_ctx.op_work_queue->queue(&spec->dispatcher_ctx, r); })); } }; TEST_F(TestMockOperationTrimRequest, SuccessRemove) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockTestImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); expect_op_work_queue(mock_image_ctx); expect_is_lock_owner(mock_image_ctx); InSequence seq; EXPECT_CALL(mock_image_ctx, get_stripe_period()).WillOnce(Return(ictx->get_object_size())); EXPECT_CALL(mock_image_ctx, 
get_stripe_count()).WillOnce(Return(ictx->get_stripe_count())); // pre expect_object_map_update(mock_image_ctx, 0, 1, OBJECT_PENDING, OBJECT_EXISTS, true, 0); // copy-up expect_get_area_size(mock_image_ctx); expect_get_parent_overlap(mock_image_ctx, 0); expect_reduce_parent_overlap(mock_image_ctx, 0); // remove expect_object_may_exist(mock_image_ctx, 0, true); expect_get_object_name(mock_image_ctx, 0, "object0"); expect_aio_remove(mock_image_ctx, "object0", 0); // post expect_object_map_update(mock_image_ctx, 0, 1, OBJECT_NONEXISTENT, OBJECT_PENDING, true, 0); C_SaferCond cond_ctx; librbd::NoOpProgressContext progress_ctx; MockTrimRequest *req = new MockTrimRequest( mock_image_ctx, &cond_ctx, m_image_size, 0, progress_ctx); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); } TEST_F(TestMockOperationTrimRequest, SuccessCopyUp) { REQUIRE_FEATURE(RBD_FEATURE_LAYERING) ASSERT_EQ(0, create_snapshot("snap1")); int order = 22; uint64_t features; ASSERT_TRUE(::get_features(&features)); std::string clone_name = get_temp_image_name(); ASSERT_EQ(0, librbd::clone(m_ioctx, m_image_name.c_str(), "snap1", m_ioctx, clone_name.c_str(), features, &order, 0, 0)); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(clone_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap")); MockTestImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); expect_op_work_queue(mock_image_ctx); expect_is_lock_owner(mock_image_ctx); InSequence seq; EXPECT_CALL(mock_image_ctx, get_stripe_period()).WillOnce(Return(ictx->get_object_size())); EXPECT_CALL(mock_image_ctx, get_stripe_count()).WillOnce(Return(ictx->get_stripe_count())); // pre expect_object_map_update(mock_image_ctx, 0, 2, OBJECT_PENDING, OBJECT_EXISTS, true, 0); // copy-up io::MockObjectDispatch mock_io_object_dispatch; 
expect_get_area_size(mock_image_ctx); expect_get_parent_overlap(mock_image_ctx, ictx->get_object_size()); expect_reduce_parent_overlap(mock_image_ctx, ictx->get_object_size()); expect_get_object_name(mock_image_ctx, 0, "object0"); expect_object_discard(mock_image_ctx, mock_io_object_dispatch, 0, ictx->get_object_size(), false, 0); // remove expect_object_may_exist(mock_image_ctx, 1, true); expect_get_object_name(mock_image_ctx, 1, "object1"); expect_aio_remove(mock_image_ctx, "object1", 0); // post expect_object_map_update(mock_image_ctx, 0, 2, OBJECT_NONEXISTENT, OBJECT_PENDING, true, 0); C_SaferCond cond_ctx; librbd::NoOpProgressContext progress_ctx; MockTrimRequest *req = new MockTrimRequest( mock_image_ctx, &cond_ctx, 2 * ictx->get_object_size(), 0, progress_ctx); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); } TEST_F(TestMockOperationTrimRequest, SuccessBoundary) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockTestImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); expect_op_work_queue(mock_image_ctx); expect_is_lock_owner(mock_image_ctx); InSequence seq; EXPECT_CALL(mock_image_ctx, get_stripe_period()).WillOnce(Return(ictx->get_object_size())); EXPECT_CALL(mock_image_ctx, get_stripe_count()).WillOnce(Return(ictx->get_stripe_count())); // boundary io::MockObjectDispatch mock_io_object_dispatch; expect_object_discard(mock_image_ctx, mock_io_object_dispatch, 1, ictx->get_object_size() - 1, true, 0); C_SaferCond cond_ctx; librbd::NoOpProgressContext progress_ctx; MockTrimRequest *req = new MockTrimRequest( mock_image_ctx, &cond_ctx, ictx->get_object_size(), 1, progress_ctx); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(0, cond_ctx.wait()); } 
TEST_F(TestMockOperationTrimRequest, SuccessNoOp) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockTestImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); } TEST_F(TestMockOperationTrimRequest, RemoveError) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockTestImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); expect_op_work_queue(mock_image_ctx); expect_is_lock_owner(mock_image_ctx); InSequence seq; EXPECT_CALL(mock_image_ctx, get_stripe_period()).WillOnce(Return(ictx->get_object_size())); EXPECT_CALL(mock_image_ctx, get_stripe_count()).WillOnce(Return(ictx->get_stripe_count())); // pre expect_object_map_update(mock_image_ctx, 0, 1, OBJECT_PENDING, OBJECT_EXISTS, false, 0); // copy-up expect_get_area_size(mock_image_ctx); expect_get_parent_overlap(mock_image_ctx, 0); expect_reduce_parent_overlap(mock_image_ctx, 0); // remove expect_object_may_exist(mock_image_ctx, 0, true); expect_get_object_name(mock_image_ctx, 0, "object0"); expect_aio_remove(mock_image_ctx, "object0", -EPERM); C_SaferCond cond_ctx; librbd::NoOpProgressContext progress_ctx; MockTrimRequest *req = new MockTrimRequest( mock_image_ctx, &cond_ctx, m_image_size, 0, progress_ctx); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-EPERM, cond_ctx.wait()); } TEST_F(TestMockOperationTrimRequest, CopyUpError) { REQUIRE_FEATURE(RBD_FEATURE_LAYERING) ASSERT_EQ(0, create_snapshot("snap1")); int order = 22; uint64_t features; ASSERT_TRUE(::get_features(&features)); std::string clone_name = get_temp_image_name(); ASSERT_EQ(0, librbd::clone(m_ioctx, m_image_name.c_str(), "snap1", 
m_ioctx, clone_name.c_str(), features, &order, 0, 0)); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(clone_name, &ictx)); ASSERT_EQ(0, snap_create(*ictx, "snap")); MockTestImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); expect_op_work_queue(mock_image_ctx); expect_is_lock_owner(mock_image_ctx); InSequence seq; EXPECT_CALL(mock_image_ctx, get_stripe_period()).WillOnce(Return(ictx->get_object_size())); EXPECT_CALL(mock_image_ctx, get_stripe_count()).WillOnce(Return(ictx->get_stripe_count())); // pre expect_object_map_update(mock_image_ctx, 0, 2, OBJECT_PENDING, OBJECT_EXISTS, false, 0); // copy-up io::MockObjectDispatch mock_io_object_dispatch; expect_get_area_size(mock_image_ctx); expect_get_parent_overlap(mock_image_ctx, ictx->get_object_size()); expect_reduce_parent_overlap(mock_image_ctx, ictx->get_object_size()); expect_get_object_name(mock_image_ctx, 0, "object0"); expect_object_discard(mock_image_ctx, mock_io_object_dispatch, 0, ictx->get_object_size(), false, -EINVAL); C_SaferCond cond_ctx; librbd::NoOpProgressContext progress_ctx; MockTrimRequest *req = new MockTrimRequest( mock_image_ctx, &cond_ctx, 2 * ictx->get_object_size(), 0, progress_ctx); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-EINVAL, cond_ctx.wait()); } TEST_F(TestMockOperationTrimRequest, BoundaryError) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockTestImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; MockJournal mock_journal; MockObjectMap mock_object_map; initialize_features(ictx, mock_image_ctx, mock_exclusive_lock, mock_journal, mock_object_map); expect_op_work_queue(mock_image_ctx); expect_is_lock_owner(mock_image_ctx); InSequence seq; EXPECT_CALL(mock_image_ctx, 
get_stripe_period()).WillOnce(Return(ictx->get_object_size())); EXPECT_CALL(mock_image_ctx, get_stripe_count()).WillOnce(Return(ictx->get_stripe_count())); // boundary io::MockObjectDispatch mock_io_object_dispatch; expect_object_discard(mock_image_ctx, mock_io_object_dispatch, 1, ictx->get_object_size() - 1, true, -EINVAL); C_SaferCond cond_ctx; librbd::NoOpProgressContext progress_ctx; MockTrimRequest *req = new MockTrimRequest( mock_image_ctx, &cond_ctx, ictx->get_object_size(), 1, progress_ctx); { std::shared_lock owner_locker{mock_image_ctx.owner_lock}; req->send(); } ASSERT_EQ(-EINVAL, cond_ctx.wait()); } } // namespace operation } // namespace librbd
16,781
32.97166
93
cc
null
ceph-main/src/test/librbd/trash/test_mock_MoveRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/librbd/test_mock_fixture.h" #include "test/librbd/test_support.h" #include "test/librbd/mock/MockExclusiveLock.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/librbd/mock/MockImageState.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "test/librados_test_stub/MockTestMemRadosClient.h" #include "include/rbd/librbd.hpp" #include "librbd/Utils.h" #include "librbd/trash/MoveRequest.h" namespace librbd { namespace { struct MockTestImageCtx : public MockImageCtx { static MockTestImageCtx *s_instance; static MockTestImageCtx *create(const std::string &image_name, const std::string &image_id, const char *snap, librados::IoCtx& p, bool read_only) { ceph_assert(s_instance != nullptr); s_instance->construct(image_name, image_id); return s_instance; } MOCK_METHOD2(construct, void(const std::string&, const std::string&)); MockTestImageCtx(librbd::ImageCtx &image_ctx) : librbd::MockImageCtx(image_ctx) { s_instance = this; } }; MockTestImageCtx *MockTestImageCtx::s_instance = nullptr; } // anonymous namespace } // namespace librbd #include "librbd/trash/MoveRequest.cc" namespace librbd { namespace trash { using ::testing::_; using ::testing::InSequence; using ::testing::Invoke; using ::testing::Return; using ::testing::StrEq; using ::testing::WithArg; struct TestMockTrashMoveRequest : public TestMockFixture { typedef MoveRequest<librbd::MockTestImageCtx> MockMoveRequest; void expect_trash_add(MockTestImageCtx &mock_image_ctx, const std::string& image_id, cls::rbd::TrashImageSource trash_image_source, const std::string& name, const utime_t& end_time, int r) { EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), exec(StrEq("rbd_trash"), _, StrEq("rbd"), StrEq("trash_add"), _, _, _, _)) .WillOnce(WithArg<4>(Invoke([=](bufferlist& in_bl) { std::string id; cls::rbd::TrashImageSpec trash_image_spec; auto bl_it = in_bl.cbegin(); decode(id, 
bl_it); decode(trash_image_spec, bl_it); EXPECT_EQ(id, image_id); EXPECT_EQ(trash_image_spec.source, trash_image_source); EXPECT_EQ(trash_image_spec.name, name); EXPECT_EQ(trash_image_spec.deferment_end_time, end_time); return r; }))); } void expect_aio_remove(MockTestImageCtx &mock_image_ctx, const std::string& oid, int r) { EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), remove(oid, _)) .WillOnce(Return(r)); } void expect_dir_remove(MockTestImageCtx& mock_image_ctx, const std::string& name, const std::string& id, int r) { bufferlist in_bl; encode(name, in_bl); encode(id, in_bl); EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), exec(StrEq("rbd_directory"), _, StrEq("rbd"), StrEq("dir_remove_image"), ContentsEqual(in_bl), _, _, _)) .WillOnce(Return(r)); } }; TEST_F(TestMockTrashMoveRequest, Success) { REQUIRE_FORMAT_V2(); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockTestImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; if (ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) { mock_image_ctx.exclusive_lock = &mock_exclusive_lock; } expect_op_work_queue(mock_image_ctx); InSequence seq; utime_t delete_time{ceph_clock_now()}; expect_trash_add(mock_image_ctx, "image id", cls::rbd::TRASH_IMAGE_SOURCE_USER, "image name", delete_time, 0); expect_aio_remove(mock_image_ctx, util::id_obj_name("image name"), 0); expect_dir_remove(mock_image_ctx, "image name", "image id", 0); C_SaferCond ctx; cls::rbd::TrashImageSpec trash_image_spec{ cls::rbd::TRASH_IMAGE_SOURCE_USER, "image name", delete_time, delete_time}; auto req = MockMoveRequest::create(mock_image_ctx.md_ctx, "image id", trash_image_spec, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockTrashMoveRequest, TrashAddError) { REQUIRE_FORMAT_V2(); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockTestImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; if (ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) { 
mock_image_ctx.exclusive_lock = &mock_exclusive_lock; } expect_op_work_queue(mock_image_ctx); InSequence seq; utime_t delete_time{ceph_clock_now()}; expect_trash_add(mock_image_ctx, "image id", cls::rbd::TRASH_IMAGE_SOURCE_USER, "image name", delete_time, -EPERM); C_SaferCond ctx; cls::rbd::TrashImageSpec trash_image_spec{ cls::rbd::TRASH_IMAGE_SOURCE_USER, "image name", delete_time, delete_time}; auto req = MockMoveRequest::create(mock_image_ctx.md_ctx, "image id", trash_image_spec, &ctx); req->send(); ASSERT_EQ(-EPERM, ctx.wait()); } TEST_F(TestMockTrashMoveRequest, RemoveIdError) { REQUIRE_FORMAT_V2(); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockTestImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; if (ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) { mock_image_ctx.exclusive_lock = &mock_exclusive_lock; } expect_op_work_queue(mock_image_ctx); InSequence seq; utime_t delete_time{ceph_clock_now()}; expect_trash_add(mock_image_ctx, "image id", cls::rbd::TRASH_IMAGE_SOURCE_USER, "image name", delete_time, 0); expect_aio_remove(mock_image_ctx, util::id_obj_name("image name"), -EPERM); C_SaferCond ctx; cls::rbd::TrashImageSpec trash_image_spec{ cls::rbd::TRASH_IMAGE_SOURCE_USER, "image name", delete_time, delete_time}; auto req = MockMoveRequest::create(mock_image_ctx.md_ctx, "image id", trash_image_spec, &ctx); req->send(); ASSERT_EQ(-EPERM, ctx.wait()); } TEST_F(TestMockTrashMoveRequest, DirectoryRemoveError) { REQUIRE_FORMAT_V2(); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockTestImageCtx mock_image_ctx(*ictx); MockExclusiveLock mock_exclusive_lock; if (ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) { mock_image_ctx.exclusive_lock = &mock_exclusive_lock; } expect_op_work_queue(mock_image_ctx); InSequence seq; utime_t delete_time{ceph_clock_now()}; expect_trash_add(mock_image_ctx, "image id", cls::rbd::TRASH_IMAGE_SOURCE_USER, "image name", delete_time, 0); 
expect_aio_remove(mock_image_ctx, util::id_obj_name("image name"), 0); expect_dir_remove(mock_image_ctx, "image name", "image id", -EPERM); C_SaferCond ctx; cls::rbd::TrashImageSpec trash_image_spec{ cls::rbd::TRASH_IMAGE_SOURCE_USER, "image name", delete_time, delete_time}; auto req = MockMoveRequest::create(mock_image_ctx.md_ctx, "image id", trash_image_spec, &ctx); req->send(); ASSERT_EQ(-EPERM, ctx.wait()); } } // namespace trash } // namespace librbd
7,803
32.78355
88
cc
null
ceph-main/src/test/librbd/trash/test_mock_RemoveRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/librbd/test_mock_fixture.h" #include "test/librbd/test_support.h" #include "test/librbd/mock/MockExclusiveLock.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/librbd/mock/MockImageState.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "test/librados_test_stub/MockTestMemRadosClient.h" #include "include/rbd/librbd.hpp" #include "librbd/Utils.h" #include "librbd/image/TypeTraits.h" #include "librbd/image/RemoveRequest.h" #include "librbd/internal.h" #include "librbd/trash/RemoveRequest.h" namespace librbd { namespace { struct MockTestImageCtx : public MockImageCtx { MockTestImageCtx(ImageCtx &image_ctx) : MockImageCtx(image_ctx) { } }; } // anonymous namespace namespace image { // template <> // struct TypeTraits<MockTestImageCtx> { // typedef librbd::MockContextWQ ContextWQ; // }; template <> class RemoveRequest<MockTestImageCtx> { private: typedef ::librbd::image::TypeTraits<MockTestImageCtx> TypeTraits; typedef typename TypeTraits::ContextWQ ContextWQ; public: static RemoveRequest *s_instance; static RemoveRequest *create(librados::IoCtx &ioctx, const std::string &image_name, const std::string &image_id, bool force, bool from_trash_remove, ProgressContext &prog_ctx, ContextWQ *op_work_queue, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->on_finish = on_finish; return s_instance; } static RemoveRequest *create(librados::IoCtx &ioctx, MockTestImageCtx *image_ctx, bool force, bool from_trash_remove, ProgressContext &prog_ctx, ContextWQ *op_work_queue, Context *on_finish) { ceph_assert(s_instance != nullptr); s_instance->on_finish = on_finish; return s_instance; } Context *on_finish = nullptr; RemoveRequest() { s_instance = this; } MOCK_METHOD0(send, void()); }; RemoveRequest<MockTestImageCtx> *RemoveRequest<MockTestImageCtx>::s_instance; } // namespace image } // namespace librbd #include 
"librbd/trash/RemoveRequest.cc" namespace librbd { namespace trash { using ::testing::_; using ::testing::InSequence; using ::testing::Invoke; using ::testing::Return; using ::testing::StrEq; struct TestMockTrashRemoveRequest : public TestMockFixture { typedef RemoveRequest<librbd::MockTestImageCtx> MockRemoveRequest; typedef image::RemoveRequest<librbd::MockTestImageCtx> MockImageRemoveRequest; NoOpProgressContext m_prog_ctx; void expect_set_state(MockTestImageCtx& mock_image_ctx, cls::rbd::TrashImageState trash_set_state, cls::rbd::TrashImageState trash_expect_state, int r) { bufferlist in_bl; encode(mock_image_ctx.id, in_bl); encode(trash_set_state, in_bl); encode(trash_expect_state, in_bl); EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), exec(StrEq("rbd_trash"), _, StrEq("rbd"), StrEq("trash_state_set"), ContentsEqual(in_bl), _, _, _)) .WillOnce(Return(r)); } void expect_set_deleting_state(MockTestImageCtx& mock_image_ctx, int r) { expect_set_state(mock_image_ctx, cls::rbd::TRASH_IMAGE_STATE_REMOVING, cls::rbd::TRASH_IMAGE_STATE_NORMAL, r); } void expect_restore_normal_state(MockTestImageCtx& mock_image_ctx, int r) { expect_set_state(mock_image_ctx, cls::rbd::TRASH_IMAGE_STATE_NORMAL, cls::rbd::TRASH_IMAGE_STATE_REMOVING, r); } void expect_close_image(MockTestImageCtx &mock_image_ctx, int r) { EXPECT_CALL(*mock_image_ctx.state, close(_)) .WillOnce(Invoke([r](Context *on_finish) { on_finish->complete(r); })); } void expect_remove_image(MockImageRemoveRequest& mock_image_remove_request, int r) { EXPECT_CALL(mock_image_remove_request, send()) .WillOnce(Invoke([&mock_image_remove_request, r]() { mock_image_remove_request.on_finish->complete(r); })); } void expect_remove_trash_entry(MockTestImageCtx& mock_image_ctx, int r) { bufferlist in_bl; encode(mock_image_ctx.id, in_bl); EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), exec(StrEq("rbd_trash"), _, StrEq("rbd"), StrEq("trash_remove"), ContentsEqual(in_bl), _, _, _)) .WillOnce(Return(r)); } }; 
TEST_F(TestMockTrashRemoveRequest, Success) { REQUIRE_FORMAT_V2(); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockTestImageCtx mock_image_ctx(*ictx); MockImageRemoveRequest mock_image_remove_request; InSequence seq; expect_set_deleting_state(mock_image_ctx, 0); expect_remove_image(mock_image_remove_request, 0); expect_remove_trash_entry(mock_image_ctx, 0); C_SaferCond ctx; auto req = MockRemoveRequest::create(mock_image_ctx.md_ctx, &mock_image_ctx, nullptr, false, m_prog_ctx, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } TEST_F(TestMockTrashRemoveRequest, SetDeletingStateError) { REQUIRE_FORMAT_V2(); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockTestImageCtx mock_image_ctx(*ictx); MockImageRemoveRequest mock_image_remove_request; InSequence seq; expect_set_deleting_state(mock_image_ctx, -EINVAL); expect_close_image(mock_image_ctx, 0); C_SaferCond ctx; auto req = MockRemoveRequest::create(mock_image_ctx.md_ctx, &mock_image_ctx, nullptr, false, m_prog_ctx, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockTrashRemoveRequest, RemoveImageError) { REQUIRE_FORMAT_V2(); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockTestImageCtx mock_image_ctx(*ictx); MockImageRemoveRequest mock_image_remove_request; InSequence seq; expect_set_deleting_state(mock_image_ctx, 0); expect_remove_image(mock_image_remove_request, -EINVAL); expect_restore_normal_state(mock_image_ctx, 0); C_SaferCond ctx; auto req = MockRemoveRequest::create(mock_image_ctx.md_ctx, &mock_image_ctx, nullptr, false, m_prog_ctx, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } TEST_F(TestMockTrashRemoveRequest, RemoveTrashEntryError) { REQUIRE_FORMAT_V2(); librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockTestImageCtx mock_image_ctx(*ictx); MockImageRemoveRequest mock_image_remove_request; InSequence seq; expect_set_deleting_state(mock_image_ctx, 0); 
expect_remove_image(mock_image_remove_request, 0); expect_remove_trash_entry(mock_image_ctx, -EINVAL); C_SaferCond ctx; auto req = MockRemoveRequest::create(mock_image_ctx.md_ctx, &mock_image_ctx, nullptr, false, m_prog_ctx, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } } // namespace trash } // namespace librbd
7,328
30.727273
83
cc
null
ceph-main/src/test/librbd/watcher/test_mock_RewatchRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "test/librbd/test_mock_fixture.h" #include "include/rados/librados.hpp" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "test/librados_test_stub/MockTestMemRadosClient.h" #include "test/librbd/test_support.h" #include "test/librbd/mock/MockImageCtx.h" #include "librados/AioCompletionImpl.h" #include "librbd/watcher/RewatchRequest.h" namespace librbd { namespace watcher { using ::testing::_; using ::testing::DoAll; using ::testing::InSequence; using ::testing::Invoke; using ::testing::Return; using ::testing::WithArg; using ::testing::WithArgs; struct TestMockWatcherRewatchRequest : public TestMockFixture { typedef RewatchRequest MockRewatchRequest; TestMockWatcherRewatchRequest() = default; void expect_aio_watch(MockImageCtx &mock_image_ctx, int r) { librados::MockTestMemIoCtxImpl &mock_io_ctx(get_mock_io_ctx( mock_image_ctx.md_ctx)); EXPECT_CALL(mock_io_ctx, aio_watch(mock_image_ctx.header_oid, _, _, _)) .WillOnce(DoAll(WithArgs<1, 2>(Invoke([&mock_image_ctx, &mock_io_ctx, r](librados::AioCompletionImpl *c, uint64_t *cookie) { *cookie = 234; c->get(); mock_image_ctx.image_ctx->op_work_queue->queue(new LambdaContext([&mock_io_ctx, c](int r) { mock_io_ctx.get_mock_rados_client()->finish_aio_completion(c, r); }), r); })), Return(0))); } void expect_aio_unwatch(MockImageCtx &mock_image_ctx, int r) { librados::MockTestMemIoCtxImpl &mock_io_ctx(get_mock_io_ctx( mock_image_ctx.md_ctx)); EXPECT_CALL(mock_io_ctx, aio_unwatch(m_watch_handle, _)) .WillOnce(DoAll(Invoke([&mock_image_ctx, &mock_io_ctx, r](uint64_t handle, librados::AioCompletionImpl *c) { c->get(); mock_image_ctx.image_ctx->op_work_queue->queue(new LambdaContext([&mock_io_ctx, c](int r) { mock_io_ctx.get_mock_rados_client()->finish_aio_completion(c, r); }), r); }), Return(0))); } struct WatchCtx : public librados::WatchCtx2 { void handle_notify(uint64_t, uint64_t, uint64_t, 
ceph::bufferlist&) override { ceph_abort(); } void handle_error(uint64_t, int) override { ceph_abort(); } }; ceph::shared_mutex m_watch_lock = ceph::make_shared_mutex("watch_lock"); WatchCtx m_watch_ctx; uint64_t m_watch_handle = 123; }; TEST_F(TestMockWatcherRewatchRequest, Success) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); InSequence seq; expect_aio_unwatch(mock_image_ctx, 0); expect_aio_watch(mock_image_ctx, 0); C_SaferCond ctx; MockRewatchRequest *req = MockRewatchRequest::create(mock_image_ctx.md_ctx, mock_image_ctx.header_oid, m_watch_lock, &m_watch_ctx, &m_watch_handle, &ctx); { std::unique_lock watch_locker{m_watch_lock}; req->send(); } ASSERT_EQ(0, ctx.wait()); ASSERT_EQ(234U, m_watch_handle); } TEST_F(TestMockWatcherRewatchRequest, UnwatchError) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); InSequence seq; expect_aio_unwatch(mock_image_ctx, -EINVAL); expect_aio_watch(mock_image_ctx, 0); C_SaferCond ctx; MockRewatchRequest *req = MockRewatchRequest::create(mock_image_ctx.md_ctx, mock_image_ctx.header_oid, m_watch_lock, &m_watch_ctx, &m_watch_handle, &ctx); { std::unique_lock watch_locker{m_watch_lock}; req->send(); } ASSERT_EQ(0, ctx.wait()); ASSERT_EQ(234U, m_watch_handle); } TEST_F(TestMockWatcherRewatchRequest, WatchBlocklist) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); InSequence seq; expect_aio_unwatch(mock_image_ctx, 0); expect_aio_watch(mock_image_ctx, -EBLOCKLISTED); C_SaferCond ctx; MockRewatchRequest *req = MockRewatchRequest::create(mock_image_ctx.md_ctx, mock_image_ctx.header_oid, m_watch_lock, &m_watch_ctx, &m_watch_handle, &ctx); { std::unique_lock watch_locker{m_watch_lock}; req->send(); } ASSERT_EQ(-EBLOCKLISTED, ctx.wait()); ASSERT_EQ(0U, m_watch_handle); } TEST_F(TestMockWatcherRewatchRequest, WatchDNE) { librbd::ImageCtx *ictx; ASSERT_EQ(0, 
open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); InSequence seq; expect_aio_unwatch(mock_image_ctx, 0); expect_aio_watch(mock_image_ctx, -ENOENT); C_SaferCond ctx; MockRewatchRequest *req = MockRewatchRequest::create(mock_image_ctx.md_ctx, mock_image_ctx.header_oid, m_watch_lock, &m_watch_ctx, &m_watch_handle, &ctx); { std::unique_lock watch_locker{m_watch_lock}; req->send(); } ASSERT_EQ(-ENOENT, ctx.wait()); ASSERT_EQ(0U, m_watch_handle); } TEST_F(TestMockWatcherRewatchRequest, WatchError) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); InSequence seq; expect_aio_unwatch(mock_image_ctx, 0); expect_aio_watch(mock_image_ctx, -EINVAL); C_SaferCond ctx; MockRewatchRequest *req = MockRewatchRequest::create(mock_image_ctx.md_ctx, mock_image_ctx.header_oid, m_watch_lock, &m_watch_ctx, &m_watch_handle, &ctx); { std::unique_lock watch_locker{m_watch_lock}; req->send(); } ASSERT_EQ(-EINVAL, ctx.wait()); ASSERT_EQ(0U, m_watch_handle); } TEST_F(TestMockWatcherRewatchRequest, InvalidWatchHandler) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockImageCtx mock_image_ctx(*ictx); InSequence seq; expect_aio_watch(mock_image_ctx, 0); m_watch_handle = 0; C_SaferCond ctx; MockRewatchRequest *req = MockRewatchRequest::create(mock_image_ctx.md_ctx, mock_image_ctx.header_oid, m_watch_lock, &m_watch_ctx, &m_watch_handle, &ctx); { std::unique_lock watch_locker{m_watch_lock}; req->send(); } ASSERT_EQ(0, ctx.wait()); ASSERT_EQ(234U, m_watch_handle); } } // namespace watcher } // namespace librbd
8,036
34.25
130
cc
null
ceph-main/src/test/mds/TestMDSAuthCaps.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2012 Inktank * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <iostream> #include "include/stringify.h" #include "mds/MDSAuthCaps.h" #include "gtest/gtest.h" using namespace std; entity_addr_t addr; string fsnamecap = "fsname=a"; string pathcap = "path=/dir1"; string rscap = "root_squash"; string uidcap = "uid=1000"; string gidscap = "gids=1000,1001,1002"; vector<string> parse_good = { "allow rw uid=1 gids=1", "allow * path=\"/foo\"", "allow * path=/foo", "allow * path=/foo-bar_baz", "allow * path=\"/foo bar/baz\"", "allow * uid=1", "allow * path=\"/foo\" uid=1", "allow *", "allow r", "allow rw", "allow r, allow rw path=/foo", "allow r, allow * uid=1", "allow r ,allow * uid=1", "allow r ;allow * uid=1", "allow r ; allow * uid=1", "allow r ; allow * uid=1", "allow r uid=1 gids=1,2,3, allow * uid=2", "allow r network 1.2.3.4/8", "allow rw path=/foo uid=1 gids=1,2,3 network 2.3.4.5/16", // Following are all types of MDS caps, or in other words, all // (mathematical) combinations of fsnamecap, pathcap, rscap, uidcap, and // gidscaps. 
"allow rw " + fsnamecap, "allow rw " + pathcap, "allow rw " + rscap, "allow rw " + uidcap, "allow rw " + gidscap, "allow rw " + fsnamecap + " " + pathcap, "allow rw " + fsnamecap + " " + rscap, "allow rw " + fsnamecap + " " + uidcap, "allow rw " + fsnamecap + " " + gidscap, "allow rw " + pathcap + " " + rscap, "allow rw " + pathcap + " " + uidcap, "allow rw " + pathcap + " " + gidscap, "allow rw " + rscap + " " + uidcap, "allow rw " + rscap + " " + gidscap, "allow rw " + uidcap + " " + gidscap, "allow rw " + fsnamecap + " " + pathcap + " " + rscap, "allow rw " + fsnamecap + " " + pathcap + " " + uidcap, "allow rw " + fsnamecap + " " + pathcap + " " + gidscap, "allow rw " + fsnamecap + " " + rscap + " " + uidcap, "allow rw " + fsnamecap + " " + rscap + " " + gidscap, "allow rw " + fsnamecap + " " + uidcap + " " + gidscap, "allow rw " + pathcap + " " + rscap + " " + uidcap, "allow rw " + pathcap + " " + rscap + " " + gidscap, "allow rw " + pathcap + " " + uidcap + " " + gidscap, "allow rw " + rscap + " " + uidcap + " " + gidscap, "allow rw " + fsnamecap + " " + pathcap + " " + rscap + " " + uidcap, "allow rw " + fsnamecap + " " + pathcap + " " + rscap + " " + gidscap, "allow rw " + fsnamecap + " " + pathcap + " " + uidcap + " " + gidscap, "allow rw " + fsnamecap + " " + rscap + " " + uidcap + " " + gidscap, "allow rw " + pathcap + " " + rscap + " " + uidcap + " " + gidscap, "allow rw " + fsnamecap + " " + pathcap + " " + rscap + " " + uidcap + " " + gidscap }; TEST(MDSAuthCaps, ParseGood) { for (auto str : parse_good) { MDSAuthCaps cap; std::cout << "Testing good input: '" << str << "'" << std::endl; ASSERT_TRUE(cap.parse(str, &cout)); } } TEST(MDSAuthCaps, ParseDumpReparseCaps) { for (auto str : parse_good) { MDSAuthCaps cap1; ASSERT_TRUE(cap1.parse(str, &cout)); std::cout << "Testing by parsing caps, dumping to string, reparsing " "string and then redumping and checking strings from " "first and second dumps: '" << str << "'" << std::endl; // Convert cap object to 
string, reparse and check if converting again // gives same string as before. MDSAuthCaps cap2; std::ostringstream cap1_ostream; cap1_ostream << cap1; string cap1_str = cap1_ostream.str(); // Removing "MDSAuthCaps[" from cap1_str cap1_str.replace(0, 12, ""); // Removing "]" from cap1_str cap1_str.replace(cap1_str.length() - 1, 1, ""); ASSERT_TRUE(cap2.parse(cap1_str, &cout)); std::ostringstream cap2_ostream; cap2_ostream << cap2; ASSERT_TRUE(cap1_ostream.str().compare(cap2_ostream.str()) == 0); } } const char *parse_bad[] = { "allow r poolfoo", "allow r w", "ALLOW r", "allow w", "allow rwx,", "allow rwx x", "allow r pool foo r", "allow wwx pool taco", "allow wwx pool taco^funny&chars", "allow rwx pool 'weird name''", "allow rwx object_prefix \"beforepool\" pool weird", "allow rwx auid 123 pool asdf", "allow xrwx pool foo,, allow r pool bar", ";allow rwx pool foo rwx ; allow r pool bar", "allow rwx pool foo ;allow r pool bar gibberish", "allow rwx auid 123 pool asdf namespace=foo", "allow rwx auid 123 namespace", "allow rwx namespace", "allow namespace", "allow namespace=foo", "allow rwx auid 123 namespace asdf", "allow wwx pool ''", "allow rw uid=123 gids=asdf", "allow rw uid=123 gids=1,2,asdf", 0 }; TEST(MDSAuthCaps, ParseBad) { for (int i=0; parse_bad[i]; i++) { string str = parse_bad[i]; MDSAuthCaps cap; std::cout << "Testing bad input: '" << str << "'" << std::endl; ASSERT_FALSE(cap.parse(str, &cout)); // error message from parse() doesn't have newline char at the end of it std::cout << std::endl; } } TEST(MDSAuthCaps, AllowAll) { MDSAuthCaps cap; ASSERT_FALSE(cap.allow_all()); ASSERT_TRUE(cap.parse("allow r", NULL)); ASSERT_FALSE(cap.allow_all()); cap = MDSAuthCaps(); ASSERT_TRUE(cap.parse("allow rw", NULL)); ASSERT_FALSE(cap.allow_all()); cap = MDSAuthCaps(); ASSERT_TRUE(cap.parse("allow", NULL)); ASSERT_FALSE(cap.allow_all()); cap = MDSAuthCaps(); ASSERT_TRUE(cap.parse("allow *", NULL)); ASSERT_TRUE(cap.allow_all()); ASSERT_TRUE(cap.is_capable("foo/bar", 0, 
0, 0777, 0, 0, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); } TEST(MDSAuthCaps, AllowUid) { MDSAuthCaps cap; ASSERT_TRUE(cap.parse("allow * uid=10", NULL)); ASSERT_FALSE(cap.allow_all()); // uid/gid must be valid ASSERT_FALSE(cap.is_capable("foo", 0, 0, 0777, 0, 0, NULL, MAY_READ, 0, 0, addr)); ASSERT_TRUE(cap.is_capable("foo", 0, 0, 0777, 10, 0, NULL, MAY_READ, 0, 0, addr)); ASSERT_TRUE(cap.is_capable("foo", 0, 0, 0777, 10, 10, NULL, MAY_READ, 0, 0, addr)); ASSERT_FALSE(cap.is_capable("foo", 0, 0, 0777, 12, 12, NULL, MAY_READ, 0, 0, addr)); ASSERT_TRUE(cap.is_capable("foo", 0, 0, 0777, 10, 13, NULL, MAY_READ, 0, 0, addr)); } TEST(MDSAuthCaps, AllowUidGid) { MDSAuthCaps cap; ASSERT_TRUE(cap.parse("allow * uid=10 gids=10,11,12; allow * uid=12 gids=12,10", NULL)); ASSERT_FALSE(cap.allow_all()); // uid/gid must be valid ASSERT_FALSE(cap.is_capable("foo", 0, 0, 0777, 0, 0, NULL, MAY_READ, 0, 0, addr)); ASSERT_FALSE(cap.is_capable("foo", 0, 0, 0777, 10, 0, NULL, MAY_READ, 0, 0, addr)); ASSERT_FALSE(cap.is_capable("foo", 0, 0, 0777, 9, 10, NULL, MAY_READ, 0, 0, addr)); ASSERT_TRUE(cap.is_capable("foo", 0, 0, 0777, 10, 10, NULL, MAY_READ, 0, 0, addr)); ASSERT_TRUE(cap.is_capable("foo", 0, 0, 0777, 12, 12, NULL, MAY_READ, 0, 0, addr)); ASSERT_FALSE(cap.is_capable("foo", 0, 0, 0777, 10, 13, NULL, MAY_READ, 0, 0, addr)); // user ASSERT_TRUE(cap.is_capable("foo", 10, 10, 0500, 10, 11, NULL, MAY_READ, 0, 0, addr)); ASSERT_FALSE(cap.is_capable("foo", 10, 10, 0500, 10, 11, NULL, MAY_WRITE, 0, 0, addr)); ASSERT_FALSE(cap.is_capable("foo", 10, 10, 0500, 10, 11, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_TRUE(cap.is_capable("foo", 10, 10, 0700, 10, 11, NULL, MAY_READ, 0, 0, addr)); ASSERT_TRUE(cap.is_capable("foo", 10, 10, 0700, 10, 11, NULL, MAY_WRITE, 0, 0, addr)); ASSERT_TRUE(cap.is_capable("foo", 10, 10, 0700, 10, 10, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_TRUE(cap.is_capable("foo", 10, 0, 0700, 10, 10, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); 
ASSERT_FALSE(cap.is_capable("foo", 12, 0, 0700, 10, 10, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_TRUE(cap.is_capable("foo", 12, 0, 0700, 12, 12, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_FALSE(cap.is_capable("foo", 0, 0, 0700, 10, 10, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); // group vector<uint64_t> glist10; glist10.push_back(10); vector<uint64_t> dglist10; dglist10.push_back(8); dglist10.push_back(10); vector<uint64_t> glist11; glist11.push_back(11); vector<uint64_t> glist12; glist12.push_back(12); ASSERT_TRUE(cap.is_capable("foo", 0, 10, 0750, 10, 10, NULL, MAY_READ, 0, 0, addr)); ASSERT_FALSE(cap.is_capable("foo", 0, 10, 0750, 10, 10, NULL, MAY_WRITE, 0, 0, addr)); ASSERT_TRUE(cap.is_capable("foo", 0, 10, 0770, 10, 10, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_TRUE(cap.is_capable("foo", 0, 10, 0770, 10, 11, &glist10, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_TRUE(cap.is_capable("foo", 0, 11, 0770, 10, 10, &glist11, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_TRUE(cap.is_capable("foo", 0, 11, 0770, 10, 11, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_TRUE(cap.is_capable("foo", 0, 12, 0770, 12, 12, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_FALSE(cap.is_capable("foo", 0, 10, 0770, 12, 12, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_TRUE(cap.is_capable("foo", 0, 10, 0770, 12, 12, &glist10, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_TRUE(cap.is_capable("foo", 0, 10, 0770, 12, 12, &dglist10, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_FALSE(cap.is_capable("foo", 0, 11, 0770, 12, 12, &glist11, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_FALSE(cap.is_capable("foo", 0, 12, 0770, 10, 10, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_TRUE(cap.is_capable("foo", 0, 12, 0770, 10, 10, &glist12, MAY_READ | MAY_WRITE, 0, 0, addr)); // user > group ASSERT_TRUE(cap.is_capable("foo", 10, 10, 0570, 10, 10, NULL, MAY_READ, 0, 0, addr)); ASSERT_FALSE(cap.is_capable("foo", 10, 10, 0570, 10, 10, NULL, MAY_WRITE, 0, 0, addr)); // other 
ASSERT_TRUE(cap.is_capable("foo", 0, 0, 0775, 10, 10, NULL, MAY_READ, 0, 0, addr)); ASSERT_FALSE(cap.is_capable("foo", 0, 0, 0770, 10, 10, NULL, MAY_READ, 0, 0, addr)); ASSERT_FALSE(cap.is_capable("foo", 0, 0, 0775, 10, 10, NULL, MAY_WRITE, 0, 0, addr)); ASSERT_FALSE(cap.is_capable("foo", 0, 0, 0775, 10, 10, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_TRUE(cap.is_capable("foo", 0, 0, 0777, 10, 10, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_FALSE(cap.is_capable("foo", 0, 0, 0773, 10, 10, NULL, MAY_READ, 0, 0, addr)); // group > other ASSERT_TRUE(cap.is_capable("foo", 0, 0, 0557, 10, 10, NULL, MAY_READ, 0, 0, addr)); ASSERT_FALSE(cap.is_capable("foo", 0, 10, 0557, 10, 10, NULL, MAY_WRITE, 0, 0, addr)); // user > other ASSERT_TRUE(cap.is_capable("foo", 0, 0, 0557, 10, 10, NULL, MAY_READ, 0, 0, addr)); ASSERT_FALSE(cap.is_capable("foo", 10, 0, 0557, 10, 10, NULL, MAY_WRITE, 0, 0, addr)); } TEST(MDSAuthCaps, AllowPath) { MDSAuthCaps cap; ASSERT_TRUE(cap.parse("allow * path=/sandbox", NULL)); ASSERT_FALSE(cap.allow_all()); ASSERT_TRUE(cap.is_capable("sandbox/foo", 0, 0, 0777, 0, 0, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_TRUE(cap.is_capable("sandbox", 0, 0, 0777, 0, 0, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_FALSE(cap.is_capable("sandboxed", 0, 0, 0777, 0, 0, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_FALSE(cap.is_capable("foo", 0, 0, 0777, 0, 0, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); } TEST(MDSAuthCaps, AllowPathChars) { MDSAuthCaps unquo_cap; ASSERT_TRUE(unquo_cap.parse("allow * path=/sandbox-._foo", NULL)); ASSERT_FALSE(unquo_cap.allow_all()); ASSERT_TRUE(unquo_cap.is_capable("sandbox-._foo/foo", 0, 0, 0777, 0, 0, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_FALSE(unquo_cap.is_capable("sandbox", 0, 0, 0777, 0, 0, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_FALSE(unquo_cap.is_capable("sandbox-._food", 0, 0, 0777, 0, 0, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_FALSE(unquo_cap.is_capable("foo", 0, 0, 0777, 0, 0, 
NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); } TEST(MDSAuthCaps, AllowPathCharsQuoted) { MDSAuthCaps quo_cap; ASSERT_TRUE(quo_cap.parse("allow * path=\"/sandbox-._foo\"", NULL)); ASSERT_FALSE(quo_cap.allow_all()); ASSERT_TRUE(quo_cap.is_capable("sandbox-._foo/foo", 0, 0, 0777, 0, 0, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_FALSE(quo_cap.is_capable("sandbox", 0, 0, 0777, 0, 0, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_FALSE(quo_cap.is_capable("sandbox-._food", 0, 0, 0777, 0, 0, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_FALSE(quo_cap.is_capable("foo", 0, 0, 0777, 0, 0, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); } TEST(MDSAuthCaps, RootSquash) { MDSAuthCaps rs_cap; ASSERT_TRUE(rs_cap.parse("allow rw root_squash, allow rw path=/sandbox", NULL)); ASSERT_TRUE(rs_cap.is_capable("foo", 0, 0, 0777, 0, 0, NULL, MAY_READ, 0, 0, addr)); ASSERT_TRUE(rs_cap.is_capable("foo", 0, 0, 0777, 10, 10, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_FALSE(rs_cap.is_capable("foo", 0, 0, 0777, 0, 0, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_TRUE(rs_cap.is_capable("sandbox", 0, 0, 0777, 0, 0, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_TRUE(rs_cap.is_capable("sandbox/foo", 0, 0, 0777, 0, 0, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); ASSERT_TRUE(rs_cap.is_capable("sandbox/foo", 0, 0, 0777, 10, 10, NULL, MAY_READ | MAY_WRITE, 0, 0, addr)); } TEST(MDSAuthCaps, OutputParsed) { struct CapsTest { const char *input; const char *output; }; CapsTest test_values[] = { {"allow", "MDSAuthCaps[allow rwps]"}, {"allow *", "MDSAuthCaps[allow *]"}, {"allow r", "MDSAuthCaps[allow r]"}, {"allow rw", "MDSAuthCaps[allow rw]"}, {"allow * uid=1", "MDSAuthCaps[allow * uid=1]"}, {"allow * uid=1 gids=1", "MDSAuthCaps[allow * uid=1 gids=1]"}, {"allow * uid=1 gids=1,2,3", "MDSAuthCaps[allow * uid=1 gids=1,2,3]"}, {"allow * path=/foo", "MDSAuthCaps[allow * path=\"/foo\"]"}, {"allow * path=\"/foo\"", "MDSAuthCaps[allow * path=\"/foo\"]"}, {"allow rw root_squash", "MDSAuthCaps[allow rw 
root_squash]"}, {"allow rw fsname=a root_squash", "MDSAuthCaps[allow rw fsname=a root_squash]"}, {"allow * path=\"/foo\" root_squash", "MDSAuthCaps[allow * path=\"/foo\" root_squash]"}, {"allow * path=\"/foo\" uid=1", "MDSAuthCaps[allow * path=\"/foo\" uid=1]"}, {"allow * path=\"/foo\" uid=1 gids=1,2,3", "MDSAuthCaps[allow * path=\"/foo\" uid=1 gids=1,2,3]"}, {"allow r uid=1 gids=1,2,3, allow * uid=2", "MDSAuthCaps[allow r uid=1 gids=1,2,3, allow * uid=2]"}, {"allow r uid=1 gids=1,2,3, allow * uid=2 network 10.0.0.0/8", "MDSAuthCaps[allow r uid=1 gids=1,2,3, allow * uid=2 network 10.0.0.0/8]"}, {"allow rw fsname=b, allow rw fsname=a root_squash", "MDSAuthCaps[allow rw fsname=b, allow rw fsname=a root_squash]"}, }; size_t num_tests = sizeof(test_values) / sizeof(*test_values); for (size_t i = 0; i < num_tests; ++i) { MDSAuthCaps cap; std::cout << "Testing input '" << test_values[i].input << "'" << std::endl; ASSERT_TRUE(cap.parse(test_values[i].input, &cout)); ASSERT_EQ(test_values[i].output, stringify(cap)); } } TEST(MDSAuthCaps, network) { entity_addr_t a, b, c; a.parse("10.1.2.3"); b.parse("192.168.2.3"); c.parse("192.167.2.3"); MDSAuthCaps cap; ASSERT_TRUE(cap.parse("allow * network 192.168.0.0/16, allow * network 10.0.0.0/8", NULL)); ASSERT_TRUE(cap.is_capable("foo", 0, 0, 0777, 0, 0, NULL, MAY_READ, 0, 0, a)); ASSERT_TRUE(cap.is_capable("foo", 0, 0, 0777, 0, 0, NULL, MAY_READ, 0, 0, b)); ASSERT_FALSE(cap.is_capable("foo", 0, 0, 0777, 0, 0, NULL, MAY_READ, 0, 0, c)); }
15,600
40.381963
115
cc
null
ceph-main/src/test/mds/TestSessionFilter.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2012 Inktank * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <iostream> #include "include/stringify.h" #include "mds/SessionMap.h" #include "gtest/gtest.h" typedef std::vector<std::string> args_eg; typedef std::vector<args_eg> args_eg_set; TEST(MDSSessionFilter, ParseGood) { args_eg_set examples = { {"id=34"}, {"auth_name=foxtrot"}, {"state=reconnecting"}, {"reconnecting=true"}, {"client_metadata.root=/foo/bar"}, {}, {"id=123"}, {"id=34", "client_metadata.root=/foo/bar", "auth_name=foxtrot", "state=reconnecting", "reconnecting=true"} }; for (auto ex : examples) { SessionFilter f; std::stringstream ss; std::cout << "Testing '" << ex << "'" << std::endl; int r = f.parse(ex, &ss); ASSERT_EQ(r, 0); ASSERT_TRUE(ss.str().empty()); } } TEST(MDSSessionFilter, ParseBad) { args_eg_set examples = { {"rhubarb"}, {"id="}, {"id=custard"}, {"=custard"}, {"reconnecting=MAYBE"}, {"reconnecting=2"} }; for (auto ex : examples) { SessionFilter f; std::stringstream ss; std::cout << "Testing '" << ex << "'" << std::endl; int r = f.parse(ex, &ss); ASSERT_EQ(r, -EINVAL); ASSERT_FALSE(ss.str().empty()); } } TEST(MDSSessionFilter, IdEquality) { SessionFilter filter; std::stringstream ss; filter.parse({"id=123"}, &ss); auto a = ceph::make_ref<Session>(nullptr);; auto b = ceph::make_ref<Session>(nullptr);; a->info.inst.name.parse("client.123"); b->info.inst.name.parse("client.456"); ASSERT_TRUE(filter.match(*a, [](client_t c) -> bool {return false;})); ASSERT_FALSE(filter.match(*b, [](client_t c) -> bool {return false;})); } TEST(MDSSessionFilter, StateEquality) { SessionFilter filter; std::stringstream ss; filter.parse({"state=closing"}, &ss); auto a = 
ceph::make_ref<Session>(nullptr); a->set_state(Session::STATE_CLOSING); auto b = ceph::make_ref<Session>(nullptr); b->set_state(Session::STATE_OPENING); ASSERT_TRUE(filter.match(*a, [](client_t c) -> bool {return false;})); ASSERT_FALSE(filter.match(*b, [](client_t c) -> bool {return false;})); } TEST(MDSSessionFilter, AuthEquality) { SessionFilter filter; std::stringstream ss; filter.parse({"auth_name=rhubarb"}, &ss); auto a = ceph::make_ref<Session>(nullptr); a->info.auth_name.set_id("rhubarb"); auto b = ceph::make_ref<Session>(nullptr); b->info.auth_name.set_id("custard"); ASSERT_TRUE(filter.match(*a, [](client_t c) -> bool {return false;})); ASSERT_FALSE(filter.match(*b, [](client_t c) -> bool {return false;})); } TEST(MDSSessionFilter, MetadataEquality) { SessionFilter filter; std::stringstream ss; int r = filter.parse({"client_metadata.root=/rhubarb"}, &ss); ASSERT_EQ(r, 0); client_metadata_t meta; auto a = ceph::make_ref<Session>(nullptr); meta.kv_map = {{"root", "/rhubarb"}}; a->set_client_metadata(meta); auto b = ceph::make_ref<Session>(nullptr); meta.kv_map = {{"root", "/custard"}}; b->set_client_metadata(meta); ASSERT_TRUE(filter.match(*a, [](client_t c) -> bool {return false;})); ASSERT_FALSE(filter.match(*b, [](client_t c) -> bool {return false;})); } TEST(MDSSessionFilter, ReconnectingEquality) { SessionFilter filter; std::stringstream ss; int r = filter.parse({"reconnecting=true"}, &ss); ASSERT_EQ(r, 0); auto a = ceph::make_ref<Session>(nullptr); ASSERT_TRUE(filter.match(*a, [](client_t c) -> bool {return true;})); ASSERT_FALSE(filter.match(*a, [](client_t c) -> bool {return false;})); }
3,870
26.06993
73
cc
null
ceph-main/src/test/memuse/test_pool_memuse.sh
#! /bin/sh -x # # Create a bunch of pools in parallel # This test isn't very smart -- run it from your src dir. # set -e CEPH_NUM_MON=1 CEPH_NUM_MDS=1 CEPH_NUM_OSD=$2 ./vstart.sh -n -d --valgrind_osd 'massif' for i in `seq 0 $1`; do for j in `seq 0 9`; do poolnum=$((i*10+j)) poolname="pool$poolnum" ./ceph osd pool create $poolname 8 & done wait done
371
17.6
87
sh
null
ceph-main/src/test/memuse/test_pool_memuse_tcmalloc.sh
#! /bin/sh -x # # Create a bunch of pools in parallel # This test isn't very smart -- run it from your src dir. # set -e CEPH_NUM_MON=1 CEPH_NUM_MDS=1 CEPH_NUM_OSD=$2 ./vstart.sh -n -d num_osd=$2 maxosd=$((num_osd-1)) for osd_num in `seq 0 $maxosd`; do ./ceph osd tell $osd_num start_profiler done for i in `seq 0 $1`; do for j in `seq 0 9`; do poolnum=$((i*10+j)) poolname="pool$poolnum" ./ceph osd pool create $poolname 8 & done wait done
465
16.923077
63
sh
null
ceph-main/src/test/memuse/test_written_pool_memuse.sh
#! /bin/sh -x set -e for i in `seq 0 $1`; do for j in `seq 0 9`; do poolnum=$((i*10+j)) poolname="pool$poolnum" ./rados -p $poolname bench 1 write -t 1 & done wait done
185
14.5
42
sh
null
ceph-main/src/test/memuse/test_written_pool_memuse_tcmalloc.sh
#!/bin/sh -x set -e num_osd=$2 maxosd=$((num_osd-1)) eval "rm out/*.heap" || echo "no heap dumps to rm" mkdir -p out/pg_stable for osd_num in `seq 0 $maxosd`; do ./ceph osd tell $osd_num heapdump sleep 1 eval "mv out/*.heap out/pg_stable" done for i in `seq 0 $1`; do for j in `seq 0 9`; do poolnum=$((i*10+j)) poolname="pool$poolnum" ./rados -p $poolname bench 1 write -t 1 & done wait done eval "rm out/*.heap" || echo "no heap dumps to rm" mkdir out/one_write for osd_num in `seq 0 $maxosd`; do ./ceph osd tell $osd_num heapdump sleep 1 eval "mv out/*.heap out/one_write" done for i in `seq 0 $1`; do for j in `seq 0 9`; do poolnum=$((i*10+j)) poolname="pool$poolnum" ./rados -p $poolname bench 1 write -t 4 & done wait done eval "rm out/*.heap" mkdir out/five_writes for osd_num in `seq 0 $maxosd`; do ./ceph osd tell $osd_num heapdump sleep 1 eval "mv out/*.heap out/five_writes" done
969
16.636364
50
sh
null
ceph-main/src/test/mgr/mgr-dashboard-smoke.sh
#!/usr/bin/env bash # # Copyright (C) 2014,2015,2017 Red Hat <contact@redhat.com> # Copyright (C) 2018 SUSE LLC # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Library Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Library Public License for more details. # source $(dirname $0)/../detect-build-env-vars.sh source $CEPH_ROOT/qa/standalone/ceph-helpers.sh mon_port=$(get_unused_port) dashboard_port=$((mon_port+1)) function run() { local dir=$1 shift export CEPH_MON=127.0.0.1:$mon_port export CEPH_ARGS CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " CEPH_ARGS+="--mon-initial-members=a --mon-host=$MON " CEPH_ARGS+="--mgr-initial-modules=dashboard " CEPH_ARGS+="--mon-host=$CEPH_MON" setup $dir || return 1 TEST_dashboard $dir || return 1 teardown $dir || return 1 } function TEST_dashboard() { local dir=$1 shift run_mon $dir a || return 1 timeout 30 ceph mon stat || return 1 ceph config-key set mgr/dashboard/x/server_port $dashboard_port MGR_ARGS+="--mgr_module_path=${CEPH_ROOT}/src/pybind/mgr " run_mgr $dir x ${MGR_ARGS} || return 1 tries=0 while [[ $tries < 30 ]] ; do if [ $(ceph status -f json | jq .mgrmap.available) = "true" ] then break fi tries=$((tries+1)) sleep 1 done DASHBOARD_ADMIN_SECRET_FILE="/tmp/dashboard-admin-secret.txt" printf 'admin' > "${DASHBOARD_ADMIN_SECRET_FILE}" ceph_adm dashboard ac-user-create admin -i "${DASHBOARD_ADMIN_SECRET_FILE}" --force-password tries=0 while [[ $tries < 30 ]] ; do if curl -c $dir/cookiefile -X POST -d '{"username":"admin","password":"admin"}' http://127.0.0.1:$dashboard_port/api/auth then if curl -b $dir/cookiefile -s http://127.0.0.1:$dashboard_port/api/summary | \ jq 
'.health.overall_status' | grep HEALTH_ then break fi fi tries=$((tries+1)) sleep 0.5 done } main mgr-dashboard-smoke "$@" # Local Variables: # compile-command: "cd ../.. ; make -j4 TESTS=test/mgr/mgr-dashboard-smoke.sh check" # End:
2,492
29.402439
129
sh
null
ceph-main/src/test/mgr/test_mgrcap.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2012 Inktank * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <iostream> #include "include/stringify.h" #include "mgr/MgrCap.h" #include "gtest/gtest.h" using namespace std; const char *parse_good[] = { // MgrCapMatch "allow *", "allow r", "allow rwx", "allow r", " allow rwx", "allow rwx ", " allow rwx ", " allow\t rwx ", "\tallow\nrwx\t", "allow service=foo x", "allow service=\"froo\" x", "allow profile read-only", "allow profile read-write", "allow profile \"rbd-read-only\", allow *", "allow command \"a b c\"", "allow command abc", "allow command abc with arg=foo", "allow command abc with arg=foo arg2=bar", "allow command abc with arg=foo arg2=bar", "allow command abc with arg=foo arg2 prefix bar arg3 prefix baz", "allow command abc with arg=foo arg2 prefix \"bar bingo\" arg3 prefix baz", "allow command abc with arg regex \"^[0-9a-z.]*$\"", "allow command abc with arg regex \"\(invaluid regex\"", "allow service foo x", "allow service foo x; allow service bar x", "allow service foo w ;allow service bar x", "allow service foo w , allow service bar x", "allow service foo r , allow service bar x", "allow service foo_foo r, allow service bar r", "allow service foo-foo r, allow service bar r", "allow service \" foo \" w, allow service bar r", "allow module foo x", "allow module=foo x", "allow module foo_foo r", "allow module \" foo \" w", "allow module foo with arg1=value1 x", "allow command abc with arg=foo arg2=bar, allow service foo r", "allow command abc.def with arg=foo arg2=bar, allow service foo r", "allow command \"foo bar\" with arg=\"baz\"", "allow command \"foo bar\" with arg=\"baz.xx\"", "allow command \"foo bar\" with arg = 
\"baz.xx\"", "profile crash", "profile osd", "profile mds", "profile rbd pool=ABC namespace=NS", "profile \"rbd-read-only\", profile crash", "allow * network 1.2.3.4/24", "allow * network ::1/128", "allow * network [aa:bb::1]/128", "allow service=foo x network 1.2.3.4/16", "allow command abc network 1.2.3.4/8", "profile crash network 1.2.3.4/32", "allow profile crash network 1.2.3.4/32", 0 }; TEST(MgrCap, ParseGood) { for (int i=0; parse_good[i]; ++i) { string str = parse_good[i]; MgrCap cap; std::cout << "Testing good input: '" << str << "'" << std::endl; ASSERT_TRUE(cap.parse(str, &cout)); std::cout << " -> " << cap << std::endl; } } // these should stringify to the input value const char *parse_identity[] = { "allow *", "allow r", "allow rwx", "allow service foo x", "profile crash", "profile rbd-read-only, allow *", "profile rbd namespace=NS pool=ABC", "allow command abc", "allow command \"a b c\"", "allow command abc with arg=foo", "allow command abc with arg=foo arg2=bar", "allow command abc with arg=foo arg2=bar", "allow command abc with arg=foo arg2 prefix bar arg3 prefix baz", "allow command abc with arg=foo arg2 prefix \"bar bingo\" arg3 prefix baz", "allow service foo x", "allow service foo x, allow service bar x", "allow service foo w, allow service bar x", "allow service foo r, allow service bar x", "allow service foo_foo r, allow service bar r", "allow service foo-foo r, allow service bar r", "allow service \" foo \" w, allow service bar r", "allow module foo x", "allow module \" foo_foo \" r", "allow module foo with arg1=value1 x", "allow command abc with arg=foo arg2=bar, allow service foo r", 0 }; TEST(MgrCap, ParseIdentity) { for (int i=0; parse_identity[i]; ++i) { string str = parse_identity[i]; MgrCap cap; std::cout << "Testing good input: '" << str << "'" << std::endl; ASSERT_TRUE(cap.parse(str, &cout)); string out = stringify(cap); ASSERT_EQ(out, str); } } const char *parse_bad[] = { "allow r foo", "allow*", "foo allow *", "profile foo rwx", 
"profile", "profile foo bar rwx", "allow profile foo rwx", "allow profile", "allow profile foo bar rwx", "allow service bar", "allow command baz x", "allow r w", "ALLOW r", "allow rwx,", "allow rwx x", "allow r pool foo r", "allow wwx pool taco", "allow wwx pool taco^funny&chars", "allow rwx pool 'weird name''", "allow rwx object_prefix \"beforepool\" pool weird", "allow rwx auid 123 pool asdf", "allow command foo a prefix b", "allow command foo with a prefixb", "allow command foo with a = prefix b", "allow command foo with a prefix b c", 0 }; TEST(MgrCap, ParseBad) { for (int i=0; parse_bad[i]; ++i) { string str = parse_bad[i]; MgrCap cap; std::cout << "Testing bad input: '" << str << "'" << std::endl; ASSERT_FALSE(cap.parse(str, &cout)); } } TEST(MgrCap, AllowAll) { MgrCap cap; ASSERT_FALSE(cap.is_allow_all()); ASSERT_TRUE(cap.parse("allow r", nullptr)); ASSERT_FALSE(cap.is_allow_all()); cap.grants.clear(); ASSERT_TRUE(cap.parse("allow w", nullptr)); ASSERT_FALSE(cap.is_allow_all()); cap.grants.clear(); ASSERT_TRUE(cap.parse("allow x", nullptr)); ASSERT_FALSE(cap.is_allow_all()); cap.grants.clear(); ASSERT_TRUE(cap.parse("allow rwx", nullptr)); ASSERT_FALSE(cap.is_allow_all()); cap.grants.clear(); ASSERT_TRUE(cap.parse("allow rw", nullptr)); ASSERT_FALSE(cap.is_allow_all()); cap.grants.clear(); ASSERT_TRUE(cap.parse("allow rx", nullptr)); ASSERT_FALSE(cap.is_allow_all()); cap.grants.clear(); ASSERT_TRUE(cap.parse("allow wx", nullptr)); ASSERT_FALSE(cap.is_allow_all()); cap.grants.clear(); ASSERT_TRUE(cap.parse("allow *", nullptr)); ASSERT_TRUE(cap.is_allow_all()); ASSERT_TRUE(cap.is_capable(nullptr, {}, "foo", "", "asdf", {}, true, true, true, {})); MgrCap cap2; ASSERT_FALSE(cap2.is_allow_all()); cap2.set_allow_all(); ASSERT_TRUE(cap2.is_allow_all()); } TEST(MgrCap, Network) { MgrCap cap; bool r = cap.parse("allow * network 192.168.0.0/16, allow * network 10.0.0.0/8", nullptr); ASSERT_TRUE(r); entity_addr_t a, b, c; a.parse("10.1.2.3"); b.parse("192.168.2.3"); 
c.parse("192.167.2.3"); ASSERT_TRUE(cap.is_capable(nullptr, {}, "foo", "", "asdf", {}, true, true, true, a)); ASSERT_TRUE(cap.is_capable(nullptr, {}, "foo", "", "asdf", {}, true, true, true, b)); ASSERT_FALSE(cap.is_capable(nullptr, {}, "foo", "", "asdf", {}, true, true, true, c)); } TEST(MgrCap, CommandRegEx) { MgrCap cap; ASSERT_FALSE(cap.is_allow_all()); ASSERT_TRUE(cap.parse("allow command abc with arg regex \"^[0-9a-z.]*$\"", nullptr)); EntityName name; name.from_str("osd.123"); ASSERT_TRUE(cap.is_capable(nullptr, name, "", "", "abc", {{"arg", "12345abcde"}}, true, true, true, {})); ASSERT_FALSE(cap.is_capable(nullptr, name, "", "", "abc", {{"arg", "~!@#$"}}, true, true, true, {})); ASSERT_TRUE(cap.parse("allow command abc with arg regex \"[*\"", nullptr)); ASSERT_FALSE(cap.is_capable(nullptr, name, "", "", "abc", {{"arg", ""}}, true, true, true, {})); } TEST(MgrCap, Module) { MgrCap cap; ASSERT_FALSE(cap.is_allow_all()); ASSERT_TRUE(cap.parse("allow module abc r, allow module bcd w", nullptr)); ASSERT_FALSE(cap.is_capable(nullptr, {}, "", "abc", "", {}, true, true, false, {})); ASSERT_TRUE(cap.is_capable(nullptr, {}, "", "abc", "", {}, true, false, false, {})); ASSERT_FALSE(cap.is_capable(nullptr, {}, "", "bcd", "", {}, true, true, false, {})); ASSERT_TRUE(cap.is_capable(nullptr, {}, "", "bcd", "", {}, false, true, false, {})); } TEST(MgrCap, Profile) { MgrCap cap; ASSERT_FALSE(cap.is_allow_all()); ASSERT_FALSE(cap.parse("profile unknown")); ASSERT_FALSE(cap.parse("profile rbd invalid-key=value")); ASSERT_TRUE(cap.parse("profile rbd", nullptr)); ASSERT_FALSE(cap.is_capable(nullptr, {}, "", "abc", "", {}, true, false, false, {})); ASSERT_TRUE(cap.is_capable(nullptr, {}, "", "rbd_support", "", {}, true, true, false, {})); ASSERT_TRUE(cap.is_capable(nullptr, {}, "", "rbd_support", "", {}, true, false, false, {})); ASSERT_TRUE(cap.parse("profile rbd pool=abc namespace prefix def", nullptr)); ASSERT_FALSE(cap.is_capable(nullptr, {}, "", "rbd_support", "", {}, 
true, true, false, {})); ASSERT_FALSE(cap.is_capable(nullptr, {}, "", "rbd_support", "", {{"pool", "abc"}}, true, true, false, {})); ASSERT_TRUE(cap.is_capable(nullptr, {}, "", "rbd_support", "", {{"pool", "abc"}, {"namespace", "defghi"}}, true, true, false, {})); ASSERT_TRUE(cap.parse("profile rbd-read-only", nullptr)); ASSERT_FALSE(cap.is_capable(nullptr, {}, "", "abc", "", {}, true, false, false, {})); ASSERT_FALSE(cap.is_capable(nullptr, {}, "", "rbd_support", "", {}, true, true, false, {})); ASSERT_TRUE(cap.is_capable(nullptr, {}, "", "rbd_support", "", {}, true, false, false, {})); }
9,787
31.518272
92
cc
null
ceph-main/src/test/mgr/test_ttlcache.cc
#include <iostream> #include "mgr/TTLCache.h" #include "gtest/gtest.h" using namespace std; TEST(TTLCache, Get) { TTLCache<string, int> c{100}; c.insert("foo", 1); int foo = c.get("foo"); ASSERT_EQ(foo, 1); } TEST(TTLCache, Erase) { TTLCache<string, int> c{100}; c.insert("foo", 1); int foo = c.get("foo"); ASSERT_EQ(foo, 1); c.erase("foo"); try{ foo = c.get("foo"); FAIL(); } catch (std::out_of_range& e) { SUCCEED(); } } TEST(TTLCache, Clear) { TTLCache<string, int> c{100}; c.insert("foo", 1); c.insert("foo2", 2); c.clear(); ASSERT_FALSE(c.size()); } TEST(TTLCache, NoTTL) { TTLCache<string, int> c{100}; c.insert("foo", 1); int foo = c.get("foo"); ASSERT_EQ(foo, 1); c.set_ttl(0); c.insert("foo2", 2); try{ foo = c.get("foo2"); FAIL(); } catch (std::out_of_range& e) { SUCCEED(); } } TEST(TTLCache, SizeLimit) { TTLCache<string, int> c{100, 2}; c.insert("foo", 1); c.insert("foo2", 2); c.insert("foo3", 3); ASSERT_EQ(c.size(), 2); } TEST(TTLCache, HitRatio) { TTLCache<string, int> c{100}; c.insert("foo", 1); c.insert("foo2", 2); c.insert("foo3", 3); c.get("foo2"); c.get("foo3"); std::pair<uint64_t, uint64_t> hit_miss_ratio = c.get_hit_miss_ratio(); ASSERT_EQ(std::get<1>(hit_miss_ratio), 3); ASSERT_EQ(std::get<0>(hit_miss_ratio), 2); }
1,324
17.661972
71
cc
null
ceph-main/src/test/mon/MonMap.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2016 SUSE LINUX GmbH * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include "mon/MonMap.h" #include "common/ceph_context.h" #include "common/dns_resolve.h" #include "test/common/dns_messages.h" #include "common/debug.h" #include "gmock/gmock.h" #include "gtest/gtest.h" #include <boost/smart_ptr/intrusive_ptr.hpp> #include <sstream> #define TEST_DEBUG 20 #define dout_subsys ceph_subsys_mon using ::testing::Return; using ::testing::_; using ::testing::SetArrayArgument; using ::testing::DoAll; using ::testing::StrEq; class MonMapTest : public ::testing::Test { protected: virtual void SetUp() { g_ceph_context->_conf->subsys.set_log_level(dout_subsys, TEST_DEBUG); } virtual void TearDown() { DNSResolver::get_instance(nullptr); } }; TEST_F(MonMapTest, DISABLED_build_initial_config_from_dns) { MockResolvHWrapper *resolvH = new MockResolvHWrapper(); DNSResolver::get_instance(resolvH); int len = sizeof(ns_search_msg_ok_payload); int lena = sizeof(ns_query_msg_mon_a_payload); int lenb = sizeof(ns_query_msg_mon_b_payload); int lenc = sizeof(ns_query_msg_mon_c_payload); using ::testing::InSequence; { InSequence s; #ifdef HAVE_RES_NQUERY EXPECT_CALL(*resolvH, res_nsearch(_, StrEq("_cephmon._tcp"), C_IN, T_SRV, _, _)) .WillOnce(DoAll(SetArrayArgument<4>(ns_search_msg_ok_payload, ns_search_msg_ok_payload+len), Return(len))); EXPECT_CALL(*resolvH, res_nquery(_,StrEq("mon.a.ceph.com"), C_IN, T_A,_,_)) .WillOnce(DoAll(SetArrayArgument<4>(ns_query_msg_mon_a_payload, ns_query_msg_mon_a_payload+lena), Return(lena))); EXPECT_CALL(*resolvH, res_nquery(_, StrEq("mon.c.ceph.com"), C_IN, T_A,_,_)) .WillOnce(DoAll(SetArrayArgument<4>(ns_query_msg_mon_c_payload, 
ns_query_msg_mon_c_payload+lenc), Return(lenc))); EXPECT_CALL(*resolvH, res_nquery(_,StrEq("mon.b.ceph.com"), C_IN, T_A, _,_)) .WillOnce(DoAll(SetArrayArgument<4>(ns_query_msg_mon_b_payload, ns_query_msg_mon_b_payload+lenb), Return(lenb))); #else EXPECT_CALL(*resolvH, res_search(StrEq("_cephmon._tcp"), C_IN, T_SRV, _, _)) .WillOnce(DoAll(SetArrayArgument<3>(ns_search_msg_ok_payload, ns_search_msg_ok_payload+len), Return(len))); EXPECT_CALL(*resolvH, res_query(StrEq("mon.a.ceph.com"), C_IN, T_A,_,_)) .WillOnce(DoAll(SetArrayArgument<3>(ns_query_msg_mon_a_payload, ns_query_msg_mon_a_payload+lena), Return(lena))); EXPECT_CALL(*resolvH, res_query(StrEq("mon.c.ceph.com"), C_IN, T_A,_,_)) .WillOnce(DoAll(SetArrayArgument<3>(ns_query_msg_mon_c_payload, ns_query_msg_mon_c_payload+lenc), Return(lenc))); EXPECT_CALL(*resolvH, res_query(StrEq("mon.b.ceph.com"), C_IN, T_A, _,_)) .WillOnce(DoAll(SetArrayArgument<3>(ns_query_msg_mon_b_payload, ns_query_msg_mon_b_payload+lenb), Return(lenb))); #endif } boost::intrusive_ptr<CephContext> cct = new CephContext(CEPH_ENTITY_TYPE_MON); cct->_conf.set_val("mon_dns_srv_name", "cephmon"); MonMap monmap; int r = monmap.build_initial(cct.get(), false, std::cerr); ASSERT_EQ(r, 0); ASSERT_EQ(monmap.mon_info.size(), (unsigned int)3); auto it = monmap.mon_info.find("mon.a"); ASSERT_NE(it, monmap.mon_info.end()); std::ostringstream os; os << it->second.public_addrs; ASSERT_EQ(os.str(), "192.168.1.11:6789/0"); os.str(""); it = monmap.mon_info.find("mon.b"); ASSERT_NE(it, monmap.mon_info.end()); os << it->second.public_addrs; ASSERT_EQ(os.str(), "192.168.1.12:6789/0"); os.str(""); it = monmap.mon_info.find("mon.c"); ASSERT_NE(it, monmap.mon_info.end()); os << it->second.public_addrs; ASSERT_EQ(os.str(), "192.168.1.13:6789/0"); } TEST_F(MonMapTest, DISABLED_build_initial_config_from_dns_fail) { MockResolvHWrapper *resolvH = new MockResolvHWrapper(); DNSResolver::get_instance(resolvH); #ifdef HAVE_RES_NQUERY EXPECT_CALL(*resolvH, res_nsearch(_, 
StrEq("_ceph-mon._tcp"), C_IN, T_SRV, _, _)) .WillOnce(Return(0)); #else EXPECT_CALL(*resolvH, res_search(StrEq("_ceph-mon._tcp"), C_IN, T_SRV, _, _)) .WillOnce(Return(0)); #endif boost::intrusive_ptr<CephContext> cct = new CephContext(CEPH_ENTITY_TYPE_MON); // using default value of mon_dns_srv_name option MonMap monmap; int r = monmap.build_initial(cct.get(), false, std::cerr); ASSERT_EQ(r, -ENOENT); ASSERT_EQ(monmap.mon_info.size(), (unsigned int)0); } TEST_F(MonMapTest, DISABLED_build_initial_config_from_dns_with_domain) { MockResolvHWrapper *resolvH = new MockResolvHWrapper(); DNSResolver::get_instance(resolvH); int len = sizeof(ns_search_msg_ok_payload); int lena = sizeof(ns_query_msg_mon_a_payload); int lenb = sizeof(ns_query_msg_mon_b_payload); int lenc = sizeof(ns_query_msg_mon_c_payload); using ::testing::InSequence; { InSequence s; #ifdef HAVE_RES_NQUERY EXPECT_CALL(*resolvH, res_nsearch(_, StrEq("_cephmon._tcp.ceph.com"), C_IN, T_SRV, _, _)) .WillOnce(DoAll(SetArrayArgument<4>(ns_search_msg_ok_payload, ns_search_msg_ok_payload+len), Return(len))); EXPECT_CALL(*resolvH, res_nquery(_,StrEq("mon.a.ceph.com"), C_IN, T_A,_,_)) .WillOnce(DoAll(SetArrayArgument<4>(ns_query_msg_mon_a_payload, ns_query_msg_mon_a_payload+lena), Return(lena))); EXPECT_CALL(*resolvH, res_nquery(_, StrEq("mon.c.ceph.com"), C_IN, T_A,_,_)) .WillOnce(DoAll(SetArrayArgument<4>(ns_query_msg_mon_c_payload, ns_query_msg_mon_c_payload+lenc), Return(lenc))); EXPECT_CALL(*resolvH, res_nquery(_,StrEq("mon.b.ceph.com"), C_IN, T_A, _,_)) .WillOnce(DoAll(SetArrayArgument<4>(ns_query_msg_mon_b_payload, ns_query_msg_mon_b_payload+lenb), Return(lenb))); #else EXPECT_CALL(*resolvH, res_search(StrEq("_cephmon._tcp.ceph.com"), C_IN, T_SRV, _, _)) .WillOnce(DoAll(SetArrayArgument<3>(ns_search_msg_ok_payload, ns_search_msg_ok_payload+len), Return(len))); EXPECT_CALL(*resolvH, res_query(StrEq("mon.a.ceph.com"), C_IN, T_A,_,_)) .WillOnce(DoAll(SetArrayArgument<3>(ns_query_msg_mon_a_payload, 
ns_query_msg_mon_a_payload+lena), Return(lena))); EXPECT_CALL(*resolvH, res_query(StrEq("mon.c.ceph.com"), C_IN, T_A,_,_)) .WillOnce(DoAll(SetArrayArgument<3>(ns_query_msg_mon_c_payload, ns_query_msg_mon_c_payload+lenc), Return(lenc))); EXPECT_CALL(*resolvH, res_query(StrEq("mon.b.ceph.com"), C_IN, T_A, _,_)) .WillOnce(DoAll(SetArrayArgument<3>(ns_query_msg_mon_b_payload, ns_query_msg_mon_b_payload+lenb), Return(lenb))); #endif } boost::intrusive_ptr<CephContext> cct = new CephContext(CEPH_ENTITY_TYPE_MON); cct->_conf.set_val("mon_dns_srv_name", "cephmon_ceph.com"); MonMap monmap; int r = monmap.build_initial(cct.get(), false, std::cerr); ASSERT_EQ(r, 0); ASSERT_EQ(monmap.mon_info.size(), (unsigned int)3); auto it = monmap.mon_info.find("mon.a"); ASSERT_NE(it, monmap.mon_info.end()); std::ostringstream os; os << it->second.public_addrs; ASSERT_EQ(os.str(), "192.168.1.11:6789/0"); os.str(""); it = monmap.mon_info.find("mon.b"); ASSERT_NE(it, monmap.mon_info.end()); os << it->second.public_addrs; ASSERT_EQ(os.str(), "192.168.1.12:6789/0"); os.str(""); it = monmap.mon_info.find("mon.c"); ASSERT_NE(it, monmap.mon_info.end()); os << it->second.public_addrs; ASSERT_EQ(os.str(), "192.168.1.13:6789/0"); } TEST(MonMapBuildInitial, build_initial_mon_host_from_dns) { boost::intrusive_ptr<CephContext> cct = new CephContext(CEPH_ENTITY_TYPE_MON); cct->_conf.set_val("mon_host", "ceph.io"); MonMap monmap; int r = monmap.build_initial(cct.get(), false, std::cerr); ASSERT_EQ(r, 0); ASSERT_GE(monmap.mon_info.size(), 1u); for (const auto& [name, info] : monmap.mon_info) { std::cerr << info << std::endl; } } TEST(MonMapBuildInitial, build_initial_mon_host_from_dns_fail) { boost::intrusive_ptr<CephContext> cct = new CephContext(CEPH_ENTITY_TYPE_MON); cct->_conf.set_val("mon_host", "ceph.noname"); MonMap monmap; int r = monmap.build_initial(cct.get(), false, std::cerr); ASSERT_EQ(r, -EINVAL); }
8,513
34.181818
93
cc
null
ceph-main/src/test/mon/PGMap.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2014 Inktank <info@inktank.com> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License version 2, as published by the Free Software * Foundation. See file COPYING. */ #include "mon/PGMap.h" #include "gtest/gtest.h" #include "include/stringify.h" using namespace std; namespace { class CheckTextTable : public TextTable { public: explicit CheckTextTable(bool verbose) { for (int i = 0; i < 5; i++) { define_column("", TextTable::LEFT, TextTable::LEFT); } if (verbose) { for (int i = 0; i < 9; i++) { define_column("", TextTable::LEFT, TextTable::LEFT); } } } const string& get(unsigned r, unsigned c) const { ceph_assert(r < row.size()); ceph_assert(c < row[r].size()); return row[r][c]; } }; // copied from PGMap.cc string percentify(float a) { stringstream ss; if (a < 0.01) ss << "0"; else ss << std::fixed << std::setprecision(2) << a; return ss.str(); } } // dump_object_stat_sum() is called by "ceph df" command // with table, without formatter, verbose = true, not empty, avail > 0 TEST(pgmap, dump_object_stat_sum_0) { bool verbose = true; CheckTextTable tbl(verbose); pool_stat_t pool_stat; object_stat_sum_t& sum = pool_stat.stats.sum; sum.num_bytes = 42 * 1024 * 1024; sum.num_objects = 42; sum.num_objects_degraded = 13; // there are 13 missings + not_yet_backfilled sum.num_objects_dirty = 2; sum.num_rd = 100; sum.num_rd_kb = 123; sum.num_wr = 101; sum.num_wr_kb = 321; pool_stat.num_store_stats = 3; store_statfs_t &statfs = pool_stat.store_stats; statfs.data_stored = 40 * 1024 * 1024; statfs.allocated = 41 * 1024 * 1024 * 2; statfs.data_compressed_allocated = 4334; statfs.data_compressed_original = 1213; sum.calc_copies(3); // assuming we have 3 copies for each obj // nominal amount of space available for new objects in this pool uint64_t avail = 
2016 * 1024 * 1024; pg_pool_t pool; pool.quota_max_objects = 2000; pool.quota_max_bytes = 2000 * 1024 * 1024; pool.size = 2; pool.type = pg_pool_t::TYPE_REPLICATED; pool.tier_of = 0; PGMap::dump_object_stat_sum(tbl, nullptr, pool_stat, avail, pool.get_size(), verbose, true, true, &pool); float copies_rate = (static_cast<float>(sum.num_object_copies - sum.num_objects_degraded) / sum.num_object_copies) * pool.get_size(); float used_percent = (float)statfs.allocated / (statfs.allocated + avail) * 100; uint64_t stored = statfs.data_stored / copies_rate; unsigned col = 0; ASSERT_EQ(stringify(byte_u_t(stored)), tbl.get(0, col++)); ASSERT_EQ(stringify(byte_u_t(stored)), tbl.get(0, col++)); ASSERT_EQ(stringify(byte_u_t(0)), tbl.get(0, col++)); ASSERT_EQ(stringify(si_u_t(sum.num_objects)), tbl.get(0, col++)); ASSERT_EQ(stringify(byte_u_t(statfs.allocated)), tbl.get(0, col++)); ASSERT_EQ(stringify(byte_u_t(statfs.allocated)), tbl.get(0, col++)); ASSERT_EQ(stringify(byte_u_t(0)), tbl.get(0, col++)); ASSERT_EQ(percentify(used_percent), tbl.get(0, col++)); ASSERT_EQ(stringify(byte_u_t(avail/copies_rate)), tbl.get(0, col++)); ASSERT_EQ(stringify(si_u_t(pool.quota_max_objects)), tbl.get(0, col++)); ASSERT_EQ(stringify(byte_u_t(pool.quota_max_bytes)), tbl.get(0, col++)); ASSERT_EQ(stringify(si_u_t(sum.num_objects_dirty)), tbl.get(0, col++)); ASSERT_EQ(stringify(byte_u_t(statfs.data_compressed_allocated)), tbl.get(0, col++)); ASSERT_EQ(stringify(byte_u_t(statfs.data_compressed_original)), tbl.get(0, col++)); } // with table, without formatter, verbose = true, empty, avail > 0 TEST(pgmap, dump_object_stat_sum_1) { bool verbose = true; CheckTextTable tbl(verbose); pool_stat_t pool_stat; object_stat_sum_t& sum = pool_stat.stats.sum; // zero by default ASSERT_TRUE(sum.is_zero()); // nominal amount of space available for new objects in this pool uint64_t avail = 2016 * 1024 * 1024; pg_pool_t pool; pool.quota_max_objects = 2000; pool.quota_max_bytes = 2000 * 1024 * 1024; pool.size = 2; 
pool.type = pg_pool_t::TYPE_REPLICATED; pool.tier_of = 0; PGMap::dump_object_stat_sum(tbl, nullptr, pool_stat, avail, pool.get_size(), verbose, true, true, &pool); unsigned col = 0; ASSERT_EQ(stringify(byte_u_t(0)), tbl.get(0, col++)); ASSERT_EQ(stringify(byte_u_t(0)), tbl.get(0, col++)); ASSERT_EQ(stringify(byte_u_t(0)), tbl.get(0, col++)); ASSERT_EQ(stringify(si_u_t(0)), tbl.get(0, col++)); ASSERT_EQ(stringify(byte_u_t(0)), tbl.get(0, col++)); ASSERT_EQ(stringify(byte_u_t(0)), tbl.get(0, col++)); ASSERT_EQ(stringify(byte_u_t(0)), tbl.get(0, col++)); ASSERT_EQ(percentify(0), tbl.get(0, col++)); ASSERT_EQ(stringify(byte_u_t(avail/pool.size)), tbl.get(0, col++)); ASSERT_EQ(stringify(si_u_t(pool.quota_max_objects)), tbl.get(0, col++)); ASSERT_EQ(stringify(byte_u_t(pool.quota_max_bytes)), tbl.get(0, col++)); ASSERT_EQ(stringify(si_u_t(0)), tbl.get(0, col++)); ASSERT_EQ(stringify(byte_u_t(0)), tbl.get(0, col++)); ASSERT_EQ(stringify(byte_u_t(0)), tbl.get(0, col++)); } // with table, without formatter, verbose = false, empty, avail = 0 TEST(pgmap, dump_object_stat_sum_2) { bool verbose = false; CheckTextTable tbl(verbose); pool_stat_t pool_stat; object_stat_sum_t& sum = pool_stat.stats.sum; // zero by default ASSERT_TRUE(sum.is_zero()); // nominal amount of space available for new objects in this pool uint64_t avail = 0; pg_pool_t pool; pool.quota_max_objects = 2000; pool.quota_max_bytes = 2000 * 1024 * 1024; pool.size = 2; pool.type = pg_pool_t::TYPE_REPLICATED; PGMap::dump_object_stat_sum(tbl, nullptr, pool_stat, avail, pool.get_size(), verbose, true, true, &pool); unsigned col = 0; ASSERT_EQ(stringify(byte_u_t(0)), tbl.get(0, col++)); ASSERT_EQ(stringify(si_u_t(0)), tbl.get(0, col++)); ASSERT_EQ(stringify(byte_u_t(0)), tbl.get(0, col++)); ASSERT_EQ(percentify(0), tbl.get(0, col++)); ASSERT_EQ(stringify(byte_u_t(avail/pool.size)), tbl.get(0, col++)); }
6,264
35.852941
86
cc
null
ceph-main/src/test/mon/bench_auth.py
#!/usr/bin/python3 import argparse import copy import json import rados import time import multiprocessing caps_base = ["mon", "profile rbd", "osd", "profile rbd pool=rbd namespace=test"] def create_users(conn, num_namespaces, num_users): cmd = {'prefix': 'auth get-or-create'} for i in range(num_namespaces): caps_base[-1] += ", profile rbd pool=rbd namespace=namespace{}".format(i) cmd['caps'] = caps_base for i in range(num_users): cmd['entity'] = "client.{}".format(i) conn.mon_command(json.dumps(cmd), b'') class Worker(multiprocessing.Process): def __init__(self, conn, num, queue, duration): super().__init__() self.conn = conn self.num = num self.queue = queue self.duration = duration def run(self): client = "client.{}".format(self.num) cmd = {'prefix': 'auth caps', 'entity': client} start_time = time.time() num_complete = 0 with rados.Rados(conffile='') as conn: while True: now = time.time() diff = now - start_time if diff > self.duration: self.queue.put((num_complete, diff)) return caps = copy.deepcopy(caps_base) caps[-1] += ", profile rbd pool=rbd namespace=namespace{}".format(self.num * 10000 + num_complete) cmd['caps'] = caps cmd_start = time.time() ret, buf, out = conn.mon_command(json.dumps(cmd), b'') cmd_end = time.time() if ret != 0: self.queue.put((Exception("{0}: {1}".format(ret, out)), 0)) return num_complete += 1 print("Process {} finished op {} - latency: {}".format(self.num, num_complete, cmd_end - cmd_start)) def main(): parser = argparse.ArgumentParser(description=""" Benchmark updates to ceph users' capabilities. Run one update at a time in each thread. 
""") parser.add_argument( '-n', '--num-namespaces', type=int, default=300, help='number of namespaces per user', ) parser.add_argument( '-t', '--threads', type=int, default=10, help='number of threads (and thus parallel operations) to use', ) parser.add_argument( '-d', '--duration', type=int, default=30, help='how long to run, in seconds', ) args = parser.parse_args() num_namespaces = args.num_namespaces num_threads = args.threads duration = args.duration workers = [] results = [] q = multiprocessing.Queue() with rados.Rados(conffile=rados.Rados.DEFAULT_CONF_FILES) as conn: create_users(conn, num_namespaces, num_threads) for i in range(num_threads): workers.append(Worker(conn, i, q, duration)) workers[-1].start() for i in range(num_threads): num_complete, seconds = q.get() if isinstance(num_complete, Exception): raise num_complete results.append((num_complete, seconds)) total = 0 total_rate = 0 for num, sec in results: print("Completed {} in {} ({} / s)".format(num, sec, num / sec)) total += num total_rate += num / sec print("Total: ", total) print("Avg rate: ", total_rate / len(results)) if __name__ == '__main__': main()
3,432
31.386792
116
py
null
ceph-main/src/test/mon/moncap.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2012 Inktank * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <iostream> #include "include/stringify.h" #include "mon/MonCap.h" #include "gtest/gtest.h" using namespace std; const char *parse_good[] = { // MonCapMatch "allow *", "allow r", "allow rwx", "allow r", " allow rwx", "allow rwx ", " allow rwx ", " allow\t rwx ", "\tallow\nrwx\t", "allow service=foo x", "allow service=\"froo\" x", "allow profile osd", "allow profile osd-bootstrap", "allow profile \"mds-bootstrap\", allow *", "allow command \"a b c\"", "allow command abc", "allow command abc with arg=foo", "allow command abc with arg=foo arg2=bar", "allow command abc with arg=foo arg2=bar", "allow command abc with arg=foo arg2 prefix bar arg3 prefix baz", "allow command abc with arg=foo arg2 prefix \"bar bingo\" arg3 prefix baz", "allow command abc with arg regex \"^[0-9a-z.]*$\"", "allow command abc with arg regex \"\(invaluid regex\"", "allow service foo x", "allow service foo x; allow service bar x", "allow service foo w ;allow service bar x", "allow service foo w , allow service bar x", "allow service foo r , allow service bar x", "allow service foo_foo r, allow service bar r", "allow service foo-foo r, allow service bar r", "allow service \" foo \" w, allow service bar r", "allow command abc with arg=foo arg2=bar, allow service foo r", "allow command abc.def with arg=foo arg2=bar, allow service foo r", "allow command \"foo bar\" with arg=\"baz\"", "allow command \"foo bar\" with arg=\"baz.xx\"", "profile osd", "profile \"mds-bootstrap\", profile foo", "allow * network 1.2.3.4/24", "allow * network ::1/128", "allow * network [aa:bb::1]/128", "allow service=foo x network 
1.2.3.4/16", "allow command abc network 1.2.3.4/8", "profile osd network 1.2.3.4/32", "allow profile mon network 1.2.3.4/32", 0 }; TEST(MonCap, ParseGood) { for (int i=0; parse_good[i]; ++i) { string str = parse_good[i]; MonCap cap; std::cout << "Testing good input: '" << str << "'" << std::endl; ASSERT_TRUE(cap.parse(str, &cout)); std::cout << " -> " << cap << std::endl; } } // these should stringify to the input value const char *parse_identity[] = { "allow *", "allow r", "allow rwx", "allow service foo x", "allow profile osd", "allow profile osd-bootstrap", "allow profile mds-bootstrap, allow *", "allow profile \"foo bar\", allow *", "allow command abc", "allow command \"a b c\"", "allow command abc with arg=foo", "allow command abc with arg=foo arg2=bar", "allow command abc with arg=foo arg2=bar", "allow command abc with arg=foo arg2 prefix bar arg3 prefix baz", "allow command abc with arg=foo arg2 prefix \"bar bingo\" arg3 prefix baz", "allow service foo x", "allow service foo x, allow service bar x", "allow service foo w, allow service bar x", "allow service foo r, allow service bar x", "allow service foo_foo r, allow service bar r", "allow service foo-foo r, allow service bar r", "allow service \" foo \" w, allow service bar r", "allow command abc with arg=foo arg2=bar, allow service foo r", 0 }; TEST(MonCap, ParseIdentity) { for (int i=0; parse_identity[i]; ++i) { string str = parse_identity[i]; MonCap cap; std::cout << "Testing good input: '" << str << "'" << std::endl; ASSERT_TRUE(cap.parse(str, &cout)); string out = stringify(cap); ASSERT_EQ(out, str); } } const char *parse_bad[] = { "allow r foo", "allow*", "foo allow *", "allow profile foo rwx", "allow profile", "allow profile foo bar rwx", "allow service bar", "allow command baz x", "allow r w", "ALLOW r", "allow rwx,", "allow rwx x", "allow r pool foo r", "allow wwx pool taco", "allow wwx pool taco^funny&chars", "allow rwx pool 'weird name''", "allow rwx object_prefix \"beforepool\" pool weird", 
"allow rwx auid 123 pool asdf", "allow command foo a prefix b", "allow command foo with a prefixb", "allow command foo with a = prefix b", "allow command foo with a prefix b c", 0 }; TEST(MonCap, ParseBad) { for (int i=0; parse_bad[i]; ++i) { string str = parse_bad[i]; MonCap cap; std::cout << "Testing bad input: '" << str << "'" << std::endl; ASSERT_FALSE(cap.parse(str, &cout)); } } TEST(MonCap, AllowAll) { MonCap cap; ASSERT_FALSE(cap.is_allow_all()); ASSERT_TRUE(cap.parse("allow r", NULL)); ASSERT_FALSE(cap.is_allow_all()); cap.grants.clear(); ASSERT_TRUE(cap.parse("allow w", NULL)); ASSERT_FALSE(cap.is_allow_all()); cap.grants.clear(); ASSERT_TRUE(cap.parse("allow x", NULL)); ASSERT_FALSE(cap.is_allow_all()); cap.grants.clear(); ASSERT_TRUE(cap.parse("allow rwx", NULL)); ASSERT_FALSE(cap.is_allow_all()); cap.grants.clear(); ASSERT_TRUE(cap.parse("allow rw", NULL)); ASSERT_FALSE(cap.is_allow_all()); cap.grants.clear(); ASSERT_TRUE(cap.parse("allow rx", NULL)); ASSERT_FALSE(cap.is_allow_all()); cap.grants.clear(); ASSERT_TRUE(cap.parse("allow wx", NULL)); ASSERT_FALSE(cap.is_allow_all()); cap.grants.clear(); ASSERT_TRUE(cap.parse("allow *", NULL)); ASSERT_TRUE(cap.is_allow_all()); ASSERT_TRUE(cap.is_capable(NULL, {}, "foo", "asdf", {}, true, true, true, {})); MonCap cap2; ASSERT_FALSE(cap2.is_allow_all()); cap2.set_allow_all(); ASSERT_TRUE(cap2.is_allow_all()); } TEST(MonCap, Network) { MonCap cap; bool r = cap.parse("allow * network 192.168.0.0/16, allow * network 10.0.0.0/8", NULL); ASSERT_TRUE(r); entity_addr_t a, b, c; a.parse("10.1.2.3"); b.parse("192.168.2.3"); c.parse("192.167.2.3"); ASSERT_TRUE(cap.is_capable(NULL, {}, "foo", "asdf", {}, true, true, true, a)); ASSERT_TRUE(cap.is_capable(NULL, {}, "foo", "asdf", {}, true, true, true, b)); ASSERT_FALSE(cap.is_capable(NULL, {}, "foo", "asdf", {}, true, true, true, c)); } TEST(MonCap, ProfileOSD) { MonCap cap; bool r = cap.parse("allow profile osd", NULL); ASSERT_TRUE(r); EntityName name; 
name.from_str("osd.123"); map<string,string> ca; ASSERT_TRUE(cap.is_capable(NULL, name, "osd", "", ca, true, false, false, {})); ASSERT_TRUE(cap.is_capable(NULL, name, "osd", "", ca, true, true, false, {})); ASSERT_TRUE(cap.is_capable(NULL, name, "osd", "", ca, true, true, true, {})); ASSERT_TRUE(cap.is_capable(NULL, name, "osd", "", ca, true, true, true, {})); ASSERT_TRUE(cap.is_capable(NULL, name, "mon", "", ca, true, false, false, {})); ASSERT_FALSE(cap.is_capable(NULL, name, "mds", "", ca, true, true, true, {})); ASSERT_FALSE(cap.is_capable(NULL, name, "mon", "", ca, true, true, true, {})); ca.clear(); ASSERT_FALSE(cap.is_capable(NULL, name, "", "config-key get", ca, true, true, true, {})); ca["key"] = "daemon-private/osd.123"; ASSERT_FALSE(cap.is_capable(NULL, name, "", "config-key get", ca, true, true, true, {})); ca["key"] = "daemon-private/osd.12/asdf"; ASSERT_FALSE(cap.is_capable(NULL, name, "", "config-key get", ca, true, true, true, {})); ca["key"] = "daemon-private/osd.123/"; ASSERT_TRUE(cap.is_capable(NULL, name, "", "config-key get", ca, true, true, true, {})); ASSERT_TRUE(cap.is_capable(NULL, name, "", "config-key get", ca, true, true, true, {})); ASSERT_TRUE(cap.is_capable(NULL, name, "", "config-key get", ca, true, true, true, {})); ca["key"] = "daemon-private/osd.123/foo"; ASSERT_TRUE(cap.is_capable(NULL, name, "", "config-key get", ca, true, true, true, {})); ASSERT_TRUE(cap.is_capable(NULL, name, "", "config-key put", ca, true, true, true, {})); ASSERT_TRUE(cap.is_capable(NULL, name, "", "config-key set", ca, true, true, true, {})); ASSERT_TRUE(cap.is_capable(NULL, name, "", "config-key exists", ca, true, true, true, {})); ASSERT_TRUE(cap.is_capable(NULL, name, "", "config-key delete", ca, true, true, true, {})); } TEST(MonCap, CommandRegEx) { MonCap cap; ASSERT_FALSE(cap.is_allow_all()); ASSERT_TRUE(cap.parse("allow command abc with arg regex \"^[0-9a-z.]*$\"", NULL)); EntityName name; name.from_str("osd.123"); 
ASSERT_TRUE(cap.is_capable(nullptr, name, "", "abc", {{"arg", "12345abcde"}}, true, true, true, {})); ASSERT_FALSE(cap.is_capable(nullptr, name, "", "abc", {{"arg", "~!@#$"}}, true, true, true, {})); ASSERT_TRUE(cap.parse("allow command abc with arg regex \"[*\"", NULL)); ASSERT_FALSE(cap.is_capable(nullptr, name, "", "abc", {{"arg", ""}}, true, true, true, {})); } TEST(MonCap, ProfileBootstrapRBD) { MonCap cap; ASSERT_FALSE(cap.is_allow_all()); ASSERT_TRUE(cap.parse("profile bootstrap-rbd", NULL)); EntityName name; name.from_str("mon.a"); ASSERT_TRUE(cap.is_capable(nullptr, name, "", "auth get-or-create", { {"entity", "client.rbd"}, {"caps_mon", "profile rbd"}, {"caps_osd", "profile rbd pool=foo, profile rbd-read-only"}, }, true, true, true, {})); ASSERT_FALSE(cap.is_capable(nullptr, name, "", "auth get-or-create", { {"entity", "client.rbd"}, {"caps_mon", "allow *"}, {"caps_osd", "profile rbd"}, }, true, true, true, {})); ASSERT_FALSE(cap.is_capable(nullptr, name, "", "auth get-or-create", { {"entity", "client.rbd"}, {"caps_mon", "profile rbd"}, {"caps_osd", "profile rbd pool=foo, allow *, profile rbd-read-only"}, }, true, true, true, {})); } TEST(MonCap, ProfileBootstrapRBDMirror) { MonCap cap; ASSERT_FALSE(cap.is_allow_all()); ASSERT_TRUE(cap.parse("profile bootstrap-rbd-mirror", NULL)); EntityName name; name.from_str("mon.a"); ASSERT_TRUE(cap.is_capable(nullptr, name, "", "auth get-or-create", { {"entity", "client.rbd"}, {"caps_mon", "profile rbd-mirror"}, {"caps_osd", "profile rbd pool=foo, profile rbd-read-only"}, }, true, true, true, {})); ASSERT_FALSE(cap.is_capable(nullptr, name, "", "auth get-or-create", { {"entity", "client.rbd"}, {"caps_mon", "profile rbd"}, {"caps_osd", "profile rbd pool=foo, profile rbd-read-only"}, }, true, true, true, {})); ASSERT_FALSE(cap.is_capable(nullptr, name, "", "auth get-or-create", { {"entity", "client.rbd"}, {"caps_mon", "allow *"}, {"caps_osd", "profile rbd"}, }, true, true, true, {})); 
ASSERT_FALSE(cap.is_capable(nullptr, name, "", "auth get-or-create", { {"entity", "client.rbd"}, {"caps_mon", "profile rbd-mirror"}, {"caps_osd", "profile rbd pool=foo, allow *, profile rbd-read-only"}, }, true, true, true, {})); } TEST(MonCap, ProfileRBD) { MonCap cap; ASSERT_FALSE(cap.is_allow_all()); ASSERT_TRUE(cap.parse("profile rbd", NULL)); EntityName name; name.from_str("mon.a"); ASSERT_FALSE(cap.is_capable(nullptr, name, "config-key", "config-key get", { {"key", "rbd/mirror/peer/1/1234"}, }, true, false, false, {})); } TEST(MonCap, ProfileRBDMirror) { MonCap cap; ASSERT_FALSE(cap.is_allow_all()); ASSERT_TRUE(cap.parse("profile rbd-mirror", NULL)); EntityName name; name.from_str("mon.a"); ASSERT_TRUE(cap.is_capable(nullptr, name, "config-key", "config-key get", { {"key", "rbd/mirror/peer/1/1234"}, }, true, false, false, {})); }
13,171
33.754617
101
cc
null
ceph-main/src/test/mon/test-mon-msg.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2014 Red Hat * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. */ #include <stdio.h> #include <string.h> #include <iostream> #include <sstream> #include <time.h> #include <stdlib.h> #include <map> #include "global/global_init.h" #include "global/global_context.h" #include "common/async/context_pool.h" #include "common/ceph_argparse.h" #include "common/version.h" #include "common/dout.h" #include "common/debug.h" #include "common/ceph_mutex.h" #include "common/Timer.h" #include "common/errno.h" #include "mon/MonClient.h" #include "msg/Dispatcher.h" #include "include/err.h" #include <boost/scoped_ptr.hpp> #include "gtest/gtest.h" #include "common/config.h" #include "include/ceph_assert.h" #include "messages/MMonProbe.h" #include "messages/MRoute.h" #include "messages/MGenericMessage.h" #include "messages/MMonJoin.h" #define dout_context g_ceph_context #define dout_subsys ceph_subsys_ #undef dout_prefix #define dout_prefix *_dout << "test-mon-msg " using namespace std; class MonClientHelper : public Dispatcher { protected: CephContext *cct; ceph::async::io_context_pool poolctx; Messenger *msg; MonClient monc; ceph::mutex lock = ceph::make_mutex("mon-msg-test::lock"); set<int> wanted; public: explicit MonClientHelper(CephContext *cct_) : Dispatcher(cct_), cct(cct_), poolctx(1), msg(NULL), monc(cct_, poolctx) { } int post_init() { dout(1) << __func__ << dendl; if (!msg) return -EINVAL; msg->add_dispatcher_tail(this); return 0; } int init_messenger() { dout(1) << __func__ << dendl; std::string public_msgr_type = cct->_conf->ms_public_type.empty() ? 
cct->_conf.get_val<std::string>("ms_type") : cct->_conf->ms_public_type; msg = Messenger::create(cct, public_msgr_type, entity_name_t::CLIENT(-1), "test-mon-msg", 0); ceph_assert(msg != NULL); msg->set_default_policy(Messenger::Policy::lossy_client(0)); dout(0) << __func__ << " starting messenger at " << msg->get_myaddrs() << dendl; msg->start(); return 0; } int init_monc() { dout(1) << __func__ << dendl; ceph_assert(msg != NULL); int err = monc.build_initial_monmap(); if (err < 0) { derr << __func__ << " error building monmap: " << cpp_strerror(err) << dendl; return err; } monc.set_messenger(msg); msg->add_dispatcher_head(&monc); monc.set_want_keys(CEPH_ENTITY_TYPE_MON); err = monc.init(); if (err < 0) { derr << __func__ << " monc init failed: " << cpp_strerror(err) << dendl; goto fail; } err = monc.authenticate(); if (err < 0) { derr << __func__ << " monc auth failed: " << cpp_strerror(err) << dendl; goto fail_monc; } monc.wait_auth_rotating(30.0); monc.renew_subs(); dout(0) << __func__ << " finished" << dendl; return 0; fail_monc: derr << __func__ << " failing monc" << dendl; monc.shutdown(); fail: return err; } void shutdown_messenger() { dout(0) << __func__ << dendl; msg->shutdown(); msg->wait(); } void shutdown_monc() { dout(0) << __func__ << dendl; monc.shutdown(); } void shutdown() { dout(0) << __func__ << dendl; shutdown_monc(); shutdown_messenger(); } MonMap *get_monmap() { return &monc.monmap; } int init() { int err = init_messenger(); if (err < 0) goto fail; err = init_monc(); if (err < 0) goto fail_msgr; err = post_init(); if (err < 0) goto fail_monc; return 0; fail_monc: shutdown_monc(); fail_msgr: shutdown_messenger(); fail: return err; } virtual void handle_wanted(Message *m) { } bool handle_message(Message *m) { dout(1) << __func__ << " " << *m << dendl; if (!is_wanted(m)) { dout(10) << __func__ << " not wanted" << dendl; return false; } handle_wanted(m); m->put(); return true; } bool ms_dispatch(Message *m) override { return handle_message(m); } 
void ms_handle_connect(Connection *con) override { } void ms_handle_remote_reset(Connection *con) override { } bool ms_handle_reset(Connection *con) override { return false; } bool ms_handle_refused(Connection *con) override { return false; } bool is_wanted(Message *m) { dout(20) << __func__ << " " << *m << " type " << m->get_type() << dendl; return (wanted.find(m->get_type()) != wanted.end()); } void add_wanted(int t) { dout(20) << __func__ << " type " << t << dendl; wanted.insert(t); } void rm_wanted(int t) { dout(20) << __func__ << " type " << t << dendl; wanted.erase(t); } void send_message(Message *m) { dout(15) << __func__ << " " << *m << dendl; monc.send_mon_message(m); } void wait() { msg->wait(); } }; class MonMsgTest : public MonClientHelper, public ::testing::Test { protected: int reply_type = 0; Message *reply_msg = nullptr; ceph::mutex lock = ceph::make_mutex("lock"); ceph::condition_variable cond; MonMsgTest() : MonClientHelper(g_ceph_context) { } public: void SetUp() override { reply_type = -1; if (reply_msg) { reply_msg->put(); reply_msg = nullptr; } ASSERT_EQ(init(), 0); } void TearDown() override { shutdown(); if (reply_msg) { reply_msg->put(); reply_msg = nullptr; } } void handle_wanted(Message *m) override { std::lock_guard l{lock}; // caller will put() after they call us, so hold on to a ref m->get(); reply_msg = m; cond.notify_all(); } Message *send_wait_reply(Message *m, int t, double timeout=30.0) { std::unique_lock l{lock}; reply_type = t; add_wanted(t); send_message(m); std::cv_status status = std::cv_status::no_timeout; if (timeout > 0) { utime_t s = ceph_clock_now(); status = cond.wait_for(l, ceph::make_timespan(timeout)); utime_t e = ceph_clock_now(); dout(20) << __func__ << " took " << (e-s) << " seconds" << dendl; } else { cond.wait(l); } rm_wanted(t); l.unlock(); if (status == std::cv_status::timeout) { dout(20) << __func__ << " error: " << cpp_strerror(ETIMEDOUT) << dendl; return (Message*)((long)-ETIMEDOUT); } if (!reply_msg) 
dout(20) << __func__ << " reply_msg is nullptr" << dendl; else dout(20) << __func__ << " reply_msg " << *reply_msg << dendl; return reply_msg; } }; TEST_F(MonMsgTest, MMonProbeTest) { Message *m = new MMonProbe(get_monmap()->fsid, MMonProbe::OP_PROBE, "b", false, ceph_release()); Message *r = send_wait_reply(m, MSG_MON_PROBE); ASSERT_NE(IS_ERR(r), 0); ASSERT_EQ(PTR_ERR(r), -ETIMEDOUT); } TEST_F(MonMsgTest, MRouteTest) { Message *payload = new MGenericMessage(CEPH_MSG_SHUTDOWN); MRoute *m = new MRoute; m->msg = payload; Message *r = send_wait_reply(m, CEPH_MSG_SHUTDOWN); // we want an error ASSERT_NE(IS_ERR(r), 0); ASSERT_EQ(PTR_ERR(r), -ETIMEDOUT); } /* MMonScrub and MMonSync have other safeguards in place that prevent * us from actually receiving a reply even if the message is handled * by the monitor due to lack of cap checking. */ TEST_F(MonMsgTest, MMonJoin) { Message *m = new MMonJoin(get_monmap()->fsid, string("client"), msg->get_myaddrs()); send_wait_reply(m, MSG_MON_PAXOS, 10.0); int r = monc.get_monmap(); ASSERT_EQ(r, 0); ASSERT_FALSE(monc.monmap.contains("client")); } int main(int argc, char *argv[]) { auto args = argv_to_vec(argc, argv); auto cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); common_init_finish(g_ceph_context); g_ceph_context->_conf.apply_changes(nullptr); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
8,231
23.211765
144
cc
null
ceph-main/src/test/mon/test_election.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "gtest/gtest.h" #include "mon/ElectionLogic.h" #include "mon/ConnectionTracker.h" #include "common/dout.h" #include "global/global_context.h" #include "global/global_init.h" #include "common/common_init.h" #include "common/ceph_argparse.h" using namespace std; #define dout_subsys ceph_subsys_test #undef dout_prefix #define dout_prefix _prefix(_dout, prefix_name(), timestep_count()) static ostream& _prefix(std::ostream *_dout, const char *prefix, int timesteps) { return *_dout << prefix << timesteps << " "; } const char* prefix_name() { return "test_election: "; } int timestep_count() { return -1; } int main(int argc, char **argv) { vector<const char*> args(argv, argv+argc); bool user_set_debug = false; for (auto& arg : args) { if (strncmp("--debug_mon", arg, 11) == 0) user_set_debug = true; } auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); common_init_finish(g_ceph_context); if (!user_set_debug) g_ceph_context->_conf.set_val("debug mon", "0/20"); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } class Owner; struct Election { map<int, Owner*> electors; map<int, set<int> > blocked_messages; int count; ElectionLogic::election_strategy election_strategy; int ping_interval; set<int> disallowed_leaders; vector< function<void()> > messages; int pending_election_messages; int timesteps_run = 0; int last_quorum_change = 0; int last_quorum_formed = -1; set<int> last_quorum_reported; int last_leader = -1; Election(int c, ElectionLogic::election_strategy es, int pingi=1, double tracker_halflife=5); ~Election(); // ElectionOwner interfaces int get_paxos_size() { return count; } const set<int>& get_disallowed_leaders() const { return disallowed_leaders; } void propose_to(int from, int to, epoch_t e, bufferlist& cbl); void defer_to(int from, int to, epoch_t e); void claim_victory(int 
from, int to, epoch_t e, const set<int>& members); void accept_victory(int from, int to, epoch_t e); void report_quorum(const set<int>& quorum); void queue_stable_message(int from, int to, function<void()> m); void queue_timeout_message(int from, int to, function<void()> m); void queue_stable_or_timeout(int from, int to, function<void()> m, function<void()> t); void queue_election_message(int from, int to, function<void()> m); // test runner interfaces int run_timesteps(int max); void start_one(int who); void start_all(); bool election_stable() const; bool quorum_stable(int timesteps_stable) const; bool all_agree_on_leader() const; bool check_epoch_agreement() const; void block_messages(int from, int to); void block_bidirectional_messages(int a, int b); void unblock_messages(int from, int to); void unblock_bidirectional_messages(int a, int b); void add_disallowed_leader(int disallowed) { disallowed_leaders.insert(disallowed); } void remove_elector(int rank); const char* prefix_name() const { return "Election: "; } int timestep_count() const { return timesteps_run; } }; struct Owner : public ElectionOwner, RankProvider { Election *parent; int rank; epoch_t persisted_epoch; bool ever_joined; ConnectionTracker peer_tracker; ElectionLogic logic; set<int> quorum; int victory_accepters; int timer_steps; // timesteps until we trigger timeout bool timer_election; // the timeout is for normal election, or victory bool rank_deleted = false; string prefix_str; Owner(int r, ElectionLogic::election_strategy es, double tracker_halflife, Election *p) : parent(p), rank(r), persisted_epoch(0), ever_joined(false), peer_tracker(this, rank, tracker_halflife, 5, g_ceph_context), logic(this, es, &peer_tracker, 0.0005, g_ceph_context), victory_accepters(0), timer_steps(-1), timer_election(true) { std::stringstream str; str << "Owner" << rank << " "; prefix_str = str.str(); } // in-memory store: just save to variable void persist_epoch(epoch_t e) { persisted_epoch = e; } // in-memory 
store: just return variable epoch_t read_persisted_epoch() const { return persisted_epoch; } // in-memory store: don't need to validate void validate_store() { return; } // don't need to do anything with our state right now void notify_bump_epoch() {} void notify_rank_removed(int removed_rank) { ldout(g_ceph_context, 1) << "removed_rank: " << removed_rank << dendl; ldout(g_ceph_context, 1) << "rank before: " << rank << dendl; if (removed_rank < rank) { --rank; } peer_tracker.notify_rank_removed(removed_rank, rank); ldout(g_ceph_context, 1) << "rank after: " << rank << dendl; } void notify_deleted() { rank_deleted = true; rank = -1; cancel_timer(); } // pass back to ElectionLogic; we don't need this redirect ourselves void trigger_new_election() { ceph_assert (!rank_deleted); logic.start(); } int get_my_rank() const { return rank; } // we don't need to persist scores as we don't reset and lose memory state void persist_connectivity_scores() {} void propose_to_peers(epoch_t e, bufferlist& bl) { ceph_assert (!rank_deleted); for (int i = 0; i < parent->get_paxos_size(); ++i) { if (i == rank) continue; parent->propose_to(rank, i, e, bl); } } void reset_election() { ceph_assert (!rank_deleted); _start(); logic.start(); } bool ever_participated() const { return ever_joined; } unsigned paxos_size() const { return parent->get_paxos_size(); } const set<int>& get_disallowed_leaders() const { return parent->get_disallowed_leaders(); } void cancel_timer() { timer_steps = -1; } void reset_timer(int steps) { cancel_timer(); timer_steps = 3 + steps; // FIXME? magic number, current step + roundtrip timer_election = true; } void start_victory_timer() { cancel_timer(); timer_election = false; timer_steps = 3; // FIXME? current step + roundtrip } void _start() { reset_timer(0); quorum.clear(); } void _defer_to(int who) { ceph_assert (!rank_deleted); parent->defer_to(rank, who, logic.get_epoch()); reset_timer(0); // wtf does changing this 0->1 cause breakage? 
} void message_victory(const std::set<int>& members) { ceph_assert (!rank_deleted); for (auto i : members) { if (i == rank) continue; parent->claim_victory(rank, i, logic.get_epoch(), members); } start_victory_timer(); quorum = members; victory_accepters = 1; } bool is_current_member(int r) const { return quorum.count(r) != 0; } void receive_propose(int from, epoch_t e, ConnectionTracker *oct) { if (rank_deleted) return; logic.receive_propose(from, e, oct); delete oct; } void receive_ack(int from, epoch_t e) { if (rank_deleted) return; if (e < logic.get_epoch()) return; logic.receive_ack(from, e); } void receive_victory_claim(int from, epoch_t e, const set<int>& members) { if (rank_deleted) return; if (e < logic.get_epoch()) return; if (logic.receive_victory_claim(from, e)) { quorum = members; cancel_timer(); parent->accept_victory(rank, from, e); } } void receive_victory_ack(int from, epoch_t e) { if (rank_deleted) return; if (e < logic.get_epoch()) return; ++victory_accepters; if (victory_accepters == static_cast<int>(quorum.size())) { cancel_timer(); parent->report_quorum(quorum); } } void receive_scores(bufferlist bl) { ConnectionTracker oct(bl, g_ceph_context); peer_tracker.receive_peer_report(oct); ldout(g_ceph_context, 10) << "received scores " << oct << dendl; } void receive_ping(int from_rank, bufferlist bl) { ldout(g_ceph_context, 6) << "receive ping from " << from_rank << dendl; peer_tracker.report_live_connection(from_rank, parent->ping_interval); receive_scores(bl); } void receive_ping_timeout(int from_rank) { ldout(g_ceph_context, 6) << "timeout ping from " << from_rank << dendl; peer_tracker.report_dead_connection(from_rank, parent->ping_interval); } void election_timeout() { ldout(g_ceph_context, 2) << "election epoch " << logic.get_epoch() << " timed out for " << rank << ", electing me:" << logic.electing_me << ", acked_me:" << logic.acked_me << dendl; ceph_assert (!rank_deleted); logic.end_election_period(); } void victory_timeout() { 
ldout(g_ceph_context, 2) << "victory epoch " << logic.get_epoch() << " timed out for " << rank << ", electing me:" << logic.electing_me << ", acked_me:" << logic.acked_me << dendl; ceph_assert (!rank_deleted); reset_election(); } void encode_scores(bufferlist& bl) { encode(peer_tracker, bl); } void send_pings() { ceph_assert (!rank_deleted); if (!parent->ping_interval || parent->timesteps_run % parent->ping_interval != 0) { return; } bufferlist bl; encode_scores(bl); for (int i = 0; i < parent->get_paxos_size(); ++i) { if (i == rank) continue; Owner *o = parent->electors[i]; parent->queue_stable_or_timeout(rank, i, [o, r=rank, bl] { o->receive_ping(r, bl); }, [o, r=rank] { o->receive_ping_timeout(r); } ); } } void notify_timestep() { ceph_assert (!rank_deleted); assert(timer_steps != 0); if (timer_steps > 0) { --timer_steps; } if (timer_steps == 0) { if (timer_election) { election_timeout(); } else { victory_timeout(); } } send_pings(); } const char *prefix_name() const { return prefix_str.c_str(); } int timestep_count() const { return parent->timesteps_run; } }; Election::Election(int c, ElectionLogic::election_strategy es, int pingi, double tracker_halflife) : count(c), election_strategy(es), ping_interval(pingi), pending_election_messages(0), timesteps_run(0), last_quorum_change(0), last_quorum_formed(-1) { for (int i = 0; i < count; ++i) { electors[i] = new Owner(i, election_strategy, tracker_halflife, this); } } Election::~Election() { { for (auto i : electors) { delete i.second; } } } void Election::queue_stable_message(int from, int to, function<void()> m) { if (!blocked_messages[from].count(to)) { messages.push_back(m); } } void Election::queue_election_message(int from, int to, function<void()> m) { if (last_quorum_reported.count(from)) { last_quorum_change = timesteps_run; last_quorum_reported.clear(); last_leader = -1; } if (!blocked_messages[from].count(to)) { bufferlist bl; electors[from]->encode_scores(bl); Owner *o = electors[to]; 
messages.push_back([this,m,o,bl] { --this->pending_election_messages; o->receive_scores(bl); m(); }); ++pending_election_messages; } } void Election::queue_timeout_message(int from, int to, function<void()> m) { ceph_assert(blocked_messages[from].count(to)); messages.push_back(m); } void Election::queue_stable_or_timeout(int from, int to, function<void()> m, function<void()> t) { if (blocked_messages[from].count(to)) { queue_timeout_message(from, to, t); } else { queue_stable_message(from, to, m); } } void Election::defer_to(int from, int to, epoch_t e) { Owner *o = electors[to]; queue_election_message(from, to, [o, from, e] { o->receive_ack(from, e); }); } void Election::propose_to(int from, int to, epoch_t e, bufferlist& cbl) { Owner *o = electors[to]; ConnectionTracker *oct = NULL; if (cbl.length()) { oct = new ConnectionTracker(cbl, g_ceph_context); // we leak these on blocked cons, meh } queue_election_message(from, to, [o, from, e, oct] { o->receive_propose(from, e, oct); }); } void Election::claim_victory(int from, int to, epoch_t e, const set<int>& members) { Owner *o = electors[to]; queue_election_message(from, to, [o, from, e, members] { o->receive_victory_claim(from, e, members); }); } void Election::accept_victory(int from, int to, epoch_t e) { Owner *o = electors[to]; queue_election_message(from, to, [o, from, e] { o->receive_victory_ack(from, e); }); } void Election::report_quorum(const set<int>& quorum) { for (int i : quorum) { electors[i]->ever_joined = true; } last_quorum_formed = last_quorum_change = timesteps_run; last_quorum_reported = quorum; last_leader = electors[*(quorum.begin())]->logic.get_election_winner(); } int Election::run_timesteps(int max) { vector< function<void()> > current_m; int steps = 0; for (; (!max || steps < max) && // we have timesteps left AND ONE OF (pending_election_messages || // there are messages pending. 
!election_stable()); // somebody's not happy and will act in future ++steps) { current_m.clear(); current_m.swap(messages); ++timesteps_run; for (auto& m : current_m) { m(); } for (auto o : electors) { o.second->notify_timestep(); } } return steps; } void Election::start_one(int who) { assert(who < static_cast<int>(electors.size())); electors[who]->logic.start(); } void Election::start_all() { for (auto e : electors) { e.second->logic.start(); } } bool Election::election_stable() const { // see if anybody has a timer running for (auto i : electors) { if (i.second->timer_steps != -1) { ldout(g_ceph_context, 30) << "rank " << i.first << " has timer value " << i.second->timer_steps << dendl; return false; } } return (pending_election_messages == 0); } bool Election::quorum_stable(int timesteps_stable) const { ldout(g_ceph_context, 1) << "quorum_stable? last formed:" << last_quorum_formed << ", last changed " << last_quorum_change << ", last reported members " << last_quorum_reported << dendl; if (last_quorum_reported.empty()) { return false; } if (last_quorum_formed < last_quorum_change) { return false; } for (auto i : last_quorum_reported) { if (electors.find(i)->second->timer_steps != -1) { return false; } } if (timesteps_run - timesteps_stable > last_quorum_change) return true; return election_stable(); } bool Election::all_agree_on_leader() const { int leader = electors.find(0)->second->logic.get_election_winner(); ldout(g_ceph_context, 10) << "all_agree_on_leader on " << leader << dendl; for (auto& i: electors) { if (leader != i.second->logic.get_election_winner()) { ldout(g_ceph_context, 10) << "rank " << i.first << " has different leader " << i.second->logic.get_election_winner() << dendl; return false; } } if (disallowed_leaders.count(leader)) { ldout(g_ceph_context, 10) << "that leader is disallowed! 
member of " << disallowed_leaders << dendl; return false; } return true; } bool Election::check_epoch_agreement() const { epoch_t epoch = electors.find(0)->second->logic.get_epoch(); for (auto& i : electors) { if (epoch != i.second->logic.get_epoch()) { return false; } } return true; } void Election::block_messages(int from, int to) { blocked_messages[from].insert(to); } void Election::block_bidirectional_messages(int a, int b) { block_messages(a, b); block_messages(b, a); } void Election::unblock_messages(int from, int to) { blocked_messages[from].erase(to); } void Election::unblock_bidirectional_messages(int a, int b) { unblock_messages(a, b); unblock_messages(b, a); } void Election::remove_elector(int rank) { for (auto ei = electors.begin(); ei != electors.end(); ) { if (ei->first == rank) { ei->second->notify_deleted(); electors.erase(ei++); continue; } ei->second->notify_rank_removed(rank); if (ei->first > rank) { electors[ei->first - 1] = ei->second; electors.erase(ei++); continue; } ++ei; } for (auto bi = blocked_messages.begin(); bi != blocked_messages.end(); ) { if (bi->first == rank) { blocked_messages.erase(bi++); continue; } bi->second.erase(rank); for (auto i = bi->second.upper_bound(rank); i != bi->second.end();) { bi->second.insert(*i - 1); bi->second.erase(*(i++)); } ++bi; } --count; } void single_startup_election_completes(ElectionLogic::election_strategy strategy) { for (int starter = 0; starter < 5; ++starter) { Election election(5, strategy); election.start_one(starter); // This test is not actually legit since you should start // all the ElectionLogics, but it seems to work int steps = election.run_timesteps(0); ldout(g_ceph_context, 1) << "ran in " << steps << " timesteps" << dendl; ASSERT_TRUE(election.election_stable()); ASSERT_TRUE(election.quorum_stable(6)); // double the timer_steps we use ASSERT_TRUE(election.all_agree_on_leader()); ASSERT_TRUE(election.check_epoch_agreement()); } } void 
everybody_starts_completes(ElectionLogic::election_strategy strategy) { Election election(5, strategy); election.start_all(); int steps = election.run_timesteps(0); ldout(g_ceph_context, 1) << "ran in " << steps << " timesteps" << dendl; ASSERT_TRUE(election.election_stable()); ASSERT_TRUE(election.quorum_stable(6)); // double the timer_steps we use ASSERT_TRUE(election.all_agree_on_leader()); ASSERT_TRUE(election.check_epoch_agreement()); } void blocked_connection_continues_election(ElectionLogic::election_strategy strategy) { Election election(5, strategy); election.block_bidirectional_messages(0, 1); election.start_all(); int steps = election.run_timesteps(100); ldout(g_ceph_context, 1) << "ran in " << steps << " timesteps" << dendl; // This is a failure mode! ASSERT_FALSE(election.election_stable()); ASSERT_FALSE(election.quorum_stable(6)); // double the timer_steps we use election.unblock_bidirectional_messages(0, 1); steps = election.run_timesteps(100); ldout(g_ceph_context, 1) << "ran in " << steps << " timesteps" << dendl; ASSERT_TRUE(election.election_stable()); ASSERT_TRUE(election.quorum_stable(6)); // double the timer_steps we use ASSERT_TRUE(election.all_agree_on_leader()); ASSERT_TRUE(election.check_epoch_agreement()); } void blocked_connection_converges_election(ElectionLogic::election_strategy strategy) { Election election(5, strategy); election.block_bidirectional_messages(0, 1); election.start_all(); int steps = election.run_timesteps(100); ldout(g_ceph_context, 1) << "ran in " << steps << " timesteps" << dendl; ASSERT_TRUE(election.election_stable()); ASSERT_TRUE(election.all_agree_on_leader()); ASSERT_TRUE(election.check_epoch_agreement()); election.unblock_bidirectional_messages(0, 1); steps = election.run_timesteps(100); ldout(g_ceph_context, 1) << "ran in " << steps << " timesteps" << dendl; ASSERT_TRUE(election.election_stable()); ASSERT_TRUE(election.all_agree_on_leader()); ASSERT_TRUE(election.check_epoch_agreement()); } void 
disallowed_doesnt_win(ElectionLogic::election_strategy strategy) { int MON_COUNT = 5; for (int i = 0; i < MON_COUNT - 1; ++i) { Election election(MON_COUNT, strategy); for (int j = 0; j <= i; ++j) { election.add_disallowed_leader(j); } election.start_all(); int steps = election.run_timesteps(0); ldout(g_ceph_context, 1) << "ran in " << steps << " timesteps" << dendl; ASSERT_TRUE(election.election_stable()); ASSERT_TRUE(election.quorum_stable(6)); // double the timer_steps we use ASSERT_TRUE(election.all_agree_on_leader()); ASSERT_TRUE(election.check_epoch_agreement()); int leader = election.electors[0]->logic.get_election_winner(); for (int j = 0; j <= i; ++j) { ASSERT_NE(j, leader); } } for (int i = MON_COUNT - 1; i > 0; --i) { Election election(MON_COUNT, strategy); for (int j = i; j <= MON_COUNT - 1; ++j) { election.add_disallowed_leader(j); } election.start_all(); int steps = election.run_timesteps(0); ldout(g_ceph_context, 1) << "ran in " << steps << " timesteps" << dendl; ASSERT_TRUE(election.election_stable()); ASSERT_TRUE(election.quorum_stable(6)); // double the timer_steps we use ASSERT_TRUE(election.all_agree_on_leader()); ASSERT_TRUE(election.check_epoch_agreement()); int leader = election.electors[0]->logic.get_election_winner(); for (int j = i; j < MON_COUNT; ++j) { ASSERT_NE(j, leader); } } } void converges_after_flapping(ElectionLogic::election_strategy strategy) { Election election(5, strategy); auto block_cons = [&] { auto& e = election; // leave 4 connected to both sides so it will trigger but not trivially win e.block_bidirectional_messages(0, 2); e.block_bidirectional_messages(0, 3); e.block_bidirectional_messages(1, 2); e.block_bidirectional_messages(1, 3); }; auto unblock_cons = [&] { auto& e = election; e.unblock_bidirectional_messages(0, 2); e.unblock_bidirectional_messages(0, 3); e.unblock_bidirectional_messages(1, 2); e.unblock_bidirectional_messages(1, 3); }; block_cons(); election.start_all(); for (int i = 0; i < 5; ++i) { 
election.run_timesteps(5); unblock_cons(); election.run_timesteps(5); block_cons(); } unblock_cons(); election.run_timesteps(100); ASSERT_TRUE(election.election_stable()); ASSERT_TRUE(election.quorum_stable(6)); // double the timer_steps we use ASSERT_TRUE(election.all_agree_on_leader()); ASSERT_TRUE(election.check_epoch_agreement()); } void converges_while_flapping(ElectionLogic::election_strategy strategy) { Election election(5, strategy); auto block_cons = [&] { auto& e = election; // leave 4 connected to both sides so it will trigger but not trivially win e.block_bidirectional_messages(0, 2); e.block_bidirectional_messages(0, 3); e.block_bidirectional_messages(1, 2); e.block_bidirectional_messages(1, 3); }; auto unblock_cons = [&] { auto& e = election; e.unblock_bidirectional_messages(0, 2); e.unblock_bidirectional_messages(0, 3); e.unblock_bidirectional_messages(1, 2); e.unblock_bidirectional_messages(1, 3); }; block_cons(); election.start_all(); for (int i = 0; i < 5; ++i) { election.run_timesteps(10); ASSERT_TRUE(election.quorum_stable(6)); unblock_cons(); election.run_timesteps(5); block_cons(); ASSERT_TRUE(election.election_stable()); ASSERT_TRUE(election.all_agree_on_leader()); ASSERT_TRUE(election.check_epoch_agreement()); } unblock_cons(); election.run_timesteps(100); ASSERT_TRUE(election.election_stable()); ASSERT_TRUE(election.quorum_stable(6)); ASSERT_TRUE(election.all_agree_on_leader()); ASSERT_TRUE(election.check_epoch_agreement()); } void netsplit_with_disallowed_tiebreaker_converges(ElectionLogic::election_strategy strategy) { Election election(5, strategy); election.add_disallowed_leader(4); auto netsplit = [&] { auto& e = election; e.block_bidirectional_messages(0, 2); e.block_bidirectional_messages(0, 3); e.block_bidirectional_messages(1, 2); e.block_bidirectional_messages(1, 3); }; auto unsplit = [&] { auto& e = election; e.unblock_bidirectional_messages(0, 2); e.unblock_bidirectional_messages(0, 3); e.unblock_bidirectional_messages(1, 2); 
e.unblock_bidirectional_messages(1, 3); }; // hmm, we don't have timeouts to call elections automatically yet auto call_elections = [&] { for (auto i : election.electors) { i.second->trigger_new_election(); } }; // turn everybody on, run happy for a while election.start_all(); election.run_timesteps(0); ASSERT_TRUE(election.election_stable()); ASSERT_TRUE(election.quorum_stable(6)); ASSERT_TRUE(election.all_agree_on_leader()); ASSERT_TRUE(election.check_epoch_agreement()); int starting_leader = election.last_leader; // do some netsplits, but leave disallowed tiebreaker alive for (int i = 0; i < 5; ++i) { netsplit(); call_elections(); election.run_timesteps(15); // tests fail when I run 10 because 0 and 1 time out on same timestamp for some reason, why? // this ASSERT_EQ only holds while we bias for ranks ASSERT_EQ(starting_leader, election.last_leader); ASSERT_TRUE(election.quorum_stable(6)); ASSERT_FALSE(election.election_stable()); unsplit(); call_elections(); election.run_timesteps(10); ASSERT_EQ(starting_leader, election.last_leader); ASSERT_TRUE(election.quorum_stable(6)); ASSERT_TRUE(election.election_stable()); ASSERT_TRUE(election.all_agree_on_leader()); ASSERT_TRUE(election.check_epoch_agreement()); } // now disconnect the tiebreaker and make sure nobody can win int presplit_quorum_time = election.last_quorum_formed; netsplit(); election.block_bidirectional_messages(4, 0); election.block_bidirectional_messages(4, 1); election.block_bidirectional_messages(4, 2); election.block_bidirectional_messages(4, 3); call_elections(); election.run_timesteps(100); ASSERT_EQ(election.last_quorum_formed, presplit_quorum_time); // now let in the previously-losing side election.unblock_bidirectional_messages(4, 2); election.unblock_bidirectional_messages(4, 3); call_elections(); election.run_timesteps(100); ASSERT_TRUE(election.quorum_stable(50)); ASSERT_FALSE(election.election_stable()); // now reconnect everybody unsplit(); election.unblock_bidirectional_messages(4, 0); 
// (Continuation of the preceding test body, which begins above this chunk:
// unblock the last link, re-run elections and require full convergence.)
election.unblock_bidirectional_messages(4, 1);
call_elections();
election.run_timesteps(100);
ASSERT_TRUE(election.quorum_stable(50));
ASSERT_TRUE(election.election_stable());
ASSERT_TRUE(election.all_agree_on_leader());
ASSERT_TRUE(election.check_epoch_agreement());
}

// A peon that can reach only one other monitor must still be able to join
// (and stay in) the quorum once at least one link to it is open.
void handles_singly_connected_peon(ElectionLogic::election_strategy strategy)
{
  Election election(5, strategy);
  // Isolate rank 0 from everybody.
  election.block_bidirectional_messages(0, 1);
  election.block_bidirectional_messages(0, 2);
  election.block_bidirectional_messages(0, 3);
  election.block_bidirectional_messages(0, 4);
  election.start_all();
  election.run_timesteps(20);
  // The other four form a quorum, but the overall election can't settle
  // while rank 0 keeps trying and failing.
  ASSERT_TRUE(election.quorum_stable(5));
  ASSERT_FALSE(election.election_stable());
  // Open a single link (0<->1): that alone must let 0 join and stabilize.
  election.unblock_bidirectional_messages(0, 1);
  election.run_timesteps(100);
  ASSERT_TRUE(election.quorum_stable(50));
  ASSERT_TRUE(election.election_stable());
  ASSERT_TRUE(election.all_agree_on_leader());
  ASSERT_TRUE(election.check_epoch_agreement());
  // Swap which single link is open (now 0<->4) and force new elections;
  // the cluster must converge again.
  election.block_bidirectional_messages(0, 1);
  election.unblock_bidirectional_messages(0, 4);
  for (auto i : election.electors) {
    i.second->trigger_new_election();
  }
  election.run_timesteps(15);
  ASSERT_TRUE(election.quorum_stable(50));
  ASSERT_TRUE(election.election_stable());
  ASSERT_TRUE(election.all_agree_on_leader());
  ASSERT_TRUE(election.check_epoch_agreement());
}

// Test-only accessors reaching into ConnectionTracker internals so the
// tests below can deliberately corrupt the connectivity scores.
ConnectionReport *get_connection_reports(ConnectionTracker& ct) {
  return &ct.my_reports;
}
map<int,ConnectionReport> *get_peer_reports(ConnectionTracker& ct) {
  return &ct.peer_reports;
}

// Elections must still converge when the ranks hold mutually-inconsistent
// (stale/mangled) connectivity scores at election time.
void handles_outdated_scoring(ElectionLogic::election_strategy strategy)
{
  Election election(3, strategy, 5); // ping every 5 timesteps so they start elections before settling scores!
  // start everybody up and run for a bit
  election.start_all();
  election.run_timesteps(20);
  ASSERT_TRUE(election.quorum_stable(5));
  ASSERT_TRUE(election.election_stable());
  ASSERT_TRUE(election.all_agree_on_leader());
  ASSERT_TRUE(election.check_epoch_agreement());
  // now mess up the scores to disagree
  ConnectionTracker& ct0 = election.electors[0]->peer_tracker;
  ConnectionReport& cr0 = *get_connection_reports(ct0);
  cr0.history[1] = 0.5;
  cr0.history[2] = 0.5;
  ct0.increase_version();
  ConnectionTracker& ct1 = election.electors[1]->peer_tracker;
  ConnectionReport& cr1 = *get_connection_reports(ct1);
  cr1.history[0] = 0.5;
  cr1.history[2] = 0.5;
  ct1.increase_version();
  ConnectionTracker& ct2 = election.electors[2]->peer_tracker;
  ConnectionReport& cr2 = *get_connection_reports(ct2);
  cr2.history[0] = 0.5;
  // Rank 2 also holds peer reports that zero out everyone's view of rank 2.
  map<int,ConnectionReport>&cp2 = *get_peer_reports(ct2);
  cp2[0].history[2] = 0;
  cp2[1].history[2] = 0;
  ct2.increase_version();
  election.ping_interval = 0; // disable pinging to update the scores
  ldout(g_ceph_context, 5) << "mangled the scores to be different" << dendl;
  // Re-run elections with the mangled, frozen scores; convergence must
  // still be reached.
  election.start_all();
  election.run_timesteps(50);
  ASSERT_TRUE(election.quorum_stable(30));
  ASSERT_TRUE(election.election_stable());
  ASSERT_TRUE(election.all_agree_on_leader());
  ASSERT_TRUE(election.check_epoch_agreement());
}

// After a full network partition lets every rank demote its peers' scores
// independently, reconnection must still produce a stable election.
void handles_disagreeing_connectivity(ElectionLogic::election_strategy strategy)
{
  Election election(5, strategy, 5); // ping every 5 timesteps so they start elections before settling scores!
  // start everybody up and run for a bit
  election.start_all();
  election.run_timesteps(20);
  ASSERT_TRUE(election.quorum_stable(5));
  ASSERT_TRUE(election.election_stable());
  ASSERT_TRUE(election.all_agree_on_leader());
  ASSERT_TRUE(election.check_epoch_agreement());
  // block all the connections
  for (int i = 0; i < 5; ++i) {
    for (int j = i+1; j < 5; ++j) {
      election.block_bidirectional_messages(i, j);
    }
  }
  // now start them electing, which will obviously fail
  election.start_all();
  election.run_timesteps(50); // let them all demote scores of their peers
  ASSERT_FALSE(election.quorum_stable(10));
  ASSERT_FALSE(election.election_stable());
  // now reconnect them, at which point they should start running an election before exchanging scores
  for (int i = 0; i < 5; ++i) {
    for (int j = i+1; j < 5; ++j) {
      election.unblock_bidirectional_messages(i, j);
    }
  }
  election.run_timesteps(100);
  // these will pass if the nodes managed to converge on scores, but I expect failure
  ASSERT_TRUE(election.quorum_stable(5));
  ASSERT_TRUE(election.election_stable());
  ASSERT_TRUE(election.all_agree_on_leader());
  ASSERT_TRUE(election.check_epoch_agreement());
}

// Removing ranks (CONNECTIVITY strategy only) must leave elections stable:
// first remove each rank of a 5-node set in turn, then repeatedly strip
// rank 0 from a 7-node set down to 3 nodes.
void handles_removing_ranks(ElectionLogic::election_strategy strategy)
{
  ceph_assert(strategy == ElectionLogic::CONNECTIVITY);
  for (int deletee = 0; deletee < 5; ++deletee) {
    Election election(5, strategy);
    election.start_all();
    int steps = election.run_timesteps(0);
    ldout(g_ceph_context, 10) << "ran in " << steps << " timesteps" << dendl;
    ASSERT_TRUE(election.election_stable());
    ASSERT_TRUE(election.quorum_stable(6)); // double the timer_steps we use
    ASSERT_TRUE(election.all_agree_on_leader());
    ASSERT_TRUE(election.check_epoch_agreement());
    election.remove_elector(deletee);
    ldout(g_ceph_context, 1) << "removed rank " << deletee << " from set" << dendl;
    election.start_all();
    steps = election.run_timesteps(0);
    ASSERT_TRUE(election.election_stable());
    ASSERT_TRUE(election.quorum_stable(6)); // double the timer_steps we use
    ASSERT_TRUE(election.all_agree_on_leader());
    ASSERT_TRUE(election.check_epoch_agreement());
  }
  {
    Election election(7, strategy);
    for (int i = 0; i < (7 - 3); ++i) {
      election.start_all();
      election.remove_elector(0);
      int steps = election.run_timesteps(0);
      ldout(g_ceph_context, 1) << "ran in " << steps << " timesteps" << dendl;
      ASSERT_TRUE(election.election_stable());
      ASSERT_TRUE(election.quorum_stable(6)); // double the timer_steps we use
      ASSERT_TRUE(election.all_agree_on_leader());
      ASSERT_TRUE(election.check_epoch_agreement());
    }
  }
}

// TODO: write a test with more complicated connectivity graphs and make sure
// they are stable with multiple disconnected ranks pinging peons

// TODO: Write a test that disallowing and disconnecting 0 is otherwise stable?

// TODO: figure out how to test for bumping election epochs with changing scores,
// a la what happened in run
// http://pulpito.ceph.com/gregf-2019-11-26_10:50:50-rados:monthrash-wip-elector-distro-basic-mira/

// Instantiate each test body once per election strategy.
#define test_classic(utest) TEST(classic, utest) { utest(ElectionLogic::CLASSIC); }
#define test_disallowed(utest) TEST(disallowed, utest) { utest(ElectionLogic::DISALLOW); }
#define test_connectivity(utest) TEST(connectivity, utest) { utest(ElectionLogic::CONNECTIVITY); }

// TODO: test for expected failures; gtest probably supports that?

test_classic(single_startup_election_completes)
test_classic(everybody_starts_completes)
test_classic(blocked_connection_continues_election)
test_classic(converges_after_flapping)

test_disallowed(single_startup_election_completes)
test_disallowed(everybody_starts_completes)
test_disallowed(blocked_connection_continues_election)
test_disallowed(disallowed_doesnt_win)
test_disallowed(converges_after_flapping)

/* skip single_startup_election_completes because we crash on init conditions.
   That's fine since as noted above it's not quite following the rules anyway.
*/
test_connectivity(everybody_starts_completes)
test_connectivity(blocked_connection_converges_election)
test_connectivity(disallowed_doesnt_win)
test_connectivity(converges_after_flapping)
test_connectivity(converges_while_flapping)
test_connectivity(netsplit_with_disallowed_tiebreaker_converges)
test_connectivity(handles_singly_connected_peon)
test_connectivity(handles_disagreeing_connectivity)
test_connectivity(handles_outdated_scoring)
test_connectivity(handles_removing_ranks)
33,536
32.403386
124
cc
null
ceph-main/src/test/mon/test_log_rss_usage.cc
#include <sys/types.h>
#include <cstdint>
#include <dirent.h>
#include <errno.h>
#include <vector>
#include <string>
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>

using namespace std;

// Scan /proc for a process whose program name (basename of the first
// argv[0]-style cmdline field) equals procName.
// Returns its pid, or -1 when no match is found or /proc can't be opened.
int getPidByName(string procName)
{
  int pid = -1;

  // Open the /proc directory
  DIR *dp = opendir("/proc");
  if (dp != NULL) {
    // Enumerate all entries in '/proc' until process is found
    struct dirent *dirp;
    while (pid < 0 && (dirp = readdir(dp))) {
      // Skip non-numeric entries; only /proc/<pid> directories matter
      int id = atoi(dirp->d_name);
      if (id > 0) {
        // Read contents of virtual /proc/{pid}/cmdline file
        string cmdPath = string("/proc/") + dirp->d_name + "/cmdline";
        ifstream cmdFile(cmdPath.c_str());
        string cmdLine;
        getline(cmdFile, cmdLine);
        if (!cmdLine.empty()) {
          // Keep first cmdline item which contains the program path
          // (cmdline arguments are NUL-separated)
          size_t pos = cmdLine.find('\0');
          if (pos != string::npos) {
            cmdLine = cmdLine.substr(0, pos);
          }
          // Get program name only, removing the path
          pos = cmdLine.rfind('/');
          if (pos != string::npos) {
            cmdLine = cmdLine.substr(pos + 1);
          }
          // Compare against requested process name
          if (procName == cmdLine) {
            pid = id;
          }
        }
      }
    }
    // BUGFIX: close the handle only when opendir() succeeded; the old code
    // called closedir(dp) unconditionally, and closedir(NULL) is undefined
    // behavior.
    closedir(dp);
  }

  return pid;
}

// Return the resident set size (RSS) of process `pid` in bytes, computed
// from the second field of /proc/<pid>/statm (resident pages) times the
// system page size. Returns 0 if the file can't be parsed.
uint64_t getRssUsage(string pid)
{
  int totalSize = 0; // first statm field (total program size); read and ignored
  int resSize = 0;   // second statm field: resident pages

  string statmPath = string("/proc/") + pid + "/statm";
  ifstream buffer(statmPath);
  buffer >> totalSize >> resSize;
  buffer.close();

  long page_size = sysconf(_SC_PAGE_SIZE);
  // Widen before multiplying so the product can't overflow int.
  uint64_t rss = static_cast<uint64_t>(resSize) * page_size;
  return rss;
}

// Usage: ceph_test_log_rss_usage <process name>
// Prints "<rss-bytes>:<pid>:" for the named process; exits ESRCH when the
// process isn't running, EINVAL on bad arguments.
int main(int argc, char* argv[])
{
  if (argc != 2) {
    cout << "Syntax: " << "ceph_test_log_rss_usage <process name>" << endl;
    exit(EINVAL);
  }
  uint64_t rss = 0;
  int pid = getPidByName(argv[1]);
  string rssUsage;

  // Use the pid to get RSS memory usage
  // and print it to stdout
  if (pid != -1) {
    rss = getRssUsage(to_string(pid));
  } else {
    cout << "Process " << argv[1] << " NOT FOUND!\n" << endl;
    exit(ESRCH);
  }
  rssUsage = to_string(rss) + ":" + to_string(pid) + ":";
  cout << rssUsage.c_str() << endl;

  return 0;
}
2,348
21.805825
70
cc
null
ceph-main/src/test/mon/test_mon_memory_target.cc
#include <algorithm> #include <cmath> #include <iostream> #include <string> #include <numeric> #include <regex> #include <system_error> #include <boost/process.hpp> #include <boost/tokenizer.hpp> namespace bp = boost::process; using namespace std; int main(int argc, char** argv) { cout << "Mon Memory Target Test" << endl; if (argc != 2) { cout << "Syntax: " << "ceph_test_mon_memory_target <mon-memory-target-bytes>" << endl; exit(EINVAL); } string target_directory("/var/log/ceph/"); unsigned long maxallowed = stoul(argv[1], nullptr, 10); regex reg(R"(cache_size:(\d*)\s)"); string grep_command("grep _set_new_cache_sizes " + target_directory + "ceph-mon.a.log"); bp::ipstream is; error_code ec; bp::child grep(grep_command, bp::std_out > is, ec); if (ec) { cout << "Error grepping logs! Exiting" << endl; cout << "Error: " << ec.value() << " " << ec.message() << endl; exit(ec.value()); } string line; vector<unsigned long> results; while (grep.running() && getline(is, line) && !line.empty()) { smatch match; if (regex_search(line, match, reg)) { results.push_back(stoul(match[1].str())); } } if (results.empty()) { cout << "Error: No grep results found!" << endl; exit(ENOENT); } auto maxe = *(max_element(results.begin(), results.end())); cout << "Results for mon_memory_target:" << endl; cout << "Max: " << maxe << endl; cout << "Min: " << *(min_element(results.begin(), results.end())) << endl; auto sum = accumulate(results.begin(), results.end(), static_cast<unsigned long long>(0)); auto mean = sum / results.size(); cout << "Mean average: " << mean << endl; vector<unsigned long> diff(results.size()); transform(results.begin(), results.end(), diff.begin(), [mean](unsigned long x) { return x - mean; }); auto sump = inner_product(diff.begin(), diff.end(), diff.begin(), 0.0); auto stdev = sqrt(sump / results.size()); cout << "Standard deviation: " << stdev << endl; if (maxe > maxallowed) { cout << "Error: Mon memory consumption exceeds maximum allowed!" 
<< endl; exit(ENOMEM); } grep.wait(); cout << "Completed successfully" << endl; return 0; }
2,276
27.4625
77
cc
null
ceph-main/src/test/mon/test_mon_rss_usage.cc
#include <algorithm>
#include <iostream>
#include <fstream>
#include <string>
#include <numeric>
#include <regex>
#include <cmath>
#include <system_error>
#include <vector>

using namespace std;

// Usage: ceph_test_mon_rss_usage <mon-memory-target-bytes>
// Parses /var/log/ceph/ceph-mon-rss-usage.log (lines of "<rss>:<pid>:"),
// reports RSS statistics, and exits ENOMEM when the peak RSS exceeds
// 125% of the given mon_memory_target.
int main(int argc, char **argv)
{
  cout << "Mon RSS Usage Test" << endl;
  if (argc != 2) {
    cout << "Syntax: "
         << "ceph_test_mon_rss_usage <mon-memory-target-bytes>" << endl;
    exit(EINVAL);
  }
  unsigned long maxallowed = stoul(argv[1], nullptr, 10);
  // Set max allowed RSS usage to be 125% of mon-memory-target
  maxallowed *= 1.25;

  string target_directory("/var/log/ceph/");
  string filePath = target_directory + "ceph-mon-rss-usage.log";
  ifstream buffer(filePath.c_str());
  string line;
  vector<unsigned long> results;
  // BUGFIX: read to EOF instead of stopping at the first blank line, and
  // guard stoul() so one malformed entry doesn't abort the whole test with
  // an uncaught exception.
  while (getline(buffer, line)) {
    if (line.empty()) {
      continue;
    }
    // Each line is "<rss-bytes>:<pid>:"; keep the first field.
    string rssUsage;
    size_t pos = line.find(':');
    if (pos != string::npos) {
      rssUsage = line.substr(0, pos);
    }
    if (!rssUsage.empty()) {
      try {
        results.push_back(stoul(rssUsage));
      } catch (const exception&) {
        cout << "Skipping malformed line: " << line << endl;
      }
    }
  }
  buffer.close();

  if (results.empty()) {
    cout << "Error: No grep results found!" << endl;
    exit(ENOENT);
  }

  auto maxe = *(max_element(results.begin(), results.end()));
  cout << "Stats for mon RSS Memory Usage:" << endl;
  cout << "Parsed " << results.size() << " entries." << endl;
  cout << "Max: " << maxe << endl;
  cout << "Min: " << *(min_element(results.begin(), results.end())) << endl;
  auto sum = accumulate(results.begin(), results.end(),
                        static_cast<unsigned long long>(0));
  auto mean = sum / results.size();
  cout << "Mean average: " << mean << endl;
  // BUGFIX: compute deviations in double. The old code subtracted the mean
  // from unsigned samples, so any sample below the mean wrapped around and
  // produced a bogus standard deviation.
  vector<double> diff(results.size());
  transform(results.begin(), results.end(), diff.begin(),
            [mean](unsigned long x) {
              return static_cast<double>(x) - static_cast<double>(mean);
            });
  auto sump = inner_product(diff.begin(), diff.end(), diff.begin(), 0.0);
  auto stdev = sqrt(sump / results.size());
  cout << fixed << "Standard deviation: " << stdev << endl;

  if (maxe > maxallowed) {
    cout << "Error: Mon RSS memory usage exceeds maximum allowed!" << endl;
    exit(ENOMEM);
  }

  cout << "Completed successfully" << endl;
  return 0;
}
2,132
28.219178
76
cc
null
ceph-main/src/test/mon/test_mon_types.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2012 Inktank
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */

#include <iostream>

#include "mon/mon_types.h"

#include "gtest/gtest.h"

// Every persistent monitor feature must also be a supported feature, and
// supported must decompose exactly into persistent plus the remainder.
TEST(mon_features, supported_v_persistent) {
  mon_feature_t supported = ceph::features::mon::get_supported();
  mon_feature_t persistent = ceph::features::mon::get_persistent();
  ASSERT_EQ(supported.intersection(persistent), persistent);
  ASSERT_TRUE(supported.contains_all(persistent));

  mon_feature_t diff = supported.diff(persistent);
  ASSERT_TRUE((persistent | diff) == supported);
  ASSERT_TRUE((supported & persistent) == persistent);
}

// Exercise mon_feature_t's bitwise operators (|, &, ^ and the compound
// assignments) plus empty() on disjoint synthetic feature bits.
TEST(mon_features, binary_ops) {
  mon_feature_t FEATURE_NONE(0ULL);
  mon_feature_t FEATURE_A((1ULL << 1));
  mon_feature_t FEATURE_B((1ULL << 2));
  mon_feature_t FEATURE_C((1ULL << 3));
  mon_feature_t FEATURE_D((1ULL << 4));

  mon_feature_t FEATURE_ALL(
      FEATURE_A | FEATURE_B |
      FEATURE_C | FEATURE_D
      );

  mon_feature_t foo(FEATURE_A|FEATURE_B);
  mon_feature_t bar(FEATURE_C|FEATURE_D);

  ASSERT_EQ(FEATURE_A|FEATURE_B, foo);
  ASSERT_EQ(FEATURE_C|FEATURE_D, bar);
  ASSERT_NE(FEATURE_C, foo);
  ASSERT_NE(FEATURE_B, bar);
  ASSERT_NE(FEATURE_NONE, foo);
  ASSERT_NE(FEATURE_NONE, bar);
  ASSERT_FALSE(foo.empty());
  ASSERT_FALSE(bar.empty());
  ASSERT_TRUE(FEATURE_NONE.empty());

  // foo and bar are disjoint: xor is the union, and is empty.
  ASSERT_EQ(FEATURE_ALL, (foo ^ bar));
  ASSERT_EQ(FEATURE_NONE, (foo & bar));

  mon_feature_t baz = foo;
  ASSERT_EQ(baz, foo);
  baz |= bar;
  ASSERT_EQ(FEATURE_ALL, baz);
  baz ^= foo;
  ASSERT_EQ(baz, bar);

  baz |= FEATURE_A;
  ASSERT_EQ(FEATURE_C, baz & FEATURE_C);
  ASSERT_EQ((FEATURE_A|FEATURE_D), baz & (FEATURE_A|FEATURE_D));
  ASSERT_EQ(FEATURE_B|FEATURE_C|FEATURE_D, (baz ^ foo));
}

// Exercise the named set operations: contains_all(), contains_any(),
// diff() and intersection().
TEST(mon_features, set_funcs) {
  mon_feature_t FEATURE_A((1ULL << 1));
  mon_feature_t FEATURE_B((1ULL << 2));
  mon_feature_t FEATURE_C((1ULL << 3));
  mon_feature_t FEATURE_D((1ULL << 4));

  mon_feature_t FEATURE_ALL(
      FEATURE_A | FEATURE_B |
      FEATURE_C | FEATURE_D
      );

  mon_feature_t foo(FEATURE_A|FEATURE_B);
  mon_feature_t bar(FEATURE_C|FEATURE_D);

  ASSERT_TRUE(FEATURE_ALL.contains_all(foo));
  ASSERT_TRUE(FEATURE_ALL.contains_all(bar));
  ASSERT_TRUE(FEATURE_ALL.contains_all(foo|bar));

  ASSERT_EQ(foo.diff(bar), foo);
  ASSERT_EQ(bar.diff(foo), bar);
  ASSERT_EQ(FEATURE_ALL.diff(foo), bar);
  ASSERT_EQ(FEATURE_ALL.diff(bar), foo);

  ASSERT_TRUE(foo.contains_any(FEATURE_A|bar));
  ASSERT_TRUE(bar.contains_any(FEATURE_ALL));
  ASSERT_TRUE(FEATURE_ALL.contains_any(foo));

  // FEATURE_X shares no bits with FEATURE_ALL.
  mon_feature_t FEATURE_X((1ULL << 10));
  ASSERT_FALSE(FEATURE_ALL.contains_any(FEATURE_X));
  ASSERT_FALSE(FEATURE_ALL.contains_all(FEATURE_X));
  ASSERT_EQ(FEATURE_ALL.diff(FEATURE_X), FEATURE_ALL);

  ASSERT_EQ(foo.intersection(FEATURE_ALL), foo);
  ASSERT_EQ(bar.intersection(FEATURE_ALL), bar);
}

// Exercise set_feature()/unset_feature() round-trips starting from the
// empty feature set.
TEST(mon_features, set_unset) {
  mon_feature_t FEATURE_A((1ULL << 1));
  mon_feature_t FEATURE_B((1ULL << 2));
  mon_feature_t FEATURE_C((1ULL << 3));

  mon_feature_t foo;
  ASSERT_EQ(ceph::features::mon::FEATURE_NONE, foo);

  foo.set_feature(FEATURE_A);
  ASSERT_EQ(FEATURE_A, foo);
  ASSERT_TRUE(foo.contains_all(FEATURE_A));

  foo.set_feature(FEATURE_B|FEATURE_C);
  ASSERT_EQ((FEATURE_A|FEATURE_B|FEATURE_C), foo);
  ASSERT_TRUE(foo.contains_all((FEATURE_A|FEATURE_B|FEATURE_C)));

  foo.unset_feature(FEATURE_A);
  ASSERT_EQ((FEATURE_B|FEATURE_C), foo);
  ASSERT_FALSE(foo.contains_any(FEATURE_A));
  ASSERT_TRUE(foo.contains_all((FEATURE_B|FEATURE_C)));

  foo.unset_feature(FEATURE_B|FEATURE_C);
  ASSERT_EQ(ceph::features::mon::FEATURE_NONE, foo);
  ASSERT_FALSE(foo.contains_any(FEATURE_A|FEATURE_B|FEATURE_C));
}
3,969
27.156028
70
cc
null
ceph-main/src/test/mon/test_mon_workloadgen.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include "acconfig.h" #ifdef HAVE_SYS_MOUNT_H #include <sys/mount.h> #endif #ifdef HAVE_SYS_PARAM_H #include <sys/param.h> #endif #ifdef HAVE_SYS_VFS_H #include <sys/vfs.h> #endif #include <iostream> #include <string> #include <map> #include <boost/scoped_ptr.hpp> #include <boost/random/mersenne_twister.hpp> #include <boost/random/uniform_int.hpp> #include "osd/osd_types.h" #include "osdc/Objecter.h" #include "mon/MonClient.h" #include "msg/Dispatcher.h" #include "msg/Messenger.h" #include "common/async/context_pool.h" #include "common/Timer.h" #include "common/ceph_argparse.h" #include "global/global_init.h" #include "global/signal_handler.h" #include "common/config.h" #include "common/debug.h" #include "common/errno.h" #include "common/ceph_mutex.h" #include "common/strtol.h" #include "common/LogEntry.h" #include "auth/KeyRing.h" #include "auth/AuthAuthorizeHandler.h" #include "include/uuid.h" #include "include/ceph_assert.h" #include "messages/MOSDBoot.h" #include "messages/MOSDAlive.h" #include "messages/MOSDPGRemove.h" #include "messages/MOSDMap.h" #include "messages/MPGStats.h" #include "messages/MLog.h" #include "messages/MOSDPGTemp.h" using namespace std; #define dout_context g_ceph_context #define dout_subsys ceph_subsys_ #undef dout_prefix #define dout_prefix _prefix(_dout, get_name()) static ostream& _prefix(std::ostream *_dout, const string &n) { return *_dout << " stub(" << n << ") "; } typedef boost::mt11213b rngen_t; typedef boost::scoped_ptr<Messenger> MessengerRef; typedef boost::scoped_ptr<Objecter> ObjecterRef; class TestStub : public 
Dispatcher { protected: MessengerRef messenger; ceph::async::io_context_pool poolctx; MonClient monc; ceph::mutex lock; ceph::condition_variable cond; SafeTimer timer; bool do_shutdown; double tick_seconds; struct C_Tick : public Context { TestStub *s; explicit C_Tick(TestStub *stub) : s(stub) {} void finish(int r) override { generic_dout(20) << "C_Tick::" << __func__ << dendl; if (r == -ECANCELED) { generic_dout(20) << "C_Tick::" << __func__ << " shutdown" << dendl; return; } s->tick(); } }; bool ms_dispatch(Message *m) override = 0; void ms_handle_connect(Connection *con) override = 0; void ms_handle_remote_reset(Connection *con) override = 0; virtual int _shutdown() = 0; // courtesy method to be implemented by the stubs at their // own discretion virtual void _tick() { } // different stubs may have different needs; if a stub needs // to tick, then it must call this function. void start_ticking(double t=1.0) { tick_seconds = t; if (t <= 0) { stop_ticking(); return; } dout(20) << __func__ << " adding tick timer" << dendl; timer.add_event_after(tick_seconds, new C_Tick(this)); } // If we have a function to start ticking that the stubs can // use at their own discretion, then we should also have a // function to disable said ticking to be used the same way. // Just in case. // For simplicity's sake, we don't cancel the tick right off // the bat; instead, we wait for the next tick to kick in and // disable itself. void stop_ticking() { dout(20) << __func__ << " disable tick" << dendl; tick_seconds = 0; } public: void tick() { std::cout << __func__ << std::endl; if (do_shutdown || (tick_seconds <= 0)) { std::cout << __func__ << " " << (do_shutdown ? 
"shutdown" : "stop ticking") << std::endl; return; } _tick(); timer.add_event_after(tick_seconds, new C_Tick(this)); } virtual const string get_name() = 0; virtual int init() = 0; virtual int shutdown() { std::lock_guard l{lock}; do_shutdown = true; int r = _shutdown(); if (r < 0) { dout(10) << __func__ << " error shutting down: " << cpp_strerror(-r) << dendl; return r; } monc.shutdown(); timer.shutdown(); messenger->shutdown(); poolctx.finish(); return 0; } virtual void print(ostream &out) { out << "stub(" << get_name() << ")"; } void wait() { if (messenger != NULL) messenger->wait(); } TestStub(CephContext *cct, string who) : Dispatcher(cct), monc(cct, poolctx), lock(ceph::make_mutex(who.append("::lock"))), timer(cct, lock), do_shutdown(false), tick_seconds(0.0) { } }; class ClientStub : public TestStub { ObjecterRef objecter; rngen_t gen; protected: bool ms_dispatch(Message *m) override { std::lock_guard l{lock}; dout(1) << "client::" << __func__ << " " << *m << dendl; switch (m->get_type()) { case CEPH_MSG_OSD_MAP: objecter->handle_osd_map((MOSDMap*)m); cond.notify_all(); break; } return true; } void ms_handle_connect(Connection *con) override { dout(1) << "client::" << __func__ << " " << con << dendl; std::lock_guard l{lock}; objecter->ms_handle_connect(con); } void ms_handle_remote_reset(Connection *con) override { dout(1) << "client::" << __func__ << " " << con << dendl; std::lock_guard l{lock}; objecter->ms_handle_remote_reset(con); } bool ms_handle_reset(Connection *con) override { dout(1) << "client::" << __func__ << dendl; std::lock_guard l{lock}; objecter->ms_handle_reset(con); return false; } bool ms_handle_refused(Connection *con) override { return false; } const string get_name() override { return "client"; } int _shutdown() override { if (objecter) { objecter->shutdown(); } return 0; } public: explicit ClientStub(CephContext *cct) : TestStub(cct, "client"), gen((int) time(NULL)) { } int init() override { int err; poolctx.start(1); err = 
monc.build_initial_monmap(); if (err < 0) { derr << "ClientStub::" << __func__ << " ERROR: build initial monmap: " << cpp_strerror(err) << dendl; return err; } messenger.reset(Messenger::create_client_messenger(cct, "stubclient")); ceph_assert(messenger.get() != NULL); messenger->set_default_policy( Messenger::Policy::lossy_client(CEPH_FEATURE_OSDREPLYMUX)); dout(10) << "ClientStub::" << __func__ << " starting messenger at " << messenger->get_myaddrs() << dendl; objecter.reset(new Objecter(cct, messenger.get(), &monc, poolctx)); ceph_assert(objecter.get() != NULL); objecter->set_balanced_budget(); monc.set_messenger(messenger.get()); objecter->init(); messenger->add_dispatcher_head(this); messenger->start(); monc.set_want_keys(CEPH_ENTITY_TYPE_MON|CEPH_ENTITY_TYPE_OSD); err = monc.init(); if (err < 0) { derr << "ClientStub::" << __func__ << " monc init error: " << cpp_strerror(-err) << dendl; return err; } err = monc.authenticate(); if (err < 0) { derr << "ClientStub::" << __func__ << " monc authenticate error: " << cpp_strerror(-err) << dendl; monc.shutdown(); return err; } monc.wait_auth_rotating(30.0); objecter->set_client_incarnation(0); objecter->start(); lock.lock(); timer.init(); monc.renew_subs(); lock.unlock(); objecter->wait_for_osd_map(); dout(10) << "ClientStub::" << __func__ << " done" << dendl; return 0; } }; class OSDStub : public TestStub { int whoami; OSDSuperblock sb; OSDMap osdmap; osd_stat_t osd_stat; map<pg_t,pg_stat_t> pgs; set<pg_t> pgs_changes; rngen_t gen; boost::uniform_int<> mon_osd_rng; utime_t last_boot_attempt; static const double STUB_BOOT_INTERVAL; public: enum { STUB_MON_OSD_ALIVE = 1, STUB_MON_OSD_PGTEMP = 2, STUB_MON_OSD_FAILURE = 3, STUB_MON_OSD_PGSTATS = 4, STUB_MON_LOG = 5, STUB_MON_OSD_FIRST = STUB_MON_OSD_ALIVE, STUB_MON_OSD_LAST = STUB_MON_LOG, }; struct C_CreatePGs : public Context { OSDStub *s; explicit C_CreatePGs(OSDStub *stub) : s(stub) {} void finish(int r) override { if (r == -ECANCELED) { generic_dout(20) << 
"C_CreatePGs::" << __func__ << " shutdown" << dendl; return; } generic_dout(20) << "C_CreatePGs::" << __func__ << dendl; s->auto_create_pgs(); } }; OSDStub(int _whoami, CephContext *cct) : TestStub(cct, "osd"), whoami(_whoami), gen(whoami), mon_osd_rng(STUB_MON_OSD_FIRST, STUB_MON_OSD_LAST) { dout(20) << __func__ << " auth supported: " << cct->_conf->auth_supported << dendl; stringstream ss; ss << "client-osd" << whoami; std::string public_msgr_type = cct->_conf->ms_public_type.empty() ? cct->_conf.get_val<std::string>("ms_type") : cct->_conf->ms_public_type; messenger.reset(Messenger::create(cct, public_msgr_type, entity_name_t::OSD(whoami), ss.str().c_str(), getpid())); Throttle throttler(g_ceph_context, "osd_client_bytes", g_conf()->osd_client_message_size_cap); messenger->set_default_policy( Messenger::Policy::stateless_server(0)); messenger->set_policy_throttlers(entity_name_t::TYPE_CLIENT, &throttler, NULL); messenger->set_policy(entity_name_t::TYPE_MON, Messenger::Policy::lossy_client( CEPH_FEATURE_UID | CEPH_FEATURE_PGID64 | CEPH_FEATURE_OSDENC)); messenger->set_policy(entity_name_t::TYPE_OSD, Messenger::Policy::stateless_server(0)); dout(10) << __func__ << " public addr " << g_conf()->public_addr << dendl; int err = messenger->bind(g_conf()->public_addr); if (err < 0) exit(1); if (monc.build_initial_monmap() < 0) exit(1); messenger->start(); monc.set_messenger(messenger.get()); } int init() override { dout(10) << __func__ << dendl; std::lock_guard l{lock}; dout(1) << __func__ << " fsid " << monc.monmap.fsid << " osd_fsid " << g_conf()->osd_uuid << dendl; dout(1) << __func__ << " name " << g_conf()->name << dendl; timer.init(); messenger->add_dispatcher_head(this); monc.set_want_keys(CEPH_ENTITY_TYPE_MON | CEPH_ENTITY_TYPE_OSD); int err = monc.init(); if (err < 0) { derr << __func__ << " monc init error: " << cpp_strerror(-err) << dendl; return err; } err = monc.authenticate(); if (err < 0) { derr << __func__ << " monc authenticate error: " << 
cpp_strerror(-err) << dendl; monc.shutdown(); return err; } ceph_assert(!monc.get_fsid().is_zero()); monc.wait_auth_rotating(30.0); dout(10) << __func__ << " creating osd superblock" << dendl; sb.cluster_fsid = monc.monmap.fsid; sb.osd_fsid.generate_random(); sb.whoami = whoami; sb.compat_features = CompatSet(); dout(20) << __func__ << " " << sb << dendl; dout(20) << __func__ << " osdmap " << osdmap << dendl; update_osd_stat(); start_ticking(); // give a chance to the mons to inform us of what PGs we should create timer.add_event_after(30.0, new C_CreatePGs(this)); return 0; } int _shutdown() override { return 0; } void boot() { dout(1) << __func__ << " boot?" << dendl; utime_t now = ceph_clock_now(); if ((last_boot_attempt > 0.0) && ((now - last_boot_attempt)) <= STUB_BOOT_INTERVAL) { dout(1) << __func__ << " backoff and try again later." << dendl; return; } dout(1) << __func__ << " boot!" << dendl; MOSDBoot *mboot = new MOSDBoot; mboot->sb = sb; last_boot_attempt = now; monc.send_mon_message(mboot); } void add_pg(pg_t pgid, epoch_t epoch, pg_t parent) { utime_t now = ceph_clock_now(); pg_stat_t s; s.created = epoch; s.last_epoch_clean = epoch; s.parent = parent; s.state |= PG_STATE_CLEAN | PG_STATE_ACTIVE; s.last_fresh = now; s.last_change = now; s.last_clean = now; s.last_active = now; s.last_unstale = now; pgs[pgid] = s; pgs_changes.insert(pgid); } void auto_create_pgs() { bool has_pgs = !pgs.empty(); dout(10) << __func__ << ": " << (has_pgs ? 
"has pgs; ignore" : "create pgs") << dendl; if (has_pgs) return; if (!osdmap.get_epoch()) { dout(1) << __func__ << " still don't have osdmap; reschedule pg creation" << dendl; timer.add_event_after(10.0, new C_CreatePGs(this)); return; } auto& osdmap_pools = osdmap.get_pools(); for (auto pit = osdmap_pools.begin(); pit != osdmap_pools.end(); ++pit) { const int64_t pool_id = pit->first; const pg_pool_t &pool = pit->second; int ruleno = pool.get_crush_rule(); if (!osdmap.crush->rule_exists(ruleno)) { dout(20) << __func__ << " no crush rule for pool id " << pool_id << " rule no " << ruleno << dendl; continue; } epoch_t pool_epoch = pool.get_last_change(); dout(20) << __func__ << " pool num pgs " << pool.get_pg_num() << " epoch " << pool_epoch << dendl; for (ps_t ps = 0; ps < pool.get_pg_num(); ++ps) { pg_t pgid(ps, pool_id); pg_t parent; dout(20) << __func__ << " pgid " << pgid << " parent " << parent << dendl; add_pg(pgid, pool_epoch, parent); } } } void update_osd_stat() { struct statfs stbuf; int ret = statfs(".", &stbuf); if (ret < 0) { ret = -errno; dout(0) << __func__ << " cannot statfs ." 
<< cpp_strerror(ret) << dendl; return; } osd_stat.statfs.total = stbuf.f_blocks * stbuf.f_bsize; osd_stat.statfs.available = stbuf.f_bavail * stbuf.f_bsize; osd_stat.statfs.internally_reserved = 0; } void send_pg_stats() { dout(10) << __func__ << " pgs " << pgs.size() << " osdmap " << osdmap << dendl; MPGStats *mstats = new MPGStats(monc.get_fsid(), osdmap.get_epoch()); mstats->set_tid(1); mstats->osd_stat = osd_stat; set<pg_t>::iterator it; for (it = pgs_changes.begin(); it != pgs_changes.end(); ++it) { pg_t pgid = (*it); if (pgs.count(pgid) == 0) { derr << __func__ << " pgid " << pgid << " not on our map" << dendl; ceph_abort_msg("pgid not on our map"); } pg_stat_t &s = pgs[pgid]; mstats->pg_stat[pgid] = s; JSONFormatter f(true); s.dump(&f); dout(20) << __func__ << " pg " << pgid << " stats:\n"; f.flush(*_dout); *_dout << dendl; } dout(10) << __func__ << " send " << *mstats << dendl; monc.send_mon_message(mstats); } void modify_pg(pg_t pgid) { dout(10) << __func__ << " pg " << pgid << dendl; ceph_assert(pgs.count(pgid) > 0); pg_stat_t &s = pgs[pgid]; utime_t now = ceph_clock_now(); if (now - s.last_change < 10.0) { dout(10) << __func__ << " pg " << pgid << " changed in the last 10s" << dendl; return; } s.state ^= PG_STATE_CLEAN; if (s.state & PG_STATE_CLEAN) s.last_clean = now; s.last_change = now; s.reported_seq++; pgs_changes.insert(pgid); } void modify_pgs() { dout(10) << __func__ << dendl; if (pgs.empty()) { dout(1) << __func__ << " no pgs available! don't attempt to modify." 
<< dendl; return; } boost::uniform_int<> pg_rng(0, pgs.size()-1); set<int> pgs_pos; int num_pgs = pg_rng(gen); while ((int)pgs_pos.size() < num_pgs) pgs_pos.insert(pg_rng(gen)); map<pg_t,pg_stat_t>::iterator it = pgs.begin(); set<int>::iterator pos_it = pgs_pos.begin(); int pgs_at = 0; while (pos_it != pgs_pos.end()) { int at = *pos_it; dout(20) << __func__ << " pg at pos " << at << dendl; while ((pgs_at != at) && (it != pgs.end())) { ++it; ++pgs_at; } ceph_assert(it != pgs.end()); dout(20) << __func__ << " pg at pos " << at << ": " << it->first << dendl; modify_pg(it->first); ++pos_it; } } void op_alive() { dout(10) << __func__ << dendl; if (!osdmap.exists(whoami)) { dout(0) << __func__ << " I'm not in the osdmap!!\n"; JSONFormatter f(true); osdmap.dump(&f); f.flush(*_dout); *_dout << dendl; } if (osdmap.get_epoch() == 0) { dout(1) << __func__ << " wait for osdmap" << dendl; return; } epoch_t up_thru = osdmap.get_up_thru(whoami); dout(10) << __func__ << "up_thru: " << osdmap.get_up_thru(whoami) << dendl; monc.send_mon_message(new MOSDAlive(osdmap.get_epoch(), up_thru)); } void op_pgtemp() { if (osdmap.get_epoch() == 0) { dout(1) << __func__ << " wait for osdmap" << dendl; return; } dout(10) << __func__ << dendl; MOSDPGTemp *m = new MOSDPGTemp(osdmap.get_epoch()); monc.send_mon_message(m); } void op_failure() { dout(10) << __func__ << dendl; } void op_pgstats() { dout(10) << __func__ << dendl; modify_pgs(); if (!pgs_changes.empty()) send_pg_stats(); monc.sub_want("osd_pg_creates", 0, CEPH_SUBSCRIBE_ONETIME); monc.renew_subs(); dout(20) << __func__ << " pg pools:\n"; JSONFormatter f(true); f.open_array_section("pools"); auto& osdmap_pools = osdmap.get_pools(); for (auto pit = osdmap_pools.begin(); pit != osdmap_pools.end(); ++pit) { const int64_t pool_id = pit->first; const pg_pool_t &pool = pit->second; f.open_object_section("pool"); f.dump_int("pool_id", pool_id); f.open_object_section("pool_dump"); pool.dump(&f); f.close_section(); f.close_section(); } 
f.close_section(); f.flush(*_dout); *_dout << dendl; } void op_log() { dout(10) << __func__ << dendl; MLog *m = new MLog(monc.get_fsid()); boost::uniform_int<> log_rng(1, 10); size_t num_entries = log_rng(gen); dout(10) << __func__ << " send " << num_entries << " log messages" << dendl; utime_t now = ceph_clock_now(); int seq = 0; for (; num_entries > 0; --num_entries) { LogEntry e; e.rank = messenger->get_myname(); e.addrs = messenger->get_myaddrs(); e.stamp = now; e.seq = seq++; e.prio = CLOG_DEBUG; e.msg = "OSDStub::op_log"; m->entries.push_back(e); } monc.send_mon_message(m); } void _tick() override { if (!osdmap.exists(whoami)) { std::cout << __func__ << " not in the cluster; boot!" << std::endl; boot(); return; } update_osd_stat(); boost::uniform_int<> op_rng(STUB_MON_OSD_FIRST, STUB_MON_OSD_LAST); int op = op_rng(gen); switch (op) { case STUB_MON_OSD_ALIVE: op_alive(); break; case STUB_MON_OSD_PGTEMP: op_pgtemp(); break; case STUB_MON_OSD_FAILURE: op_failure(); break; case STUB_MON_OSD_PGSTATS: op_pgstats(); break; case STUB_MON_LOG: op_log(); break; } } void handle_osd_map(MOSDMap *m) { dout(1) << __func__ << dendl; if (m->fsid != monc.get_fsid()) { dout(0) << __func__ << " message fsid " << m->fsid << " != " << monc.get_fsid() << dendl; dout(0) << __func__ << " " << m << " from " << m->get_source_inst() << dendl; dout(0) << monc.get_monmap() << dendl; } ceph_assert(m->fsid == monc.get_fsid()); epoch_t first = m->get_first(); epoch_t last = m->get_last(); dout(5) << __func__ << " epochs [" << first << "," << last << "]" << " current " << osdmap.get_epoch() << dendl; if (last <= osdmap.get_epoch()) { dout(5) << __func__ << " no new maps here; dropping" << dendl; m->put(); return; } if (first > osdmap.get_epoch() + 1) { dout(5) << __func__ << osdmap.get_epoch() + 1 << ".." 
<< (first-1) << dendl; if ((m->cluster_osdmap_trim_lower_bound < first && osdmap.get_epoch() == 0) || m->cluster_osdmap_trim_lower_bound <= osdmap.get_epoch()) { monc.sub_want("osdmap", osdmap.get_epoch()+1, CEPH_SUBSCRIBE_ONETIME); monc.renew_subs(); m->put(); return; } } epoch_t start_full = std::max(osdmap.get_epoch() + 1, first); if (m->maps.size() > 0) { map<epoch_t,bufferlist>::reverse_iterator rit; rit = m->maps.rbegin(); if (start_full <= rit->first) { start_full = rit->first; dout(5) << __func__ << " full epoch " << start_full << dendl; bufferlist &bl = rit->second; auto p = bl.cbegin(); osdmap.decode(p); } } for (epoch_t e = start_full; e <= last; e++) { map<epoch_t,bufferlist>::iterator it; it = m->incremental_maps.find(e); if (it == m->incremental_maps.end()) continue; dout(20) << __func__ << " incremental epoch " << e << " on full epoch " << start_full << dendl; OSDMap::Incremental inc; bufferlist &bl = it->second; auto p = bl.cbegin(); inc.decode(p); int err = osdmap.apply_incremental(inc); if (err < 0) { derr << "osd." << whoami << "::" << __func__ << "** ERROR: applying incremental: " << cpp_strerror(err) << dendl; ceph_abort_msg("error applying incremental"); } } dout(30) << __func__ << "\nosdmap:\n"; JSONFormatter f(true); osdmap.dump(&f); f.flush(*_dout); *_dout << dendl; if (osdmap.is_up(whoami) && osdmap.get_addrs(whoami) == messenger->get_myaddrs()) { dout(1) << __func__ << " got into the osdmap and we're up!" << dendl; } if (m->newest_map && m->newest_map > last) { dout(1) << __func__ << " they have more maps; requesting them!" 
<< dendl; monc.sub_want("osdmap", osdmap.get_epoch()+1, CEPH_SUBSCRIBE_ONETIME); monc.renew_subs(); } dout(10) << __func__ << " done" << dendl; m->put(); } bool ms_dispatch(Message *m) override { dout(1) << __func__ << " " << *m << dendl; switch (m->get_type()) { case CEPH_MSG_OSD_MAP: handle_osd_map((MOSDMap*)m); break; default: m->put(); break; } return true; } void ms_handle_connect(Connection *con) override { dout(1) << __func__ << " " << con << dendl; if (con->get_peer_type() == CEPH_ENTITY_TYPE_MON) { dout(10) << __func__ << " on mon" << dendl; } } void ms_handle_remote_reset(Connection *con) override {} bool ms_handle_reset(Connection *con) override { dout(1) << __func__ << dendl; return con->get_priv().get(); } bool ms_handle_refused(Connection *con) override { return false; } const string get_name() override { stringstream ss; ss << "osd." << whoami; return ss.str(); } }; double const OSDStub::STUB_BOOT_INTERVAL = 10.0; #undef dout_prefix #define dout_prefix *_dout << "main " const char *our_name = NULL; vector<TestStub*> stubs; ceph::mutex shutdown_lock = ceph::make_mutex("main::shutdown_lock"); ceph::condition_variable shutdown_cond; Context *shutdown_cb = NULL; SafeTimer *shutdown_timer = NULL; struct C_Shutdown : public Context { void finish(int r) override { generic_dout(10) << "main::shutdown time has ran out" << dendl; shutdown_cond.notify_all(); } }; void handle_test_signal(int signum) { if ((signum != SIGINT) && (signum != SIGTERM)) return; std::cerr << "*** Got signal " << sig_str(signum) << " ***" << std::endl; std::lock_guard l{shutdown_lock}; if (shutdown_timer) { shutdown_timer->cancel_all_events(); shutdown_cond.notify_all(); } } void usage() { ceph_assert(our_name != NULL); std::cout << "usage: " << our_name << " <--stub-id ID> [--stub-id ID...]" << std::endl; std::cout << "\n\ Global Options:\n\ -c FILE Read configuration from FILE\n\ --keyring FILE Read keyring from FILE\n\ --help This message\n\ \n\ Test-specific Options:\n\ --stub-id 
ID1..ID2 Interval of OSD ids for multiple stubs to mimic.\n\ --stub-id ID OSD id a stub will mimic to be\n\ (same as --stub-id ID..ID)\n\ " << std::endl; } int get_id_interval(int &first, int &last, string &str) { size_t found = str.find(".."); string first_str, last_str; if (found == string::npos) { first_str = last_str = str; } else { first_str = str.substr(0, found); last_str = str.substr(found+2); } string err; first = strict_strtol(first_str.c_str(), 10, &err); if ((first == 0) && (!err.empty())) { std::cerr << err << std::endl; return -1; } last = strict_strtol(last_str.c_str(), 10, &err); if ((last == 0) && (!err.empty())) { std::cerr << err << std::endl; return -1; } return 0; } int main(int argc, const char *argv[]) { our_name = argv[0]; auto args = argv_to_vec(argc, argv); auto cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_OSD, CODE_ENVIRONMENT_UTILITY, CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); common_init_finish(g_ceph_context); g_ceph_context->_conf.apply_changes(nullptr); set<int> stub_ids; double duration = 300.0; for (std::vector<const char*>::iterator i = args.begin(); i != args.end();) { string val; if (ceph_argparse_double_dash(args, i)) { break; } else if (ceph_argparse_witharg(args, i, &val, "--stub-id", (char*) NULL)) { int first = -1, last = -1; if (get_id_interval(first, last, val) < 0) { std::cerr << "** error parsing stub id '" << val << "'" << std::endl; exit(1); } for (; first <= last; ++first) stub_ids.insert(first); } else if (ceph_argparse_witharg(args, i, &val, "--duration", (char*) NULL)) { string err; duration = (double) strict_strtol(val.c_str(), 10, &err); if ((duration == 0) && (!err.empty())) { std::cerr << "** error parsing '--duration " << val << "': '" << err << std::endl; exit(1); } } else if (ceph_argparse_flag(args, i, "--help", (char*) NULL)) { usage(); exit(0); } else { std::cerr << "unknown argument '" << *i << "'" << std::endl; return 1; } } if (stub_ids.empty()) { std::cerr << "** error: must specify at least one 
'--stub-id <ID>'" << std::endl; usage(); return 1; } for (set<int>::iterator i = stub_ids.begin(); i != stub_ids.end(); ++i) { int whoami = *i; std::cout << __func__ << " starting stub." << whoami << std::endl; OSDStub *stub = new OSDStub(whoami, g_ceph_context); int err = stub->init(); if (err < 0) { std::cerr << "** osd stub error: " << cpp_strerror(-err) << std::endl; return 1; } stubs.push_back(stub); } std::cout << __func__ << " starting client stub" << std::endl; ClientStub *cstub = new ClientStub(g_ceph_context); int err = cstub->init(); if (err < 0) { std::cerr << "** client stub error: " << cpp_strerror(-err) << std::endl; return 1; } stubs.push_back(cstub); init_async_signal_handler(); register_async_signal_handler_oneshot(SIGINT, handle_test_signal); register_async_signal_handler_oneshot(SIGTERM, handle_test_signal); { unique_lock locker{shutdown_lock}; shutdown_timer = new SafeTimer(g_ceph_context, shutdown_lock); shutdown_timer->init(); if (duration != 0) { std::cout << __func__ << " run test for " << duration << " seconds" << std::endl; shutdown_timer->add_event_after((double) duration, new C_Shutdown); } shutdown_cond.wait(locker); shutdown_timer->shutdown(); delete shutdown_timer; shutdown_timer = NULL; } unregister_async_signal_handler(SIGINT, handle_test_signal); unregister_async_signal_handler(SIGTERM, handle_test_signal); std::cout << __func__ << " waiting for stubs to finish" << std::endl; vector<TestStub*>::iterator it; int i; for (i = 0, it = stubs.begin(); it != stubs.end(); ++it, ++i) { if (*it != NULL) { (*it)->shutdown(); (*it)->wait(); std::cout << __func__ << " finished " << (*it)->get_name() << std::endl; delete (*it); (*it) = NULL; } } return 0; }
28,304
25.330233
144
cc
null
ceph-main/src/test/msgr/perf_msgr_client.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2015 Haomai Wang * * Author: Haomai Wang <haomaiwang@gmail.com> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <stdlib.h> #include <stdint.h> #include <string> #include <unistd.h> #include <iostream> using namespace std; #include "common/ceph_argparse.h" #include "common/debug.h" #include "common/Cycles.h" #include "global/global_init.h" #include "msg/Messenger.h" #include "messages/MOSDOp.h" #include "auth/DummyAuth.h" #include <atomic> class MessengerClient { class ClientThread; class ClientDispatcher : public Dispatcher { uint64_t think_time; ClientThread *thread; public: ClientDispatcher(uint64_t delay, ClientThread *t): Dispatcher(g_ceph_context), think_time(delay), thread(t) {} bool ms_can_fast_dispatch_any() const override { return true; } bool ms_can_fast_dispatch(const Message *m) const override { switch (m->get_type()) { case CEPH_MSG_OSD_OPREPLY: return true; default: return false; } } void ms_handle_fast_connect(Connection *con) override {} void ms_handle_fast_accept(Connection *con) override {} bool ms_dispatch(Message *m) override { return true; } void ms_fast_dispatch(Message *m) override; bool ms_handle_reset(Connection *con) override { return true; } void ms_handle_remote_reset(Connection *con) override {} bool ms_handle_refused(Connection *con) override { return false; } int ms_handle_authentication(Connection *con) override { return 1; } }; class ClientThread : public Thread { Messenger *msgr; int concurrent; ConnectionRef conn; std::atomic<unsigned> client_inc = { 0 }; object_t oid; object_locator_t oloc; pg_t pgid; int msg_len; bufferlist data; int ops; ClientDispatcher dispatcher; public: ceph::mutex lock = 
ceph::make_mutex("MessengerBenchmark::ClientThread::lock"); ceph::condition_variable cond; uint64_t inflight; ClientThread(Messenger *m, int c, ConnectionRef con, int len, int ops, int think_time_us): msgr(m), concurrent(c), conn(con), oid("object-name"), oloc(1, 1), msg_len(len), ops(ops), dispatcher(think_time_us, this), inflight(0) { m->add_dispatcher_head(&dispatcher); bufferptr ptr(msg_len); memset(ptr.c_str(), 0, msg_len); data.append(ptr); } void *entry() override { std::unique_lock locker{lock}; for (int i = 0; i < ops; ++i) { if (inflight > uint64_t(concurrent)) { cond.wait(locker); } hobject_t hobj(oid, oloc.key, CEPH_NOSNAP, pgid.ps(), pgid.pool(), oloc.nspace); spg_t spgid(pgid); MOSDOp *m = new MOSDOp(client_inc, 0, hobj, spgid, 0, 0, 0); bufferlist msg_data(data); m->write(0, msg_len, msg_data); inflight++; conn->send_message(m); //cerr << __func__ << " send m=" << m << std::endl; } locker.unlock(); msgr->shutdown(); return 0; } }; string type; string serveraddr; int think_time_us; vector<Messenger*> msgrs; vector<ClientThread*> clients; DummyAuthClientServer dummy_auth; public: MessengerClient(const string &t, const string &addr, int delay): type(t), serveraddr(addr), think_time_us(delay), dummy_auth(g_ceph_context) { } ~MessengerClient() { for (uint64_t i = 0; i < clients.size(); ++i) delete clients[i]; for (uint64_t i = 0; i < msgrs.size(); ++i) { msgrs[i]->shutdown(); msgrs[i]->wait(); } } void ready(int c, int jobs, int ops, int msg_len) { entity_addr_t addr; addr.parse(serveraddr.c_str()); addr.set_nonce(0); dummy_auth.auth_registry.refresh_config(); for (int i = 0; i < jobs; ++i) { Messenger *msgr = Messenger::create(g_ceph_context, type, entity_name_t::CLIENT(0), "client", getpid()+i); msgr->set_default_policy(Messenger::Policy::lossless_client(0)); msgr->set_auth_client(&dummy_auth); msgr->start(); entity_addrvec_t addrs(addr); ConnectionRef conn = msgr->connect_to_osd(addrs); ClientThread *t = new ClientThread(msgr, c, conn, msg_len, ops, 
think_time_us); msgrs.push_back(msgr); clients.push_back(t); } usleep(1000*1000); } void start() { for (uint64_t i = 0; i < clients.size(); ++i) clients[i]->create("client"); for (uint64_t i = 0; i < msgrs.size(); ++i) msgrs[i]->wait(); } }; void MessengerClient::ClientDispatcher::ms_fast_dispatch(Message *m) { usleep(think_time); m->put(); std::lock_guard l{thread->lock}; thread->inflight--; thread->cond.notify_all(); } void usage(const string &name) { cout << "Usage: " << name << " [server ip:port] [numjobs] [concurrency] [ios] [thinktime us] [msg length]" << std::endl; cout << " [server ip:port]: connect to the ip:port pair" << std::endl; cout << " [numjobs]: how much client threads spawned and do benchmark" << std::endl; cout << " [concurrency]: the max inflight messages(like iodepth in fio)" << std::endl; cout << " [ios]: how much messages sent for each client" << std::endl; cout << " [thinktime]: sleep time when do fast dispatching(match client logic)" << std::endl; cout << " [msg length]: message data bytes" << std::endl; } int main(int argc, char **argv) { auto args = argv_to_vec(argc, argv); auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); common_init_finish(g_ceph_context); g_ceph_context->_conf.apply_changes(nullptr); if (args.size() < 6) { usage(argv[0]); return 1; } int numjobs = atoi(args[1]); int concurrent = atoi(args[2]); int ios = atoi(args[3]); int think_time = atoi(args[4]); int len = atoi(args[5]); std::string public_msgr_type = g_ceph_context->_conf->ms_public_type.empty() ? 
g_ceph_context->_conf.get_val<std::string>("ms_type") : g_ceph_context->_conf->ms_public_type; cout << " using ms-public-type " << public_msgr_type << std::endl; cout << " server ip:port " << args[0] << std::endl; cout << " numjobs " << numjobs << std::endl; cout << " concurrency " << concurrent << std::endl; cout << " ios " << ios << std::endl; cout << " thinktime(us) " << think_time << std::endl; cout << " message data bytes " << len << std::endl; MessengerClient client(public_msgr_type, args[0], think_time); client.ready(concurrent, numjobs, ios, len); Cycles::init(); uint64_t start = Cycles::rdtsc(); client.start(); uint64_t stop = Cycles::rdtsc(); cout << " Total op " << (ios * numjobs) << " run time " << Cycles::to_microseconds(stop - start) << "us." << std::endl; return 0; }
7,103
31.290909
175
cc
null
ceph-main/src/test/msgr/perf_msgr_server.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2015 Haomai Wang * * Author: Haomai Wang <haomaiwang@gmail.com> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <stdlib.h> #include <stdint.h> #include <string> #include <unistd.h> #include <iostream> using namespace std; #include "common/ceph_argparse.h" #include "common/debug.h" #include "common/WorkQueue.h" #include "global/global_init.h" #include "msg/Messenger.h" #include "messages/MOSDOp.h" #include "messages/MOSDOpReply.h" #include "auth/DummyAuth.h" class ServerDispatcher : public Dispatcher { uint64_t think_time; ThreadPool op_tp; class OpWQ : public ThreadPool::WorkQueue<Message> { list<Message*> messages; public: OpWQ(ceph::timespan timeout, ceph::timespan suicide_timeout, ThreadPool *tp) : ThreadPool::WorkQueue<Message>("ServerDispatcher::OpWQ", timeout, suicide_timeout, tp) {} bool _enqueue(Message *m) override { messages.push_back(m); return true; } void _dequeue(Message *m) override { ceph_abort(); } bool _empty() override { return messages.empty(); } Message *_dequeue() override { if (messages.empty()) return NULL; Message *m = messages.front(); messages.pop_front(); return m; } void _process(Message *m, ThreadPool::TPHandle &handle) override { MOSDOp *osd_op = static_cast<MOSDOp*>(m); MOSDOpReply *reply = new MOSDOpReply(osd_op, 0, 0, 0, false); m->get_connection()->send_message(reply); m->put(); } void _process_finish(Message *m) override { } void _clear() override { ceph_assert(messages.empty()); } } op_wq; public: ServerDispatcher(int threads, uint64_t delay): Dispatcher(g_ceph_context), think_time(delay), op_tp(g_ceph_context, "ServerDispatcher::op_tp", "tp_serv_disp", threads, "serverdispatcher_op_threads"), 
op_wq(ceph::make_timespan(30), ceph::make_timespan(30), &op_tp) { op_tp.start(); } ~ServerDispatcher() override { op_tp.stop(); } bool ms_can_fast_dispatch_any() const override { return true; } bool ms_can_fast_dispatch(const Message *m) const override { switch (m->get_type()) { case CEPH_MSG_OSD_OP: return true; default: return false; } } void ms_handle_fast_connect(Connection *con) override {} void ms_handle_fast_accept(Connection *con) override {} bool ms_dispatch(Message *m) override { return true; } bool ms_handle_reset(Connection *con) override { return true; } void ms_handle_remote_reset(Connection *con) override {} bool ms_handle_refused(Connection *con) override { return false; } void ms_fast_dispatch(Message *m) override { usleep(think_time); //cerr << __func__ << " reply message=" << m << std::endl; op_wq.queue(m); } int ms_handle_authentication(Connection *con) override { return 1; } }; class MessengerServer { Messenger *msgr; string type; string bindaddr; ServerDispatcher dispatcher; DummyAuthClientServer dummy_auth; public: MessengerServer(const string &t, const string &addr, int threads, int delay): msgr(NULL), type(t), bindaddr(addr), dispatcher(threads, delay), dummy_auth(g_ceph_context) { msgr = Messenger::create(g_ceph_context, type, entity_name_t::OSD(0), "server", 0); msgr->set_default_policy(Messenger::Policy::stateless_server(0)); dummy_auth.auth_registry.refresh_config(); msgr->set_auth_server(&dummy_auth); } ~MessengerServer() { msgr->shutdown(); msgr->wait(); } void start() { entity_addr_t addr; addr.parse(bindaddr.c_str()); msgr->bind(addr); msgr->add_dispatcher_head(&dispatcher); msgr->start(); msgr->wait(); } }; void usage(const string &name) { cerr << "Usage: " << name << " [bind ip:port] [server worker threads] [thinktime us]" << std::endl; cerr << " [bind ip:port]: The ip:port pair to bind, client need to specify this pair to connect" << std::endl; cerr << " [server worker threads]: threads will process incoming messages and 
reply(matching pg threads)" << std::endl; cerr << " [thinktime]: sleep time when do dispatching(match fast dispatch logic in OSD.cc)" << std::endl; } int main(int argc, char **argv) { auto args = argv_to_vec(argc, argv); auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); common_init_finish(g_ceph_context); g_ceph_context->_conf.apply_changes(nullptr); if (args.size() < 3) { usage(argv[0]); return 1; } int worker_threads = atoi(args[1]); int think_time = atoi(args[2]); std::string public_msgr_type = g_ceph_context->_conf->ms_public_type.empty() ? g_ceph_context->_conf.get_val<std::string>("ms_type") : g_ceph_context->_conf->ms_public_type; cerr << " This tool won't handle connection error alike things, " << std::endl; cerr << "please ensure the proper network environment to test." << std::endl; cerr << " Or ctrl+c when meeting error and restart tests" << std::endl; cerr << " using ms-public-type " << public_msgr_type << std::endl; cerr << " bind ip:port " << args[0] << std::endl; cerr << " worker threads " << worker_threads << std::endl; cerr << " thinktime(us) " << think_time << std::endl; MessengerServer server(public_msgr_type, args[0], worker_threads, think_time); server.start(); return 0; }
5,735
31.40678
175
cc
null
ceph-main/src/test/msgr/test_async_driver.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2014 UnitedStack <haomai@unitedstack.com> * * Author: Haomai Wang <haomaiwang@gmail.com> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #ifdef __APPLE__ #include <AvailabilityMacros.h> #endif #include <fcntl.h> #include <sys/socket.h> #include <pthread.h> #include <stdint.h> #include <arpa/inet.h> #include "include/Context.h" #include "common/ceph_mutex.h" #include "common/Cond.h" #include "global/global_init.h" #include "common/ceph_argparse.h" #include "msg/async/Event.h" #include <atomic> // We use epoll, kqueue, evport, select in descending order by performance. #if defined(__linux__) #define HAVE_EPOLL 1 #endif #if (defined(__APPLE__) && defined(MAC_OS_X_VERSION_10_6)) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined (__NetBSD__) #define HAVE_KQUEUE 1 #endif #ifdef __sun #include <sys/feature_tests.h> #ifdef _DTRACE_VERSION #define HAVE_EVPORT 1 #endif #endif #ifdef HAVE_EPOLL #include "msg/async/EventEpoll.h" #endif #ifdef HAVE_KQUEUE #include "msg/async/EventKqueue.h" #endif #include "msg/async/EventSelect.h" #include <gtest/gtest.h> using namespace std; class EventDriverTest : public ::testing::TestWithParam<const char*> { public: EventDriver *driver; EventDriverTest(): driver(0) {} void SetUp() override { cerr << __func__ << " start set up " << GetParam() << std::endl; #ifdef HAVE_EPOLL if (strcmp(GetParam(), "epoll")) driver = new EpollDriver(g_ceph_context); #endif #ifdef HAVE_KQUEUE if (strcmp(GetParam(), "kqueue")) driver = new KqueueDriver(g_ceph_context); #endif if (strcmp(GetParam(), "select")) driver = new SelectDriver(g_ceph_context); driver->init(NULL, 100); } void TearDown() override { delete driver; } }; int 
set_nonblock(int sd) { int flags; /* Set the socket nonblocking. * Note that fcntl(2) for F_GETFL and F_SETFL can't be * interrupted by a signal. */ if ((flags = fcntl(sd, F_GETFL)) < 0 ) { return -1; } if (fcntl(sd, F_SETFL, flags | O_NONBLOCK) < 0) { return -1; } return 0; } TEST_P(EventDriverTest, PipeTest) { int fds[2]; vector<FiredFileEvent> fired_events; int r; struct timeval tv; tv.tv_sec = 0; tv.tv_usec = 1; r = pipe(fds); ASSERT_EQ(r, 0); r = driver->add_event(fds[0], EVENT_NONE, EVENT_READABLE); ASSERT_EQ(r, 0); r = driver->event_wait(fired_events, &tv); ASSERT_EQ(r, 0); char c = 'A'; r = write(fds[1], &c, sizeof(c)); ASSERT_EQ(r, 1); r = driver->event_wait(fired_events, &tv); ASSERT_EQ(r, 1); ASSERT_EQ(fired_events[0].fd, fds[0]); fired_events.clear(); r = write(fds[1], &c, sizeof(c)); ASSERT_EQ(r, 1); r = driver->event_wait(fired_events, &tv); ASSERT_EQ(r, 1); ASSERT_EQ(fired_events[0].fd, fds[0]); fired_events.clear(); driver->del_event(fds[0], EVENT_READABLE, EVENT_READABLE); r = write(fds[1], &c, sizeof(c)); ASSERT_EQ(r, 1); r = driver->event_wait(fired_events, &tv); ASSERT_EQ(r, 0); } void* echoclient(void *arg) { intptr_t port = (intptr_t)arg; struct sockaddr_in sa; memset(&sa, 0, sizeof(sa)); sa.sin_family = AF_INET; sa.sin_port = htons(port); char addr[] = "127.0.0.1"; int r = inet_pton(AF_INET, addr, &sa.sin_addr); ceph_assert(r == 1); int connect_sd = ::socket(AF_INET, SOCK_STREAM, 0); if (connect_sd >= 0) { r = connect(connect_sd, (struct sockaddr*)&sa, sizeof(sa)); ceph_assert(r == 0); int t = 0; do { char c[] = "banner"; r = write(connect_sd, c, sizeof(c)); char d[100]; r = read(connect_sd, d, sizeof(d)); if (r == 0) break; if (t++ == 30) break; } while (1); ::close(connect_sd); } return 0; } TEST_P(EventDriverTest, NetworkSocketTest) { int listen_sd = ::socket(AF_INET, SOCK_STREAM, 0); ASSERT_TRUE(listen_sd > 0); int on = 1; int r = ::setsockopt(listen_sd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)); ASSERT_EQ(r, 0); r = 
set_nonblock(listen_sd); ASSERT_EQ(r, 0); struct sockaddr_in sa; long port = 0; for (port = 38788; port < 40000; port++) { memset(&sa,0,sizeof(sa)); sa.sin_family = AF_INET; sa.sin_port = htons(port); sa.sin_addr.s_addr = htonl(INADDR_ANY); r = ::bind(listen_sd, (struct sockaddr *)&sa, sizeof(sa)); if (r == 0) { break; } } ASSERT_EQ(r, 0); r = listen(listen_sd, 511); ASSERT_EQ(r, 0); vector<FiredFileEvent> fired_events; struct timeval tv; tv.tv_sec = 0; tv.tv_usec = 1; r = driver->add_event(listen_sd, EVENT_NONE, EVENT_READABLE); ASSERT_EQ(r, 0); r = driver->event_wait(fired_events, &tv); ASSERT_EQ(r, 0); fired_events.clear(); pthread_t thread1; r = pthread_create(&thread1, NULL, echoclient, (void*)(intptr_t)port); ASSERT_EQ(r, 0); tv.tv_sec = 5; tv.tv_usec = 0; r = driver->event_wait(fired_events, &tv); ASSERT_EQ(r, 1); ASSERT_EQ(fired_events[0].fd, listen_sd); fired_events.clear(); int client_sd = ::accept(listen_sd, NULL, NULL); ASSERT_TRUE(client_sd > 0); r = driver->add_event(client_sd, EVENT_NONE, EVENT_READABLE); ASSERT_EQ(r, 0); do { fired_events.clear(); tv.tv_sec = 5; tv.tv_usec = 0; r = driver->event_wait(fired_events, &tv); ASSERT_EQ(1, r); ASSERT_EQ(EVENT_READABLE, fired_events[0].mask); fired_events.clear(); char data[100]; r = ::read(client_sd, data, sizeof(data)); if (r == 0) break; ASSERT_GT(r, 0); r = driver->add_event(client_sd, EVENT_READABLE, EVENT_WRITABLE); ASSERT_EQ(0, r); r = driver->event_wait(fired_events, &tv); ASSERT_EQ(1, r); ASSERT_EQ(fired_events[0].mask, EVENT_WRITABLE); r = write(client_sd, data, strlen(data)); ASSERT_EQ((int)strlen(data), r); driver->del_event(client_sd, EVENT_READABLE|EVENT_WRITABLE, EVENT_WRITABLE); } while (1); ::close(client_sd); ::close(listen_sd); } class FakeEvent : public EventCallback { public: void do_request(uint64_t fd_or_id) override {} }; TEST(EventCenterTest, FileEventExpansion) { vector<int> sds; EventCenter center(g_ceph_context); center.init(100, 0, "posix"); center.set_owner(); EventCallbackRef 
e(new FakeEvent()); for (int i = 0; i < 300; i++) { int sd = ::socket(AF_INET, SOCK_STREAM, 0); center.create_file_event(sd, EVENT_READABLE, e); sds.push_back(sd); } for (vector<int>::iterator it = sds.begin(); it != sds.end(); ++it) center.delete_file_event(*it, EVENT_READABLE); } class Worker : public Thread { CephContext *cct; bool done; public: EventCenter center; explicit Worker(CephContext *c, int idx): cct(c), done(false), center(c) { center.init(100, idx, "posix"); } void stop() { done = true; center.wakeup(); } void* entry() override { center.set_owner(); while (!done) center.process_events(1000000); return 0; } }; class CountEvent: public EventCallback { std::atomic<unsigned> *count; ceph::mutex *lock; ceph::condition_variable *cond; public: CountEvent(std::atomic<unsigned> *atomic, ceph::mutex *l, ceph::condition_variable *c) : count(atomic), lock(l), cond(c) {} void do_request(uint64_t id) override { std::scoped_lock l{*lock}; (*count)--; cond->notify_all(); } }; TEST(EventCenterTest, DispatchTest) { Worker worker1(g_ceph_context, 1), worker2(g_ceph_context, 2); std::atomic<unsigned> count = { 0 }; ceph::mutex lock = ceph::make_mutex("DispatchTest::lock"); ceph::condition_variable cond; worker1.create("worker_1"); worker2.create("worker_2"); for (int i = 0; i < 10000; ++i) { count++; worker1.center.dispatch_event_external(EventCallbackRef(new CountEvent(&count, &lock, &cond))); count++; worker2.center.dispatch_event_external(EventCallbackRef(new CountEvent(&count, &lock, &cond))); std::unique_lock l{lock}; cond.wait(l, [&] { return count == 0; }); } worker1.stop(); worker2.stop(); worker1.join(); worker2.join(); } INSTANTIATE_TEST_SUITE_P( AsyncMessenger, EventDriverTest, ::testing::Values( #ifdef HAVE_EPOLL "epoll", #endif #ifdef HAVE_KQUEUE "kqueue", #endif "select" ) ); /* * Local Variables: * compile-command: "cd ../.. ; make ceph_test_async_driver && * ./ceph_test_async_driver * * End: */
8,636
23.329577
130
cc
null
ceph-main/src/test/msgr/test_async_networkstack.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2016 XSky <haomai@xsky.com> * * Author: Haomai Wang <haomaiwang@gmail.com> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <algorithm> #include <atomic> #include <iostream> #include <list> #include <random> #include <string> #include <set> #include <vector> #include <gtest/gtest.h> #include "acconfig.h" #include "common/config_obs.h" #include "include/Context.h" #include "msg/async/Event.h" #include "msg/async/Stack.h" using namespace std; class NoopConfigObserver : public md_config_obs_t { std::list<std::string> options; const char **ptrs = 0; public: NoopConfigObserver(std::list<std::string> l) : options(l) { ptrs = new const char*[options.size() + 1]; unsigned j = 0; for (auto& i : options) { ptrs[j++] = i.c_str(); } ptrs[j] = 0; } ~NoopConfigObserver() { delete[] ptrs; } const char** get_tracked_conf_keys() const override { return ptrs; } void handle_conf_change(const ConfigProxy& conf, const std::set <std::string> &changed) override { } }; class NetworkWorkerTest : public ::testing::TestWithParam<const char*> { public: std::shared_ptr<NetworkStack> stack; string addr, port_addr; NoopConfigObserver fake_obs = {{"ms_type", "ms_dpdk_coremask", "ms_dpdk_host_ipv4_addr", "ms_dpdk_gateway_ipv4_addr", "ms_dpdk_netmask_ipv4_addr"}}; NetworkWorkerTest() {} void SetUp() override { cerr << __func__ << " start set up " << GetParam() << std::endl; if (strncmp(GetParam(), "dpdk", 4)) { g_ceph_context->_conf.set_val("ms_type", "async+posix"); addr = "127.0.0.1:15000"; port_addr = "127.0.0.1:15001"; } else { g_ceph_context->_conf.set_val_or_die("ms_dpdk_debug_allow_loopback", "true"); g_ceph_context->_conf.set_val_or_die("ms_async_op_threads", 
"2"); string ipv4_addr = g_ceph_context->_conf.get_val<std::string>("ms_dpdk_host_ipv4_addr"); addr = ipv4_addr + std::string(":15000"); port_addr = ipv4_addr + std::string(":15001"); } stack = NetworkStack::create(g_ceph_context, GetParam()); stack->start(); } void TearDown() override { stack->stop(); } string get_addr() const { return addr; } string get_ip_different_port() const { return port_addr; } string get_different_ip() const { return "10.0.123.100:4323"; } EventCenter *get_center(unsigned i) { return &stack->get_worker(i)->center; } Worker *get_worker(unsigned i) { return stack->get_worker(i); } template<typename func> class C_dispatch : public EventCallback { Worker *worker; func f; std::atomic_bool done; public: C_dispatch(Worker *w, func &&_f): worker(w), f(std::move(_f)), done(false) {} void do_request(uint64_t id) override { f(worker); done = true; } void wait() { int us = 1000 * 1000 * 1000; while (!done) { ASSERT_TRUE(us > 0); usleep(100); us -= 100; } } }; template<typename func> void exec_events(func &&f) { std::vector<C_dispatch<func>*> dis; for (unsigned i = 0; i < stack->get_num_worker(); ++i) { Worker *w = stack->get_worker(i); C_dispatch<func> *e = new C_dispatch<func>(w, std::move(f)); stack->get_worker(i)->center.dispatch_event_external(e); dis.push_back(e); } for (auto &&e : dis) { e->wait(); delete e; } } }; class C_poll : public EventCallback { EventCenter *center; std::atomic<bool> woken; static const int sleepus = 500; public: explicit C_poll(EventCenter *c): center(c), woken(false) {} void do_request(uint64_t r) override { woken = true; } bool poll(int milliseconds) { auto start = ceph::coarse_real_clock::now(); while (!woken) { center->process_events(sleepus); usleep(sleepus); auto r = std::chrono::duration_cast<std::chrono::milliseconds>( ceph::coarse_real_clock::now() - start); if (r >= std::chrono::milliseconds(milliseconds)) break; } return woken; } void reset() { woken = false; } }; TEST_P(NetworkWorkerTest, SimpleTest) { 
entity_addr_t bind_addr; ASSERT_TRUE(bind_addr.parse(get_addr().c_str())); std::atomic_bool accepted(false); std::atomic_bool *accepted_p = &accepted; exec_events([this, accepted_p, bind_addr](Worker *worker) mutable { entity_addr_t cli_addr; SocketOptions options; ServerSocket bind_socket; EventCenter *center = &worker->center; ssize_t r = 0; if (stack->support_local_listen_table() || worker->id == 0) r = worker->listen(bind_addr, 0, options, &bind_socket); ASSERT_EQ(0, r); ConnectedSocket cli_socket, srv_socket; if (worker->id == 0) { r = worker->connect(bind_addr, options, &cli_socket); ASSERT_EQ(0, r); } bool is_my_accept = false; if (bind_socket) { C_poll cb(center); center->create_file_event(bind_socket.fd(), EVENT_READABLE, &cb); if (cb.poll(500)) { *accepted_p = true; is_my_accept = true; } ASSERT_TRUE(*accepted_p); center->delete_file_event(bind_socket.fd(), EVENT_READABLE); } if (is_my_accept) { r = bind_socket.accept(&srv_socket, options, &cli_addr, worker); ASSERT_EQ(0, r); ASSERT_TRUE(srv_socket.fd() > 0); } if (worker->id == 0) { C_poll cb(center); center->create_file_event(cli_socket.fd(), EVENT_READABLE, &cb); r = cli_socket.is_connected(); if (r == 0) { ASSERT_EQ(true, cb.poll(500)); r = cli_socket.is_connected(); } ASSERT_EQ(1, r); center->delete_file_event(cli_socket.fd(), EVENT_READABLE); } const char *message = "this is a new message"; int len = strlen(message); bufferlist bl; bl.append(message, len); if (worker->id == 0) { r = cli_socket.send(bl, false); ASSERT_EQ(len, r); } char buf[1024]; C_poll cb(center); if (is_my_accept) { center->create_file_event(srv_socket.fd(), EVENT_READABLE, &cb); { r = srv_socket.read(buf, sizeof(buf)); while (r == -EAGAIN) { ASSERT_TRUE(cb.poll(500)); r = srv_socket.read(buf, sizeof(buf)); cb.reset(); } ASSERT_EQ(len, r); ASSERT_EQ(0, memcmp(buf, message, len)); } bind_socket.abort_accept(); } if (worker->id == 0) { cli_socket.shutdown(); // ack delay is 200 ms } bl.clear(); bl.append(message, len); if 
(worker->id == 0) { r = cli_socket.send(bl, false); ASSERT_EQ(-EPIPE, r); } if (is_my_accept) { cb.reset(); ASSERT_TRUE(cb.poll(500)); r = srv_socket.read(buf, sizeof(buf)); if (r == -EAGAIN) { cb.reset(); ASSERT_TRUE(cb.poll(1000*500)); r = srv_socket.read(buf, sizeof(buf)); } ASSERT_EQ(0, r); center->delete_file_event(srv_socket.fd(), EVENT_READABLE); srv_socket.close(); } }); } TEST_P(NetworkWorkerTest, ConnectFailedTest) { entity_addr_t bind_addr; ASSERT_TRUE(bind_addr.parse(get_addr().c_str())); exec_events([this, bind_addr](Worker *worker) mutable { EventCenter *center = &worker->center; entity_addr_t cli_addr; SocketOptions options; ServerSocket bind_socket; int r = 0; if (stack->support_local_listen_table() || worker->id == 0) r = worker->listen(bind_addr, 0, options, &bind_socket); ASSERT_EQ(0, r); ConnectedSocket cli_socket1, cli_socket2; if (worker->id == 0) { ASSERT_TRUE(cli_addr.parse(get_ip_different_port().c_str())); r = worker->connect(cli_addr, options, &cli_socket1); ASSERT_EQ(0, r); C_poll cb(center); center->create_file_event(cli_socket1.fd(), EVENT_READABLE, &cb); r = cli_socket1.is_connected(); if (r == 0) { ASSERT_TRUE(cb.poll(500)); r = cli_socket1.is_connected(); } ASSERT_TRUE(r == -ECONNREFUSED || r == -ECONNRESET); } if (worker->id == 1) { ASSERT_TRUE(cli_addr.parse(get_different_ip().c_str())); r = worker->connect(cli_addr, options, &cli_socket2); ASSERT_EQ(0, r); C_poll cb(center); center->create_file_event(cli_socket2.fd(), EVENT_READABLE, &cb); r = cli_socket2.is_connected(); if (r == 0) { cb.poll(500); r = cli_socket2.is_connected(); } ASSERT_TRUE(r != 1); center->delete_file_event(cli_socket2.fd(), EVENT_READABLE); } }); } TEST_P(NetworkWorkerTest, ListenTest) { Worker *worker = get_worker(0); entity_addr_t bind_addr; ASSERT_TRUE(bind_addr.parse(get_addr().c_str())); SocketOptions options; ServerSocket bind_socket1, bind_socket2; int r = worker->listen(bind_addr, 0, options, &bind_socket1); ASSERT_EQ(0, r); r = 
worker->listen(bind_addr, 0, options, &bind_socket2); ASSERT_EQ(-EADDRINUSE, r); } TEST_P(NetworkWorkerTest, AcceptAndCloseTest) { entity_addr_t bind_addr; ASSERT_TRUE(bind_addr.parse(get_addr().c_str())); std::atomic_bool accepted(false); std::atomic_bool *accepted_p = &accepted; std::atomic_int unbind_count(stack->get_num_worker()); std::atomic_int *count_p = &unbind_count; exec_events([this, bind_addr, accepted_p, count_p](Worker *worker) mutable { SocketOptions options; EventCenter *center = &worker->center; entity_addr_t cli_addr; int r = 0; { ServerSocket bind_socket; if (stack->support_local_listen_table() || worker->id == 0) r = worker->listen(bind_addr, 0, options, &bind_socket); ASSERT_EQ(0, r); ConnectedSocket srv_socket, cli_socket; if (bind_socket) { r = bind_socket.accept(&srv_socket, options, &cli_addr, worker); ASSERT_EQ(-EAGAIN, r); } C_poll cb(center); if (worker->id == 0) { center->create_file_event(bind_socket.fd(), EVENT_READABLE, &cb); r = worker->connect(bind_addr, options, &cli_socket); ASSERT_EQ(0, r); ASSERT_TRUE(cb.poll(500)); } if (bind_socket) { cb.reset(); cb.poll(500); ConnectedSocket srv_socket2; do { r = bind_socket.accept(&srv_socket2, options, &cli_addr, worker); usleep(100); } while (r == -EAGAIN && !*accepted_p); if (r == 0) *accepted_p = true; ASSERT_TRUE(*accepted_p); // srv_socket2 closed center->delete_file_event(bind_socket.fd(), EVENT_READABLE); } if (worker->id == 0) { char buf[100]; cb.reset(); center->create_file_event(cli_socket.fd(), EVENT_READABLE, &cb); int i = 3; while (!i--) { ASSERT_TRUE(cb.poll(500)); r = cli_socket.read(buf, sizeof(buf)); if (r == 0) break; } ASSERT_EQ(0, r); center->delete_file_event(cli_socket.fd(), EVENT_READABLE); } if (bind_socket) center->create_file_event(bind_socket.fd(), EVENT_READABLE, &cb); if (worker->id == 0) { *accepted_p = false; r = worker->connect(bind_addr, options, &cli_socket); ASSERT_EQ(0, r); cb.reset(); ASSERT_TRUE(cb.poll(500)); cli_socket.close(); } if (bind_socket) { 
do { r = bind_socket.accept(&srv_socket, options, &cli_addr, worker); usleep(100); } while (r == -EAGAIN && !*accepted_p); if (r == 0) *accepted_p = true; ASSERT_TRUE(*accepted_p); center->delete_file_event(bind_socket.fd(), EVENT_READABLE); } // unbind } --*count_p; while (*count_p > 0) usleep(100); ConnectedSocket cli_socket; r = worker->connect(bind_addr, options, &cli_socket); ASSERT_EQ(0, r); { C_poll cb(center); center->create_file_event(cli_socket.fd(), EVENT_READABLE, &cb); r = cli_socket.is_connected(); if (r == 0) { ASSERT_TRUE(cb.poll(500)); r = cli_socket.is_connected(); } ASSERT_TRUE(r == -ECONNREFUSED || r == -ECONNRESET); } }); } TEST_P(NetworkWorkerTest, ComplexTest) { entity_addr_t bind_addr; std::atomic_bool listen_done(false); std::atomic_bool *listen_p = &listen_done; std::atomic_bool accepted(false); std::atomic_bool *accepted_p = &accepted; std::atomic_bool done(false); std::atomic_bool *done_p = &done; ASSERT_TRUE(bind_addr.parse(get_addr().c_str())); exec_events([this, bind_addr, listen_p, accepted_p, done_p](Worker *worker) mutable { entity_addr_t cli_addr; EventCenter *center = &worker->center; SocketOptions options; ServerSocket bind_socket; int r = 0; if (stack->support_local_listen_table() || worker->id == 0) { r = worker->listen(bind_addr, 0, options, &bind_socket); ASSERT_EQ(0, r); *listen_p = true; } ConnectedSocket cli_socket, srv_socket; if (worker->id == 1) { while (!*listen_p || stack->support_local_listen_table()) { usleep(50); r = worker->connect(bind_addr, options, &cli_socket); ASSERT_EQ(0, r); if (stack->support_local_listen_table()) break; } } if (bind_socket) { C_poll cb(center); center->create_file_event(bind_socket.fd(), EVENT_READABLE, &cb); int count = 3; while (count--) { if (cb.poll(500)) { r = bind_socket.accept(&srv_socket, options, &cli_addr, worker); ASSERT_EQ(0, r); *accepted_p = true; break; } } ASSERT_TRUE(*accepted_p); center->delete_file_event(bind_socket.fd(), EVENT_READABLE); } if (worker->id == 1) { 
C_poll cb(center); center->create_file_event(cli_socket.fd(), EVENT_WRITABLE, &cb); r = cli_socket.is_connected(); if (r == 0) { ASSERT_TRUE(cb.poll(500)); r = cli_socket.is_connected(); } ASSERT_EQ(1, r); center->delete_file_event(cli_socket.fd(), EVENT_WRITABLE); } const size_t message_size = 10240; size_t count = 100; string message(message_size, '!'); for (size_t i = 0; i < message_size; i += 100) message[i] = ','; size_t len = message_size * count; C_poll cb(center); if (worker->id == 1) center->create_file_event(cli_socket.fd(), EVENT_WRITABLE, &cb); if (srv_socket) center->create_file_event(srv_socket.fd(), EVENT_READABLE, &cb); size_t left = len; len *= 2; string read_string; int again_count = 0; int c = 2; bufferlist bl; for (size_t i = 0; i < count; ++i) bl.push_back(bufferptr((char*)message.data(), message_size)); while (!*done_p) { again_count = 0; if (worker->id == 1) { if (c > 0) { ssize_t r = 0; usleep(100); if (left > 0) { r = cli_socket.send(bl, false); ASSERT_TRUE(r >= 0 || r == -EAGAIN); if (r > 0) left -= r; if (r == -EAGAIN) ++again_count; } if (left == 0) { --c; left = message_size * count; ASSERT_EQ(0U, bl.length()); for (size_t i = 0; i < count; ++i) bl.push_back(bufferptr((char*)message.data(), message_size)); } } } if (srv_socket) { char buf[1000]; if (len > 0) { r = srv_socket.read(buf, sizeof(buf)); ASSERT_TRUE(r > 0 || r == -EAGAIN); if (r > 0) { read_string.append(buf, r); len -= r; } else if (r == -EAGAIN) { ++again_count; } } if (len == 0) { for (size_t i = 0; i < read_string.size(); i += message_size) ASSERT_EQ(0, memcmp(read_string.c_str()+i, message.c_str(), message_size)); *done_p = true; } } if (again_count) { cb.reset(); cb.poll(500); } } if (worker->id == 1) center->delete_file_event(cli_socket.fd(), EVENT_WRITABLE); if (srv_socket) center->delete_file_event(srv_socket.fd(), EVENT_READABLE); if (bind_socket) bind_socket.abort_accept(); if (srv_socket) srv_socket.close(); if (worker->id == 1) cli_socket.close(); }); } class 
StressFactory { struct Client; struct Server; struct ThreadData { Worker *worker; std::set<Client*> clients; std::set<Server*> servers; ~ThreadData() { for (auto && i : clients) delete i; for (auto && i : servers) delete i; } }; struct RandomString { size_t slen; vector<std::string> strs; std::random_device rd; std::default_random_engine rng; explicit RandomString(size_t s): slen(s), rng(rd()) {} void prepare(size_t n) { static const char alphabet[] = "abcdefghijklmnopqrstuvwxyz" "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "0123456789"; std::uniform_int_distribution<> dist( 0, sizeof(alphabet) / sizeof(*alphabet) - 2); strs.reserve(n); std::generate_n( std::back_inserter(strs), strs.capacity(), [&] { std::string str; str.reserve(slen); std::generate_n(std::back_inserter(str), slen, [&]() { return alphabet[dist(rng)]; }); return str; } ); } std::string &get_random_string() { std::uniform_int_distribution<> dist( 0, strs.size() - 1); return strs[dist(rng)]; } }; struct Message { size_t idx; size_t len; std::string content; explicit Message(RandomString &rs, size_t i, size_t l): idx(i) { size_t slen = rs.slen; len = std::max(slen, l); std::vector<std::string> strs; strs.reserve(len / slen); std::generate_n( std::back_inserter(strs), strs.capacity(), [&] { return rs.get_random_string(); } ); len = slen * strs.size(); content.reserve(len); for (auto &&s : strs) content.append(s); } bool verify(const char *b, size_t len = 0) const { return content.compare(0, len, b, 0, len) == 0; } }; template <typename T> class C_delete : public EventCallback { T *ctxt; public: explicit C_delete(T *c): ctxt(c) {} void do_request(uint64_t id) override { delete ctxt; delete this; } }; class Client { StressFactory *factory; EventCenter *center; ConnectedSocket socket; std::deque<StressFactory::Message*> acking; std::deque<StressFactory::Message*> writings; std::string buffer; size_t index = 0; size_t left; bool write_enabled = false; size_t read_offset = 0, write_offset = 0; bool first = true; bool dead 
= false; StressFactory::Message homeless_message; class Client_read_handle : public EventCallback { Client *c; public: explicit Client_read_handle(Client *_c): c(_c) {} void do_request(uint64_t id) override { c->do_read_request(); } } read_ctxt; class Client_write_handle : public EventCallback { Client *c; public: explicit Client_write_handle(Client *_c): c(_c) {} void do_request(uint64_t id) override { c->do_write_request(); } } write_ctxt; public: Client(StressFactory *f, EventCenter *cen, ConnectedSocket s, size_t c) : factory(f), center(cen), socket(std::move(s)), left(c), homeless_message(factory->rs, -1, 1024), read_ctxt(this), write_ctxt(this) { center->create_file_event( socket.fd(), EVENT_READABLE, &read_ctxt); center->dispatch_event_external(&read_ctxt); } void close() { ASSERT_FALSE(write_enabled); dead = true; socket.shutdown(); center->delete_file_event(socket.fd(), EVENT_READABLE); center->dispatch_event_external(new C_delete<Client>(this)); } void do_read_request() { if (dead) return ; ASSERT_TRUE(socket.is_connected() >= 0); if (!socket.is_connected()) return ; ASSERT_TRUE(!acking.empty() || first); if (first) { first = false; center->dispatch_event_external(&write_ctxt); if (acking.empty()) return ; } StressFactory::Message *m = acking.front(); int r = 0; if (buffer.empty()) buffer.resize(m->len); bool must_no = false; while (true) { r = socket.read((char*)buffer.data() + read_offset, m->len - read_offset); ASSERT_TRUE(r == -EAGAIN || r > 0); if (r == -EAGAIN) break; read_offset += r; std::cerr << " client " << this << " receive " << m->idx << " len " << r << " content: " << std::endl; ASSERT_FALSE(must_no); if ((m->len - read_offset) == 0) { ASSERT_TRUE(m->verify(buffer.data(), 0)); delete m; acking.pop_front(); read_offset = 0; buffer.clear(); if (acking.empty()) { m = &homeless_message; must_no = true; } else { m = acking.front(); buffer.resize(m->len); } } } if (acking.empty()) { center->dispatch_event_external(&write_ctxt); return ; } } void 
do_write_request() { if (dead) return ; ASSERT_TRUE(socket.is_connected() > 0); while (left > 0 && factory->queue_depth > writings.size() + acking.size()) { StressFactory::Message *m = new StressFactory::Message( factory->rs, ++index, factory->rd() % factory->max_message_length); std::cerr << " client " << this << " generate message " << m->idx << " length " << m->len << std::endl; ASSERT_EQ(m->len, m->content.size()); writings.push_back(m); --left; --factory->message_left; } while (!writings.empty()) { StressFactory::Message *m = writings.front(); bufferlist bl; bl.append(m->content.data() + write_offset, m->content.size() - write_offset); ssize_t r = socket.send(bl, false); if (r == 0) break; std::cerr << " client " << this << " send " << m->idx << " len " << r << " content: " << std::endl; ASSERT_TRUE(r >= 0); write_offset += r; if (write_offset == m->content.size()) { write_offset = 0; writings.pop_front(); acking.push_back(m); } } if (writings.empty() && write_enabled) { center->delete_file_event(socket.fd(), EVENT_WRITABLE); write_enabled = false; } else if (!writings.empty() && !write_enabled) { ASSERT_EQ(0, center->create_file_event( socket.fd(), EVENT_WRITABLE, &write_ctxt)); write_enabled = true; } } bool finish() const { return left == 0 && acking.empty() && writings.empty(); } }; friend class Client; class Server { StressFactory *factory; EventCenter *center; ConnectedSocket socket; std::deque<std::string> buffers; bool write_enabled = false; bool dead = false; class Server_read_handle : public EventCallback { Server *s; public: explicit Server_read_handle(Server *_s): s(_s) {} void do_request(uint64_t id) override { s->do_read_request(); } } read_ctxt; class Server_write_handle : public EventCallback { Server *s; public: explicit Server_write_handle(Server *_s): s(_s) {} void do_request(uint64_t id) override { s->do_write_request(); } } write_ctxt; public: Server(StressFactory *f, EventCenter *c, ConnectedSocket s): factory(f), center(c), 
socket(std::move(s)), read_ctxt(this), write_ctxt(this) { center->create_file_event(socket.fd(), EVENT_READABLE, &read_ctxt); center->dispatch_event_external(&read_ctxt); } void close() { ASSERT_FALSE(write_enabled); socket.shutdown(); center->delete_file_event(socket.fd(), EVENT_READABLE); center->dispatch_event_external(new C_delete<Server>(this)); } void do_read_request() { if (dead) return ; int r = 0; while (true) { char buf[4096]; bufferptr data; r = socket.read(buf, sizeof(buf)); ASSERT_TRUE(r == -EAGAIN || (r >= 0 && (size_t)r <= sizeof(buf))); if (r == 0) { ASSERT_TRUE(buffers.empty()); dead = true; return ; } else if (r == -EAGAIN) break; buffers.emplace_back(buf, 0, r); std::cerr << " server " << this << " receive " << r << " content: " << std::endl; } if (!buffers.empty() && !write_enabled) center->dispatch_event_external(&write_ctxt); } void do_write_request() { if (dead) return ; while (!buffers.empty()) { bufferlist bl; auto it = buffers.begin(); for (size_t i = 0; i < buffers.size(); ++i) { bl.push_back(bufferptr((char*)it->data(), it->size())); ++it; } ssize_t r = socket.send(bl, false); std::cerr << " server " << this << " send " << r << std::endl; if (r == 0) break; ASSERT_TRUE(r >= 0); while (r > 0) { ASSERT_TRUE(!buffers.empty()); string &buffer = buffers.front(); if (r >= (int)buffer.size()) { r -= (int)buffer.size(); buffers.pop_front(); } else { std::cerr << " server " << this << " sent " << r << std::endl; buffer = buffer.substr(r, buffer.size()); break; } } } if (buffers.empty()) { if (write_enabled) { center->delete_file_event(socket.fd(), EVENT_WRITABLE); write_enabled = false; } } else if (!write_enabled) { ASSERT_EQ(0, center->create_file_event( socket.fd(), EVENT_WRITABLE, &write_ctxt)); write_enabled = true; } } bool finish() { return dead; } }; friend class Server; class C_accept : public EventCallback { StressFactory *factory; ServerSocket bind_socket; ThreadData *t_data; Worker *worker; public: C_accept(StressFactory *f, 
ServerSocket s, ThreadData *data, Worker *w) : factory(f), bind_socket(std::move(s)), t_data(data), worker(w) {} void do_request(uint64_t id) override { while (true) { entity_addr_t cli_addr; ConnectedSocket srv_socket; SocketOptions options; int r = bind_socket.accept(&srv_socket, options, &cli_addr, worker); if (r == -EAGAIN) { break; } ASSERT_EQ(0, r); ASSERT_TRUE(srv_socket.fd() > 0); Server *cb = new Server(factory, &t_data->worker->center, std::move(srv_socket)); t_data->servers.insert(cb); } } }; friend class C_accept; public: static const size_t min_client_send_messages = 100; static const size_t max_client_send_messages = 1000; std::shared_ptr<NetworkStack> stack; RandomString rs; std::random_device rd; const size_t client_num, queue_depth, max_message_length; atomic_int message_count, message_left; entity_addr_t bind_addr; std::atomic_bool already_bind = {false}; SocketOptions options; explicit StressFactory(const std::shared_ptr<NetworkStack> &s, const string &addr, size_t cli, size_t qd, size_t mc, size_t l) : stack(s), rs(128), client_num(cli), queue_depth(qd), max_message_length(l), message_count(mc), message_left(mc) { bind_addr.parse(addr.c_str()); rs.prepare(100); } ~StressFactory() { } void add_client(ThreadData *t_data) { static ceph::mutex lock = ceph::make_mutex("add_client_lock"); std::lock_guard l{lock}; ConnectedSocket sock; int r = t_data->worker->connect(bind_addr, options, &sock); std::default_random_engine rng(rd()); std::uniform_int_distribution<> dist( min_client_send_messages, max_client_send_messages); ASSERT_EQ(0, r); int c = dist(rng); if (c > message_count.load()) c = message_count.load(); Client *cb = new Client(this, &t_data->worker->center, std::move(sock), c); t_data->clients.insert(cb); message_count -= c; } void drop_client(ThreadData *t_data, Client *c) { c->close(); ASSERT_EQ(1U, t_data->clients.erase(c)); } void drop_server(ThreadData *t_data, Server *s) { s->close(); ASSERT_EQ(1U, t_data->servers.erase(s)); } void 
start(Worker *worker) { int r = 0; ThreadData t_data; t_data.worker = worker; ServerSocket bind_socket; if (stack->support_local_listen_table() || worker->id == 0) { r = worker->listen(bind_addr, 0, options, &bind_socket); ASSERT_EQ(0, r); already_bind = true; } while (!already_bind) usleep(50); C_accept *accept_handler = nullptr; int bind_fd = 0; if (bind_socket) { bind_fd = bind_socket.fd(); accept_handler = new C_accept(this, std::move(bind_socket), &t_data, worker); ASSERT_EQ(0, worker->center.create_file_event( bind_fd, EVENT_READABLE, accept_handler)); } int echo_throttle = message_count; while (message_count > 0 || !t_data.clients.empty() || !t_data.servers.empty()) { if (message_count > 0 && t_data.clients.size() < client_num && t_data.servers.size() < client_num) add_client(&t_data); for (auto &&c : t_data.clients) { if (c->finish()) { drop_client(&t_data, c); break; } } for (auto &&s : t_data.servers) { if (s->finish()) { drop_server(&t_data, s); break; } } worker->center.process_events(1); if (echo_throttle > message_left) { std::cerr << " clients " << t_data.clients.size() << " servers " << t_data.servers.size() << " message count " << message_left << std::endl; echo_throttle -= 100; } } if (bind_fd) worker->center.delete_file_event(bind_fd, EVENT_READABLE); delete accept_handler; } }; TEST_P(NetworkWorkerTest, StressTest) { StressFactory factory(stack, get_addr(), 16, 16, 10000, 1024); StressFactory *f = &factory; exec_events([f](Worker *worker) mutable { f->start(worker); }); ASSERT_EQ(0, factory.message_left); } INSTANTIATE_TEST_SUITE_P( NetworkStack, NetworkWorkerTest, ::testing::Values( #ifdef HAVE_DPDK "dpdk", #endif "posix" ) ); /* * Local Variables: * compile-command: "cd ../.. ; make ceph_test_async_networkstack && * ./ceph_test_async_networkstack * * End: */
31,246
28.121156
111
cc
null
ceph-main/src/test/msgr/test_comp_registry.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "include/types.h" #include "include/stringify.h" #include "compressor/Compressor.h" #include "msg/compressor_registry.h" #include "gtest/gtest.h" #include "common/ceph_context.h" #include "global/global_context.h" #include <sstream> TEST(CompressorRegistry, con_modes) { auto cct = g_ceph_context; CompressorRegistry reg(cct); std::vector<uint32_t> methods; uint32_t method; uint32_t mode; const std::vector<uint32_t> snappy_method = { Compressor::COMP_ALG_SNAPPY }; const std::vector<uint32_t> zlib_method = { Compressor::COMP_ALG_ZLIB }; const std::vector<uint32_t> both_methods = { Compressor::COMP_ALG_ZLIB, Compressor::COMP_ALG_SNAPPY}; const std::vector<uint32_t> no_method = { Compressor::COMP_ALG_NONE }; cct->_conf.set_val( "enable_experimental_unrecoverable_data_corrupting_features", "*"); // baseline: compression for communication with osd is enabled cct->_set_module_type(CEPH_ENTITY_TYPE_CLIENT); cct->_conf.set_val("ms_osd_compress_mode", "force"); cct->_conf.set_val("ms_osd_compression_algorithm", "snappy"); cct->_conf.set_val("ms_compress_secure", "false"); cct->_conf.apply_changes(NULL); ASSERT_EQ(reg.get_is_compress_secure(), false); methods = reg.get_methods(CEPH_ENTITY_TYPE_MON); ASSERT_EQ(methods.size(), 0); method = reg.pick_method(CEPH_ENTITY_TYPE_MON, both_methods); ASSERT_EQ(method, Compressor::COMP_ALG_NONE); mode = reg.get_mode(CEPH_ENTITY_TYPE_MON, false); ASSERT_EQ(mode, Compressor::COMP_NONE); methods = reg.get_methods(CEPH_ENTITY_TYPE_OSD); ASSERT_EQ(methods, snappy_method); const std::vector<uint32_t> rev_both_methods (both_methods.rbegin(), both_methods.rend()); method = reg.pick_method(CEPH_ENTITY_TYPE_OSD, rev_both_methods); ASSERT_EQ(method, Compressor::COMP_ALG_SNAPPY); mode = reg.get_mode(CEPH_ENTITY_TYPE_OSD, false); ASSERT_EQ(mode, Compressor::COMP_FORCE); mode = reg.get_mode(CEPH_ENTITY_TYPE_OSD, true); ASSERT_EQ(mode, 
Compressor::COMP_NONE); method = reg.pick_method(CEPH_ENTITY_TYPE_OSD, zlib_method); ASSERT_EQ(method, Compressor::COMP_ALG_NONE); // disable compression mode cct->_set_module_type(CEPH_ENTITY_TYPE_CLIENT); cct->_conf.set_val("ms_osd_compress_mode", "none"); cct->_conf.apply_changes(NULL); mode = reg.get_mode(CEPH_ENTITY_TYPE_OSD, false); ASSERT_EQ(mode, Compressor::COMP_NONE); // no compression methods cct->_conf.set_val("ms_osd_compress_mode", "force"); cct->_conf.set_val("ms_osd_compression_algorithm", "none"); cct->_conf.apply_changes(NULL); method = reg.pick_method(CEPH_ENTITY_TYPE_OSD, both_methods); ASSERT_EQ(method, Compressor::COMP_ALG_NONE); // min compression size cct->_conf.set_val("ms_osd_compress_min_size", "1024"); cct->_conf.apply_changes(NULL); uint32_t s = reg.get_min_compression_size(CEPH_ENTITY_TYPE_OSD); ASSERT_EQ(s, 1024); // allow secure with compression cct->_conf.set_val("ms_osd_compress_mode", "force"); cct->_conf.set_val("ms_osd_compression_algorithm", "snappy"); cct->_conf.set_val("ms_compress_secure", "true"); cct->_conf.apply_changes(NULL); ASSERT_EQ(reg.get_is_compress_secure(), true); mode = reg.get_mode(CEPH_ENTITY_TYPE_OSD, true); ASSERT_EQ(mode, Compressor::COMP_FORCE); mode = reg.get_mode(CEPH_ENTITY_TYPE_OSD, false); ASSERT_EQ(mode, Compressor::COMP_FORCE); // back to normalish, for the benefit of the next test(s) cct->_set_module_type(CEPH_ENTITY_TYPE_CLIENT); }
3,565
35.020202
103
cc
null
ceph-main/src/test/msgr/test_frames_v2.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2020 Red Hat * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include "msg/async/frames_v2.h" #include <numeric> #include <ostream> #include <string> #include <tuple> #include "msg/async/compression_meta.h" #include "auth/Auth.h" #include "common/ceph_argparse.h" #include "global/global_init.h" #include "global/global_context.h" #include "include/Context.h" #include <gtest/gtest.h> #define COMP_THRESHOLD 1 << 10 #define EXPECT_COMPRESSED(is_compressed, val1, val2) \ if (is_compressed && val1 > COMP_THRESHOLD) { \ EXPECT_GE(val1, val2); \ } else { \ EXPECT_EQ(val1, val2); \ } using namespace std; namespace ceph::msgr::v2 { // MessageFrame with the first segment not fixed to ceph_msg_header2 struct TestFrame : Frame<TestFrame, /* four segments */ segment_t::DEFAULT_ALIGNMENT, segment_t::DEFAULT_ALIGNMENT, segment_t::DEFAULT_ALIGNMENT, segment_t::PAGE_SIZE_ALIGNMENT> { static constexpr Tag tag = static_cast<Tag>(123); static TestFrame Encode(const bufferlist& header, const bufferlist& front, const bufferlist& middle, const bufferlist& data) { TestFrame f; f.segments[SegmentIndex::Msg::HEADER] = header; f.segments[SegmentIndex::Msg::FRONT] = front; f.segments[SegmentIndex::Msg::MIDDLE] = middle; f.segments[SegmentIndex::Msg::DATA] = data; // discard cached crcs for perf tests f.segments[SegmentIndex::Msg::HEADER].invalidate_crc(); f.segments[SegmentIndex::Msg::FRONT].invalidate_crc(); f.segments[SegmentIndex::Msg::MIDDLE].invalidate_crc(); f.segments[SegmentIndex::Msg::DATA].invalidate_crc(); return f; } static TestFrame Decode(segment_bls_t& segment_bls) { TestFrame f; // Transfer segments' bufferlists. 
If segment_bls contains // less than SegmentsNumV segments, the missing ones will be // seen as empty. for (size_t i = 0; i < segment_bls.size(); i++) { f.segments[i] = std::move(segment_bls[i]); } return f; } bufferlist& header() { return segments[SegmentIndex::Msg::HEADER]; } bufferlist& front() { return segments[SegmentIndex::Msg::FRONT]; } bufferlist& middle() { return segments[SegmentIndex::Msg::MIDDLE]; } bufferlist& data() { return segments[SegmentIndex::Msg::DATA]; } protected: using Frame::Frame; }; struct mode_t { bool is_rev1; bool is_secure; bool is_compress; }; static std::ostream& operator<<(std::ostream& os, const mode_t& m) { os << "msgr2." << (m.is_rev1 ? "1" : "0") << (m.is_secure ? "-secure" : "-crc") << (m.is_compress ? "-compress": "-nocompress"); return os; } static const mode_t modes[] = { {false, false, false}, {false, true, false}, {true, false, false}, {true, true, false}, {false, false, true}, {false, true, true}, {true, false, true}, {true, true, true} }; struct round_trip_instance_t { uint32_t header_len; uint32_t front_len; uint32_t middle_len; uint32_t data_len; // expected number of segments (same for each mode) size_t num_segments; // expected layout (different for each mode) uint32_t onwire_lens[4][MAX_NUM_SEGMENTS + 2]; }; static std::ostream& operator<<(std::ostream& os, const round_trip_instance_t& rti) { os << rti.header_len << "+" << rti.front_len << "+" << rti.middle_len << "+" << rti.data_len; return os; } static bufferlist make_bufferlist(size_t len, char c) { bufferlist bl; if (len > 0) { bl.reserve(len); bl.append(std::string(len, c)); } return bl; } bool disassemble_frame(FrameAssembler& frame_asm, bufferlist& frame_bl, Tag& tag, segment_bls_t& segment_bls) { bufferlist preamble_bl; frame_bl.splice(0, frame_asm.get_preamble_onwire_len(), &preamble_bl); tag = frame_asm.disassemble_preamble(preamble_bl); do { size_t seg_idx = segment_bls.size(); segment_bls.emplace_back(); uint32_t onwire_len = 
frame_asm.get_segment_onwire_len(seg_idx); if (onwire_len > 0) { frame_bl.splice(0, onwire_len, &segment_bls.back()); } } while (segment_bls.size() < frame_asm.get_num_segments()); bufferlist epilogue_bl; uint32_t epilogue_onwire_len = frame_asm.get_epilogue_onwire_len(); if (epilogue_onwire_len > 0) { frame_bl.splice(0, epilogue_onwire_len, &epilogue_bl); } return frame_asm.disassemble_segments(preamble_bl, segment_bls.data(), epilogue_bl); } class RoundTripTestBase : public ::testing::TestWithParam< std::tuple<round_trip_instance_t, mode_t>> { protected: RoundTripTestBase() : m_tx_frame_asm(&m_tx_crypto, std::get<1>(GetParam()).is_rev1, true, &m_tx_comp), m_rx_frame_asm(&m_rx_crypto, std::get<1>(GetParam()).is_rev1, true, &m_rx_comp), m_header(make_bufferlist(std::get<0>(GetParam()).header_len, 'H')), m_front(make_bufferlist(std::get<0>(GetParam()).front_len, 'F')), m_middle(make_bufferlist(std::get<0>(GetParam()).middle_len, 'M')), m_data(make_bufferlist(std::get<0>(GetParam()).data_len, 'D')) { const auto& m = std::get<1>(GetParam()); if (m.is_secure) { AuthConnectionMeta auth_meta; auth_meta.con_mode = CEPH_CON_MODE_SECURE; // see AuthConnectionMeta::get_connection_secret_length() auth_meta.connection_secret.resize(64); g_ceph_context->random()->get_bytes(auth_meta.connection_secret.data(), auth_meta.connection_secret.size()); m_tx_crypto = ceph::crypto::onwire::rxtx_t::create_handler_pair( g_ceph_context, auth_meta, /*new_nonce_format=*/m.is_rev1, /*crossed=*/false); m_rx_crypto = ceph::crypto::onwire::rxtx_t::create_handler_pair( g_ceph_context, auth_meta, /*new_nonce_format=*/m.is_rev1, /*crossed=*/true); } if (m.is_compress) { CompConnectionMeta comp_meta; comp_meta.con_mode = Compressor::COMP_FORCE; comp_meta.con_method = Compressor::COMP_ALG_SNAPPY; m_tx_comp = ceph::compression::onwire::rxtx_t::create_handler_pair( g_ceph_context, comp_meta, /*min_compress_size=*/COMP_THRESHOLD ); m_rx_comp = ceph::compression::onwire::rxtx_t::create_handler_pair( 
g_ceph_context, comp_meta, /*min_compress_size=*/COMP_THRESHOLD ); } } void check_frame_assembler(const FrameAssembler& frame_asm) { const auto& [rti, m] = GetParam(); const auto& onwire_lens = rti.onwire_lens[m.is_rev1 << 1 | m.is_secure]; EXPECT_COMPRESSED(m.is_compress, rti.header_len + rti.front_len + rti.middle_len + rti.data_len, frame_asm.get_frame_logical_len()); ASSERT_EQ(rti.num_segments, frame_asm.get_num_segments()); EXPECT_COMPRESSED(m.is_compress, onwire_lens[0], frame_asm.get_preamble_onwire_len()); for (size_t i = 0; i < rti.num_segments; i++) { EXPECT_COMPRESSED(m.is_compress, onwire_lens[i + 1], frame_asm.get_segment_onwire_len(i)); } EXPECT_COMPRESSED(m.is_compress, onwire_lens[rti.num_segments + 1], frame_asm.get_epilogue_onwire_len()); EXPECT_COMPRESSED(m.is_compress, std::accumulate(std::begin(onwire_lens), std::end(onwire_lens), uint64_t(0)), frame_asm.get_frame_onwire_len()); } void test_round_trip() { auto tx_frame = TestFrame::Encode(m_header, m_front, m_middle, m_data); auto onwire_bl = tx_frame.get_buffer(m_tx_frame_asm); check_frame_assembler(m_tx_frame_asm); EXPECT_EQ(m_tx_frame_asm.get_frame_onwire_len(), onwire_bl.length()); Tag rx_tag; segment_bls_t rx_segment_bls; EXPECT_TRUE(disassemble_frame(m_rx_frame_asm, onwire_bl, rx_tag, rx_segment_bls)); check_frame_assembler(m_rx_frame_asm); EXPECT_EQ(0, onwire_bl.length()); EXPECT_EQ(TestFrame::tag, rx_tag); EXPECT_EQ(m_rx_frame_asm.get_num_segments(), rx_segment_bls.size()); auto rx_frame = TestFrame::Decode(rx_segment_bls); EXPECT_TRUE(m_header.contents_equal(rx_frame.header())); EXPECT_TRUE(m_front.contents_equal(rx_frame.front())); EXPECT_TRUE(m_middle.contents_equal(rx_frame.middle())); EXPECT_TRUE(m_data.contents_equal(rx_frame.data())); } ceph::crypto::onwire::rxtx_t m_tx_crypto; ceph::crypto::onwire::rxtx_t m_rx_crypto; ceph::compression::onwire::rxtx_t m_tx_comp; ceph::compression::onwire::rxtx_t m_rx_comp; FrameAssembler m_tx_frame_asm; FrameAssembler m_rx_frame_asm; const 
bufferlist m_header; const bufferlist m_front; const bufferlist m_middle; const bufferlist m_data; }; class RoundTripTest : public RoundTripTestBase {}; TEST_P(RoundTripTest, Basic) { test_round_trip(); } TEST_P(RoundTripTest, Reuse) { for (int i = 0; i < 3; i++) { test_round_trip(); } } static const round_trip_instance_t round_trip_instances[] = { // first segment is empty { 0, 0, 0, 0, 1, {{32, 0, 17, 0, 0, 0}, {32, 0, 32, 0, 0, 0}, {32, 0, 0, 0, 0, 0}, {96, 0, 0, 0, 0, 0}}}, { 0, 0, 0, 303, 4, {{32, 0, 0, 0, 303, 17}, {32, 0, 0, 0, 304, 32}, {32, 0, 0, 0, 303, 13}, {96, 0, 0, 0, 304, 32}}}, { 0, 0, 202, 0, 3, {{32, 0, 0, 202, 17, 0}, {32, 0, 0, 208, 32, 0}, {32, 0, 0, 202, 13, 0}, {96, 0, 0, 208, 32, 0}}}, { 0, 0, 202, 303, 4, {{32, 0, 0, 202, 303, 17}, {32, 0, 0, 208, 304, 32}, {32, 0, 0, 202, 303, 13}, {96, 0, 0, 208, 304, 32}}}, { 0, 101, 0, 0, 2, {{32, 0, 101, 17, 0, 0}, {32, 0, 112, 32, 0, 0}, {32, 0, 101, 13, 0, 0}, {96, 0, 112, 32, 0, 0}}}, { 0, 101, 0, 303, 4, {{32, 0, 101, 0, 303, 17}, {32, 0, 112, 0, 304, 32}, {32, 0, 101, 0, 303, 13}, {96, 0, 112, 0, 304, 32}}}, { 0, 101, 202, 0, 3, {{32, 0, 101, 202, 17, 0}, {32, 0, 112, 208, 32, 0}, {32, 0, 101, 202, 13, 0}, {96, 0, 112, 208, 32, 0}}}, { 0, 101, 202, 303, 4, {{32, 0, 101, 202, 303, 17}, {32, 0, 112, 208, 304, 32}, {32, 0, 101, 202, 303, 13}, {96, 0, 112, 208, 304, 32}}}, // first segment is fully inlined, inline buffer is not full { 1, 0, 0, 0, 1, {{32, 1, 17, 0, 0, 0}, {32, 16, 32, 0, 0, 0}, {32, 5, 0, 0, 0, 0}, {96, 0, 0, 0, 0, 0}}}, { 1, 0, 0, 303, 4, {{32, 1, 0, 0, 303, 17}, {32, 16, 0, 0, 304, 32}, {32, 5, 0, 0, 303, 13}, {96, 0, 0, 0, 304, 32}}}, { 1, 0, 202, 0, 3, {{32, 1, 0, 202, 17, 0}, {32, 16, 0, 208, 32, 0}, {32, 5, 0, 202, 13, 0}, {96, 0, 0, 208, 32, 0}}}, { 1, 0, 202, 303, 4, {{32, 1, 0, 202, 303, 17}, {32, 16, 0, 208, 304, 32}, {32, 5, 0, 202, 303, 13}, {96, 0, 0, 208, 304, 32}}}, { 1, 101, 0, 0, 2, {{32, 1, 101, 17, 0, 0}, {32, 16, 112, 32, 0, 0}, {32, 5, 101, 13, 0, 0}, {96, 0, 112, 
32, 0, 0}}}, { 1, 101, 0, 303, 4, {{32, 1, 101, 0, 303, 17}, {32, 16, 112, 0, 304, 32}, {32, 5, 101, 0, 303, 13}, {96, 0, 112, 0, 304, 32}}}, { 1, 101, 202, 0, 3, {{32, 1, 101, 202, 17, 0}, {32, 16, 112, 208, 32, 0}, {32, 5, 101, 202, 13, 0}, {96, 0, 112, 208, 32, 0}}}, { 1, 101, 202, 303, 4, {{32, 1, 101, 202, 303, 17}, {32, 16, 112, 208, 304, 32}, {32, 5, 101, 202, 303, 13}, {96, 0, 112, 208, 304, 32}}}, // first segment is fully inlined, inline buffer is full {48, 0, 0, 0, 1, {{32, 48, 17, 0, 0, 0}, {32, 48, 32, 0, 0, 0}, {32, 52, 0, 0, 0, 0}, {96, 0, 0, 0, 0, 0}}}, {48, 0, 0, 303, 4, {{32, 48, 0, 0, 303, 17}, {32, 48, 0, 0, 304, 32}, {32, 52, 0, 0, 303, 13}, {96, 0, 0, 0, 304, 32}}}, {48, 0, 202, 0, 3, {{32, 48, 0, 202, 17, 0}, {32, 48, 0, 208, 32, 0}, {32, 52, 0, 202, 13, 0}, {96, 0, 0, 208, 32, 0}}}, {48, 0, 202, 303, 4, {{32, 48, 0, 202, 303, 17}, {32, 48, 0, 208, 304, 32}, {32, 52, 0, 202, 303, 13}, {96, 0, 0, 208, 304, 32}}}, {48, 101, 0, 0, 2, {{32, 48, 101, 17, 0, 0}, {32, 48, 112, 32, 0, 0}, {32, 52, 101, 13, 0, 0}, {96, 0, 112, 32, 0, 0}}}, {48, 101, 0, 303, 4, {{32, 48, 101, 0, 303, 17}, {32, 48, 112, 0, 304, 32}, {32, 52, 101, 0, 303, 13}, {96, 0, 112, 0, 304, 32}}}, {48, 101, 202, 0, 3, {{32, 48, 101, 202, 17, 0}, {32, 48, 112, 208, 32, 0}, {32, 52, 101, 202, 13, 0}, {96, 0, 112, 208, 32, 0}}}, {48, 101, 202, 303, 4, {{32, 48, 101, 202, 303, 17}, {32, 48, 112, 208, 304, 32}, {32, 52, 101, 202, 303, 13}, {96, 0, 112, 208, 304, 32}}}, // first segment is partially inlined {49, 0, 0, 0, 1, {{32, 49, 17, 0, 0, 0}, {32, 64, 32, 0, 0, 0}, {32, 53, 0, 0, 0, 0}, {96, 32, 0, 0, 0, 0}}}, {49, 0, 0, 303, 4, {{32, 49, 0, 0, 303, 17}, {32, 64, 0, 0, 304, 32}, {32, 53, 0, 0, 303, 13}, {96, 32, 0, 0, 304, 32}}}, {49, 0, 202, 0, 3, {{32, 49, 0, 202, 17, 0}, {32, 64, 0, 208, 32, 0}, {32, 53, 0, 202, 13, 0}, {96, 32, 0, 208, 32, 0}}}, {49, 0, 202, 303, 4, {{32, 49, 0, 202, 303, 17}, {32, 64, 0, 208, 304, 32}, {32, 53, 0, 202, 303, 13}, {96, 32, 0, 208, 304, 32}}}, 
{49, 101, 0, 0, 2, {{32, 49, 101, 17, 0, 0}, {32, 64, 112, 32, 0, 0}, {32, 53, 101, 13, 0, 0}, {96, 32, 112, 32, 0, 0}}}, {49, 101, 0, 303, 4, {{32, 49, 101, 0, 303, 17}, {32, 64, 112, 0, 304, 32}, {32, 53, 101, 0, 303, 13}, {96, 32, 112, 0, 304, 32}}}, {49, 101, 202, 0, 3, {{32, 49, 101, 202, 17, 0}, {32, 64, 112, 208, 32, 0}, {32, 53, 101, 202, 13, 0}, {96, 32, 112, 208, 32, 0}}}, {49, 101, 202, 303, 4, {{32, 49, 101, 202, 303, 17}, {32, 64, 112, 208, 304, 32}, {32, 53, 101, 202, 303, 13}, {96, 32, 112, 208, 304, 32}}}, }; INSTANTIATE_TEST_SUITE_P( RoundTripTests, RoundTripTest, ::testing::Combine( ::testing::ValuesIn(round_trip_instances), ::testing::ValuesIn(modes))); class RoundTripPerfTest : public RoundTripTestBase {}; TEST_P(RoundTripPerfTest, DISABLED_Basic) { for (int i = 0; i < 100000; i++) { auto tx_frame = TestFrame::Encode(m_header, m_front, m_middle, m_data); auto onwire_bl = tx_frame.get_buffer(m_tx_frame_asm); Tag rx_tag; segment_bls_t rx_segment_bls; ASSERT_TRUE(disassemble_frame(m_rx_frame_asm, onwire_bl, rx_tag, rx_segment_bls)); } } static const round_trip_instance_t round_trip_perf_instances[] = { {41, 250, 0, 0, 2, {{32, 41, 250, 17, 0, 0}, {32, 48, 256, 32, 0, 0}, {32, 45, 250, 13, 0, 0}, {96, 0, 256, 32, 0, 0}}}, {41, 250, 0, 512, 4, {{32, 41, 250, 0, 512, 17}, {32, 48, 256, 0, 512, 32}, {32, 45, 250, 0, 512, 13}, {96, 0, 256, 0, 512, 32}}}, {41, 250, 0, 4096, 4, {{32, 41, 250, 0, 4096, 17}, {32, 48, 256, 0, 4096, 32}, {32, 45, 250, 0, 4096, 13}, {96, 0, 256, 0, 4096, 32}}}, {41, 250, 0, 32768, 4, {{32, 41, 250, 0, 32768, 17}, {32, 48, 256, 0, 32768, 32}, {32, 45, 250, 0, 32768, 13}, {96, 0, 256, 0, 32768, 32}}}, {41, 250, 0, 131072, 4, {{32, 41, 250, 0, 131072, 17}, {32, 48, 256, 0, 131072, 32}, {32, 45, 250, 0, 131072, 13}, {96, 0, 256, 0, 131072, 32}}}, {41, 250, 0, 4194304, 4, {{32, 41, 250, 0, 4194304, 17}, {32, 48, 256, 0, 4194304, 32}, {32, 45, 250, 0, 4194304, 13}, {96, 0, 256, 0, 4194304, 32}}}, }; INSTANTIATE_TEST_SUITE_P( 
RoundTripPerfTests, RoundTripPerfTest, ::testing::Combine( ::testing::ValuesIn(round_trip_perf_instances), ::testing::ValuesIn(modes))); } // namespace ceph::msgr::v2 int main(int argc, char* argv[]) { auto args = argv_to_vec(argc, argv); auto cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); common_init_finish(g_ceph_context); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
19,530
39.353306
100
cc
null
ceph-main/src/test/msgr/test_msgr.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2014 UnitedStack <haomai@unitedstack.com> * * Author: Haomai Wang <haomaiwang@gmail.com> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <atomic> #include <iostream> #include <list> #include <memory> #include <set> #include <stdlib.h> #include <time.h> #include <unistd.h> #include <boost/random/binomial_distribution.hpp> #include <boost/random/mersenne_twister.hpp> #include <boost/random/uniform_int.hpp> #include <gtest/gtest.h> #define MSG_POLICY_UNIT_TESTING #include "common/ceph_argparse.h" #include "common/ceph_mutex.h" #include "global/global_init.h" #include "messages/MCommand.h" #include "messages/MPing.h" #include "msg/Connection.h" #include "msg/Dispatcher.h" #include "msg/Message.h" #include "msg/Messenger.h" #include "msg/msg_types.h" typedef boost::mt11213b gen_type; #include "common/dout.h" #include "include/ceph_assert.h" #include "auth/DummyAuth.h" #define dout_subsys ceph_subsys_ms #undef dout_prefix #define dout_prefix *_dout << " ceph_test_msgr " #define CHECK_AND_WAIT_TRUE(expr) do { \ int n = 1000; \ while (--n) { \ if (expr) \ break; \ usleep(1000); \ } \ } while(0); using namespace std; class MessengerTest : public ::testing::TestWithParam<const char*> { public: DummyAuthClientServer dummy_auth; Messenger *server_msgr; Messenger *client_msgr; MessengerTest() : dummy_auth(g_ceph_context), server_msgr(NULL), client_msgr(NULL) { dummy_auth.auth_registry.refresh_config(); } void SetUp() override { lderr(g_ceph_context) << __func__ << " start set up " << GetParam() << dendl; server_msgr = Messenger::create(g_ceph_context, string(GetParam()), entity_name_t::OSD(0), "server", getpid()); client_msgr = 
Messenger::create(g_ceph_context, string(GetParam()), entity_name_t::CLIENT(-1), "client", getpid()); server_msgr->set_default_policy(Messenger::Policy::stateless_server(0)); client_msgr->set_default_policy(Messenger::Policy::lossy_client(0)); server_msgr->set_auth_client(&dummy_auth); server_msgr->set_auth_server(&dummy_auth); client_msgr->set_auth_client(&dummy_auth); client_msgr->set_auth_server(&dummy_auth); server_msgr->set_require_authorizer(false); } void TearDown() override { ASSERT_EQ(server_msgr->get_dispatch_queue_len(), 0); ASSERT_EQ(client_msgr->get_dispatch_queue_len(), 0); delete server_msgr; delete client_msgr; } }; class FakeDispatcher : public Dispatcher { public: struct Session : public RefCountedObject { atomic<uint64_t> count; ConnectionRef con; explicit Session(ConnectionRef c): RefCountedObject(g_ceph_context), count(0), con(c) { } uint64_t get_count() { return count; } }; ceph::mutex lock = ceph::make_mutex("FakeDispatcher::lock"); ceph::condition_variable cond; bool is_server; bool got_new; bool got_remote_reset; bool got_connect; bool loopback; entity_addrvec_t last_accept; ConnectionRef *last_accept_con_ptr = nullptr; explicit FakeDispatcher(bool s): Dispatcher(g_ceph_context), is_server(s), got_new(false), got_remote_reset(false), got_connect(false), loopback(false) { } bool ms_can_fast_dispatch_any() const override { return true; } bool ms_can_fast_dispatch(const Message *m) const override { switch (m->get_type()) { case CEPH_MSG_PING: return true; default: return false; } } void ms_handle_fast_connect(Connection *con) override { std::scoped_lock l{lock}; lderr(g_ceph_context) << __func__ << " " << con << dendl; auto s = con->get_priv(); if (!s) { auto session = new Session(con); con->set_priv(RefCountedPtr{session, false}); lderr(g_ceph_context) << __func__ << " con: " << con << " count: " << session->count << dendl; } got_connect = true; cond.notify_all(); } void ms_handle_fast_accept(Connection *con) override { last_accept = 
con->get_peer_addrs(); if (last_accept_con_ptr) { *last_accept_con_ptr = con; } if (!con->get_priv()) { con->set_priv(RefCountedPtr{new Session(con), false}); } } bool ms_dispatch(Message *m) override { auto priv = m->get_connection()->get_priv(); auto s = static_cast<Session*>(priv.get()); if (!s) { s = new Session(m->get_connection()); priv.reset(s, false); m->get_connection()->set_priv(priv); } s->count++; lderr(g_ceph_context) << __func__ << " conn: " << m->get_connection() << " session " << s << " count: " << s->count << dendl; if (is_server) { reply_message(m); } std::lock_guard l{lock}; got_new = true; cond.notify_all(); m->put(); return true; } bool ms_handle_reset(Connection *con) override { std::lock_guard l{lock}; lderr(g_ceph_context) << __func__ << " " << con << dendl; auto priv = con->get_priv(); if (auto s = static_cast<Session*>(priv.get()); s) { s->con.reset(); // break con <-> session ref cycle con->set_priv(nullptr); // break ref <-> session cycle, if any } return true; } void ms_handle_remote_reset(Connection *con) override { std::lock_guard l{lock}; lderr(g_ceph_context) << __func__ << " " << con << dendl; auto priv = con->get_priv(); if (auto s = static_cast<Session*>(priv.get()); s) { s->con.reset(); // break con <-> session ref cycle con->set_priv(nullptr); // break ref <-> session cycle, if any } got_remote_reset = true; cond.notify_all(); } bool ms_handle_refused(Connection *con) override { return false; } void ms_fast_dispatch(Message *m) override { auto priv = m->get_connection()->get_priv(); auto s = static_cast<Session*>(priv.get()); if (!s) { s = new Session(m->get_connection()); priv.reset(s, false); m->get_connection()->set_priv(priv); } s->count++; lderr(g_ceph_context) << __func__ << " conn: " << m->get_connection() << " session " << s << " count: " << s->count << dendl; if (is_server) { if (loopback) ceph_assert(m->get_source().is_osd()); else reply_message(m); } else if (loopback) { ceph_assert(m->get_source().is_client()); } 
m->put(); std::lock_guard l{lock}; got_new = true; cond.notify_all(); } int ms_handle_authentication(Connection *con) override { return 1; } void reply_message(Message *m) { MPing *rm = new MPing(); m->get_connection()->send_message(rm); } }; typedef FakeDispatcher::Session Session; struct TestInterceptor : public Interceptor { bool step_waiting = false; bool waiting = true; std::map<Connection *, uint32_t> current_step; std::map<Connection *, std::list<uint32_t>> step_history; std::map<uint32_t, std::optional<ACTION>> decisions; std::set<uint32_t> breakpoints; uint32_t count_step(Connection *conn, uint32_t step) { uint32_t count = 0; for (auto s : step_history[conn]) { if (s == step) { count++; } } return count; } void breakpoint(uint32_t step) { breakpoints.insert(step); } void remove_bp(uint32_t step) { breakpoints.erase(step); } Connection *wait(uint32_t step, Connection *conn=nullptr) { std::unique_lock<std::mutex> l(lock); while(true) { if (conn) { auto it = current_step.find(conn); if (it != current_step.end()) { if (it->second == step) { break; } } } else { for (auto it : current_step) { if (it.second == step) { conn = it.first; break; } } if (conn) { break; } } step_waiting = true; cond_var.wait(l); } step_waiting = false; return conn; } ACTION wait_for_decision(uint32_t step, std::unique_lock<std::mutex> &l) { if (decisions[step]) { return *(decisions[step]); } waiting = true; cond_var.wait(l, [this] { return !waiting; }); return *(decisions[step]); } void proceed(uint32_t step, ACTION decision) { std::unique_lock<std::mutex> l(lock); decisions[step] = decision; if (waiting) { waiting = false; cond_var.notify_one(); } } ACTION intercept(Connection *conn, uint32_t step) override { lderr(g_ceph_context) << __func__ << " conn(" << conn << ") intercept called on step=" << step << dendl; { std::unique_lock<std::mutex> l(lock); step_history[conn].push_back(step); current_step[conn] = step; if (step_waiting) { cond_var.notify_one(); } } 
std::unique_lock<std::mutex> l(lock); ACTION decision = ACTION::CONTINUE; if (breakpoints.find(step) != breakpoints.end()) { lderr(g_ceph_context) << __func__ << " conn(" << conn << ") pausing on step=" << step << dendl; decision = wait_for_decision(step, l); } else { if (decisions[step]) { decision = *(decisions[step]); } } lderr(g_ceph_context) << __func__ << " conn(" << conn << ") resuming step=" << step << " with decision=" << decision << dendl; decisions[step].reset(); return decision; } }; /** * Scenario: A connects to B, and B connects to A at the same time. */ TEST_P(MessengerTest, ConnectionRaceTest) { FakeDispatcher cli_dispatcher(false), srv_dispatcher(false); TestInterceptor *cli_interceptor = new TestInterceptor(); TestInterceptor *srv_interceptor = new TestInterceptor(); server_msgr->set_policy(entity_name_t::TYPE_CLIENT, Messenger::Policy::lossless_peer_reuse(0)); server_msgr->interceptor = srv_interceptor; client_msgr->set_policy(entity_name_t::TYPE_OSD, Messenger::Policy::lossless_peer_reuse(0)); client_msgr->interceptor = cli_interceptor; entity_addr_t bind_addr; bind_addr.parse("v2:127.0.0.1:3300"); server_msgr->bind(bind_addr); server_msgr->add_dispatcher_head(&srv_dispatcher); server_msgr->start(); bind_addr.parse("v2:127.0.0.1:3301"); client_msgr->bind(bind_addr); client_msgr->add_dispatcher_head(&cli_dispatcher); client_msgr->start(); // pause before sending client_ident message cli_interceptor->breakpoint(Interceptor::STEP::SEND_CLIENT_IDENTITY); // pause before sending client_ident message srv_interceptor->breakpoint(Interceptor::STEP::SEND_CLIENT_IDENTITY); ConnectionRef c2s = client_msgr->connect_to(server_msgr->get_mytype(), server_msgr->get_myaddrs()); MPing *m1 = new MPing(); ASSERT_EQ(c2s->send_message(m1), 0); ConnectionRef s2c = server_msgr->connect_to(client_msgr->get_mytype(), client_msgr->get_myaddrs()); MPing *m2 = new MPing(); ASSERT_EQ(s2c->send_message(m2), 0); cli_interceptor->wait(Interceptor::STEP::SEND_CLIENT_IDENTITY, 
c2s.get()); srv_interceptor->wait(Interceptor::STEP::SEND_CLIENT_IDENTITY, s2c.get()); // at this point both connections (A->B, B->A) are paused just before sending // the client_ident message. cli_interceptor->remove_bp(Interceptor::STEP::SEND_CLIENT_IDENTITY); srv_interceptor->remove_bp(Interceptor::STEP::SEND_CLIENT_IDENTITY); cli_interceptor->proceed(Interceptor::STEP::SEND_CLIENT_IDENTITY, Interceptor::ACTION::CONTINUE); srv_interceptor->proceed(Interceptor::STEP::SEND_CLIENT_IDENTITY, Interceptor::ACTION::CONTINUE); { std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } { std::unique_lock l{srv_dispatcher.lock}; srv_dispatcher.cond.wait(l, [&] { return srv_dispatcher.got_new; }); srv_dispatcher.got_new = false; } ASSERT_TRUE(s2c->is_connected()); ASSERT_EQ(1u, static_cast<Session*>(s2c->get_priv().get())->get_count()); ASSERT_TRUE(s2c->peer_is_client()); ASSERT_TRUE(c2s->is_connected()); ASSERT_EQ(1u, static_cast<Session*>(c2s->get_priv().get())->get_count()); ASSERT_TRUE(c2s->peer_is_osd()); client_msgr->shutdown(); client_msgr->wait(); server_msgr->shutdown(); server_msgr->wait(); delete cli_interceptor; delete srv_interceptor; } /** * Scenario: A connects to B, and B connects to A at the same time. * The first (A -> B) connection gets to message flow handshake, the * second (B -> A) connection is stuck waiting for a banner from A. * After A sends client_ident to B, the first connection wins and B * calls reuse_connection() to replace the second connection's socket * while the second connection is still in BANNER_CONNECTING. 
*/ TEST_P(MessengerTest, ConnectionRaceReuseBannerTest) { FakeDispatcher cli_dispatcher(false), srv_dispatcher(false); auto cli_interceptor = std::make_unique<TestInterceptor>(); auto srv_interceptor = std::make_unique<TestInterceptor>(); server_msgr->set_policy(entity_name_t::TYPE_CLIENT, Messenger::Policy::lossless_peer_reuse(0)); server_msgr->interceptor = srv_interceptor.get(); client_msgr->set_policy(entity_name_t::TYPE_OSD, Messenger::Policy::lossless_peer_reuse(0)); client_msgr->interceptor = cli_interceptor.get(); entity_addr_t bind_addr; bind_addr.parse("v2:127.0.0.1:3300"); server_msgr->bind(bind_addr); server_msgr->add_dispatcher_head(&srv_dispatcher); server_msgr->start(); bind_addr.parse("v2:127.0.0.1:3301"); client_msgr->bind(bind_addr); client_msgr->add_dispatcher_head(&cli_dispatcher); client_msgr->start(); // pause before sending client_ident message srv_interceptor->breakpoint(Interceptor::STEP::SEND_CLIENT_IDENTITY); ConnectionRef s2c = server_msgr->connect_to(client_msgr->get_mytype(), client_msgr->get_myaddrs()); MPing *m1 = new MPing(); ASSERT_EQ(s2c->send_message(m1), 0); srv_interceptor->wait(Interceptor::STEP::SEND_CLIENT_IDENTITY); srv_interceptor->remove_bp(Interceptor::STEP::SEND_CLIENT_IDENTITY); // pause before sending banner cli_interceptor->breakpoint(Interceptor::STEP::BANNER_EXCHANGE_BANNER_CONNECTING); ConnectionRef c2s = client_msgr->connect_to(server_msgr->get_mytype(), server_msgr->get_myaddrs()); MPing *m2 = new MPing(); ASSERT_EQ(c2s->send_message(m2), 0); cli_interceptor->wait(Interceptor::STEP::BANNER_EXCHANGE_BANNER_CONNECTING); cli_interceptor->remove_bp(Interceptor::STEP::BANNER_EXCHANGE_BANNER_CONNECTING); // second connection is in BANNER_CONNECTING, ensure it stays so // and send client_ident srv_interceptor->breakpoint(Interceptor::STEP::BANNER_EXCHANGE); srv_interceptor->proceed(Interceptor::STEP::SEND_CLIENT_IDENTITY, Interceptor::ACTION::CONTINUE); // handle client_ident -- triggers reuse_connection() with exproto 
// in BANNER_CONNECTING cli_interceptor->breakpoint(Interceptor::STEP::READY); cli_interceptor->proceed(Interceptor::STEP::BANNER_EXCHANGE_BANNER_CONNECTING, Interceptor::ACTION::CONTINUE); cli_interceptor->wait(Interceptor::STEP::READY); cli_interceptor->remove_bp(Interceptor::STEP::READY); // first connection is in READY Connection *s2c_accepter = srv_interceptor->wait(Interceptor::STEP::BANNER_EXCHANGE); srv_interceptor->remove_bp(Interceptor::STEP::BANNER_EXCHANGE); srv_interceptor->proceed(Interceptor::STEP::BANNER_EXCHANGE, Interceptor::ACTION::CONTINUE); cli_interceptor->proceed(Interceptor::STEP::READY, Interceptor::ACTION::CONTINUE); { std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } { std::unique_lock l{srv_dispatcher.lock}; srv_dispatcher.cond.wait(l, [&] { return srv_dispatcher.got_new; }); srv_dispatcher.got_new = false; } EXPECT_TRUE(s2c->is_connected()); EXPECT_EQ(1u, static_cast<Session*>(s2c->get_priv().get())->get_count()); EXPECT_TRUE(s2c->peer_is_client()); EXPECT_TRUE(c2s->is_connected()); EXPECT_EQ(1u, static_cast<Session*>(c2s->get_priv().get())->get_count()); EXPECT_TRUE(c2s->peer_is_osd()); // closed in reuse_connection() -- EPIPE when writing banner/hello EXPECT_FALSE(s2c_accepter->is_connected()); // established exactly once, never faulted and reconnected EXPECT_EQ(cli_interceptor->count_step(c2s.get(), Interceptor::STEP::START_CLIENT_BANNER_EXCHANGE), 1u); EXPECT_EQ(cli_interceptor->count_step(c2s.get(), Interceptor::STEP::SEND_RECONNECT), 0u); EXPECT_EQ(cli_interceptor->count_step(c2s.get(), Interceptor::STEP::READY), 1u); client_msgr->shutdown(); client_msgr->wait(); server_msgr->shutdown(); server_msgr->wait(); } /** * Scenario: * - A connects to B * - A sends client_ident to B * - B fails before sending server_ident to A * - A reconnects */ TEST_P(MessengerTest, MissingServerIdenTest) { FakeDispatcher cli_dispatcher(false), 
srv_dispatcher(false); TestInterceptor *cli_interceptor = new TestInterceptor(); TestInterceptor *srv_interceptor = new TestInterceptor(); server_msgr->set_policy(entity_name_t::TYPE_CLIENT, Messenger::Policy::stateful_server(0)); server_msgr->interceptor = srv_interceptor; client_msgr->set_policy(entity_name_t::TYPE_OSD, Messenger::Policy::lossy_client(0)); client_msgr->interceptor = cli_interceptor; entity_addr_t bind_addr; bind_addr.parse("v2:127.0.0.1:3300"); server_msgr->bind(bind_addr); server_msgr->add_dispatcher_head(&srv_dispatcher); server_msgr->start(); bind_addr.parse("v2:127.0.0.1:3301"); client_msgr->bind(bind_addr); client_msgr->add_dispatcher_head(&cli_dispatcher); client_msgr->start(); // pause before sending server_ident message srv_interceptor->breakpoint(Interceptor::STEP::SEND_SERVER_IDENTITY); ConnectionRef c2s = client_msgr->connect_to(server_msgr->get_mytype(), server_msgr->get_myaddrs()); MPing *m1 = new MPing(); ASSERT_EQ(c2s->send_message(m1), 0); Connection *c2s_accepter = srv_interceptor->wait(Interceptor::STEP::SEND_SERVER_IDENTITY); srv_interceptor->remove_bp(Interceptor::STEP::SEND_SERVER_IDENTITY); // We inject a message from this side of the connection to force it to be // in standby when we inject the failure below MPing *m2 = new MPing(); ASSERT_EQ(c2s_accepter->send_message(m2), 0); srv_interceptor->proceed(Interceptor::STEP::SEND_SERVER_IDENTITY, Interceptor::ACTION::FAIL); { std::unique_lock l{srv_dispatcher.lock}; srv_dispatcher.cond.wait(l, [&] { return srv_dispatcher.got_new; }); srv_dispatcher.got_new = false; } { std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } ASSERT_TRUE(c2s->is_connected()); ASSERT_EQ(1u, static_cast<Session*>(c2s->get_priv().get())->get_count()); ASSERT_TRUE(c2s->peer_is_osd()); ASSERT_TRUE(c2s_accepter->is_connected()); ASSERT_EQ(1u, static_cast<Session*>(c2s_accepter->get_priv().get())->get_count()); 
ASSERT_TRUE(c2s_accepter->peer_is_client()); client_msgr->shutdown(); client_msgr->wait(); server_msgr->shutdown(); server_msgr->wait(); delete cli_interceptor; delete srv_interceptor; } /** * Scenario: * - A connects to B * - A sends client_ident to B * - B fails before sending server_ident to A * - A goes to standby * - B reconnects to A */ TEST_P(MessengerTest, MissingServerIdenTest2) { FakeDispatcher cli_dispatcher(false), srv_dispatcher(false); TestInterceptor *cli_interceptor = new TestInterceptor(); TestInterceptor *srv_interceptor = new TestInterceptor(); server_msgr->set_policy(entity_name_t::TYPE_CLIENT, Messenger::Policy::lossless_peer(0)); server_msgr->interceptor = srv_interceptor; client_msgr->set_policy(entity_name_t::TYPE_OSD, Messenger::Policy::lossless_peer(0)); client_msgr->interceptor = cli_interceptor; entity_addr_t bind_addr; bind_addr.parse("v2:127.0.0.1:3300"); server_msgr->bind(bind_addr); server_msgr->add_dispatcher_head(&srv_dispatcher); server_msgr->start(); bind_addr.parse("v2:127.0.0.1:3301"); client_msgr->bind(bind_addr); client_msgr->add_dispatcher_head(&cli_dispatcher); client_msgr->start(); // pause before sending server_ident message srv_interceptor->breakpoint(Interceptor::STEP::SEND_SERVER_IDENTITY); ConnectionRef c2s = client_msgr->connect_to(server_msgr->get_mytype(), server_msgr->get_myaddrs()); Connection *c2s_accepter = srv_interceptor->wait(Interceptor::STEP::SEND_SERVER_IDENTITY); srv_interceptor->remove_bp(Interceptor::STEP::SEND_SERVER_IDENTITY); // We inject a message from this side of the connection to force it to be // in standby when we inject the failure below MPing *m2 = new MPing(); ASSERT_EQ(c2s_accepter->send_message(m2), 0); srv_interceptor->proceed(Interceptor::STEP::SEND_SERVER_IDENTITY, Interceptor::ACTION::FAIL); { std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } ASSERT_TRUE(c2s->is_connected()); ASSERT_EQ(1u, 
static_cast<Session*>(c2s->get_priv().get())->get_count()); ASSERT_TRUE(c2s->peer_is_osd()); ASSERT_TRUE(c2s_accepter->is_connected()); ASSERT_EQ(0u, static_cast<Session*>(c2s_accepter->get_priv().get())->get_count()); ASSERT_TRUE(c2s_accepter->peer_is_client()); client_msgr->shutdown(); client_msgr->wait(); server_msgr->shutdown(); server_msgr->wait(); delete cli_interceptor; delete srv_interceptor; } /** * Scenario: * - A connects to B * - A and B exchange messages * - A fails * - B goes into standby * - A reconnects */ TEST_P(MessengerTest, ReconnectTest) { FakeDispatcher cli_dispatcher(false), srv_dispatcher(true); TestInterceptor *cli_interceptor = new TestInterceptor(); TestInterceptor *srv_interceptor = new TestInterceptor(); server_msgr->set_policy(entity_name_t::TYPE_CLIENT, Messenger::Policy::stateful_server(0)); server_msgr->interceptor = srv_interceptor; client_msgr->set_policy(entity_name_t::TYPE_OSD, Messenger::Policy::lossless_peer(0)); client_msgr->interceptor = cli_interceptor; entity_addr_t bind_addr; bind_addr.parse("v2:127.0.0.1:3300"); server_msgr->bind(bind_addr); server_msgr->add_dispatcher_head(&srv_dispatcher); server_msgr->start(); bind_addr.parse("v2:127.0.0.1:3301"); client_msgr->bind(bind_addr); client_msgr->add_dispatcher_head(&cli_dispatcher); client_msgr->start(); ConnectionRef c2s = client_msgr->connect_to(server_msgr->get_mytype(), server_msgr->get_myaddrs()); MPing *m1 = new MPing(); ASSERT_EQ(c2s->send_message(m1), 0); { std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } ASSERT_TRUE(c2s->is_connected()); ASSERT_EQ(1u, static_cast<Session*>(c2s->get_priv().get())->get_count()); ASSERT_TRUE(c2s->peer_is_osd()); cli_interceptor->breakpoint(Interceptor::STEP::HANDLE_MESSAGE); MPing *m2 = new MPing(); ASSERT_EQ(c2s->send_message(m2), 0); cli_interceptor->wait(Interceptor::STEP::HANDLE_MESSAGE, c2s.get()); 
cli_interceptor->remove_bp(Interceptor::STEP::HANDLE_MESSAGE); // at this point client and server are connected together srv_interceptor->breakpoint(Interceptor::STEP::READY); // failing client cli_interceptor->proceed(Interceptor::STEP::HANDLE_MESSAGE, Interceptor::ACTION::FAIL); MPing *m3 = new MPing(); ASSERT_EQ(c2s->send_message(m3), 0); Connection *c2s_accepter = srv_interceptor->wait(Interceptor::STEP::READY); // the srv end of theconnection is now paused at ready // this means that the reconnect was successful srv_interceptor->remove_bp(Interceptor::STEP::READY); ASSERT_TRUE(c2s_accepter->peer_is_client()); // c2s_accepter sent 0 reconnect messages ASSERT_EQ(srv_interceptor->count_step(c2s_accepter, Interceptor::STEP::SEND_RECONNECT), 0u); // c2s_accepter sent 1 reconnect_ok messages ASSERT_EQ(srv_interceptor->count_step(c2s_accepter, Interceptor::STEP::SEND_RECONNECT_OK), 1u); // c2s sent 1 reconnect messages ASSERT_EQ(cli_interceptor->count_step(c2s.get(), Interceptor::STEP::SEND_RECONNECT), 1u); // c2s sent 0 reconnect_ok messages ASSERT_EQ(cli_interceptor->count_step(c2s.get(), Interceptor::STEP::SEND_RECONNECT_OK), 0u); srv_interceptor->proceed(15, Interceptor::ACTION::CONTINUE); { std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } client_msgr->shutdown(); client_msgr->wait(); server_msgr->shutdown(); server_msgr->wait(); delete cli_interceptor; delete srv_interceptor; } /** * Scenario: * - A connects to B * - A and B exchange messages * - A fails * - A reconnects // B reconnects */ TEST_P(MessengerTest, ReconnectRaceTest) { FakeDispatcher cli_dispatcher(false), srv_dispatcher(true); TestInterceptor *cli_interceptor = new TestInterceptor(); TestInterceptor *srv_interceptor = new TestInterceptor(); server_msgr->set_policy(entity_name_t::TYPE_CLIENT, Messenger::Policy::lossless_peer(0)); server_msgr->interceptor = srv_interceptor; 
client_msgr->set_policy(entity_name_t::TYPE_OSD, Messenger::Policy::lossless_peer(0)); client_msgr->interceptor = cli_interceptor; entity_addr_t bind_addr; bind_addr.parse("v2:127.0.0.1:3300"); server_msgr->bind(bind_addr); server_msgr->add_dispatcher_head(&srv_dispatcher); server_msgr->start(); bind_addr.parse("v2:127.0.0.1:3301"); client_msgr->bind(bind_addr); client_msgr->add_dispatcher_head(&cli_dispatcher); client_msgr->start(); ConnectionRef c2s = client_msgr->connect_to(server_msgr->get_mytype(), server_msgr->get_myaddrs()); MPing *m1 = new MPing(); ASSERT_EQ(c2s->send_message(m1), 0); { std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } ASSERT_TRUE(c2s->is_connected()); ASSERT_EQ(1u, static_cast<Session*>(c2s->get_priv().get())->get_count()); ASSERT_TRUE(c2s->peer_is_osd()); cli_interceptor->breakpoint(Interceptor::STEP::HANDLE_MESSAGE); MPing *m2 = new MPing(); ASSERT_EQ(c2s->send_message(m2), 0); cli_interceptor->wait(Interceptor::STEP::HANDLE_MESSAGE, c2s.get()); cli_interceptor->remove_bp(Interceptor::STEP::HANDLE_MESSAGE); // at this point client and server are connected together // force both client and server to race on reconnect cli_interceptor->breakpoint(Interceptor::STEP::SEND_RECONNECT); srv_interceptor->breakpoint(Interceptor::STEP::SEND_RECONNECT); // failing client // this will cause both client and server to reconnect at the same time cli_interceptor->proceed(Interceptor::STEP::HANDLE_MESSAGE, Interceptor::ACTION::FAIL); MPing *m3 = new MPing(); ASSERT_EQ(c2s->send_message(m3), 0); cli_interceptor->wait(Interceptor::STEP::SEND_RECONNECT, c2s.get()); srv_interceptor->wait(Interceptor::STEP::SEND_RECONNECT); cli_interceptor->remove_bp(Interceptor::STEP::SEND_RECONNECT); srv_interceptor->remove_bp(Interceptor::STEP::SEND_RECONNECT); // pause on "ready" srv_interceptor->breakpoint(Interceptor::STEP::READY); 
cli_interceptor->proceed(Interceptor::STEP::SEND_RECONNECT, Interceptor::ACTION::CONTINUE); srv_interceptor->proceed(Interceptor::STEP::SEND_RECONNECT, Interceptor::ACTION::CONTINUE); Connection *c2s_accepter = srv_interceptor->wait(Interceptor::STEP::READY); // the server has reconnected and is "ready" srv_interceptor->remove_bp(Interceptor::STEP::READY); ASSERT_TRUE(c2s_accepter->peer_is_client()); ASSERT_TRUE(c2s->peer_is_osd()); // the server should win the reconnect race // c2s_accepter sent 1 or 2 reconnect messages ASSERT_LT(srv_interceptor->count_step(c2s_accepter, Interceptor::STEP::SEND_RECONNECT), 3u); ASSERT_GT(srv_interceptor->count_step(c2s_accepter, Interceptor::STEP::SEND_RECONNECT), 0u); // c2s_accepter sent 0 reconnect_ok messages ASSERT_EQ(srv_interceptor->count_step(c2s_accepter, Interceptor::STEP::SEND_RECONNECT_OK), 0u); // c2s sent 1 reconnect messages ASSERT_EQ(cli_interceptor->count_step(c2s.get(), Interceptor::STEP::SEND_RECONNECT), 1u); // c2s sent 1 reconnect_ok messages ASSERT_EQ(cli_interceptor->count_step(c2s.get(), Interceptor::STEP::SEND_RECONNECT_OK), 1u); if (srv_interceptor->count_step(c2s_accepter, Interceptor::STEP::SEND_RECONNECT) == 2) { // if the server send the reconnect message two times then // the client must have sent a session retry message to the server ASSERT_EQ(cli_interceptor->count_step(c2s.get(), Interceptor::STEP::SESSION_RETRY), 1u); } else { ASSERT_EQ(cli_interceptor->count_step(c2s.get(), Interceptor::STEP::SESSION_RETRY), 0u); } srv_interceptor->proceed(Interceptor::STEP::READY, Interceptor::ACTION::CONTINUE); { std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } client_msgr->shutdown(); client_msgr->wait(); server_msgr->shutdown(); server_msgr->wait(); delete cli_interceptor; delete srv_interceptor; } TEST_P(MessengerTest, SimpleTest) { FakeDispatcher cli_dispatcher(false), srv_dispatcher(true); entity_addr_t 
bind_addr; bind_addr.parse("v2:127.0.0.1"); server_msgr->bind(bind_addr); server_msgr->add_dispatcher_head(&srv_dispatcher); server_msgr->start(); client_msgr->add_dispatcher_head(&cli_dispatcher); client_msgr->start(); // 1. simple round trip MPing *m = new MPing(); ConnectionRef conn = client_msgr->connect_to(server_msgr->get_mytype(), server_msgr->get_myaddrs()); { ASSERT_EQ(conn->send_message(m), 0); std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } ASSERT_TRUE(conn->is_connected()); ASSERT_EQ(1u, static_cast<Session*>(conn->get_priv().get())->get_count()); ASSERT_TRUE(conn->peer_is_osd()); // 2. test rebind port set<int> avoid_ports; for (int i = 0; i < 10 ; i++) { for (auto a : server_msgr->get_myaddrs().v) { avoid_ports.insert(a.get_port() + i); } } server_msgr->rebind(avoid_ports); for (auto a : server_msgr->get_myaddrs().v) { ASSERT_TRUE(avoid_ports.count(a.get_port()) == 0); } conn = client_msgr->connect_to(server_msgr->get_mytype(), server_msgr->get_myaddrs()); { m = new MPing(); ASSERT_EQ(conn->send_message(m), 0); std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count()); // 3. test markdown connection conn->mark_down(); ASSERT_FALSE(conn->is_connected()); // 4. test failed connection server_msgr->shutdown(); server_msgr->wait(); m = new MPing(); conn->send_message(m); CHECK_AND_WAIT_TRUE(!conn->is_connected()); ASSERT_FALSE(conn->is_connected()); // 5. 
loopback connection srv_dispatcher.loopback = true; conn = client_msgr->get_loopback_connection(); { m = new MPing(); ASSERT_EQ(conn->send_message(m), 0); std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } srv_dispatcher.loopback = false; ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count()); client_msgr->shutdown(); client_msgr->wait(); server_msgr->shutdown(); server_msgr->wait(); } TEST_P(MessengerTest, SimpleMsgr2Test) { FakeDispatcher cli_dispatcher(false), srv_dispatcher(true); entity_addr_t legacy_addr; legacy_addr.parse("v1:127.0.0.1"); entity_addr_t msgr2_addr; msgr2_addr.parse("v2:127.0.0.1"); entity_addrvec_t bind_addrs; bind_addrs.v.push_back(legacy_addr); bind_addrs.v.push_back(msgr2_addr); server_msgr->bindv(bind_addrs); server_msgr->add_dispatcher_head(&srv_dispatcher); server_msgr->start(); client_msgr->add_dispatcher_head(&cli_dispatcher); client_msgr->start(); // 1. simple round trip MPing *m = new MPing(); ConnectionRef conn = client_msgr->connect_to( server_msgr->get_mytype(), server_msgr->get_myaddrs()); { ASSERT_EQ(conn->send_message(m), 0); std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } ASSERT_TRUE(conn->is_connected()); ASSERT_EQ(1u, static_cast<Session*>(conn->get_priv().get())->get_count()); ASSERT_TRUE(conn->peer_is_osd()); // 2. 
test rebind port set<int> avoid_ports; for (int i = 0; i < 10 ; i++) { for (auto a : server_msgr->get_myaddrs().v) { avoid_ports.insert(a.get_port() + i); } } server_msgr->rebind(avoid_ports); for (auto a : server_msgr->get_myaddrs().v) { ASSERT_TRUE(avoid_ports.count(a.get_port()) == 0); } conn = client_msgr->connect_to( server_msgr->get_mytype(), server_msgr->get_myaddrs()); { m = new MPing(); ASSERT_EQ(conn->send_message(m), 0); std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count()); // 3. test markdown connection conn->mark_down(); ASSERT_FALSE(conn->is_connected()); // 4. test failed connection server_msgr->shutdown(); server_msgr->wait(); m = new MPing(); conn->send_message(m); CHECK_AND_WAIT_TRUE(!conn->is_connected()); ASSERT_FALSE(conn->is_connected()); // 5. loopback connection srv_dispatcher.loopback = true; conn = client_msgr->get_loopback_connection(); { m = new MPing(); ASSERT_EQ(conn->send_message(m), 0); std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } srv_dispatcher.loopback = false; ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count()); client_msgr->shutdown(); client_msgr->wait(); server_msgr->shutdown(); server_msgr->wait(); } TEST_P(MessengerTest, FeatureTest) { FakeDispatcher cli_dispatcher(false), srv_dispatcher(true); entity_addr_t bind_addr; bind_addr.parse("v2:127.0.0.1"); uint64_t all_feature_supported, feature_required, feature_supported = 0; for (int i = 0; i < 10; i++) feature_supported |= 1ULL << i; feature_supported |= CEPH_FEATUREMASK_MSG_ADDR2; feature_supported |= CEPH_FEATUREMASK_SERVER_NAUTILUS; feature_required = feature_supported | 1ULL << 13; all_feature_supported = feature_required | 1ULL << 14; Messenger::Policy p = 
server_msgr->get_policy(entity_name_t::TYPE_CLIENT); p.features_required = feature_required; server_msgr->set_policy(entity_name_t::TYPE_CLIENT, p); server_msgr->bind(bind_addr); server_msgr->add_dispatcher_head(&srv_dispatcher); server_msgr->start(); // 1. Suppose if only support less than required p = client_msgr->get_policy(entity_name_t::TYPE_OSD); p.features_supported = feature_supported; client_msgr->set_policy(entity_name_t::TYPE_OSD, p); client_msgr->add_dispatcher_head(&cli_dispatcher); client_msgr->start(); MPing *m = new MPing(); ConnectionRef conn = client_msgr->connect_to(server_msgr->get_mytype(), server_msgr->get_myaddrs()); conn->send_message(m); CHECK_AND_WAIT_TRUE(!conn->is_connected()); // should failed build a connection ASSERT_FALSE(conn->is_connected()); client_msgr->shutdown(); client_msgr->wait(); // 2. supported met required p = client_msgr->get_policy(entity_name_t::TYPE_OSD); p.features_supported = all_feature_supported; client_msgr->set_policy(entity_name_t::TYPE_OSD, p); client_msgr->start(); conn = client_msgr->connect_to(server_msgr->get_mytype(), server_msgr->get_myaddrs()); { m = new MPing(); ASSERT_EQ(conn->send_message(m), 0); std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count()); server_msgr->shutdown(); client_msgr->shutdown(); server_msgr->wait(); client_msgr->wait(); } TEST_P(MessengerTest, TimeoutTest) { g_ceph_context->_conf.set_val("ms_connection_idle_timeout", "1"); FakeDispatcher cli_dispatcher(false), srv_dispatcher(true); entity_addr_t bind_addr; bind_addr.parse("v2:127.0.0.1"); server_msgr->bind(bind_addr); server_msgr->add_dispatcher_head(&srv_dispatcher); server_msgr->start(); client_msgr->add_dispatcher_head(&cli_dispatcher); client_msgr->start(); // 1. 
build the connection MPing *m = new MPing(); ConnectionRef conn = client_msgr->connect_to(server_msgr->get_mytype(), server_msgr->get_myaddrs()); { ASSERT_EQ(conn->send_message(m), 0); std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } ASSERT_TRUE(conn->is_connected()); ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count()); ASSERT_TRUE(conn->peer_is_osd()); // 2. wait for idle usleep(2500*1000); ASSERT_FALSE(conn->is_connected()); server_msgr->shutdown(); server_msgr->wait(); client_msgr->shutdown(); client_msgr->wait(); g_ceph_context->_conf.set_val("ms_connection_idle_timeout", "900"); } TEST_P(MessengerTest, StatefulTest) { Message *m; FakeDispatcher cli_dispatcher(false), srv_dispatcher(true); entity_addr_t bind_addr; bind_addr.parse("v2:127.0.0.1"); Messenger::Policy p = Messenger::Policy::stateful_server(0); server_msgr->set_policy(entity_name_t::TYPE_CLIENT, p); p = Messenger::Policy::lossless_client(0); client_msgr->set_policy(entity_name_t::TYPE_OSD, p); server_msgr->bind(bind_addr); server_msgr->add_dispatcher_head(&srv_dispatcher); server_msgr->start(); client_msgr->add_dispatcher_head(&cli_dispatcher); client_msgr->start(); // 1. 
test for server standby ConnectionRef conn = client_msgr->connect_to(server_msgr->get_mytype(), server_msgr->get_myaddrs()); { m = new MPing(); ASSERT_EQ(conn->send_message(m), 0); std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count()); conn->mark_down(); ASSERT_FALSE(conn->is_connected()); ConnectionRef server_conn = server_msgr->connect_to( client_msgr->get_mytype(), srv_dispatcher.last_accept); // don't lose state ASSERT_EQ(1U, static_cast<Session*>(server_conn->get_priv().get())->get_count()); srv_dispatcher.got_new = false; conn = client_msgr->connect_to(server_msgr->get_mytype(), server_msgr->get_myaddrs()); { m = new MPing(); ASSERT_EQ(conn->send_message(m), 0); std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count()); server_conn = server_msgr->connect_to(client_msgr->get_mytype(), srv_dispatcher.last_accept); { std::unique_lock l{srv_dispatcher.lock}; srv_dispatcher.cond.wait(l, [&] { return srv_dispatcher.got_remote_reset; }); } // 2. 
test for client reconnect ASSERT_FALSE(cli_dispatcher.got_remote_reset); cli_dispatcher.got_connect = false; cli_dispatcher.got_new = false; cli_dispatcher.got_remote_reset = false; server_conn->mark_down(); ASSERT_FALSE(server_conn->is_connected()); // ensure client detect server socket closed { std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_remote_reset; }); cli_dispatcher.got_remote_reset = false; } { std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_connect; }); cli_dispatcher.got_connect = false; } CHECK_AND_WAIT_TRUE(conn->is_connected()); ASSERT_TRUE(conn->is_connected()); { m = new MPing(); ASSERT_EQ(conn->send_message(m), 0); ASSERT_TRUE(conn->is_connected()); std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } // resetcheck happen ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count()); server_conn = server_msgr->connect_to(client_msgr->get_mytype(), srv_dispatcher.last_accept); ASSERT_EQ(1U, static_cast<Session*>(server_conn->get_priv().get())->get_count()); cli_dispatcher.got_remote_reset = false; server_msgr->shutdown(); client_msgr->shutdown(); server_msgr->wait(); client_msgr->wait(); } TEST_P(MessengerTest, StatelessTest) { Message *m; FakeDispatcher cli_dispatcher(false), srv_dispatcher(true); entity_addr_t bind_addr; bind_addr.parse("v2:127.0.0.1"); Messenger::Policy p = Messenger::Policy::stateless_server(0); server_msgr->set_policy(entity_name_t::TYPE_CLIENT, p); p = Messenger::Policy::lossy_client(0); client_msgr->set_policy(entity_name_t::TYPE_OSD, p); server_msgr->bind(bind_addr); server_msgr->add_dispatcher_head(&srv_dispatcher); server_msgr->start(); client_msgr->add_dispatcher_head(&cli_dispatcher); client_msgr->start(); // 1. 
test for server lose state ConnectionRef conn = client_msgr->connect_to(server_msgr->get_mytype(), server_msgr->get_myaddrs()); { m = new MPing(); ASSERT_EQ(conn->send_message(m), 0); std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count()); conn->mark_down(); ASSERT_FALSE(conn->is_connected()); srv_dispatcher.got_new = false; ConnectionRef server_conn; srv_dispatcher.last_accept_con_ptr = &server_conn; conn = client_msgr->connect_to(server_msgr->get_mytype(), server_msgr->get_myaddrs()); { m = new MPing(); ASSERT_EQ(conn->send_message(m), 0); std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count()); ASSERT_TRUE(server_conn); // server lose state { std::unique_lock l{srv_dispatcher.lock}; srv_dispatcher.cond.wait(l, [&] { return srv_dispatcher.got_new; }); } ASSERT_EQ(1U, static_cast<Session*>(server_conn->get_priv().get())->get_count()); // 2. 
test for client lossy server_conn->mark_down(); ASSERT_FALSE(server_conn->is_connected()); conn->send_keepalive(); CHECK_AND_WAIT_TRUE(!conn->is_connected()); ASSERT_FALSE(conn->is_connected()); conn = client_msgr->connect_to(server_msgr->get_mytype(), server_msgr->get_myaddrs()); { m = new MPing(); ASSERT_EQ(conn->send_message(m), 0); std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count()); server_msgr->shutdown(); client_msgr->shutdown(); server_msgr->wait(); client_msgr->wait(); } TEST_P(MessengerTest, AnonTest) { Message *m; FakeDispatcher cli_dispatcher(false), srv_dispatcher(true); entity_addr_t bind_addr; bind_addr.parse("v2:127.0.0.1"); Messenger::Policy p = Messenger::Policy::stateless_server(0); server_msgr->set_policy(entity_name_t::TYPE_CLIENT, p); p = Messenger::Policy::lossy_client(0); client_msgr->set_policy(entity_name_t::TYPE_OSD, p); server_msgr->bind(bind_addr); server_msgr->add_dispatcher_head(&srv_dispatcher); server_msgr->start(); client_msgr->add_dispatcher_head(&cli_dispatcher); client_msgr->start(); ConnectionRef server_con_a, server_con_b; // a srv_dispatcher.last_accept_con_ptr = &server_con_a; ConnectionRef con_a = client_msgr->connect_to(server_msgr->get_mytype(), server_msgr->get_myaddrs(), true); { m = new MPing(); ASSERT_EQ(con_a->send_message(m), 0); std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } ASSERT_EQ(1U, static_cast<Session*>(con_a->get_priv().get())->get_count()); // b srv_dispatcher.last_accept_con_ptr = &server_con_b; ConnectionRef con_b = client_msgr->connect_to(server_msgr->get_mytype(), server_msgr->get_myaddrs(), true); { m = new MPing(); ASSERT_EQ(con_b->send_message(m), 0); std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return 
cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } ASSERT_EQ(1U, static_cast<Session*>(con_b->get_priv().get())->get_count()); // these should be distinct ASSERT_NE(con_a, con_b); ASSERT_NE(server_con_a, server_con_b); // and both connected { m = new MPing(); ASSERT_EQ(con_a->send_message(m), 0); std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } { m = new MPing(); ASSERT_EQ(con_b->send_message(m), 0); std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } // clean up con_a->mark_down(); ASSERT_FALSE(con_a->is_connected()); con_b->mark_down(); ASSERT_FALSE(con_b->is_connected()); server_msgr->shutdown(); client_msgr->shutdown(); server_msgr->wait(); client_msgr->wait(); } TEST_P(MessengerTest, ClientStandbyTest) { Message *m; FakeDispatcher cli_dispatcher(false), srv_dispatcher(true); entity_addr_t bind_addr; bind_addr.parse("v2:127.0.0.1"); Messenger::Policy p = Messenger::Policy::stateful_server(0); server_msgr->set_policy(entity_name_t::TYPE_CLIENT, p); p = Messenger::Policy::lossless_peer(0); client_msgr->set_policy(entity_name_t::TYPE_OSD, p); server_msgr->bind(bind_addr); server_msgr->add_dispatcher_head(&srv_dispatcher); server_msgr->start(); client_msgr->add_dispatcher_head(&cli_dispatcher); client_msgr->start(); // 1. 
test for client standby, resetcheck ConnectionRef conn = client_msgr->connect_to(server_msgr->get_mytype(), server_msgr->get_myaddrs()); { m = new MPing(); ASSERT_EQ(conn->send_message(m), 0); std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count()); ConnectionRef server_conn = server_msgr->connect_to( client_msgr->get_mytype(), srv_dispatcher.last_accept); ASSERT_FALSE(cli_dispatcher.got_remote_reset); cli_dispatcher.got_connect = false; server_conn->mark_down(); ASSERT_FALSE(server_conn->is_connected()); // client should be standby usleep(300*1000); // client should be standby, so we use original connection { // Try send message to verify got remote reset callback m = new MPing(); ASSERT_EQ(conn->send_message(m), 0); { std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_remote_reset; }); cli_dispatcher.got_remote_reset = false; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_connect; }); cli_dispatcher.got_connect = false; } CHECK_AND_WAIT_TRUE(conn->is_connected()); ASSERT_TRUE(conn->is_connected()); m = new MPing(); ASSERT_EQ(conn->send_message(m), 0); std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count()); server_conn = server_msgr->connect_to(client_msgr->get_mytype(), srv_dispatcher.last_accept); ASSERT_EQ(1U, static_cast<Session*>(server_conn->get_priv().get())->get_count()); server_msgr->shutdown(); client_msgr->shutdown(); server_msgr->wait(); client_msgr->wait(); } TEST_P(MessengerTest, AuthTest) { g_ceph_context->_conf.set_val("auth_cluster_required", "cephx"); g_ceph_context->_conf.set_val("auth_service_required", "cephx"); 
g_ceph_context->_conf.set_val("auth_client_required", "cephx"); FakeDispatcher cli_dispatcher(false), srv_dispatcher(true); entity_addr_t bind_addr; bind_addr.parse("v2:127.0.0.1"); server_msgr->bind(bind_addr); server_msgr->add_dispatcher_head(&srv_dispatcher); server_msgr->start(); client_msgr->add_dispatcher_head(&cli_dispatcher); client_msgr->start(); // 1. simple auth round trip MPing *m = new MPing(); ConnectionRef conn = client_msgr->connect_to(server_msgr->get_mytype(), server_msgr->get_myaddrs()); { ASSERT_EQ(conn->send_message(m), 0); std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } ASSERT_TRUE(conn->is_connected()); ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count()); // 2. mix auth g_ceph_context->_conf.set_val("auth_cluster_required", "none"); g_ceph_context->_conf.set_val("auth_service_required", "none"); g_ceph_context->_conf.set_val("auth_client_required", "none"); conn->mark_down(); ASSERT_FALSE(conn->is_connected()); conn = client_msgr->connect_to(server_msgr->get_mytype(), server_msgr->get_myaddrs()); { MPing *m = new MPing(); ASSERT_EQ(conn->send_message(m), 0); std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); cli_dispatcher.got_new = false; } ASSERT_TRUE(conn->is_connected()); ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count()); server_msgr->shutdown(); client_msgr->shutdown(); server_msgr->wait(); client_msgr->wait(); } TEST_P(MessengerTest, MessageTest) { FakeDispatcher cli_dispatcher(false), srv_dispatcher(true); entity_addr_t bind_addr; bind_addr.parse("v2:127.0.0.1"); Messenger::Policy p = Messenger::Policy::stateful_server(0); server_msgr->set_policy(entity_name_t::TYPE_CLIENT, p); p = Messenger::Policy::lossless_peer(0); client_msgr->set_policy(entity_name_t::TYPE_OSD, p); server_msgr->bind(bind_addr); 
server_msgr->add_dispatcher_head(&srv_dispatcher); server_msgr->start(); client_msgr->add_dispatcher_head(&cli_dispatcher); client_msgr->start(); // 1. A very large "front"(as well as "payload") // Because a external message need to invade Messenger::decode_message, // here we only use existing message class(MCommand) ConnectionRef conn = client_msgr->connect_to(server_msgr->get_mytype(), server_msgr->get_myaddrs()); { uuid_d uuid; uuid.generate_random(); vector<string> cmds; string s("abcdefghijklmnopqrstuvwxyz"); for (int i = 0; i < 1024*30; i++) cmds.push_back(s); MCommand *m = new MCommand(uuid); m->cmd = cmds; conn->send_message(m); std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait_for(l, 500s, [&] { return cli_dispatcher.got_new; }); ASSERT_TRUE(cli_dispatcher.got_new); cli_dispatcher.got_new = false; } // 2. A very large "data" { bufferlist bl; string s("abcdefghijklmnopqrstuvwxyz"); for (int i = 0; i < 1024*30; i++) bl.append(s); MPing *m = new MPing(); m->set_data(bl); conn->send_message(m); utime_t t; t += 1000*1000*500; std::unique_lock l{cli_dispatcher.lock}; cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; }); ASSERT_TRUE(cli_dispatcher.got_new); cli_dispatcher.got_new = false; } server_msgr->shutdown(); client_msgr->shutdown(); server_msgr->wait(); client_msgr->wait(); } class SyntheticWorkload; struct Payload { enum Who : uint8_t { PING = 0, PONG = 1, }; uint8_t who = 0; uint64_t seq = 0; bufferlist data; Payload(Who who, uint64_t seq, const bufferlist& data) : who(who), seq(seq), data(data) {} Payload() = default; DENC(Payload, v, p) { DENC_START(1, 1, p); denc(v.who, p); denc(v.seq, p); denc(v.data, p); DENC_FINISH(p); } }; WRITE_CLASS_DENC(Payload) ostream& operator<<(ostream& out, const Payload &pl) { return out << "reply=" << pl.who << " i = " << pl.seq; } class SyntheticDispatcher : public Dispatcher { public: ceph::mutex lock = ceph::make_mutex("SyntheticDispatcher::lock"); ceph::condition_variable cond; bool 
is_server; bool got_new; bool got_remote_reset; bool got_connect; map<ConnectionRef, list<uint64_t> > conn_sent; map<uint64_t, bufferlist> sent; atomic<uint64_t> index; SyntheticWorkload *workload; SyntheticDispatcher(bool s, SyntheticWorkload *wl): Dispatcher(g_ceph_context), is_server(s), got_new(false), got_remote_reset(false), got_connect(false), index(0), workload(wl) { } bool ms_can_fast_dispatch_any() const override { return true; } bool ms_can_fast_dispatch(const Message *m) const override { switch (m->get_type()) { case CEPH_MSG_PING: case MSG_COMMAND: return true; default: return false; } } void ms_handle_fast_connect(Connection *con) override { std::lock_guard l{lock}; list<uint64_t> c = conn_sent[con]; for (list<uint64_t>::iterator it = c.begin(); it != c.end(); ++it) sent.erase(*it); conn_sent.erase(con); got_connect = true; cond.notify_all(); } void ms_handle_fast_accept(Connection *con) override { std::lock_guard l{lock}; list<uint64_t> c = conn_sent[con]; for (list<uint64_t>::iterator it = c.begin(); it != c.end(); ++it) sent.erase(*it); conn_sent.erase(con); cond.notify_all(); } bool ms_dispatch(Message *m) override { ceph_abort(); } bool ms_handle_reset(Connection *con) override; void ms_handle_remote_reset(Connection *con) override { std::lock_guard l{lock}; list<uint64_t> c = conn_sent[con]; for (list<uint64_t>::iterator it = c.begin(); it != c.end(); ++it) sent.erase(*it); conn_sent.erase(con); got_remote_reset = true; } bool ms_handle_refused(Connection *con) override { return false; } void ms_fast_dispatch(Message *m) override { // MSG_COMMAND is used to disorganize regular message flow if (m->get_type() == MSG_COMMAND) { m->put(); return ; } Payload pl; auto p = m->get_data().cbegin(); decode(pl, p); if (pl.who == Payload::PING) { lderr(g_ceph_context) << __func__ << " conn=" << m->get_connection() << pl << dendl; reply_message(m, pl); m->put(); std::lock_guard l{lock}; got_new = true; cond.notify_all(); } else { std::lock_guard l{lock}; if 
(sent.count(pl.seq)) { lderr(g_ceph_context) << __func__ << " conn=" << m->get_connection() << pl << dendl; ASSERT_EQ(conn_sent[m->get_connection()].front(), pl.seq); ASSERT_TRUE(pl.data.contents_equal(sent[pl.seq])); conn_sent[m->get_connection()].pop_front(); sent.erase(pl.seq); } m->put(); got_new = true; cond.notify_all(); } } int ms_handle_authentication(Connection *con) override { return 1; } void reply_message(const Message *m, Payload& pl) { pl.who = Payload::PONG; bufferlist bl; encode(pl, bl); MPing *rm = new MPing(); rm->set_data(bl); m->get_connection()->send_message(rm); lderr(g_ceph_context) << __func__ << " conn=" << m->get_connection() << " reply m=" << m << " i=" << pl.seq << dendl; } void send_message_wrap(ConnectionRef con, const bufferlist& data) { Message *m = new MPing(); Payload pl{Payload::PING, index++, data}; bufferlist bl; encode(pl, bl); m->set_data(bl); if (!con->get_messenger()->get_default_policy().lossy) { std::lock_guard l{lock}; sent[pl.seq] = pl.data; conn_sent[con].push_back(pl.seq); } lderr(g_ceph_context) << __func__ << " conn=" << con.get() << " send m=" << m << " i=" << pl.seq << dendl; ASSERT_EQ(0, con->send_message(m)); } uint64_t get_pending() { std::lock_guard l{lock}; return sent.size(); } void clear_pending(ConnectionRef con) { std::lock_guard l{lock}; for (list<uint64_t>::iterator it = conn_sent[con].begin(); it != conn_sent[con].end(); ++it) sent.erase(*it); conn_sent.erase(con); } void print() { for (auto && p : conn_sent) { if (!p.second.empty()) { lderr(g_ceph_context) << __func__ << " " << p.first << " wait " << p.second.size() << dendl; } } } }; class SyntheticWorkload { ceph::mutex lock = ceph::make_mutex("SyntheticWorkload::lock"); ceph::condition_variable cond; set<Messenger*> available_servers; set<Messenger*> available_clients; Messenger::Policy client_policy; map<ConnectionRef, pair<Messenger*, Messenger*> > available_connections; SyntheticDispatcher dispatcher; gen_type rng; vector<bufferlist> rand_data; 
DummyAuthClientServer dummy_auth; public: const unsigned max_in_flight = 0; const unsigned max_connections = 0; static const unsigned max_message_len = 1024 * 1024 * 4; SyntheticWorkload(int servers, int clients, string type, int random_num, Messenger::Policy srv_policy, Messenger::Policy cli_policy, int _max_in_flight = 64, int _max_connections = 128) : client_policy(cli_policy), dispatcher(false, this), rng(time(NULL)), dummy_auth(g_ceph_context), max_in_flight(_max_in_flight), max_connections(_max_connections) { dummy_auth.auth_registry.refresh_config(); Messenger *msgr; int base_port = 16800; entity_addr_t bind_addr; char addr[64]; for (int i = 0; i < servers; ++i) { msgr = Messenger::create(g_ceph_context, type, entity_name_t::OSD(0), "server", getpid()+i); snprintf(addr, sizeof(addr), "v2:127.0.0.1:%d", base_port+i); bind_addr.parse(addr); msgr->bind(bind_addr); msgr->add_dispatcher_head(&dispatcher); msgr->set_auth_client(&dummy_auth); msgr->set_auth_server(&dummy_auth); ceph_assert(msgr); msgr->set_default_policy(srv_policy); available_servers.insert(msgr); msgr->start(); } for (int i = 0; i < clients; ++i) { msgr = Messenger::create(g_ceph_context, type, entity_name_t::CLIENT(-1), "client", getpid()+i+servers); if (cli_policy.standby) { snprintf(addr, sizeof(addr), "v2:127.0.0.1:%d", base_port+i+servers); bind_addr.parse(addr); msgr->bind(bind_addr); } msgr->add_dispatcher_head(&dispatcher); msgr->set_auth_client(&dummy_auth); msgr->set_auth_server(&dummy_auth); ceph_assert(msgr); msgr->set_default_policy(cli_policy); available_clients.insert(msgr); msgr->start(); } for (int i = 0; i < random_num; i++) { bufferlist bl; boost::uniform_int<> u(32, max_message_len); uint64_t value_len = u(rng); bufferptr bp(value_len); bp.zero(); for (uint64_t j = 0; j < value_len-sizeof(i); ) { memcpy(bp.c_str()+j, &i, sizeof(i)); j += 4096; } bl.append(bp); rand_data.push_back(bl); } } ConnectionRef _get_random_connection() { while (dispatcher.get_pending() > max_in_flight) 
{ lock.unlock(); usleep(500); lock.lock(); } ceph_assert(ceph_mutex_is_locked(lock)); boost::uniform_int<> choose(0, available_connections.size() - 1); int index = choose(rng); map<ConnectionRef, pair<Messenger*, Messenger*> >::iterator i = available_connections.begin(); for (; index > 0; --index, ++i) ; return i->first; } bool can_create_connection() { return available_connections.size() < max_connections; } void generate_connection() { std::lock_guard l{lock}; if (!can_create_connection()) return ; Messenger *server, *client; { boost::uniform_int<> choose(0, available_servers.size() - 1); int index = choose(rng); set<Messenger*>::iterator i = available_servers.begin(); for (; index > 0; --index, ++i) ; server = *i; } { boost::uniform_int<> choose(0, available_clients.size() - 1); int index = choose(rng); set<Messenger*>::iterator i = available_clients.begin(); for (; index > 0; --index, ++i) ; client = *i; } pair<Messenger*, Messenger*> p; { boost::uniform_int<> choose(0, available_servers.size() - 1); if (server->get_default_policy().server) { p = make_pair(client, server); ConnectionRef conn = client->connect_to(server->get_mytype(), server->get_myaddrs()); available_connections[conn] = p; } else { ConnectionRef conn = client->connect_to(server->get_mytype(), server->get_myaddrs()); p = make_pair(client, server); available_connections[conn] = p; } } } void send_message() { std::lock_guard l{lock}; ConnectionRef conn = _get_random_connection(); boost::uniform_int<> true_false(0, 99); int val = true_false(rng); if (val >= 95) { uuid_d uuid; uuid.generate_random(); MCommand *m = new MCommand(uuid); vector<string> cmds; cmds.push_back("command"); m->cmd = cmds; m->set_priority(200); conn->send_message(m); } else { boost::uniform_int<> u(0, rand_data.size()-1); dispatcher.send_message_wrap(conn, rand_data[u(rng)]); } } void send_large_message(bool inject_network_congestion=false) { std::lock_guard l{lock}; ConnectionRef conn = _get_random_connection(); uuid_d uuid; 
uuid.generate_random(); MCommand *m = new MCommand(uuid); vector<string> cmds; cmds.push_back("command"); // set the random data to make the large message bufferlist bl; string s("abcdefghijklmnopqrstuvwxyz"); for (int i = 0; i < 1024*256; i++) bl.append(s); // bl is around 6M m->set_data(bl); m->cmd = cmds; m->set_priority(200); // setup after connection is ready if (inject_network_congestion && conn->is_connected()) { g_ceph_context->_conf.set_val("ms_inject_network_congestion", "100"); } else { g_ceph_context->_conf.set_val("ms_inject_network_congestion", "0"); } conn->send_message(m); } void drop_connection() { std::lock_guard l{lock}; if (available_connections.size() < 10) return; ConnectionRef conn = _get_random_connection(); dispatcher.clear_pending(conn); conn->mark_down(); if (!client_policy.server && !client_policy.lossy && client_policy.standby) { // it's a lossless policy, so we need to mark down each side pair<Messenger*, Messenger*> &p = available_connections[conn]; if (!p.first->get_default_policy().server && !p.second->get_default_policy().server) { ASSERT_EQ(conn->get_messenger(), p.first); ConnectionRef peer = p.second->connect_to(p.first->get_mytype(), p.first->get_myaddrs()); peer->mark_down(); dispatcher.clear_pending(peer); available_connections.erase(peer); } } ASSERT_EQ(available_connections.erase(conn), 1U); } void print_internal_state(bool detail=false) { std::lock_guard l{lock}; lderr(g_ceph_context) << "available_connections: " << available_connections.size() << " inflight messages: " << dispatcher.get_pending() << dendl; if (detail && !available_connections.empty()) { dispatcher.print(); } } void wait_for_done() { int64_t tick_us = 1000 * 100; // 100ms int64_t timeout_us = 5 * 60 * 1000 * 1000; // 5 mins int i = 0; while (dispatcher.get_pending()) { usleep(tick_us); timeout_us -= tick_us; if (i++ % 50 == 0) print_internal_state(true); if (timeout_us < 0) ceph_abort_msg(" loop time exceed 5 mins, it looks we stuck into some problems!"); 
} for (set<Messenger*>::iterator it = available_servers.begin(); it != available_servers.end(); ++it) { (*it)->shutdown(); (*it)->wait(); ASSERT_EQ((*it)->get_dispatch_queue_len(), 0); delete (*it); } available_servers.clear(); for (set<Messenger*>::iterator it = available_clients.begin(); it != available_clients.end(); ++it) { (*it)->shutdown(); (*it)->wait(); ASSERT_EQ((*it)->get_dispatch_queue_len(), 0); delete (*it); } available_clients.clear(); } void handle_reset(Connection *con) { std::lock_guard l{lock}; available_connections.erase(con); dispatcher.clear_pending(con); } }; bool SyntheticDispatcher::ms_handle_reset(Connection *con) { workload->handle_reset(con); return true; } TEST_P(MessengerTest, SyntheticStressTest) { SyntheticWorkload test_msg(8, 32, GetParam(), 100, Messenger::Policy::stateful_server(0), Messenger::Policy::lossless_client(0)); for (int i = 0; i < 100; ++i) { if (!(i % 10)) lderr(g_ceph_context) << "seeding connection " << i << dendl; test_msg.generate_connection(); } gen_type rng(time(NULL)); for (int i = 0; i < 5000; ++i) { if (!(i % 10)) { lderr(g_ceph_context) << "Op " << i << ": " << dendl; test_msg.print_internal_state(); } boost::uniform_int<> true_false(0, 99); int val = true_false(rng); if (val > 90) { test_msg.generate_connection(); } else if (val > 80) { test_msg.drop_connection(); } else if (val > 10) { test_msg.send_message(); } else { usleep(rand() % 1000 + 500); } } test_msg.wait_for_done(); } TEST_P(MessengerTest, SyntheticStressTest1) { SyntheticWorkload test_msg(16, 32, GetParam(), 100, Messenger::Policy::lossless_peer_reuse(0), Messenger::Policy::lossless_peer_reuse(0)); for (int i = 0; i < 10; ++i) { if (!(i % 10)) lderr(g_ceph_context) << "seeding connection " << i << dendl; test_msg.generate_connection(); } gen_type rng(time(NULL)); for (int i = 0; i < 10000; ++i) { if (!(i % 10)) { lderr(g_ceph_context) << "Op " << i << ": " << dendl; test_msg.print_internal_state(); } boost::uniform_int<> true_false(0, 99); int 
val = true_false(rng); if (val > 80) { test_msg.generate_connection(); } else if (val > 60) { test_msg.drop_connection(); } else if (val > 10) { test_msg.send_message(); } else { usleep(rand() % 1000 + 500); } } test_msg.wait_for_done(); } TEST_P(MessengerTest, SyntheticInjectTest) { uint64_t dispatch_throttle_bytes = g_ceph_context->_conf->ms_dispatch_throttle_bytes; g_ceph_context->_conf.set_val("ms_inject_socket_failures", "30"); g_ceph_context->_conf.set_val("ms_inject_internal_delays", "0.1"); g_ceph_context->_conf.set_val("ms_dispatch_throttle_bytes", "16777216"); SyntheticWorkload test_msg(8, 32, GetParam(), 100, Messenger::Policy::stateful_server(0), Messenger::Policy::lossless_client(0)); for (int i = 0; i < 100; ++i) { if (!(i % 10)) lderr(g_ceph_context) << "seeding connection " << i << dendl; test_msg.generate_connection(); } gen_type rng(time(NULL)); for (int i = 0; i < 1000; ++i) { if (!(i % 10)) { lderr(g_ceph_context) << "Op " << i << ": " << dendl; test_msg.print_internal_state(); } boost::uniform_int<> true_false(0, 99); int val = true_false(rng); if (val > 90) { test_msg.generate_connection(); } else if (val > 80) { test_msg.drop_connection(); } else if (val > 10) { test_msg.send_message(); } else { usleep(rand() % 500 + 100); } } test_msg.wait_for_done(); g_ceph_context->_conf.set_val("ms_inject_socket_failures", "0"); g_ceph_context->_conf.set_val("ms_inject_internal_delays", "0"); g_ceph_context->_conf.set_val( "ms_dispatch_throttle_bytes", std::to_string(dispatch_throttle_bytes)); } TEST_P(MessengerTest, SyntheticInjectTest2) { g_ceph_context->_conf.set_val("ms_inject_socket_failures", "30"); g_ceph_context->_conf.set_val("ms_inject_internal_delays", "0.1"); SyntheticWorkload test_msg(8, 16, GetParam(), 100, Messenger::Policy::lossless_peer_reuse(0), Messenger::Policy::lossless_peer_reuse(0)); for (int i = 0; i < 100; ++i) { if (!(i % 10)) lderr(g_ceph_context) << "seeding connection " << i << dendl; test_msg.generate_connection(); } gen_type 
rng(time(NULL)); for (int i = 0; i < 1000; ++i) { if (!(i % 10)) { lderr(g_ceph_context) << "Op " << i << ": " << dendl; test_msg.print_internal_state(); } boost::uniform_int<> true_false(0, 99); int val = true_false(rng); if (val > 90) { test_msg.generate_connection(); } else if (val > 80) { test_msg.drop_connection(); } else if (val > 10) { test_msg.send_message(); } else { usleep(rand() % 500 + 100); } } test_msg.wait_for_done(); g_ceph_context->_conf.set_val("ms_inject_socket_failures", "0"); g_ceph_context->_conf.set_val("ms_inject_internal_delays", "0"); } TEST_P(MessengerTest, SyntheticInjectTest3) { g_ceph_context->_conf.set_val("ms_inject_socket_failures", "600"); g_ceph_context->_conf.set_val("ms_inject_internal_delays", "0.1"); SyntheticWorkload test_msg(8, 16, GetParam(), 100, Messenger::Policy::stateless_server(0), Messenger::Policy::lossy_client(0)); for (int i = 0; i < 100; ++i) { if (!(i % 10)) lderr(g_ceph_context) << "seeding connection " << i << dendl; test_msg.generate_connection(); } gen_type rng(time(NULL)); for (int i = 0; i < 1000; ++i) { if (!(i % 10)) { lderr(g_ceph_context) << "Op " << i << ": " << dendl; test_msg.print_internal_state(); } boost::uniform_int<> true_false(0, 99); int val = true_false(rng); if (val > 90) { test_msg.generate_connection(); } else if (val > 80) { test_msg.drop_connection(); } else if (val > 10) { test_msg.send_message(); } else { usleep(rand() % 500 + 100); } } test_msg.wait_for_done(); g_ceph_context->_conf.set_val("ms_inject_socket_failures", "0"); g_ceph_context->_conf.set_val("ms_inject_internal_delays", "0"); } TEST_P(MessengerTest, SyntheticInjectTest4) { g_ceph_context->_conf.set_val("ms_inject_socket_failures", "30"); g_ceph_context->_conf.set_val("ms_inject_internal_delays", "0.1"); g_ceph_context->_conf.set_val("ms_inject_delay_probability", "1"); g_ceph_context->_conf.set_val("ms_inject_delay_type", "client osd"); g_ceph_context->_conf.set_val("ms_inject_delay_max", "5"); SyntheticWorkload 
test_msg(16, 32, GetParam(), 100, Messenger::Policy::lossless_peer(0), Messenger::Policy::lossless_peer(0)); for (int i = 0; i < 100; ++i) { if (!(i % 10)) lderr(g_ceph_context) << "seeding connection " << i << dendl; test_msg.generate_connection(); } gen_type rng(time(NULL)); for (int i = 0; i < 1000; ++i) { if (!(i % 10)) { lderr(g_ceph_context) << "Op " << i << ": " << dendl; test_msg.print_internal_state(); } boost::uniform_int<> true_false(0, 99); int val = true_false(rng); if (val > 95) { test_msg.generate_connection(); } else if (val > 80) { // test_msg.drop_connection(); } else if (val > 10) { test_msg.send_message(); } else { usleep(rand() % 500 + 100); } } test_msg.wait_for_done(); g_ceph_context->_conf.set_val("ms_inject_socket_failures", "0"); g_ceph_context->_conf.set_val("ms_inject_internal_delays", "0"); g_ceph_context->_conf.set_val("ms_inject_delay_probability", "0"); g_ceph_context->_conf.set_val("ms_inject_delay_type", ""); g_ceph_context->_conf.set_val("ms_inject_delay_max", "0"); } // This is test for network block, means ::send return EAGAIN TEST_P(MessengerTest, SyntheticInjectTest5) { SyntheticWorkload test_msg(1, 8, GetParam(), 100, Messenger::Policy::stateful_server(0), Messenger::Policy::lossless_client(0), 64, 2); bool simulate_network_congestion = true; for (int i = 0; i < 2; ++i) test_msg.generate_connection(); for (int i = 0; i < 5000; ++i) { if (!(i % 10)) { ldout(g_ceph_context, 0) << "Op " << i << ": " << dendl; test_msg.print_internal_state(); } if (i < 1600) { // means that we would stuck 1600 * 6M (9.6G) around with 2 connections test_msg.send_large_message(simulate_network_congestion); } else { simulate_network_congestion = false; test_msg.send_large_message(simulate_network_congestion); } } test_msg.wait_for_done(); } class MarkdownDispatcher : public Dispatcher { ceph::mutex lock = ceph::make_mutex("MarkdownDispatcher::lock"); set<ConnectionRef> conns; bool last_mark; public: std::atomic<uint64_t> count = { 0 }; explicit 
MarkdownDispatcher(bool s): Dispatcher(g_ceph_context), last_mark(false) { } bool ms_can_fast_dispatch_any() const override { return false; } bool ms_can_fast_dispatch(const Message *m) const override { switch (m->get_type()) { case CEPH_MSG_PING: return true; default: return false; } } void ms_handle_fast_connect(Connection *con) override { lderr(g_ceph_context) << __func__ << " " << con << dendl; std::lock_guard l{lock}; conns.insert(con); } void ms_handle_fast_accept(Connection *con) override { std::lock_guard l{lock}; conns.insert(con); } bool ms_dispatch(Message *m) override { lderr(g_ceph_context) << __func__ << " conn: " << m->get_connection() << dendl; std::lock_guard l{lock}; count++; conns.insert(m->get_connection()); if (conns.size() < 2 && !last_mark) { m->put(); return true; } last_mark = true; usleep(rand() % 500); for (set<ConnectionRef>::iterator it = conns.begin(); it != conns.end(); ++it) { if ((*it) != m->get_connection().get()) { (*it)->mark_down(); conns.erase(it); break; } } if (conns.empty()) last_mark = false; m->put(); return true; } bool ms_handle_reset(Connection *con) override { lderr(g_ceph_context) << __func__ << " " << con << dendl; std::lock_guard l{lock}; conns.erase(con); usleep(rand() % 500); return true; } void ms_handle_remote_reset(Connection *con) override { std::lock_guard l{lock}; conns.erase(con); lderr(g_ceph_context) << __func__ << " " << con << dendl; } bool ms_handle_refused(Connection *con) override { return false; } void ms_fast_dispatch(Message *m) override { ceph_abort(); } int ms_handle_authentication(Connection *con) override { return 1; } }; // Markdown with external lock TEST_P(MessengerTest, MarkdownTest) { Messenger *server_msgr2 = Messenger::create(g_ceph_context, string(GetParam()), entity_name_t::OSD(0), "server", getpid()); MarkdownDispatcher cli_dispatcher(false), srv_dispatcher(true); DummyAuthClientServer dummy_auth(g_ceph_context); dummy_auth.auth_registry.refresh_config(); entity_addr_t bind_addr; 
bind_addr.parse("v2:127.0.0.1:16800"); server_msgr->bind(bind_addr); server_msgr->add_dispatcher_head(&srv_dispatcher); server_msgr->set_auth_client(&dummy_auth); server_msgr->set_auth_server(&dummy_auth); server_msgr->start(); bind_addr.parse("v2:127.0.0.1:16801"); server_msgr2->bind(bind_addr); server_msgr2->add_dispatcher_head(&srv_dispatcher); server_msgr2->set_auth_client(&dummy_auth); server_msgr2->set_auth_server(&dummy_auth); server_msgr2->start(); client_msgr->add_dispatcher_head(&cli_dispatcher); client_msgr->set_auth_client(&dummy_auth); client_msgr->set_auth_server(&dummy_auth); client_msgr->start(); int i = 1000; uint64_t last = 0; bool equal = false; uint64_t equal_count = 0; while (i--) { ConnectionRef conn1 = client_msgr->connect_to(server_msgr->get_mytype(), server_msgr->get_myaddrs()); ConnectionRef conn2 = client_msgr->connect_to(server_msgr2->get_mytype(), server_msgr2->get_myaddrs()); MPing *m = new MPing(); ASSERT_EQ(conn1->send_message(m), 0); m = new MPing(); ASSERT_EQ(conn2->send_message(m), 0); CHECK_AND_WAIT_TRUE(srv_dispatcher.count > last + 1); if (srv_dispatcher.count == last) { lderr(g_ceph_context) << __func__ << " last is " << last << dendl; equal = true; equal_count++; } else { equal = false; equal_count = 0; } last = srv_dispatcher.count; if (equal_count) usleep(1000*500); ASSERT_FALSE(equal && equal_count > 3); } server_msgr->shutdown(); client_msgr->shutdown(); server_msgr2->shutdown(); server_msgr->wait(); client_msgr->wait(); server_msgr2->wait(); delete server_msgr2; } INSTANTIATE_TEST_SUITE_P( Messenger, MessengerTest, ::testing::Values( "async+posix" ) ); int main(int argc, char **argv) { auto args = argv_to_vec(argc, argv); auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); g_ceph_context->_conf.set_val("auth_cluster_required", "none"); g_ceph_context->_conf.set_val("auth_service_required", "none"); g_ceph_context->_conf.set_val("auth_client_required", 
"none"); g_ceph_context->_conf.set_val("keyring", "/dev/null"); g_ceph_context->_conf.set_val("enable_experimental_unrecoverable_data_corrupting_features", "ms-type-async"); g_ceph_context->_conf.set_val("ms_die_on_bad_msg", "true"); g_ceph_context->_conf.set_val("ms_die_on_old_message", "true"); g_ceph_context->_conf.set_val("ms_max_backoff", "1"); common_init_finish(g_ceph_context); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } /* * Local Variables: * compile-command: "cd ../.. ; make -j4 ceph_test_msgr && valgrind --tool=memcheck ./ceph_test_msgr" * End: */
79,934
31.962887
129
cc
null
ceph-main/src/test/msgr/test_userspace_event.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2016 XSky <haomai@xsky.com> * * Author: Haomai Wang <haomaiwang@gmail.com> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <map> #include <random> #include <gtest/gtest.h> #include "msg/async/dpdk/UserspaceEvent.h" #include "global/global_context.h" class UserspaceManagerTest : public ::testing::Test { public: UserspaceEventManager *manager; UserspaceManagerTest() {} virtual void SetUp() { manager = new UserspaceEventManager(g_ceph_context); } virtual void TearDown() { delete manager; } }; TEST_F(UserspaceManagerTest, BasicTest) { int events[10]; int masks[10]; int fd = manager->get_eventfd(); ASSERT_EQ(fd, 1); ASSERT_EQ(0, manager->listen(fd, 1)); ASSERT_EQ(0, manager->notify(fd, 1)); ASSERT_EQ(1, manager->poll(events, masks, 10, nullptr)); ASSERT_EQ(fd, events[0]); ASSERT_EQ(1, masks[0]); ASSERT_EQ(0, manager->notify(fd, 2)); ASSERT_EQ(0, manager->poll(events, masks, 10, nullptr)); ASSERT_EQ(0, manager->unlisten(fd, 1)); ASSERT_EQ(0, manager->notify(fd, 1)); ASSERT_EQ(0, manager->poll(events, masks, 10, nullptr)); manager->close(fd); fd = manager->get_eventfd(); ASSERT_EQ(fd, 1); ASSERT_EQ(0, manager->poll(events, masks, 10, nullptr)); } TEST_F(UserspaceManagerTest, FailTest) { int events[10]; int masks[10]; int fd = manager->get_eventfd(); ASSERT_EQ(fd, 1); ASSERT_EQ(-ENOENT, manager->listen(fd+1, 1)); ASSERT_EQ(-ENOENT, manager->notify(fd+1, 1)); ASSERT_EQ(0, manager->poll(events, masks, 10, nullptr)); ASSERT_EQ(-ENOENT, manager->unlisten(fd+1, 1)); manager->close(fd); } TEST_F(UserspaceManagerTest, StressTest) { std::vector<std::pair<int, int> > mappings; int events[10]; int masks[10]; std::random_device rd; 
std::default_random_engine rng(rd()); std::uniform_int_distribution<> dist(0, 100); mappings.resize(1001); mappings[0] = std::make_pair(-1, -1); for (int i = 0; i < 1000; ++i) { int fd = manager->get_eventfd(); ASSERT_TRUE(fd > 0); mappings[fd] = std::make_pair(0, 0); } int r = 0; int fd = manager->get_eventfd(); auto get_activate_count = [](std::vector<std::pair<int, int> > &m) { std::vector<int> fds; int mask = 0; size_t idx = 0; for (auto &&p : m) { mask = p.first & p.second; if (p.first != -1 && mask) { p.second &= (~mask); fds.push_back(idx); std::cerr << " activate " << idx << " mask " << mask << std::endl; } ++idx; } return fds; }; for (int i = 0; i < 10000; ++i) { int value = dist(rng); fd = dist(rng) % mappings.size(); auto &p = mappings[fd]; int mask = dist(rng) % 2 + 1; if (value > 55) { r = manager->notify(fd, mask); if (p.first == -1) { ASSERT_EQ(p.second, -1); ASSERT_EQ(r, -ENOENT); } else { p.second |= mask; ASSERT_EQ(r, 0); } std::cerr << " notify fd " << fd << " mask " << mask << " r " << r << std::endl; } else if (value > 45) { r = manager->listen(fd, mask); std::cerr << " listen fd " << fd << " mask " << mask << " r " << r << std::endl; if (p.first == -1) { ASSERT_EQ(p.second, -1); ASSERT_EQ(r, -ENOENT); } else { p.first |= mask; ASSERT_EQ(r, 0); } } else if (value > 35) { r = manager->unlisten(fd, mask); std::cerr << " unlisten fd " << fd << " mask " << mask << " r " << r << std::endl; if (p.first == -1) { ASSERT_EQ(p.second, -1); ASSERT_EQ(r, -ENOENT); } else { p.first &= ~mask; ASSERT_EQ(r, 0); } } else if (value > 20) { std::set<int> actual, expected; do { r = manager->poll(events, masks, 3, nullptr); std::cerr << " poll " << r; for (int k = 0; k < r; ++k) { std::cerr << events[k] << " "; actual.insert(events[k]); } } while (r == 3); std::cerr << std::endl; auto fds = get_activate_count(mappings); for (auto &&d : fds) expected.insert(d); ASSERT_EQ(expected, actual); } else if (value > 10) { r = manager->get_eventfd(); std::cerr << " open fd " 
<< r << std::endl; ASSERT_TRUE(r > 0); if ((size_t)r >= mappings.size()) mappings.resize(r+1); mappings[r] = std::make_pair(0, 0); } else { manager->close(fd); std::cerr << " close fd " << fd << std::endl; mappings[fd] = std::make_pair(-1, -1); } ASSERT_TRUE(manager->check()); } } /* * Local Variables: * compile-command: "cd ../.. ; make ceph_test_userspace_event && * ./ceph_test_userspace_event.cc * * End: */
5,005
27.605714
88
cc
null
ceph-main/src/test/neorados/common_tests.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2020 Red Hat, Inc. * Author: Adam C. Emerson <aemerson@redhat.com> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. */ #include <cstring> #include <string> #include <string_view> #include <boost/asio/ip/host_name.hpp> #include <fmt/format.h> #include "common_tests.h" #include "include/neorados/RADOS.hpp" namespace ba = boost::asio; namespace R = neorados; std::string get_temp_pool_name(std::string_view prefix) { static auto hostname = ba::ip::host_name(); static auto num = 1ull; return fmt::format("{}{}-{}-{}", prefix, hostname, getpid(), num++); }
898
24.685714
70
cc
null
ceph-main/src/test/neorados/common_tests.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2020 Red Hat, Inc. * Author: Adam C. Emerson <aemerson@redhat.com> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. */ #include <string> #include <string_view> #include "include/neorados/RADOS.hpp" std::string get_temp_pool_name(std::string_view prefix = {}); template<typename CompletionToken> auto create_pool(neorados::RADOS& r, std::string_view pname, CompletionToken&& token) { boost::asio::async_completion<CompletionToken, void(boost::system::error_code, std::int64_t)> init(token); r.create_pool(pname, std::nullopt, [&r, pname = std::string(pname), h = std::move(init.completion_handler)] (boost::system::error_code ec) mutable { r.lookup_pool( pname, [h = std::move(h)] (boost::system::error_code ec, std::int64_t pool) mutable { std::move(h)(ec, pool); }); }); return init.result.get(); }
1,202
27.642857
70
h
null
ceph-main/src/test/neorados/completions.cc
#include <cassert> #include <boost/asio.hpp> #include <boost/system/system_error.hpp> constexpr int max_completions = 10'000'000; int completed = 0; boost::asio::io_context c; void nested_cb() { if (++completed < max_completions) c.post(&nested_cb); } int main(void) { c.post(&nested_cb); c.run(); assert(completed == max_completions); return 0; }
366
16.47619
43
cc
null
ceph-main/src/test/neorados/list_pool.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2019 Red Hat <contact@redhat.com> * Author: Adam C. Emerson <aemerson@redhat.com> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <iostream> #include <initializer_list> #include <optional> #include <thread> #include <tuple> #include <string_view> #include <vector> #include <sys/param.h> #include <unistd.h> #include <boost/system/system_error.hpp> #include <fmt/format.h> #include "include/neorados/RADOS.hpp" #include "include/scope_guard.h" #include "common/async/context_pool.h" #include "common/ceph_time.h" #include "common/ceph_argparse.h" #include "common/async/blocked_completion.h" #include "global/global_init.h" #include "test/neorados/common_tests.h" namespace ba = boost::asio; namespace bs = boost::system; namespace ca = ceph::async; namespace R = neorados; std::string_view hostname() { static char hostname[MAXHOSTNAMELEN] = { 0 }; static size_t len = 0; if (!len) { auto r = gethostname(hostname, sizeof(hostname)); if (r != 0) { throw bs::system_error( errno, bs::system_category()); } len = std::strlen(hostname); } return {hostname, len}; } std::string temp_pool_name(const std::string_view prefix) { using namespace std::chrono; static std::uint64_t num = 1; return fmt::format( "{}-{}-{}-{}-{}", prefix, hostname(), getpid(), duration_cast<milliseconds>(ceph::coarse_real_clock::now() .time_since_epoch()).count(), num++); } bs::error_code noisy_list(R::RADOS& r, int64_t p) { auto b = R::Cursor::begin(); auto e = R::Cursor::end(); std::cout << "begin = " << b.to_str() << std::endl; std::cout << "end = " << e.to_str() << std::endl; try { auto [v, next] = r.enumerate_objects(p, b, e, 1000, {}, ca::use_blocked, R::all_nspaces); std::cout 
<< "Got " << v.size() << " entries." << std::endl; std::cout << "next cursor = " << next.to_str() << std::endl; std::cout << "next == end: " << (next == e) << std::endl; std::cout << "Returned Objects: "; std::cout << "["; auto o = v.cbegin(); while (o != v.cend()) { std::cout << *o; if (++o != v.cend()) std::cout << " "; } std::cout << "]" << std::endl; } catch (const bs::system_error& e) { std::cerr << "RADOS::enumerate_objects: " << e.what() << std::endl; return e.code(); } return {}; } bs::error_code create_several(R::RADOS& r, const R::IOContext& i, std::initializer_list<std::string> l) { for (const auto& o : l) try { R::WriteOp op; std::cout << "Creating " << o << std::endl; ceph::bufferlist bl; bl.append("My bologna has no name."); op.write_full(std::move(bl)); r.execute(o, i, std::move(op), ca::use_blocked); } catch (const bs::system_error& e) { std::cerr << "RADOS::execute: " << e.what() << std::endl; return e.code(); } return {}; } int main(int argc, char** argv) { using namespace std::literals; auto args = argv_to_vec(argc, argv); env_to_vec(args); auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0); common_init_finish(cct.get()); try { ca::io_context_pool p(1); auto r = R::RADOS::make_with_cct(cct.get(), p, ca::use_blocked); auto pool_name = get_temp_pool_name("ceph_test_RADOS_list_pool"sv); r.create_pool(pool_name, std::nullopt, ca::use_blocked); auto pd = make_scope_guard( [&pool_name, &r]() { r.delete_pool(pool_name, ca::use_blocked); }); auto pool = r.lookup_pool(pool_name, ca::use_blocked); R::IOContext i(pool); if (noisy_list(r, pool)) { return 1; } if (create_several(r, i, {"meow", "woof", "squeak"})) { return 1; } if (noisy_list(r, pool)) { return 1; } } catch (const bs::system_error& e) { std::cerr << "Error: " << e.what() << std::endl; return 1; } return 0; }
4,276
24.610778
76
cc
null
ceph-main/src/test/neorados/op_speed.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2019 Red Hat <contact@redhat.com> * Author: Adam C. Emerson <aemerson@redhat.com> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include "include/neorados/RADOS.hpp" constexpr int to_create = 10'000'000; int main() { for (int i = 0; i < to_create; ++i) { neorados::ReadOp op; bufferlist bl; std::uint64_t sz; ceph::real_time tm; boost::container::flat_map<std::string, ceph::buffer::list> xattrs; boost::container::flat_map<std::string, ceph::buffer::list> omap; bool trunc; op.read(0, 0, &bl); op.stat(&sz, &tm); op.get_xattrs(&xattrs); op.get_omap_vals(std::nullopt, std::nullopt, 1000, &omap, &trunc); } }
1,000
27.6
71
cc
null
ceph-main/src/test/neorados/start_stop.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2019 Red Hat <contact@redhat.com> * Author: Adam C. Emerson <aemerson@redhat.com> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <thread> #include <vector> #include "include/neorados/RADOS.hpp" #include "common/async/context_pool.h" #include "common/ceph_argparse.h" #include "global/global_init.h" namespace R = neorados; int main(int argc, char** argv) { using namespace std::literals; auto args = argv_to_vec(argc, argv); env_to_vec(args); auto cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0); common_init_finish(cct.get()); { ceph::async::io_context_pool p(1); auto r = R::RADOS::make_with_cct(cct.get(), p, boost::asio::use_future).get(); std::this_thread::sleep_for(30s); } std::this_thread::sleep_for(30s); { ceph::async::io_context_pool p(1); auto r = R::RADOS::make_with_cct(cct.get(), p, boost::asio::use_future).get(); std::this_thread::sleep_for(30s); } { ceph::async::io_context_pool p(1); auto r = R::RADOS::make_with_cct(cct.get(), p, boost::asio::use_future).get(); std::this_thread::sleep_for(1s); } { ceph::async::io_context_pool p(1); auto r = R::RADOS::make_with_cct(cct.get(), p, boost::asio::use_future).get(); std::this_thread::sleep_for(1s); } { ceph::async::io_context_pool p(1); auto r = R::RADOS::make_with_cct(cct.get(), p, boost::asio::use_future).get(); std::this_thread::sleep_for(1s); } { ceph::async::io_context_pool p(1); auto r = R::RADOS::make_with_cct(cct.get(), p, boost::asio::use_future).get(); std::this_thread::sleep_for(1s); } { ceph::async::io_context_pool p(1); auto r = R::RADOS::make_with_cct(cct.get(), p, boost::asio::use_future).get(); std::this_thread::sleep_for(1s); } { 
ceph::async::io_context_pool p(1); auto r = R::RADOS::make_with_cct(cct.get(), p, boost::asio::use_future).get(); std::this_thread::sleep_for(1s); } { ceph::async::io_context_pool p(1); auto r = R::RADOS::make_with_cct(cct.get(), p, boost::asio::use_future).get(); std::this_thread::sleep_for(1s); } { ceph::async::io_context_pool p(1); auto r = R::RADOS::make_with_cct(cct.get(), p, boost::asio::use_future).get(); std::this_thread::sleep_for(500ms); } { ceph::async::io_context_pool p(1); auto r = R::RADOS::make_with_cct(cct.get(), p, boost::asio::use_future).get(); std::this_thread::sleep_for(500ms); } { ceph::async::io_context_pool p(1); auto r = R::RADOS::make_with_cct(cct.get(), p, boost::asio::use_future).get(); std::this_thread::sleep_for(50ms); } { ceph::async::io_context_pool p(1); auto r = R::RADOS::make_with_cct(cct.get(), p, boost::asio::use_future).get(); std::this_thread::sleep_for(50ms); } { ceph::async::io_context_pool p(1); auto r = R::RADOS::make_with_cct(cct.get(), p, boost::asio::use_future).get(); std::this_thread::sleep_for(50ms); } { ceph::async::io_context_pool p(1); auto r = R::RADOS::make_with_cct(cct.get(), p, boost::asio::use_future).get(); std::this_thread::sleep_for(5ms); } { ceph::async::io_context_pool p(1); auto r = R::RADOS::make_with_cct(cct.get(), p, boost::asio::use_future).get(); std::this_thread::sleep_for(5ms); } { ceph::async::io_context_pool p(1); auto r = R::RADOS::make_with_cct(cct.get(), p, boost::asio::use_future).get(); std::this_thread::sleep_for(5ms); } { ceph::async::io_context_pool p(1); auto r = R::RADOS::make_with_cct(cct.get(), p, boost::asio::use_future).get(); std::this_thread::sleep_for(5ms); } { ceph::async::io_context_pool p(1); auto r = R::RADOS::make_with_cct(cct.get(), p, boost::asio::use_future).get(); std::this_thread::sleep_for(5ms); } { ceph::async::io_context_pool p(1); auto r = R::RADOS::make_with_cct(cct.get(), p, boost::asio::use_future).get(); std::this_thread::sleep_for(5us); } { 
ceph::async::io_context_pool p(1); auto r = R::RADOS::make_with_cct(cct.get(), p, boost::asio::use_future).get(); std::this_thread::sleep_for(5us); } { ceph::async::io_context_pool p(1); auto r = R::RADOS::make_with_cct(cct.get(), p, boost::asio::use_future).get(); std::this_thread::sleep_for(5us); } return 0; }
4,838
26.651429
70
cc
null
ceph-main/src/test/neorados/test_neorados.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "include/rados/librados.hpp" #include "include/neorados/RADOS.hpp" #include "common/async/blocked_completion.h" #include "test/librados/test_cxx.h" #include "gtest/gtest.h" #include <iostream> namespace neorados { class TestNeoRADOS : public ::testing::Test { public: TestNeoRADOS() { } }; TEST_F(TestNeoRADOS, MakeWithLibRADOS) { librados::Rados paleo_rados; auto result = connect_cluster_pp(paleo_rados); ASSERT_EQ("", result); auto rados = RADOS::make_with_librados(paleo_rados); ReadOp op; bufferlist bl; op.read(0, 0, &bl); // provide pool that doesn't exists -- just testing round-trip ASSERT_THROW( rados.execute({"dummy-obj"}, std::numeric_limits<int64_t>::max(), std::move(op), nullptr, ceph::async::use_blocked), boost::system::system_error); } } // namespace neorados int main(int argc, char **argv) { ::testing::InitGoogleTest(&argc, argv); int seed = getpid(); std::cout << "seed " << seed << std::endl; srand(seed); return RUN_ALL_TESTS(); }
1,132
22.604167
70
cc
null
ceph-main/src/test/objectstore/Allocator_aging_fragmentation.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Bitmap allocator fragmentation benchmarks. * Author: Adam Kupczyk, akupczyk@redhat.com */ #include <bit> #include <iostream> #include <boost/scoped_ptr.hpp> #include <gtest/gtest.h> #include <boost/random/triangle_distribution.hpp> #include "common/ceph_mutex.h" #include "common/Cond.h" #include "common/errno.h" #include "global/global_init.h" #include "include/stringify.h" #include "include/Context.h" #include "os/bluestore/Allocator.h" #include <boost/random/uniform_int.hpp> typedef boost::mt11213b gen_type; #include "common/debug.h" #define dout_context cct #define dout_subsys ceph_subsys_ struct Scenario { uint64_t capacity; uint64_t alloc_unit; double high_mark; double low_mark; double leakness; uint32_t repeats; }; std::vector<Scenario> scenarios{ Scenario{512, 65536, 0.8, 0.6, 0.1, 3}, Scenario{512, 65536, 0.9, 0.7, 0.0, 3}, Scenario{512, 65536, 0.9, 0.7, 0.1, 3}, Scenario{512, 65536, 0.8, 0.6, 0.5, 3}, Scenario{512, 65536, 0.9, 0.7, 0.5, 3}, Scenario{1024, 65536, 0.8, 0.6, 0.1, 3}, Scenario{1024, 65536, 0.9, 0.7, 0.0, 3}, Scenario{1024, 65536, 0.9, 0.7, 0.1, 3}, Scenario{1024*2, 65536, 0.8, 0.6, 0.3, 3}, Scenario{1024*2, 65536, 0.9, 0.7, 0.0, 3}, Scenario{1024*2, 65536, 0.9, 0.7, 0.3, 3}, Scenario{512, 65536/16, 0.8, 0.6, 0.1, 3}, Scenario{512, 65536/16, 0.9, 0.7, 0.0, 3}, Scenario{512, 65536/16, 0.9, 0.7, 0.1, 3}, Scenario{512, 65536/16, 0.8, 0.6, 0.5, 3}, Scenario{512, 65536/16, 0.9, 0.7, 0.5, 3}, Scenario{1024, 65536/16, 0.8, 0.6, 0.1, 3}, Scenario{1024, 65536/16, 0.9, 0.7, 0.0, 3}, Scenario{1024, 65536/16, 0.9, 0.7, 0.1, 3}, Scenario{1024*2, 65536/16, 0.8, 0.6, 0.3, 3}, Scenario{1024*2, 65536/16, 0.9, 0.7, 0.0, 3}, Scenario{1024*2, 65536/16, 0.9, 0.7, 0.3, 3} }; void PrintTo(const Scenario& s, ::std::ostream* os) { *os << "(capacity=" << s.capacity; *os << "G, alloc_unit=" << s.alloc_unit; *os << ", high_mark=" << s.high_mark; *os << ", low_mark=" << 
s.low_mark; *os << ", leakness=" << s.leakness; *os << ", repeats=" << s.repeats << ")"; } bool verbose = getenv("VERBOSE") != nullptr; class AllocTracker; class AllocTest : public ::testing::TestWithParam<std::string> { protected: boost::scoped_ptr<AllocTracker> at; gen_type rng; static boost::intrusive_ptr<CephContext> cct; public: boost::scoped_ptr<Allocator> alloc; AllocTest(): alloc(nullptr) {} void init_alloc(const std::string& alloc_name, int64_t size, uint64_t min_alloc_size); void init_close(); void doAgingTest(std::function<uint32_t()> size_generator, const std::string& alloc_name, uint64_t capacity, uint32_t alloc_unit, uint64_t high_mark, uint64_t low_mark, uint32_t iterations, double leak_factor = 0); uint64_t capacity; uint32_t alloc_unit; uint64_t level = 0; uint64_t allocs = 0; uint64_t fragmented = 0; uint64_t fragments = 0; uint64_t total_fragments = 0; void do_fill(uint64_t high_mark, std::function<uint32_t()> size_generator, double leak_factor = 0); void do_free(uint64_t low_mark); uint32_t free_random(); void TearDown() final; static void SetUpTestSuite(); static void TearDownTestSuite(); }; struct test_result { uint64_t tests_cnt = 0; double fragmented_percent = 0; double fragments_count = 0; double time = 0; double frag_score = 0; }; std::map<std::string, test_result> results_per_allocator; const uint64_t _1m = 1024 * 1024; const uint64_t _1G = 1024 * 1024 * 1024; const uint64_t _2m = 2 * 1024 * 1024; class AllocTracker { std::vector<bluestore_pextent_t> allocations; uint64_t size = 0; public: bool push(uint64_t offs, uint32_t len) { assert(len != 0); if (size + 1 > allocations.size()) allocations.resize(size + 100); allocations[size++] = bluestore_pextent_t(offs, len); return true; } bool pop_random(gen_type& rng, uint64_t* offs, uint32_t* len, uint32_t max_len = 0) { if (size == 0) return false; uint64_t pos = rng() % size; *len = allocations[pos].length; *offs = allocations[pos].offset; if (max_len && *len > max_len) { 
allocations[pos].length = *len - max_len; allocations[pos].offset = *offs + max_len; *len = max_len; } else { allocations[pos] = allocations[size-1]; --size; } return true; } }; boost::intrusive_ptr<CephContext> AllocTest::cct; void AllocTest::init_alloc(const std::string& allocator_name, int64_t size, uint64_t min_alloc_size) { this->capacity = size; this->alloc_unit = min_alloc_size; rng.seed(0); alloc.reset(Allocator::create(cct.get(), allocator_name, size, min_alloc_size)); at.reset(new AllocTracker()); } void AllocTest::init_close() { alloc.reset(0); at.reset(nullptr); } uint32_t AllocTest::free_random() { uint64_t o = 0; uint32_t l = 0; interval_set<uint64_t> release_set; if (!at->pop_random(rng, &o, &l)) { //empty? return 0; } release_set.insert(o, l); alloc->release(release_set); level -= l; return l; } void AllocTest::do_fill(uint64_t high_mark, std::function<uint32_t()> size_generator, double leak_factor) { assert (leak_factor >= 0); assert (leak_factor < 1); uint32_t leak_level = leak_factor * std::numeric_limits<uint32_t>::max(); PExtentVector tmp; while (level < high_mark) { uint32_t want = size_generator(); tmp.clear(); auto r = alloc->allocate(want, alloc_unit, 0, 0, &tmp); if (r < want) { break; } level += r; for(auto a : tmp) { bool full = !at->push(a.offset, a.length); EXPECT_EQ(full, false); } allocs++; if (tmp.size() > 1) { fragmented ++; total_fragments += r; fragments += tmp.size(); } if (leak_level > 0) { for (size_t i=0; i<tmp.size(); i++) { if (uint32_t(rng()) < leak_level) { free_random(); } } } } } void AllocTest::do_free(uint64_t low_mark) { while (level > low_mark) { if (free_random() == 0) break; } } void AllocTest::doAgingTest( std::function<uint32_t()> size_generator, const std::string& allocator_name, uint64_t capacity, uint32_t alloc_unit, uint64_t high_mark, uint64_t low_mark, uint32_t iterations, double leak_factor) { assert(std::has_single_bit(alloc_unit)); cct->_conf->bdev_block_size = alloc_unit; PExtentVector allocated, tmp; 
init_alloc(allocator_name, capacity, alloc_unit); alloc->init_add_free(0, capacity); utime_t start = ceph_clock_now(); level = 0; allocs = 0; fragmented = 0; fragments = 0; total_fragments = 0; if (verbose) std::cout << "INITIAL FILL" << std::endl; do_fill(high_mark, size_generator, leak_factor); //initial fill with data if (verbose) std::cout << " fragmented allocs=" << 100.0 * fragmented / allocs << "%" << " #frags=" << ( fragmented != 0 ? double(fragments) / fragmented : 0 )<< " time=" << (ceph_clock_now() - start) * 1000 << "ms" << std::endl; for (uint32_t i=0; i < iterations; i++) { allocs = 0; fragmented = 0; fragments = 0; total_fragments = 0; uint64_t level_previous = level; start = ceph_clock_now(); if (verbose) std::cout << "ADDING CAPACITY " << i + 1 << std::endl; do_free(low_mark); //simulates adding new capacity to cluster if (verbose) std::cout << " level change: " << double(level_previous) / capacity * 100 << "% -> " << double(level) / capacity * 100 << "% time=" << (ceph_clock_now() - start) * 1000 << "ms" << std::endl; start = ceph_clock_now(); if (verbose) std::cout << "APPENDING " << i + 1 << std::endl; do_fill(high_mark, size_generator, leak_factor); //only creating elements if (verbose) std::cout << " fragmented allocs=" << 100.0 * fragmented / allocs << "%" << " #frags=" << ( fragmented != 0 ? double(fragments) / fragmented : 0 ) << " time=" << (ceph_clock_now() - start) * 1000 << "ms" << std::endl; } double frag_score = alloc->get_fragmentation_score(); do_free(0); double free_frag_score = alloc->get_fragmentation_score(); ASSERT_EQ(alloc->get_free(), capacity); std::cout << " fragmented allocs=" << 100.0 * fragmented / allocs << "%" << " #frags=" << ( fragmented != 0 ? 
double(fragments) / fragmented : 0 ) << " time=" << (ceph_clock_now() - start) * 1000 << "ms" << " frag.score=" << frag_score << " after free frag.score=" << free_frag_score << std::endl; uint64_t sum = 0; uint64_t cnt = 0; auto list_free = [&](size_t off, size_t len) { cnt++; sum+=len; }; alloc->dump(list_free); ASSERT_EQ(sum, capacity); if (verbose) std::cout << "free chunks sum=" << sum << " free chunks count=" << cnt << std::endl; //adding to totals test_result &r = results_per_allocator[allocator_name]; r.tests_cnt ++; r.fragmented_percent += 100.0 * fragmented / allocs; r.fragments_count += ( fragmented != 0 ? double(fragments) / fragmented : 2 ); r.time += ceph_clock_now() - start; r.frag_score += frag_score; } void AllocTest::SetUpTestSuite() { vector<const char*> args; cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); common_init_finish(cct.get()); } void AllocTest::TearDown() { at.reset(); alloc.reset(); } void AllocTest::TearDownTestSuite() { cct.reset(); std::cout << "Summary: " << std::endl; for (auto& r: results_per_allocator) { std::cout << r.first << " fragmented allocs=" << r.second.fragmented_percent / r.second.tests_cnt << "%" << " #frags=" << r.second.fragments_count / r.second.tests_cnt << " free_score=" << r.second.frag_score / r.second.tests_cnt << " time=" << r.second.time * 1000 << "ms" << std::endl; } } TEST_P(AllocTest, test_alloc_triangle_0_8M_16M) { std::string allocator_name = GetParam(); boost::triangle_distribution<double> D(1, (8 * 1024 * 1024) , (16 * 1024 * 1024) ); for (auto& s:scenarios) { std::cout << "Allocator: " << allocator_name << ", "; PrintTo(s, &std::cout); std::cout << std::endl; auto size_generator = [&]() -> uint32_t { return (uint32_t(D(rng)) + s.alloc_unit) & ~(s.alloc_unit - 1); }; doAgingTest(size_generator, allocator_name, s.capacity * _1G, s.alloc_unit, s.high_mark * s.capacity * _1G, s.low_mark * s.capacity * _1G, s.repeats, s.leakness); } } 
TEST_P(AllocTest, test_alloc_8M_and_64K) { std::string allocator_name = GetParam(); constexpr uint32_t max_chunk_size = 8*1024*1024; constexpr uint32_t min_chunk_size = 64*1024; for (auto& s:scenarios) { std::cout << "Allocator: " << allocator_name << ", "; PrintTo(s, &std::cout); std::cout << std::endl; boost::uniform_int<> D(0, 1); auto size_generator = [&]() -> uint32_t { if (D(rng) == 0) return max_chunk_size; else return min_chunk_size; }; doAgingTest(size_generator, allocator_name, s.capacity * _1G, s.alloc_unit, s.high_mark * s.capacity * _1G, s.low_mark * s.capacity * _1G, s.repeats, s.leakness); } } TEST_P(AllocTest, test_alloc_fragmentation_max_chunk_8M) { std::string allocator_name = GetParam(); constexpr uint32_t max_object_size = 150*1000*1000; constexpr uint32_t max_chunk_size = 8*1024*1024; for (auto& s:scenarios) { std::cout << "Allocator: " << allocator_name << ", "; PrintTo(s, &std::cout); std::cout << std::endl; boost::uniform_int<> D(1, max_object_size / s.alloc_unit); uint32_t object_size = 0; auto size_generator = [&]() -> uint32_t { uint32_t c; if (object_size == 0) object_size = (uint32_t(D(rng))* s.alloc_unit); if (object_size > max_chunk_size) c = max_chunk_size; else c = object_size; object_size -= c; return c; }; doAgingTest(size_generator, allocator_name, s.capacity * _1G, s.alloc_unit, s.high_mark * s.capacity * _1G, s.low_mark * s.capacity * _1G, s.repeats, s.leakness); } } TEST_P(AllocTest, test_bonus_empty_fragmented) { uint64_t capacity = uint64_t(512) * 1024 * 1024 * 1024; //512 G uint64_t alloc_unit = 64 * 1024; std::string allocator_name = GetParam(); std::cout << "Allocator: " << allocator_name << std::endl; init_alloc(allocator_name, capacity, alloc_unit); alloc->init_add_free(0, capacity); PExtentVector tmp; for (size_t i = 0; i < capacity / (1024 * 1024); i++) { tmp.clear(); uint32_t want = 1024 * 1024; int r = alloc->allocate(want, alloc_unit, 0, 0, &tmp); ASSERT_EQ(r, want); if (tmp.size() > 1) { interval_set<uint64_t> 
release_set; for (auto& t: tmp) { release_set.insert(t.offset, t.length); } alloc->release(release_set); } else { interval_set<uint64_t> release_set; uint64_t offset = tmp[0].offset; uint64_t length = tmp[0].length; release_set.insert(offset + alloc_unit, length - 3 * alloc_unit); alloc->release(release_set); release_set.clear(); release_set.insert(offset , alloc_unit); alloc->release(release_set); release_set.clear(); release_set.insert(offset + length - 2 * alloc_unit, 2 * alloc_unit); alloc->release(release_set); release_set.clear(); } } double frag_score = alloc->get_fragmentation_score(); ASSERT_EQ(alloc->get_free(), capacity); std::cout << " empty storage frag.score=" << frag_score << std::endl; } INSTANTIATE_TEST_SUITE_P( Allocator, AllocTest, ::testing::Values("stupid", "bitmap", "avl", "btree"));
13,776
28.69181
107
cc
null
ceph-main/src/test/objectstore/Allocator_bench.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * In memory space allocator benchmarks. * Author: Igor Fedotov, ifedotov@suse.com */ #include <iostream> #include <boost/scoped_ptr.hpp> #include <gtest/gtest.h> #include "common/Cond.h" #include "common/errno.h" #include "include/stringify.h" #include "include/Context.h" #include "os/bluestore/Allocator.h" #include <boost/random/uniform_int.hpp> typedef boost::mt11213b gen_type; #include "common/debug.h" #define dout_context g_ceph_context #define dout_subsys ceph_subsys_ using namespace std; class AllocTest : public ::testing::TestWithParam<const char*> { public: boost::scoped_ptr<Allocator> alloc; AllocTest(): alloc(0) { } void init_alloc(int64_t size, uint64_t min_alloc_size) { std::cout << "Creating alloc type " << string(GetParam()) << " \n"; alloc.reset(Allocator::create(g_ceph_context, GetParam(), size, min_alloc_size)); } void init_close() { alloc.reset(0); } void doOverwriteTest(uint64_t capacity, uint64_t prefill, uint64_t overwrite); }; const uint64_t _1m = 1024 * 1024; void dump_mempools() { ostringstream ostr; Formatter* f = Formatter::create("json-pretty", "json-pretty", "json-pretty"); ostr << "Mempools: "; f->open_object_section("mempools"); mempool::dump(f); f->close_section(); f->flush(ostr); delete f; ldout(g_ceph_context, 0) << ostr.str() << dendl; } class AllocTracker { std::vector<uint64_t> allocations; uint64_t head = 0; uint64_t tail = 0; uint64_t size = 0; boost::uniform_int<> u1; public: AllocTracker(uint64_t capacity, uint64_t alloc_unit) : u1(0, capacity) { ceph_assert(alloc_unit >= 0x100); ceph_assert(capacity <= (uint64_t(1) << 48)); // we use 5 octets (bytes 1 - 5) to store // offset to save the required space. 
// This supports capacity up to 281 TB allocations.resize(capacity / alloc_unit); } inline uint64_t get_head() const { return head; } inline uint64_t get_tail() const { return tail; } bool push(uint64_t offs, uint32_t len) { ceph_assert((len & 0xff) == 0); ceph_assert((offs & 0xff) == 0); ceph_assert((offs & 0xffff000000000000) == 0); if (head + 1 == tail) return false; uint64_t val = (offs << 16) | (len >> 8); allocations[head++] = val; head %= allocations.size(); ++size; return true; } bool pop(uint64_t* offs, uint32_t* len) { if (size == 0) return false; uint64_t val = allocations[tail++]; *len = uint64_t((val & 0xffffff) << 8); *offs = (val >> 16) & ~uint64_t(0xff); tail %= allocations.size(); --size; return true; } bool pop_random(gen_type& rng, uint64_t* offs, uint32_t* len, uint32_t max_len = 0) { if (size == 0) return false; uint64_t pos = (u1(rng) % size) + tail; pos %= allocations.size(); uint64_t val = allocations[pos]; *len = uint64_t((val & 0xffffff) << 8); *offs = (val >> 16) & ~uint64_t(0xff); if (max_len && *len > max_len) { val = ((*offs + max_len) << 16) | ((*len - max_len) >> 8); allocations[pos] = val; *len = max_len; } else { allocations[pos] = allocations[tail++]; tail %= allocations.size(); --size; } return true; } }; TEST_P(AllocTest, test_alloc_bench_seq) { uint64_t capacity = uint64_t(1024) * 1024 * 1024 * 1024; uint64_t alloc_unit = 4096; uint64_t want_size = alloc_unit; PExtentVector allocated, tmp; init_alloc(capacity, alloc_unit); alloc->init_add_free(0, capacity); utime_t start = ceph_clock_now(); for (uint64_t i = 0; i < capacity; i += want_size) { tmp.clear(); EXPECT_EQ(static_cast<int64_t>(want_size), alloc->allocate(want_size, alloc_unit, 0, 0, &tmp)); if (0 == (i % (1 * 1024 * _1m))) { std::cout << "alloc " << i / 1024 / 1024 << " mb of " << capacity / 1024 / 1024 << std::endl; } } std::cout << "releasing..." 
<< std::endl; for (size_t i = 0; i < capacity; i += want_size) { interval_set<uint64_t> release_set; release_set.insert(i, want_size); alloc->release(release_set); if (0 == (i % (1 * 1024 * _1m))) { std::cout << "release " << i / 1024 / 1024 << " mb of " << capacity / 1024 / 1024 << std::endl; } } std::cout<<"Executed in "<< ceph_clock_now() - start << std::endl; dump_mempools(); } TEST_P(AllocTest, test_alloc_bench) { uint64_t capacity = uint64_t(1024) * 1024 * 1024 * 1024; uint64_t alloc_unit = 4096; PExtentVector allocated, tmp; AllocTracker at(capacity, alloc_unit); init_alloc(capacity, alloc_unit); alloc->init_add_free(0, capacity); gen_type rng(time(NULL)); boost::uniform_int<> u1(0, 9); // 4K-2M boost::uniform_int<> u2(0, 7); // 4K-512K utime_t start = ceph_clock_now(); for (uint64_t i = 0; i < capacity * 2; ) { uint32_t want = alloc_unit << u1(rng); tmp.clear(); auto r = alloc->allocate(want, alloc_unit, 0, 0, &tmp); if (r < want) { break; } i += r; for(auto a : tmp) { bool full = !at.push(a.offset, a.length); EXPECT_EQ(full, false); } uint64_t want_release = alloc_unit << u2(rng); uint64_t released = 0; do { uint64_t o = 0; uint32_t l = 0; interval_set<uint64_t> release_set; if (!at.pop_random(rng, &o, &l, want_release - released)) { break; } release_set.insert(o, l); alloc->release(release_set); released += l; } while (released < want_release); if (0 == (i % (1 * 1024 * _1m))) { std::cout << "alloc " << i / 1024 / 1024 << " mb of " << capacity / 1024 / 1024 << std::endl; } } std::cout<<"Executed in "<< ceph_clock_now() - start << std::endl; std::cout<<"Avail "<< alloc->get_free() / _1m << " MB" << std::endl; dump_mempools(); } void AllocTest::doOverwriteTest(uint64_t capacity, uint64_t prefill, uint64_t overwrite) { uint64_t alloc_unit = 4096; PExtentVector allocated, tmp; AllocTracker at(capacity, alloc_unit); init_alloc(capacity, alloc_unit); alloc->init_add_free(0, capacity); gen_type rng(time(NULL)); boost::uniform_int<> u1(0, 9); // 4K-2M 
boost::uniform_int<> u2(0, 9); // 4K-512K utime_t start = ceph_clock_now(); // allocate 90% of the capacity auto cap = prefill; for (uint64_t i = 0; i < cap; ) { uint32_t want = alloc_unit << u1(rng); tmp.clear(); auto r = alloc->allocate(want, alloc_unit, 0, 0, &tmp); if (r < want) { break; } i += r; for(auto a : tmp) { bool full = !at.push(a.offset, a.length); EXPECT_EQ(full, false); } if (0 == (i % (1 * 1024 * _1m))) { std::cout << "alloc " << i / 1024 / 1024 << " mb of " << cap / 1024 / 1024 << std::endl; } } cap = overwrite; for (uint64_t i = 0; i < cap; ) { uint64_t want_release = alloc_unit << u2(rng); uint64_t released = 0; do { uint64_t o = 0; uint32_t l = 0; interval_set<uint64_t> release_set; if (!at.pop_random(rng, &o, &l, want_release - released)) { break; } release_set.insert(o, l); alloc->release(release_set); released += l; } while (released < want_release); uint32_t want = alloc_unit << u1(rng); tmp.clear(); auto r = alloc->allocate(want, alloc_unit, 0, 0, &tmp); if (r != want) { std::cout<<"Can't allocate more space, stopping."<< std::endl; break; } i += r; for(auto a : tmp) { bool full = !at.push(a.offset, a.length); EXPECT_EQ(full, false); } if (0 == (i % (1 * 1024 * _1m))) { std::cout << "reuse " << i / 1024 / 1024 << " mb of " << cap / 1024 / 1024 << std::endl; } } std::cout<<"Executed in "<< ceph_clock_now() - start << std::endl; std::cout<<"Avail "<< alloc->get_free() / _1m << " MB" << std::endl; dump_mempools(); } TEST_P(AllocTest, test_alloc_bench_90_300) { uint64_t capacity = uint64_t(1024) * 1024 * 1024 * 1024; auto prefill = capacity - capacity / 10; auto overwrite = capacity * 3; doOverwriteTest(capacity, prefill, overwrite); } TEST_P(AllocTest, test_alloc_bench_50_300) { uint64_t capacity = uint64_t(1024) * 1024 * 1024 * 1024; auto prefill = capacity / 2; auto overwrite = capacity * 3; doOverwriteTest(capacity, prefill, overwrite); } TEST_P(AllocTest, test_alloc_bench_10_300) { uint64_t capacity = uint64_t(1024) * 1024 * 1024 * 1024; 
auto prefill = capacity / 10; auto overwrite = capacity * 3; doOverwriteTest(capacity, prefill, overwrite); } TEST_P(AllocTest, mempoolAccounting) { uint64_t bytes = mempool::bluestore_alloc::allocated_bytes(); uint64_t items = mempool::bluestore_alloc::allocated_items(); uint64_t alloc_size = 4 * 1024; uint64_t capacity = 512ll * 1024 * 1024 * 1024; Allocator* alloc = Allocator::create(g_ceph_context, GetParam(), capacity, alloc_size); ASSERT_NE(alloc, nullptr); alloc->init_add_free(0, capacity); std::map<uint32_t, PExtentVector> all_allocs; for (size_t i = 0; i < 10000; i++) { PExtentVector tmp; alloc->allocate(alloc_size, alloc_size, 0, 0, &tmp); all_allocs[rand()] = tmp; tmp.clear(); alloc->allocate(alloc_size, alloc_size, 0, 0, &tmp); all_allocs[rand()] = tmp; tmp.clear(); auto it = all_allocs.upper_bound(rand()); if (it != all_allocs.end()) { alloc->release(it->second); all_allocs.erase(it); } } delete(alloc); ASSERT_EQ(mempool::bluestore_alloc::allocated_bytes(), bytes); ASSERT_EQ(mempool::bluestore_alloc::allocated_items(), items); } INSTANTIATE_TEST_SUITE_P( Allocator, AllocTest, ::testing::Values("stupid", "bitmap", "avl", "btree", "hybrid"));
9,803
25.569106
91
cc
null
ceph-main/src/test/objectstore/Allocator_test.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * In memory space allocator test cases. * Author: Ramesh Chander, Ramesh.Chander@sandisk.com */ #include <iostream> #include <boost/scoped_ptr.hpp> #include <gtest/gtest.h> #include "common/Cond.h" #include "common/errno.h" #include "include/stringify.h" #include "include/Context.h" #include "os/bluestore/Allocator.h" using namespace std; typedef boost::mt11213b gen_type; class AllocTest : public ::testing::TestWithParam<const char*> { public: boost::scoped_ptr<Allocator> alloc; AllocTest(): alloc(0) { } void init_alloc(int64_t size, uint64_t min_alloc_size) { std::cout << "Creating alloc type " << string(GetParam()) << " \n"; alloc.reset(Allocator::create(g_ceph_context, GetParam(), size, min_alloc_size, 256*1048576, 100*256*1048576ull)); } void init_close() { alloc.reset(0); } }; TEST_P(AllocTest, test_alloc_init) { int64_t blocks = 64; init_alloc(blocks, 1); ASSERT_EQ(0U, alloc->get_free()); alloc->shutdown(); blocks = 1024 * 2 + 16; init_alloc(blocks, 1); ASSERT_EQ(0U, alloc->get_free()); alloc->shutdown(); blocks = 1024 * 2; init_alloc(blocks, 1); ASSERT_EQ(alloc->get_free(), (uint64_t) 0); } TEST_P(AllocTest, test_init_add_free) { int64_t block_size = 1024; int64_t capacity = 4 * 1024 * block_size; { init_alloc(capacity, block_size); auto free = alloc->get_free(); alloc->init_add_free(block_size, 0); ASSERT_EQ(free, alloc->get_free()); alloc->init_rm_free(block_size, 0); ASSERT_EQ(free, alloc->get_free()); } } TEST_P(AllocTest, test_alloc_min_alloc) { int64_t block_size = 1024; int64_t capacity = 4 * 1024 * block_size; { init_alloc(capacity, block_size); alloc->init_add_free(block_size, block_size); PExtentVector extents; EXPECT_EQ(block_size, alloc->allocate(block_size, block_size, 0, (int64_t) 0, &extents)); } /* * Allocate extent and make sure all comes in single extent. 
*/ { init_alloc(capacity, block_size); alloc->init_add_free(0, block_size * 4); PExtentVector extents; EXPECT_EQ(4*block_size, alloc->allocate(4 * (uint64_t)block_size, (uint64_t) block_size, 0, (int64_t) 0, &extents)); EXPECT_EQ(1u, extents.size()); EXPECT_EQ(extents[0].length, 4 * block_size); } /* * Allocate extent and make sure we get two different extents. */ { init_alloc(capacity, block_size); alloc->init_add_free(0, block_size * 2); alloc->init_add_free(3 * block_size, block_size * 2); PExtentVector extents; EXPECT_EQ(4*block_size, alloc->allocate(4 * (uint64_t)block_size, (uint64_t) block_size, 0, (int64_t) 0, &extents)); EXPECT_EQ(2u, extents.size()); EXPECT_EQ(extents[0].length, 2 * block_size); EXPECT_EQ(extents[1].length, 2 * block_size); } alloc->shutdown(); } TEST_P(AllocTest, test_alloc_min_max_alloc) { int64_t block_size = 1024; int64_t capacity = 4 * 1024 * block_size; init_alloc(capacity, block_size); /* * Make sure we get all extents different when * min_alloc_size == max_alloc_size */ { init_alloc(capacity, block_size); alloc->init_add_free(0, block_size * 4); PExtentVector extents; EXPECT_EQ(4*block_size, alloc->allocate(4 * (uint64_t)block_size, (uint64_t) block_size, block_size, (int64_t) 0, &extents)); for (auto e : extents) { EXPECT_EQ(e.length, block_size); } EXPECT_EQ(4u, extents.size()); } /* * Make sure we get extents of length max_alloc size * when max alloc size > min_alloc size */ { init_alloc(capacity, block_size); alloc->init_add_free(0, block_size * 4); PExtentVector extents; EXPECT_EQ(4*block_size, alloc->allocate(4 * (uint64_t)block_size, (uint64_t) block_size, 2 * block_size, (int64_t) 0, &extents)); EXPECT_EQ(2u, extents.size()); for (auto& e : extents) { EXPECT_EQ(e.length, block_size * 2); } } /* * Make sure allocations are of min_alloc_size when min_alloc_size > block_size. 
*/ { init_alloc(capacity, block_size); alloc->init_add_free(0, block_size * 1024); PExtentVector extents; EXPECT_EQ(1024 * block_size, alloc->allocate(1024 * (uint64_t)block_size, (uint64_t) block_size * 4, block_size * 4, (int64_t) 0, &extents)); for (auto& e : extents) { EXPECT_EQ(e.length, block_size * 4); } EXPECT_EQ(1024u/4, extents.size()); } /* * Allocate and free. */ { init_alloc(capacity, block_size); alloc->init_add_free(0, block_size * 16); PExtentVector extents; EXPECT_EQ(16 * block_size, alloc->allocate(16 * (uint64_t)block_size, (uint64_t) block_size, 2 * block_size, (int64_t) 0, &extents)); EXPECT_EQ(extents.size(), 8u); for (auto& e : extents) { EXPECT_EQ(e.length, 2 * block_size); } } } TEST_P(AllocTest, test_alloc_failure) { int64_t block_size = 1024; int64_t capacity = 4 * 1024 * block_size; { init_alloc(capacity, block_size); alloc->init_add_free(0, block_size * 256); alloc->init_add_free(block_size * 512, block_size * 256); PExtentVector extents; EXPECT_EQ(512 * block_size, alloc->allocate(512 * (uint64_t)block_size, (uint64_t) block_size * 256, block_size * 256, (int64_t) 0, &extents)); alloc->init_add_free(0, block_size * 256); alloc->init_add_free(block_size * 512, block_size * 256); extents.clear(); EXPECT_EQ(-ENOSPC, alloc->allocate(512 * (uint64_t)block_size, (uint64_t) block_size * 512, block_size * 512, (int64_t) 0, &extents)); } } TEST_P(AllocTest, test_alloc_big) { int64_t block_size = 4096; int64_t blocks = 104857600; int64_t mas = 4096; init_alloc(blocks*block_size, block_size); alloc->init_add_free(2*block_size, (blocks-2)*block_size); for (int64_t big = mas; big < 1048576*128; big*=2) { cout << big << std::endl; PExtentVector extents; EXPECT_EQ(big, alloc->allocate(big, mas, 0, &extents)); } } TEST_P(AllocTest, test_alloc_non_aligned_len) { int64_t block_size = 1 << 12; int64_t blocks = (1 << 20) * 100; int64_t want_size = 1 << 22; int64_t alloc_unit = 1 << 20; init_alloc(blocks*block_size, block_size); alloc->init_add_free(0, 
2097152); alloc->init_add_free(2097152, 1064960); alloc->init_add_free(3670016, 2097152); PExtentVector extents; EXPECT_EQ(want_size, alloc->allocate(want_size, alloc_unit, 0, &extents)); } TEST_P(AllocTest, test_alloc_39334) { uint64_t block = 0x4000; uint64_t size = 0x5d00000000; init_alloc(size, block); alloc->init_add_free(0x4000, 0x5cffffc000); EXPECT_EQ(size - block, alloc->get_free()); } TEST_P(AllocTest, test_alloc_fragmentation) { uint64_t capacity = 4 * 1024 * 1024; uint64_t alloc_unit = 4096; uint64_t want_size = alloc_unit; PExtentVector allocated, tmp; init_alloc(capacity, alloc_unit); alloc->init_add_free(0, capacity); bool bitmap_alloc = GetParam() == std::string("bitmap"); EXPECT_EQ(0.0, alloc->get_fragmentation()); for (size_t i = 0; i < capacity / alloc_unit; ++i) { tmp.clear(); EXPECT_EQ(static_cast<int64_t>(want_size), alloc->allocate(want_size, alloc_unit, 0, 0, &tmp)); allocated.insert(allocated.end(), tmp.begin(), tmp.end()); // bitmap fragmentation calculation doesn't provide such constant // estimate if (!bitmap_alloc) { EXPECT_EQ(0.0, alloc->get_fragmentation()); } } tmp.clear(); EXPECT_EQ(-ENOSPC, alloc->allocate(want_size, alloc_unit, 0, 0, &tmp)); if (GetParam() == string("avl")) { // AVL allocator uses a different allocating strategy GTEST_SKIP() << "skipping for AVL allocator"; } else if (GetParam() == string("hybrid")) { // AVL allocator uses a different allocating strategy GTEST_SKIP() << "skipping for Hybrid allocator"; } for (size_t i = 0; i < allocated.size(); i += 2) { interval_set<uint64_t> release_set; release_set.insert(allocated[i].offset, allocated[i].length); alloc->release(release_set); } EXPECT_EQ(1.0, alloc->get_fragmentation()); for (size_t i = 1; i < allocated.size() / 2; i += 2) { interval_set<uint64_t> release_set; release_set.insert(allocated[i].offset, allocated[i].length); alloc->release(release_set); } if (bitmap_alloc) { // fragmentation = one l1 slot is free + one l1 slot is partial EXPECT_EQ(50U, 
uint64_t(alloc->get_fragmentation() * 100)); } else { // fragmentation approx = 257 intervals / 768 max intervals EXPECT_EQ(33u, uint64_t(alloc->get_fragmentation() * 100)); } for (size_t i = allocated.size() / 2 + 1; i < allocated.size(); i += 2) { interval_set<uint64_t> release_set; release_set.insert(allocated[i].offset, allocated[i].length); alloc->release(release_set); } // doing some rounding trick as stupid allocator doesn't merge all the // extents that causes some minor fragmentation (minor bug or by-design behavior?). // Hence leaving just two // digits after decimal point due to this. EXPECT_EQ(0u, uint64_t(alloc->get_fragmentation() * 100)); } TEST_P(AllocTest, test_fragmentation_score_0) { uint64_t capacity = 16LL * 1024 * 1024 * 1024; //16 GB, very small uint64_t alloc_unit = 4096; init_alloc(capacity, alloc_unit); alloc->init_add_free(0, capacity); EXPECT_EQ(0, alloc->get_fragmentation_score()); // alloc every 100M, should get very small score for (uint64_t pos = 0; pos < capacity; pos += 100 * 1024 * 1024) { alloc->init_rm_free(pos, alloc_unit); } EXPECT_LT(alloc->get_fragmentation_score(), 0.0001); // frag < 0.01% for (uint64_t pos = 0; pos < capacity; pos += 100 * 1024 * 1024) { // put back alloc->init_add_free(pos, alloc_unit); } // 10% space is trashed, rest is free, small score for (uint64_t pos = 0; pos < capacity / 10; pos += 3 * alloc_unit) { alloc->init_rm_free(pos, alloc_unit); } EXPECT_LT(0.01, alloc->get_fragmentation_score()); // 1% < frag < 10% EXPECT_LT(alloc->get_fragmentation_score(), 0.1); } TEST_P(AllocTest, test_fragmentation_score_some) { uint64_t capacity = 1024 * 1024 * 1024; //1 GB, very small uint64_t alloc_unit = 4096; init_alloc(capacity, alloc_unit); alloc->init_add_free(0, capacity); // half (in 16 chunks) is completely free, // other half completely fragmented, expect less than 50% fragmentation score for (uint64_t chunk = 0; chunk < capacity; chunk += capacity / 16) { for (uint64_t pos = 0; pos < capacity / 32; pos += 
alloc_unit * 3) { alloc->init_rm_free(chunk + pos, alloc_unit); } } EXPECT_LT(alloc->get_fragmentation_score(), 0.5); // f < 50% init_alloc(capacity, alloc_unit); alloc->init_add_free(0, capacity); // half (in 16 chunks) is completely full, // other half completely fragmented, expect really high fragmentation score for (uint64_t chunk = 0; chunk < capacity; chunk += capacity / 16) { alloc->init_rm_free(chunk + capacity / 32, capacity / 32); for (uint64_t pos = 0; pos < capacity / 32; pos += alloc_unit * 3) { alloc->init_rm_free(chunk + pos, alloc_unit); } } EXPECT_LT(0.9, alloc->get_fragmentation_score()); // 50% < f } TEST_P(AllocTest, test_fragmentation_score_1) { uint64_t capacity = 1024 * 1024 * 1024; //1 GB, very small uint64_t alloc_unit = 4096; init_alloc(capacity, alloc_unit); alloc->init_add_free(0, capacity); // alloc every second AU, max fragmentation for (uint64_t pos = 0; pos < capacity; pos += alloc_unit * 2) { alloc->init_rm_free(pos, alloc_unit); } EXPECT_LT(0.99, alloc->get_fragmentation_score()); // 99% < f init_alloc(capacity, alloc_unit); alloc->init_add_free(0, capacity); // 1 allocated, 4 empty; expect very high score for (uint64_t pos = 0; pos < capacity; pos += alloc_unit * 5) { alloc->init_rm_free(pos, alloc_unit); } EXPECT_LT(0.90, alloc->get_fragmentation_score()); // 90% < f } TEST_P(AllocTest, test_dump_fragmentation_score) { uint64_t capacity = 1024 * 1024 * 1024; uint64_t one_alloc_max = 2 * 1024 * 1024; uint64_t alloc_unit = 4096; uint64_t want_size = alloc_unit; uint64_t rounds = 10; uint64_t actions_per_round = 1000; PExtentVector allocated, tmp; gen_type rng; init_alloc(capacity, alloc_unit); alloc->init_add_free(0, capacity); EXPECT_EQ(0.0, alloc->get_fragmentation()); EXPECT_EQ(0.0, alloc->get_fragmentation_score()); uint64_t allocated_cnt = 0; for (size_t round = 0; round < rounds ; round++) { for (size_t j = 0; j < actions_per_round ; j++) { //free or allocate ? 
if ( rng() % capacity >= allocated_cnt ) { //allocate want_size = ( rng() % one_alloc_max ) / alloc_unit * alloc_unit + alloc_unit; tmp.clear(); int64_t r = alloc->allocate(want_size, alloc_unit, 0, 0, &tmp); if (r > 0) { for (auto& t: tmp) { if (t.length > 0) allocated.push_back(t); } allocated_cnt += r; } } else { //free ceph_assert(allocated.size() > 0); size_t item = rng() % allocated.size(); ceph_assert(allocated[item].length > 0); allocated_cnt -= allocated[item].length; interval_set<uint64_t> release_set; release_set.insert(allocated[item].offset, allocated[item].length); alloc->release(release_set); std::swap(allocated[item], allocated[allocated.size() - 1]); allocated.resize(allocated.size() - 1); } } size_t free_sum = 0; auto iterated_allocation = [&](size_t off, size_t len) { ceph_assert(len > 0); free_sum += len; }; alloc->foreach(iterated_allocation); EXPECT_GT(1, alloc->get_fragmentation_score()); EXPECT_EQ(capacity, free_sum + allocated_cnt); } for (size_t i = 0; i < allocated.size(); i ++) { interval_set<uint64_t> release_set; release_set.insert(allocated[i].offset, allocated[i].length); alloc->release(release_set); } } TEST_P(AllocTest, test_alloc_bug_24598) { if (string(GetParam()) != "bitmap") return; uint64_t capacity = 0x2625a0000ull; uint64_t alloc_unit = 0x4000; uint64_t want_size = 0x200000; PExtentVector allocated, tmp; init_alloc(capacity, alloc_unit); alloc->init_add_free(0x4800000, 0x100000); alloc->init_add_free(0x4a00000, 0x100000); alloc->init_rm_free(0x4800000, 0x100000); alloc->init_rm_free(0x4a00000, 0x100000); alloc->init_add_free(0x3f00000, 0x500000); alloc->init_add_free(0x4500000, 0x100000); alloc->init_add_free(0x4700000, 0x100000); alloc->init_add_free(0x4900000, 0x100000); alloc->init_add_free(0x4b00000, 0x200000); EXPECT_EQ(static_cast<int64_t>(want_size), alloc->allocate(want_size, 0x100000, 0, 0, &tmp)); EXPECT_EQ(1u, tmp.size()); EXPECT_EQ(0x4b00000u, tmp[0].offset); EXPECT_EQ(0x200000u, tmp[0].length); } //Verifies 
issue from //http://tracker.ceph.com/issues/40703 // TEST_P(AllocTest, test_alloc_big2) { int64_t block_size = 4096; int64_t blocks = 1048576 * 2; int64_t mas = 1024*1024; init_alloc(blocks*block_size, block_size); alloc->init_add_free(0, blocks * block_size); PExtentVector extents; uint64_t need = block_size * blocks / 4; // 2GB EXPECT_EQ(need, alloc->allocate(need, mas, 0, &extents)); need = block_size * blocks / 4; // 2GB extents.clear(); EXPECT_EQ(need, alloc->allocate(need, mas, 0, &extents)); EXPECT_TRUE(extents[0].length > 0); } //Verifies stuck 4GB chunk allocation //in StupidAllocator // TEST_P(AllocTest, test_alloc_big3) { int64_t block_size = 4096; int64_t blocks = 1048576 * 2; int64_t mas = 1024*1024; init_alloc(blocks*block_size, block_size); alloc->init_add_free(0, blocks * block_size); PExtentVector extents; uint64_t need = block_size * blocks / 2; // 4GB EXPECT_EQ(need, alloc->allocate(need, mas, 0, &extents)); EXPECT_TRUE(extents[0].length > 0); } TEST_P(AllocTest, test_alloc_contiguous) { int64_t block_size = 0x1000; int64_t capacity = block_size * 1024 * 1024; { init_alloc(capacity, block_size); alloc->init_add_free(0, capacity); PExtentVector extents; uint64_t need = 4 * block_size; EXPECT_EQ(need, alloc->allocate(need, need, 0, (int64_t)0, &extents)); EXPECT_EQ(1u, extents.size()); EXPECT_EQ(extents[0].offset, 0); EXPECT_EQ(extents[0].length, 4 * block_size); extents.clear(); EXPECT_EQ(need, alloc->allocate(need, need, 0, (int64_t)0, &extents)); EXPECT_EQ(1u, extents.size()); EXPECT_EQ(extents[0].offset, 4 * block_size); EXPECT_EQ(extents[0].length, 4 * block_size); } alloc->shutdown(); } TEST_P(AllocTest, test_alloc_47883) { uint64_t block = 0x1000; uint64_t size = 1599858540544ul; init_alloc(size, block); alloc->init_add_free(0x1b970000, 0x26000); alloc->init_add_free(0x1747e9d5000, 0x493000); alloc->init_add_free(0x1747ee6a000, 0x196000); PExtentVector extents; auto need = 0x3f980000; auto got = alloc->allocate(need, 0x10000, 0, (int64_t)0, 
&extents); EXPECT_GT(got, 0); EXPECT_EQ(got, 0x630000); } TEST_P(AllocTest, test_alloc_50656_best_fit) { uint64_t block = 0x1000; uint64_t size = 0x3b9e400000; init_alloc(size, block); // too few free extents - causes best fit mode for avls for (size_t i = 0; i < 0x10; i++) { alloc->init_add_free(i * 2 * 0x100000, 0x100000); } alloc->init_add_free(0x1e1bd13000, 0x404000); PExtentVector extents; auto need = 0x400000; auto got = alloc->allocate(need, 0x10000, 0, (int64_t)0, &extents); EXPECT_GT(got, 0); EXPECT_EQ(got, 0x400000); } TEST_P(AllocTest, test_alloc_50656_first_fit) { uint64_t block = 0x1000; uint64_t size = 0x3b9e400000; init_alloc(size, block); for (size_t i = 0; i < 0x10000; i += 2) { alloc->init_add_free(i * 0x100000, 0x100000); } alloc->init_add_free(0x1e1bd13000, 0x404000); PExtentVector extents; auto need = 0x400000; auto got = alloc->allocate(need, 0x10000, 0, (int64_t)0, &extents); EXPECT_GT(got, 0); EXPECT_EQ(got, 0x400000); } INSTANTIATE_TEST_SUITE_P( Allocator, AllocTest, ::testing::Values("stupid", "bitmap", "avl", "hybrid"));
18,320
27.671362
85
cc
null
ceph-main/src/test/objectstore/Fragmentation_simulator.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Fragmentation Simulator * Author: Tri Dao, daominhtri0503@gmail.com */ #include "common/ceph_argparse.h" #include "common/common_init.h" #include "common/hobject.h" #include "global/global_init.h" #include "include/buffer_fwd.h" #include "os/ObjectStore.h" #include "test/objectstore/ObjectStoreImitator.h" #include <fmt/core.h> #include <gtest/gtest.h> #include <iostream> constexpr uint64_t _1Kb = 1024; constexpr uint64_t _1Mb = 1024 * _1Kb; constexpr uint64_t _1Gb = 1024 * _1Mb; static bufferlist make_bl(size_t len, char c) { bufferlist bl; if (len > 0) { bl.reserve(len); bl.append(std::string(len, c)); } return bl; } // --------- FragmentationSimulator ---------- class FragmentationSimulator : public ::testing::TestWithParam<std::string> { public: static boost::intrusive_ptr<CephContext> cct; struct WorkloadGenerator { virtual int generate_txns(ObjectStore::CollectionHandle &ch, ObjectStoreImitator *os) = 0; virtual std::string name() = 0; WorkloadGenerator() {} virtual ~WorkloadGenerator() {} }; using WorkloadGeneratorRef = std::shared_ptr<WorkloadGenerator>; void add_generator(WorkloadGeneratorRef gen); void clear_generators() { generators.clear(); } int begin_simulation_with_generators(); void init(const std::string &alloc_type, uint64_t size, uint64_t min_alloc_size = 4096); static void TearDownTestSuite() { cct.reset(); } static void SetUpTestSuite() {} void TearDown() final {} FragmentationSimulator() = default; ~FragmentationSimulator() = default; private: ObjectStoreImitator *os; std::vector<WorkloadGeneratorRef> generators; }; void FragmentationSimulator::init(const std::string &alloc_type, uint64_t size, uint64_t min_alloc_size) { std::cout << std::endl; std::cout << "Initializing ObjectStoreImitator" << std::endl; os = new ObjectStoreImitator(g_ceph_context, "", min_alloc_size); std::cout << "Initializing allocator: " << alloc_type << " size: 0x" << 
std::hex << size << std::dec << "\n" << std::endl; os->init_alloc(alloc_type, size); } void FragmentationSimulator::add_generator(WorkloadGeneratorRef gen) { std::cout << "Generator: " << gen->name() << " added\n"; generators.push_back(gen); } int FragmentationSimulator::begin_simulation_with_generators() { for (auto &g : generators) { ObjectStore::CollectionHandle ch = os->create_new_collection(coll_t::meta()); ObjectStore::Transaction t; t.create_collection(ch->cid, 0); os->queue_transaction(ch, std::move(t)); int r = g->generate_txns(ch, os); if (r < 0) return r; } os->print_status(); return 0; } // --------- Generators ---------- struct SimpleCWGenerator : public FragmentationSimulator::WorkloadGenerator { std::string name() override { return "SimpleCW"; } int generate_txns(ObjectStore::CollectionHandle &ch, ObjectStoreImitator *os) override { std::vector<ghobject_t> objs; for (unsigned i{0}; i < 100; ++i) { hobject_t h; h.oid = fmt::format("obj_{}", i); h.set_hash(1); h.pool = 1; objs.emplace_back(h); } std::vector<ObjectStore::Transaction> tls; for (unsigned i{0}; i < 100; ++i) { ObjectStore::Transaction t1; t1.create(ch->get_cid(), objs[i]); tls.emplace_back(std::move(t1)); ObjectStore::Transaction t2; t2.write(ch->get_cid(), objs[i], 0, _1Mb, make_bl(_1Mb, 'c')); tls.emplace_back(std::move(t2)); } os->queue_transactions(ch, tls); os->verify_objects(ch); // reapply os->queue_transactions(ch, tls); os->verify_objects(ch); tls.clear(); // Overwrite on object for (unsigned i{0}; i < 100; ++i) { ObjectStore::Transaction t; t.write(ch->get_cid(), objs[i], _1Kb * i, _1Mb * 3, make_bl(_1Mb * 3, 'x')); tls.emplace_back(std::move(t)); } os->queue_transactions(ch, tls); os->verify_objects(ch); tls.clear(); for (unsigned i{0}; i < 50; ++i) { ObjectStore::Transaction t1, t2; t1.clone(ch->get_cid(), objs[i], objs[i + 50]); tls.emplace_back(std::move(t1)); t2.clone(ch->get_cid(), objs[i + 50], objs[i]); tls.emplace_back(std::move(t2)); } os->queue_transactions(ch, tls); 
os->verify_objects(ch); return 0; } }; // ----------- Tests ----------- TEST_P(FragmentationSimulator, SimpleCWGenerator) { init(GetParam(), _1Gb); add_generator(std::make_shared<SimpleCWGenerator>()); begin_simulation_with_generators(); } // ----------- main ----------- INSTANTIATE_TEST_SUITE_P(Allocator, FragmentationSimulator, ::testing::Values("stupid", "bitmap", "avl", "btree")); boost::intrusive_ptr<CephContext> FragmentationSimulator::cct; int main(int argc, char **argv) { auto args = argv_to_vec(argc, argv); FragmentationSimulator::cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); common_init_finish(FragmentationSimulator::cct->get()); FragmentationSimulator::cct->_conf->bluestore_clone_cow = false; ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
5,420
27.68254
80
cc
null
ceph-main/src/test/objectstore/ObjectStoreImitator.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Fragmentation Simulator * Author: Tri Dao, daominhtri0503@gmail.com */ #include "test/objectstore/ObjectStoreImitator.h" #include "common/errno.h" #include "include/ceph_assert.h" #include "include/intarith.h" #define dout_context cct #define OBJECT_MAX_SIZE 0xffffffff // 32 bits void ObjectStoreImitator::init_alloc(const std::string &alloc_type, uint64_t size) { alloc.reset(Allocator::create(cct, alloc_type, size, min_alloc_size)); alloc->init_add_free(0, size); ceph_assert(alloc->get_free() == size); } void ObjectStoreImitator::print_status() { std::cout << std::hex << "Fragmentation score: " << alloc->get_fragmentation_score() << " , fragmentation: " << alloc->get_fragmentation() << ", allocator type " << alloc->get_type() << ", capacity 0x" << alloc->get_capacity() << ", block size 0x" << alloc->get_block_size() << ", free 0x" << alloc->get_free() << std::dec << std::endl; } void ObjectStoreImitator::verify_objects(CollectionHandle &ch) { Collection *c = static_cast<Collection *>(ch.get()); c->verify_objects(); } // ------- Transactions ------- int ObjectStoreImitator::queue_transactions(CollectionHandle &ch, std::vector<Transaction> &tls, TrackedOpRef op, ThreadPool::TPHandle *handle) { for (std::vector<Transaction>::iterator p = tls.begin(); p != tls.end(); ++p) { _add_transaction(&(*p)); } if (handle) handle->suspend_tp_timeout(); if (handle) handle->reset_tp_timeout(); return 0; } ObjectStoreImitator::CollectionRef ObjectStoreImitator::_get_collection(const coll_t &cid) { std::shared_lock l(coll_lock); ceph::unordered_map<coll_t, CollectionRef>::iterator cp = coll_map.find(cid); if (cp == coll_map.end()) return CollectionRef(); return cp->second; } void ObjectStoreImitator::_add_transaction(Transaction *t) { Transaction::iterator i = t->begin(); std::vector<CollectionRef> cvec(i.colls.size()); unsigned j = 0; for (auto p = i.colls.begin(); p != 
i.colls.end(); ++p, ++j) { cvec[j] = _get_collection(*p); } std::vector<ObjectRef> ovec(i.objects.size()); for (int pos = 0; i.have_op(); ++pos) { Transaction::Op *op = i.decode_op(); int r = 0; // no coll or obj if (op->op == Transaction::OP_NOP) continue; // collection operations CollectionRef &c = cvec[op->cid]; switch (op->op) { case Transaction::OP_RMCOLL: { const coll_t &cid = i.get_cid(op->cid); r = _remove_collection(cid, &c); if (!r) continue; } break; case Transaction::OP_MKCOLL: { ceph_assert(!c); const coll_t &cid = i.get_cid(op->cid); r = _create_collection(cid, op->split_bits, &c); if (!r) continue; } break; case Transaction::OP_SPLIT_COLLECTION: ceph_abort_msg("deprecated"); break; case Transaction::OP_SPLIT_COLLECTION2: { uint32_t bits = op->split_bits; uint32_t rem = op->split_rem; r = _split_collection(c, cvec[op->dest_cid], bits, rem); if (!r) continue; } break; case Transaction::OP_MERGE_COLLECTION: { uint32_t bits = op->split_bits; r = _merge_collection(&c, cvec[op->dest_cid], bits); if (!r) continue; } break; case Transaction::OP_COLL_HINT: { uint32_t type = op->hint; bufferlist hint; i.decode_bl(hint); auto hiter = hint.cbegin(); if (type == Transaction::COLL_HINT_EXPECTED_NUM_OBJECTS) { uint32_t pg_num; uint64_t num_objs; decode(pg_num, hiter); decode(num_objs, hiter); } continue; } break; case Transaction::OP_COLL_SETATTR: r = -EOPNOTSUPP; break; case Transaction::OP_COLL_RMATTR: r = -EOPNOTSUPP; break; case Transaction::OP_COLL_RENAME: ceph_abort_msg("not implemented"); break; } // these operations implicity create the object bool create = (op->op == Transaction::OP_TOUCH || op->op == Transaction::OP_CREATE || op->op == Transaction::OP_WRITE || op->op == Transaction::OP_ZERO); // object operations std::unique_lock l(c->lock); ObjectRef &o = ovec[op->oid]; if (!o) { ghobject_t oid = i.get_oid(op->oid); o = c->get_obj(oid, create); } if (!create && (!o || !o->exists)) { r = -ENOENT; goto endop; } switch (op->op) { case 
Transaction::OP_CREATE: case Transaction::OP_TOUCH: { _assign_nid(o); r = 0; } break; case Transaction::OP_WRITE: { uint64_t off = op->off; uint64_t len = op->len; uint32_t fadvise_flags = i.get_fadvise_flags(); bufferlist bl; i.decode_bl(bl); r = _write(c, o, off, len, bl, fadvise_flags); } break; case Transaction::OP_ZERO: { if (op->off + op->len > OBJECT_MAX_SIZE) { r = -E2BIG; } else { _assign_nid(o); r = _do_zero(c, o, op->off, op->len); } } break; case Transaction::OP_TRIMCACHE: { // deprecated, no-op } break; case Transaction::OP_TRUNCATE: { _do_truncate(c, o, op->off); } break; case Transaction::OP_REMOVE: { _do_truncate(c, o, 0); } break; case Transaction::OP_SETATTR: case Transaction::OP_SETATTRS: case Transaction::OP_RMATTR: case Transaction::OP_RMATTRS: break; case Transaction::OP_CLONE: { ObjectRef &no = ovec[op->dest_oid]; if (!no) { const ghobject_t &noid = i.get_oid(op->dest_oid); no = c->get_obj(noid, true); } r = _clone(c, o, no); } break; case Transaction::OP_CLONERANGE: ceph_abort_msg("deprecated"); break; case Transaction::OP_CLONERANGE2: { ObjectRef &no = ovec[op->dest_oid]; if (!no) { const ghobject_t &noid = i.get_oid(op->dest_oid); no = c->get_obj(noid, true); } uint64_t srcoff = op->off; uint64_t len = op->len; uint64_t dstoff = op->dest_off; r = _clone_range(c, o, no, srcoff, len, dstoff); } break; case Transaction::OP_COLL_ADD: case Transaction::OP_COLL_REMOVE: ceph_abort_msg("not implemented"); break; case Transaction::OP_COLL_MOVE: ceph_abort_msg("deprecated"); break; case Transaction::OP_COLL_MOVE_RENAME: case Transaction::OP_TRY_RENAME: { ceph_assert(op->cid == op->dest_cid); const ghobject_t &noid = i.get_oid(op->dest_oid); ObjectRef &no = ovec[op->dest_oid]; if (!no) { no = c->get_obj(noid, false); } r = _rename(c, o, no, noid); } break; case Transaction::OP_OMAP_CLEAR: case Transaction::OP_OMAP_SETKEYS: case Transaction::OP_OMAP_RMKEYS: case Transaction::OP_OMAP_RMKEYRANGE: case Transaction::OP_OMAP_SETHEADER: break; case 
Transaction::OP_SETALLOCHINT: { r = _set_alloc_hint(c, o, op->expected_object_size, op->expected_write_size, op->hint); } break; default: derr << __func__ << " bad op " << op->op << dendl; ceph_abort(); } endop: if (r < 0) { derr << __func__ << " error " << cpp_strerror(r) << " not handled on operation " << op->op << " (op " << pos << ", counting from 0)" << dendl; ceph_abort_msg("unexpected error"); } } } int ObjectStoreImitator::read(CollectionHandle &c_, const ghobject_t &oid, uint64_t offset, size_t length, bufferlist &bl, uint32_t op_flags) { Collection *c = static_cast<Collection *>(c_.get()); if (!c->exists) return -ENOENT; bl.clear(); int r; { std::shared_lock l(c->lock); ObjectRef o = c->get_obj(oid, false); if (!o || !o->exists) { r = -ENOENT; goto out; } if (offset == length && offset == 0) length = o->size; r = _do_read(c, o, offset, length, bl, op_flags); } out: return r; } // ------- Helpers ------- void ObjectStoreImitator::_assign_nid(ObjectRef &o) { if (o->nid) { ceph_assert(o->exists); } o->nid = ++nid_last; o->exists = true; } int ObjectStoreImitator::_do_zero(CollectionRef &c, ObjectRef &o, uint64_t offset, size_t length) { PExtentVector old_extents; o->punch_hole(offset, length, old_extents); alloc->release(old_extents); return 0; } int ObjectStoreImitator::_do_read(Collection *c, ObjectRef &o, uint64_t offset, size_t len, ceph::buffer::list &bl, uint32_t op_flags, uint64_t retry_count) { auto data = std::string(len, 'a'); bl.append(data); return bl.length(); } int ObjectStoreImitator::_do_write(CollectionRef &c, ObjectRef &o, uint64_t offset, uint64_t length, bufferlist &bl, uint32_t fadvise_flags) { ceph_assert(length == bl.length()); int r = 0; uint64_t end = length + offset; if (length == 0) { return 0; } PExtentVector punched; o->punch_hole(offset, length, punched); alloc->release(punched); // all writes will trigger an allocation r = _do_alloc_write(c, o, bl); if (r < 0) { derr << __func__ << " _do_alloc_write failed with " << 
cpp_strerror(r) << dendl; goto out; } if (end > o->size) { o->size = end; } r = 0; out: return r; } int ObjectStoreImitator::_do_clone_range(CollectionRef &c, ObjectRef &oldo, ObjectRef &newo, uint64_t srcoff, uint64_t length, uint64_t dstoff) { if (dstoff + length > newo->size) newo->size = dstoff + length; return 0; } // ------- Operations ------- int ObjectStoreImitator::_write(CollectionRef &c, ObjectRef &o, uint64_t offset, size_t length, bufferlist &bl, uint32_t fadvise_flags) { int r = 0; if (offset + length >= OBJECT_MAX_SIZE) { r = -E2BIG; } else { _assign_nid(o); r = _do_write(c, o, offset, length, bl, fadvise_flags); } return r; } int ObjectStoreImitator::_do_alloc_write(CollectionRef coll, ObjectRef &o, bufferlist &bl) { // No compression for now uint64_t need = p2roundup(static_cast<uint64_t>(bl.length()), min_alloc_size); PExtentVector prealloc; int64_t prealloc_left = alloc->allocate(need, min_alloc_size, need, 0, &prealloc); if (prealloc_left < 0 || prealloc_left < (int64_t)need) { derr << __func__ << " failed to allocate 0x" << std::hex << need << " allocated 0x " << (prealloc_left < 0 ? 
0 : prealloc_left) << " min_alloc_size 0x" << min_alloc_size << " available 0x " << alloc->get_free() << std::dec << dendl; if (prealloc.size()) alloc->release(prealloc); return -ENOSPC; } auto prealloc_pos = prealloc.begin(); ceph_assert(prealloc_pos != prealloc.end()); PExtentVector extents; int64_t left = need; while (left > 0) { ceph_assert(prealloc_left > 0); if (prealloc_pos->length <= left) { prealloc_left -= prealloc_pos->length; left -= prealloc_pos->length; extents.push_back(*prealloc_pos); ++prealloc_pos; } else { extents.emplace_back(prealloc_pos->offset, left); prealloc_pos->offset += left; prealloc_pos->length -= left; prealloc_left -= left; left = 0; break; } } o->append(extents); if (prealloc_left > 0) { PExtentVector old_extents; while (prealloc_pos != prealloc.end()) { old_extents.push_back(*prealloc_pos); prealloc_left -= prealloc_pos->length; ++prealloc_pos; } alloc->release(old_extents); } ceph_assert(prealloc_pos == prealloc.end()); ceph_assert(prealloc_left == 0); return 0; } void ObjectStoreImitator::_do_truncate(CollectionRef &c, ObjectRef &o, uint64_t offset) { if (offset == o->size) return; PExtentVector old_extents; o->punch_hole(offset, o->size - offset, old_extents); o->size = offset; alloc->release(old_extents); } int ObjectStoreImitator::_rename(CollectionRef &c, ObjectRef &oldo, ObjectRef &newo, const ghobject_t &new_oid) { int r; ghobject_t old_oid = oldo->oid; if (newo) { if (newo->exists) { r = -EEXIST; goto out; } } newo = oldo; c->rename_obj(oldo, old_oid, new_oid); r = 0; out: return r; } int ObjectStoreImitator::_set_alloc_hint(CollectionRef &c, ObjectRef &o, uint64_t expected_object_size, uint64_t expected_write_size, uint32_t flags) { o->expected_object_size = expected_object_size; o->expected_write_size = expected_write_size; o->alloc_hint_flags = flags; return 0; } int ObjectStoreImitator::_clone(CollectionRef &c, ObjectRef &oldo, ObjectRef &newo) { int r = 0; if (oldo->oid.hobj.get_hash() != newo->oid.hobj.get_hash()) { 
return -EINVAL; } _assign_nid(newo); _do_truncate(c, newo, 0); if (cct->_conf->bluestore_clone_cow) { _do_clone_range(c, oldo, newo, 0, oldo->size, 0); } else { bufferlist bl; r = _do_read(c.get(), oldo, 0, oldo->size, bl, 0); if (r < 0) goto out; r = _do_write(c, newo, 0, oldo->size, bl, 0); if (r < 0) goto out; } r = 0; out: return r; } int ObjectStoreImitator::_clone_range(CollectionRef &c, ObjectRef &oldo, ObjectRef &newo, uint64_t srcoff, uint64_t length, uint64_t dstoff) { int r = 0; if (srcoff + length >= OBJECT_MAX_SIZE || dstoff + length >= OBJECT_MAX_SIZE) { r = -E2BIG; goto out; } if (srcoff + length > oldo->size) { r = -EINVAL; goto out; } _assign_nid(newo); if (length > 0) { if (cct->_conf->bluestore_clone_cow) { _do_zero(c, newo, dstoff, length); _do_clone_range(c, oldo, newo, srcoff, length, dstoff); } else { bufferlist bl; r = _do_read(c.get(), oldo, srcoff, length, bl, 0); if (r < 0) goto out; r = _do_write(c, newo, dstoff, bl.length(), bl, 0); if (r < 0) goto out; } } r = 0; out: return r; } // ------- Collections ------- int ObjectStoreImitator::_merge_collection(CollectionRef *c, CollectionRef &d, unsigned bits) { std::unique_lock l((*c)->lock); std::unique_lock l2(d->lock); coll_t cid = (*c)->cid; spg_t pgid, dest_pgid; bool is_pg = cid.is_pg(&pgid); ceph_assert(is_pg); is_pg = d->cid.is_pg(&dest_pgid); ceph_assert(is_pg); // adjust bits. note that this will be redundant for all but the first // merge call for the parent/target. d->cnode.bits = bits; // remove source collection { std::unique_lock l3(coll_lock); _do_remove_collection(c); } return 0; } int ObjectStoreImitator::_split_collection(CollectionRef &c, CollectionRef &d, unsigned bits, int rem) { std::unique_lock l(c->lock); std::unique_lock l2(d->lock); // move any cached items (onodes and referenced shared blobs) that will // belong to the child collection post-split. leave everything else behind. 
// this may include things that don't strictly belong to the now-smaller // parent split, but the OSD will always send us a split for every new // child. spg_t pgid, dest_pgid; bool is_pg = c->cid.is_pg(&pgid); ceph_assert(is_pg); is_pg = d->cid.is_pg(&dest_pgid); ceph_assert(is_pg); ceph_assert(d->cnode.bits == bits); // adjust bits. note that this will be redundant for all but the first // split call for this parent (first child). c->cnode.bits = bits; ceph_assert(d->cnode.bits == bits); return 0; }; ObjectStore::CollectionHandle ObjectStoreImitator::open_collection(const coll_t &cid) { std::shared_lock l(coll_lock); ceph::unordered_map<coll_t, CollectionRef>::iterator cp = coll_map.find(cid); if (cp == coll_map.end()) return CollectionRef(); return cp->second; } ObjectStore::CollectionHandle ObjectStoreImitator::create_new_collection(const coll_t &cid) { std::unique_lock l{coll_lock}; auto c = ceph::make_ref<Collection>(this, cid); new_coll_map[cid] = c; return c; } void ObjectStoreImitator::set_collection_commit_queue( const coll_t &cid, ContextQueue *commit_queue) { if (commit_queue) { std::shared_lock l(coll_lock); if (coll_map.count(cid)) { coll_map[cid]->commit_queue = commit_queue; } else if (new_coll_map.count(cid)) { new_coll_map[cid]->commit_queue = commit_queue; } } } bool ObjectStoreImitator::exists(CollectionHandle &c_, const ghobject_t &oid) { Collection *c = static_cast<Collection *>(c_.get()); if (!c->exists) return false; bool r = true; { std::shared_lock l(c->lock); ObjectRef o = c->get_obj(oid, false); if (!o || !o->exists) r = false; } return r; } int ObjectStoreImitator::set_collection_opts(CollectionHandle &ch, const pool_opts_t &opts) { Collection *c = static_cast<Collection *>(ch.get()); if (!c->exists) return -ENOENT; std::unique_lock l{c->lock}; c->pool_opts = opts; return 0; } int ObjectStoreImitator::list_collections(std::vector<coll_t> &ls) { std::shared_lock l(coll_lock); ls.reserve(coll_map.size()); for (ceph::unordered_map<coll_t, 
CollectionRef>::iterator p = coll_map.begin(); p != coll_map.end(); ++p) ls.push_back(p->first); return 0; } bool ObjectStoreImitator::collection_exists(const coll_t &c) { std::shared_lock l(coll_lock); return coll_map.count(c); } int ObjectStoreImitator::collection_empty(CollectionHandle &ch, bool *empty) { std::vector<ghobject_t> ls; ghobject_t next; int r = collection_list(ch, ghobject_t(), ghobject_t::get_max(), 1, &ls, &next); if (r < 0) { derr << __func__ << " collection_list returned: " << cpp_strerror(r) << dendl; return r; } *empty = ls.empty(); return 0; } int ObjectStoreImitator::collection_bits(CollectionHandle &ch) { Collection *c = static_cast<Collection *>(ch.get()); std::shared_lock l(c->lock); return c->cnode.bits; } int ObjectStoreImitator::collection_list(CollectionHandle &c_, const ghobject_t &start, const ghobject_t &end, int max, std::vector<ghobject_t> *ls, ghobject_t *pnext) { Collection *c = static_cast<Collection *>(c_.get()); c->flush(); int r; { std::shared_lock l(c->lock); r = _collection_list(c, start, end, max, false, ls, pnext); } return r; } int ObjectStoreImitator::_collection_list( Collection *c, const ghobject_t &start, const ghobject_t &end, int max, bool legacy, std::vector<ghobject_t> *ls, ghobject_t *next) { if (!c->exists) return -ENOENT; if (start.is_max() || start.hobj.is_max()) { *next = ghobject_t::get_max(); return 0; } auto it = c->objects.find(start); if (it == c->objects.end()) { *next = ghobject_t::get_max(); return -ENOENT; } do { ls->push_back((it++)->first); if (ls->size() >= (unsigned)max) { *next = it->first; return 0; } } while (it != c->objects.end() && it->first != end); if (it != c->objects.end()) *next = it->first; else *next = ghobject_t::get_max(); return 0; } int ObjectStoreImitator::_remove_collection(const coll_t &cid, CollectionRef *c) { int r; { std::unique_lock l(coll_lock); if (!*c) { r = -ENOENT; goto out; } ceph_assert((*c)->exists); for (auto o : (*c)->objects) { if (o.second->exists) { r = 
-ENOTEMPTY; goto out; } } _do_remove_collection(c); r = 0; } out: return r; } void ObjectStoreImitator::_do_remove_collection(CollectionRef *c) { coll_map.erase((*c)->cid); (*c)->exists = false; c->reset(); } int ObjectStoreImitator::_create_collection(const coll_t &cid, unsigned bits, CollectionRef *c) { int r; bufferlist bl; { std::unique_lock l(coll_lock); if (*c) { r = -EEXIST; goto out; } auto p = new_coll_map.find(cid); ceph_assert(p != new_coll_map.end()); *c = p->second; (*c)->cnode.bits = bits; coll_map[cid] = *c; new_coll_map.erase(p); } encode((*c)->cnode, bl); r = 0; out: return r; }
21,195
24.785888
80
cc
null
ceph-main/src/test/objectstore/ObjectStoreImitator.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Fragmentation Simulator * Author: Tri Dao, daominhtri0503@gmail.com */ #pragma once #include "include/common_fwd.h" #include "os/ObjectStore.h" #include "os/bluestore/Allocator.h" #include "os/bluestore/bluestore_types.h" #include <algorithm> #include <boost/smart_ptr/intrusive_ptr.hpp> /** * ObjectStoreImitator will simulate how BlueStore does IO (as of the time * the simulator is written) and assess the defragmentation levels of different * allocators. As the main concern of the simulator is allocators, it mainly * focuses on operations that triggers IOs and tries to simplify the rest as * much as possible(caches, memory buffers). * * The simulator inherited from ObjectStore and tries to simulate BlueStore as * much as possible. * * #Note: This is an allocation simulator not a data consistency simulator so * any object data is not stored. */ class ObjectStoreImitator : public ObjectStore { private: class Collection; typedef boost::intrusive_ptr<Collection> CollectionRef; struct Object : public RefCountedObject { Collection *c; ghobject_t oid; bool exists; uint64_t nid; uint64_t size; uint32_t alloc_hint_flags = 0; uint32_t expected_object_size = 0; uint32_t expected_write_size = 0; // We assume these extents are sorted according by "logical" order. 
PExtentVector extents; Object(Collection *c_, const ghobject_t &oid_, bool exists_ = false, uint64_t nid_ = 0, uint64_t size_ = 0) : c(c_), oid(oid_), exists(exists_), nid(nid_), size(size_) {} void punch_hole(uint64_t offset, uint64_t length, PExtentVector &old_extents) { if (offset >= size || length == 0) return; if (offset + length >= size) { length = size - offset; } uint64_t l_offset{0}, punched_length{0}; PExtentVector to_be_punched, remains; for (auto e : extents) { if (l_offset > offset && l_offset - length >= offset) break; // Found where we need to punch if (l_offset >= offset) { // We only punched a portion of the extent if (e.length + punched_length > length) { uint64_t left = e.length + punched_length - length; e.length = length - punched_length; remains.emplace_back(e.offset + e.length, left); } to_be_punched.push_back(e); punched_length += e.length; } else { // else the extent will remain remains.push_back(e); } l_offset += e.length; } size -= punched_length; extents = remains; old_extents = to_be_punched; } void append(PExtentVector &ext) { for (auto &e : ext) { extents.push_back(e); size += e.length; } std::sort(extents.begin(), extents.end(), [](bluestore_pextent_t &a, bluestore_pextent_t &b) { return a.offset < b.offset; }); } void verify_extents() { uint64_t total{0}; for (auto &e : extents) { ceph_assert(total <= e.offset); ceph_assert(e.length > 0); total += e.length; } ceph_assert(total == size); } }; typedef boost::intrusive_ptr<Object> ObjectRef; struct Collection : public CollectionImpl { bluestore_cnode_t cnode; std::map<ghobject_t, ObjectRef> objects; ceph::shared_mutex lock = ceph::make_shared_mutex( "FragmentationSimulator::Collection::lock", true, false); // Lock for 'objects' ceph::recursive_mutex obj_lock = ceph::make_recursive_mutex( "FragmentationSimulator::Collection::obj_lock"); bool exists; // pool options pool_opts_t pool_opts; ContextQueue *commit_queue; bool contains(const ghobject_t &oid) { if (cid.is_meta()) return 
oid.hobj.pool == -1; spg_t spgid; if (cid.is_pg(&spgid)) return spgid.pgid.contains(cnode.bits, oid) && oid.shard_id == spgid.shard; return false; } int64_t pool() const { return cid.pool(); } ObjectRef get_obj(const ghobject_t &oid, bool create) { ceph_assert(create ? ceph_mutex_is_wlocked(lock) : ceph_mutex_is_locked(lock)); spg_t pgid; if (cid.is_pg(&pgid) && !oid.match(cnode.bits, pgid.ps())) { ceph_abort(); } auto o = objects.find(oid); if (o != objects.end()) return o->second; if (!create) return nullptr; return objects[oid] = new Object(this, oid); } bool flush_commit(Context *c) override { return false; } void flush() override {} void rename_obj(ObjectRef &oldo, const ghobject_t &old_oid, const ghobject_t &new_oid) { std::lock_guard l(obj_lock); auto po = objects.find(old_oid); auto pn = objects.find(new_oid); ceph_assert(po != pn); ceph_assert(po != objects.end()); if (pn != objects.end()) { objects.erase(pn); } ObjectRef o = po->second; oldo.reset(new Object(o->c, old_oid)); po->second = oldo; objects.insert(std::make_pair(new_oid, o)); o->oid = new_oid; } void verify_objects() { for (auto &[_, obj] : objects) { obj->verify_extents(); } } Collection(ObjectStoreImitator *sim_, coll_t cid_) : CollectionImpl(sim_->cct, cid_), exists(true), commit_queue(nullptr) { } }; CollectionRef _get_collection(const coll_t &cid); int _split_collection(CollectionRef &c, CollectionRef &d, unsigned bits, int rem); int _merge_collection(CollectionRef *c, CollectionRef &d, unsigned bits); int _collection_list(Collection *c, const ghobject_t &start, const ghobject_t &end, int max, bool legacy, std::vector<ghobject_t> *ls, ghobject_t *next); int _remove_collection(const coll_t &cid, CollectionRef *c); void _do_remove_collection(CollectionRef *c); int _create_collection(const coll_t &cid, unsigned bits, CollectionRef *c); // Transactions void _add_transaction(Transaction *t); // Object ops int _write(CollectionRef &c, ObjectRef &o, uint64_t offset, size_t length, bufferlist &bl, 
uint32_t fadvise_flags); int _set_alloc_hint(CollectionRef &c, ObjectRef &o, uint64_t expected_object_size, uint64_t expected_write_size, uint32_t flags); int _rename(CollectionRef &c, ObjectRef &oldo, ObjectRef &newo, const ghobject_t &new_oid); int _clone(CollectionRef &c, ObjectRef &oldo, ObjectRef &newo); int _clone_range(CollectionRef &c, ObjectRef &oldo, ObjectRef &newo, uint64_t srcoff, uint64_t length, uint64_t dstoff); int read(CollectionHandle &c, const ghobject_t &oid, uint64_t offset, size_t len, ceph::buffer::list &bl, uint32_t op_flags = 0) override; // Helpers void _assign_nid(ObjectRef &o); int _do_write(CollectionRef &c, ObjectRef &o, uint64_t offset, uint64_t length, ceph::buffer::list &bl, uint32_t fadvise_flags); int _do_alloc_write(CollectionRef c, ObjectRef &o, bufferlist &bl); void _do_truncate(CollectionRef &c, ObjectRef &o, uint64_t offset); int _do_zero(CollectionRef &c, ObjectRef &o, uint64_t offset, size_t length); int _do_clone_range(CollectionRef &c, ObjectRef &oldo, ObjectRef &newo, uint64_t srcoff, uint64_t length, uint64_t dstoff); int _do_read(Collection *c, ObjectRef &o, uint64_t offset, size_t len, ceph::buffer::list &bl, uint32_t op_flags = 0, uint64_t retry_count = 0); // Members boost::scoped_ptr<Allocator> alloc; std::atomic<uint64_t> nid_last = {0}; uint64_t min_alloc_size; ///< minimum allocation unit (power of 2) static_assert(std::numeric_limits<uint8_t>::max() > std::numeric_limits<decltype(min_alloc_size)>::digits, "not enough bits for min_alloc_size"); ///< rwlock to protect coll_map/new_coll_map ceph::shared_mutex coll_lock = ceph::make_shared_mutex("FragmentationSimulator::coll_lock"); std::unordered_map<coll_t, CollectionRef> coll_map; std::unordered_map<coll_t, CollectionRef> new_coll_map; // store collections that is opened via open_new_collection // but a create txn has not executed public: ObjectStoreImitator(CephContext *cct, const std::string &path_, uint64_t min_alloc_size_) : ObjectStore(cct, path_), 
alloc(nullptr), min_alloc_size(min_alloc_size_) {} ~ObjectStoreImitator() = default; void init_alloc(const std::string &alloc_type, uint64_t size); void print_status(); void verify_objects(CollectionHandle &ch); // Overrides // This is often not called directly but through queue_transaction int queue_transactions(CollectionHandle &ch, std::vector<Transaction> &tls, TrackedOpRef op = TrackedOpRef(), ThreadPool::TPHandle *handle = NULL) override; CollectionHandle open_collection(const coll_t &cid) override; CollectionHandle create_new_collection(const coll_t &cid) override; void set_collection_commit_queue(const coll_t &cid, ContextQueue *commit_queue) override; bool exists(CollectionHandle &c, const ghobject_t &old) override; int set_collection_opts(CollectionHandle &c, const pool_opts_t &opts) override; int list_collections(std::vector<coll_t> &ls) override; bool collection_exists(const coll_t &c) override; int collection_empty(CollectionHandle &c, bool *empty) override; int collection_bits(CollectionHandle &c) override; int collection_list(CollectionHandle &c, const ghobject_t &start, const ghobject_t &end, int max, std::vector<ghobject_t> *ls, ghobject_t *next) override; // Not used but implemented so it compiles std::string get_type() override { return "ObjectStoreImitator"; } bool test_mount_in_use() override { return false; } int mount() override { return 0; } int umount() override { return 0; } int validate_hobject_key(const hobject_t &obj) const override { return 0; } unsigned get_max_attr_name_length() override { return 256; } int mkfs() override { return 0; } int mkjournal() override { return 0; } bool needs_journal() override { return false; } bool wants_journal() override { return false; } bool allows_journal() override { return false; } int statfs(struct store_statfs_t *buf, osd_alert_list_t *alerts = nullptr) override { return 0; } int pool_statfs(uint64_t pool_id, struct store_statfs_t *buf, bool *per_pool_omap) override { return 0; } int 
stat(CollectionHandle &c, const ghobject_t &oid, struct stat *st, bool allow_eio = false) override { return 0; } int fiemap(CollectionHandle &c, const ghobject_t &oid, uint64_t offset, size_t len, ceph::buffer::list &bl) override { return 0; } int fiemap(CollectionHandle &c, const ghobject_t &oid, uint64_t offset, size_t len, std::map<uint64_t, uint64_t> &destmap) override { return 0; } int getattr(CollectionHandle &c, const ghobject_t &oid, const char *name, ceph::buffer::ptr &value) override { return 0; } int getattrs( CollectionHandle &c, const ghobject_t &oid, std::map<std::string, ceph::buffer::ptr, std::less<>> &aset) override { return 0; } int omap_get(CollectionHandle &c, ///< [in] Collection containing oid const ghobject_t &oid, ///< [in] Object containing omap ceph::buffer::list *header, ///< [out] omap header std::map<std::string, ceph::buffer::list> *out /// < [out] Key to value std::map ) override { return 0; } int omap_get_header(CollectionHandle &c, ///< [in] Collection containing oid const ghobject_t &oid, ///< [in] Object containing omap ceph::buffer::list *header, ///< [out] omap header bool allow_eio = false ///< [in] don't assert on eio ) override { return 0; } int omap_get_keys(CollectionHandle &c, ///< [in] Collection containing oid const ghobject_t &oid, ///< [in] Object containing omap std::set<std::string> *keys ///< [out] Keys defined on oid ) override { return 0; } int omap_get_values(CollectionHandle &c, ///< [in] Collection containing oid const ghobject_t &oid, ///< [in] Object containing omap const std::set<std::string> &keys, ///< [in] Keys to get std::map<std::string, ceph::buffer::list> *out ///< [out] Returned keys and values ) override { return 0; } int omap_check_keys( CollectionHandle &c, ///< [in] Collection containing oid const ghobject_t &oid, ///< [in] Object containing omap const std::set<std::string> &keys, ///< [in] Keys to check std::set<std::string> *out ///< [out] Subset of keys defined on oid ) override { return 0; } 
ObjectMap::ObjectMapIterator get_omap_iterator(CollectionHandle &c, ///< [in] collection const ghobject_t &oid ///< [in] object ) override { return {}; } void set_fsid(uuid_d u) override {} uuid_d get_fsid() override { return {}; } uint64_t estimate_objects_overhead(uint64_t num_objects) override { return num_objects * 300; } objectstore_perf_stat_t get_cur_stats() override { return {}; } const PerfCounters *get_perf_counters() const override { return nullptr; }; };
14,067
35.54026
80
h
null
ceph-main/src/test/objectstore/ObjectStoreTransactionBenchmark.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2014 UnitedStack <haomai@unitedstack.com> * * Author: Haomai Wang <haomaiwang@gmail.com> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <stdlib.h> #include <stdint.h> #include <string> #include <iostream> using namespace std; #include "common/ceph_argparse.h" #include "common/debug.h" #include "common/Cycles.h" #include "global/global_init.h" #include "os/ObjectStore.h" class Transaction { private: ObjectStore::Transaction t; public: struct Tick { uint64_t ticks; uint64_t count; Tick(): ticks(0), count(0) {} void add(uint64_t a) { ticks += a; count++; } }; static Tick write_ticks, setattr_ticks, omap_setkeys_ticks, omap_rmkey_ticks; static Tick encode_ticks, decode_ticks, iterate_ticks; void write(coll_t cid, const ghobject_t& oid, uint64_t off, uint64_t len, const bufferlist& data) { uint64_t start_time = Cycles::rdtsc(); t.write(cid, oid, off, len, data); write_ticks.add(Cycles::rdtsc() - start_time); } void setattr(coll_t cid, const ghobject_t& oid, const string &name, bufferlist& val) { uint64_t start_time = Cycles::rdtsc(); t.setattr(cid, oid, name, val); setattr_ticks.add(Cycles::rdtsc() - start_time); } void omap_setkeys(coll_t cid, const ghobject_t &oid, const map<string, bufferlist> &attrset) { uint64_t start_time = Cycles::rdtsc(); t.omap_setkeys(cid, oid, attrset); omap_setkeys_ticks.add(Cycles::rdtsc() - start_time); } void omap_rmkey(coll_t cid, const ghobject_t &oid, const string &key) { uint64_t start_time = Cycles::rdtsc(); t.omap_rmkey(cid, oid, key); omap_rmkey_ticks.add(Cycles::rdtsc() - start_time); } void apply_encode_decode() { bufferlist bl; ObjectStore::Transaction d; uint64_t start_time = Cycles::rdtsc(); 
t.encode(bl); encode_ticks.add(Cycles::rdtsc() - start_time); auto bliter = bl.cbegin(); start_time = Cycles::rdtsc(); d.decode(bliter); decode_ticks.add(Cycles::rdtsc() - start_time); } void apply_iterate() { uint64_t start_time = Cycles::rdtsc(); ObjectStore::Transaction::iterator i = t.begin(); while (i.have_op()) { ObjectStore::Transaction::Op *op = i.decode_op(); switch (op->op) { case ObjectStore::Transaction::OP_WRITE: { ghobject_t oid = i.get_oid(op->oid); bufferlist bl; i.decode_bl(bl); } break; case ObjectStore::Transaction::OP_SETATTR: { ghobject_t oid = i.get_oid(op->oid); string name = i.decode_string(); bufferlist bl; i.decode_bl(bl); map<string, bufferptr> to_set; to_set[name] = bufferptr(bl.c_str(), bl.length()); } break; case ObjectStore::Transaction::OP_OMAP_SETKEYS: { ghobject_t oid = i.get_oid(op->oid); map<string, bufferptr> aset; i.decode_attrset(aset); } break; case ObjectStore::Transaction::OP_OMAP_RMKEYS: { ghobject_t oid = i.get_oid(op->oid); set<string> keys; i.decode_keyset(keys); } break; } } iterate_ticks.add(Cycles::rdtsc() - start_time); } static void dump_stat() { cerr << " write op: " << Cycles::to_microseconds(write_ticks.ticks) << "us count: " << write_ticks.count << std::endl; cerr << " setattr op: " << Cycles::to_microseconds(setattr_ticks.ticks) << "us count: " << setattr_ticks.count << std::endl; cerr << " omap_setkeys op: " << Cycles::to_microseconds(Transaction::omap_setkeys_ticks.ticks) << "us count: " << Transaction::omap_setkeys_ticks.count << std::endl; cerr << " omap_rmkey op: " << Cycles::to_microseconds(Transaction::omap_rmkey_ticks.ticks) << "us count: " << Transaction::omap_rmkey_ticks.count << std::endl; cerr << " encode op: " << Cycles::to_microseconds(Transaction::encode_ticks.ticks) << "us count: " << Transaction::encode_ticks.count << std::endl; cerr << " decode op: " << Cycles::to_microseconds(Transaction::decode_ticks.ticks) << "us count: " << Transaction::decode_ticks.count << std::endl; cerr << " iterate 
op: " << Cycles::to_microseconds(Transaction::iterate_ticks.ticks) << "us count: " << Transaction::iterate_ticks.count << std::endl; } }; class PerfCase { static const uint64_t Kib = 1024; static const uint64_t Mib = 1024 * 1024; static const string info_epoch_attr; static const string info_info_attr; static const string attr; static const string snapset_attr; static const string pglog_attr; static const coll_t meta_cid; static const coll_t cid; static const ghobject_t pglog_oid; static const ghobject_t info_oid; map<string, bufferlist> data; ghobject_t create_object() { bufferlist bl = generate_random(100, 1); return ghobject_t(hobject_t(string("obj_")+string(bl.c_str()), string(), rand() & 2 ? CEPH_NOSNAP : rand(), rand() & 0xFF, 0, "")); } bufferlist generate_random(uint64_t len, int frag) { static const char alphanum[] = "0123456789" "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "abcdefghijklmnopqrstuvwxyz"; uint64_t per_frag = len / frag; bufferlist bl; for (int i = 0; i < frag; i++ ) { bufferptr bp(per_frag); for (unsigned int j = 0; j < len; j++) { bp[j] = alphanum[rand() % (sizeof(alphanum) - 1)]; } bl.append(bp); } return bl; } public: PerfCase() { uint64_t four_kb = Kib * 4; uint64_t one_mb = Mib * 1; uint64_t four_mb = Mib * 4; data["4k"] = generate_random(four_kb, 1); data["1m"] = generate_random(one_mb, 1); data["4m"] = generate_random(four_mb, 1); data[attr] = generate_random(256, 1); data[snapset_attr] = generate_random(32, 1); data[pglog_attr] = generate_random(128, 1); data[info_epoch_attr] = generate_random(4, 1); data[info_info_attr] = generate_random(560, 1); } uint64_t rados_write_4k(int times) { uint64_t ticks = 0; uint64_t len = Kib *4; for (int i = 0; i < times; i++) { uint64_t start_time = 0; { Transaction t; ghobject_t oid = create_object(); start_time = Cycles::rdtsc(); t.write(cid, oid, 0, len, data["4k"]); t.setattr(cid, oid, attr, data[attr]); t.setattr(cid, oid, snapset_attr, data[snapset_attr]); t.apply_encode_decode(); t.apply_iterate(); ticks += 
Cycles::rdtsc() - start_time; } { Transaction t; map<string, bufferlist> pglog_attrset; map<string, bufferlist> info_attrset; pglog_attrset[pglog_attr] = data[pglog_attr]; info_attrset[info_epoch_attr] = data[info_epoch_attr]; info_attrset[info_info_attr] = data[info_info_attr]; start_time = Cycles::rdtsc(); t.omap_setkeys(meta_cid, pglog_oid, pglog_attrset); t.omap_setkeys(meta_cid, info_oid, info_attrset); t.omap_rmkey(meta_cid, pglog_oid, pglog_attr); t.apply_encode_decode(); t.apply_iterate(); ticks += Cycles::rdtsc() - start_time; } } return ticks; } }; const string PerfCase::info_epoch_attr("11.40_epoch"); const string PerfCase::info_info_attr("11.40_info"); const string PerfCase::attr("_"); const string PerfCase::snapset_attr("snapset"); const string PerfCase::pglog_attr("pglog_attr"); const coll_t PerfCase::meta_cid; const coll_t PerfCase::cid; const ghobject_t PerfCase::pglog_oid(hobject_t(sobject_t(object_t("cid_pglog"), 0))); const ghobject_t PerfCase::info_oid(hobject_t(sobject_t(object_t("infos"), 0))); Transaction::Tick Transaction::write_ticks, Transaction::setattr_ticks, Transaction::omap_setkeys_ticks, Transaction::omap_rmkey_ticks; Transaction::Tick Transaction::encode_ticks, Transaction::decode_ticks, Transaction::iterate_ticks; void usage(const string &name) { cerr << "Usage: " << name << " [times] " << std::endl; } int main(int argc, char **argv) { auto args = argv_to_vec(argc, argv); auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); common_init_finish(g_ceph_context); g_ceph_context->_conf.apply_changes(nullptr); Cycles::init(); cerr << "args: " << args << std::endl; if (args.size() < 1) { usage(argv[0]); return 1; } uint64_t times = atoi(args[0]); PerfCase c; uint64_t ticks = c.rados_write_4k(times); Transaction::dump_stat(); cerr << " Total rados op " << times << " run time " << Cycles::to_microseconds(ticks) << "us." << std::endl; return 0; }
9,053
32.910112
169
cc
null
ceph-main/src/test/objectstore/TestObjectStoreState.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2012 New Dream Network * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. */ #include <stdio.h> #include <string.h> #include <iostream> #include <time.h> #include <stdlib.h> #include <signal.h> #include "os/ObjectStore.h" #include "common/ceph_argparse.h" #include "global/global_init.h" #include "common/debug.h" #include <boost/scoped_ptr.hpp> #include <boost/lexical_cast.hpp> #include "TestObjectStoreState.h" #include "include/ceph_assert.h" #define dout_context g_ceph_context #define dout_subsys ceph_subsys_filestore #undef dout_prefix #define dout_prefix *_dout << "ceph_test_objectstore_state " using namespace std; void TestObjectStoreState::init(int colls, int objs) { dout(5) << "init " << colls << " colls " << objs << " objs" << dendl; ObjectStore::Transaction t; auto meta_ch = m_store->create_new_collection(coll_t::meta()); t.create_collection(coll_t::meta(), 0); m_store->queue_transaction(meta_ch, std::move(t)); wait_for_ready(); int baseid = 0; for (int i = 0; i < colls; i++) { spg_t pgid(pg_t(i, 1), shard_id_t::NO_SHARD); coll_t cid(pgid); auto ch = m_store->create_new_collection(cid); coll_entry_t *entry = coll_create(pgid, ch); dout(5) << "init create collection " << entry->m_cid << " meta " << entry->m_meta_obj << dendl; ObjectStore::Transaction *t = new ObjectStore::Transaction; t->create_collection(entry->m_cid, 32); bufferlist hint; uint32_t pg_num = colls; uint64_t num_objs = uint64_t(objs / colls); encode(pg_num, hint); encode(num_objs, hint); t->collection_hint(entry->m_cid, ObjectStore::Transaction::COLL_HINT_EXPECTED_NUM_OBJECTS, hint); dout(5) << "give collection hint, number of objects per collection: " << num_objs << dendl; 
t->touch(cid, entry->m_meta_obj); for (int i = 0; i < objs; i++) { hobject_t *obj = entry->touch_obj(i + baseid); t->touch(entry->m_cid, ghobject_t(*obj)); ceph_assert(i + baseid == m_num_objects); m_num_objects++; } baseid += objs; t->register_on_commit(new C_OnFinished(this)); m_store->queue_transaction(entry->m_ch, std::move(*t), nullptr); delete t; inc_in_flight(); m_collections.insert(make_pair(cid, entry)); rebuild_id_vec(); m_next_coll_nr++; } dout(5) << "init has " << m_in_flight.load() << "in-flight transactions" << dendl; wait_for_done(); dout(5) << "init finished" << dendl; } TestObjectStoreState::coll_entry_t *TestObjectStoreState::coll_create( spg_t pgid, ObjectStore::CollectionHandle ch) { char meta_buf[100]; memset(meta_buf, 0, 100); snprintf(meta_buf, 100, "pglog_0_head"); return (new coll_entry_t(pgid, ch, meta_buf)); } TestObjectStoreState::coll_entry_t* TestObjectStoreState::get_coll(coll_t cid, bool erase) { dout(5) << "get_coll id " << cid << dendl; coll_entry_t *entry = NULL; auto it = m_collections.find(cid); if (it != m_collections.end()) { entry = it->second; if (erase) { m_collections.erase(it); rebuild_id_vec(); } } dout(5) << "get_coll id " << cid; if (!entry) *_dout << " non-existent"; else *_dout << " name " << entry->m_cid; *_dout << dendl; return entry; } TestObjectStoreState::coll_entry_t* TestObjectStoreState::get_coll_at(int pos, bool erase) { dout(5) << "get_coll_at pos " << pos << dendl; if (m_collections.empty()) return NULL; ceph_assert((size_t) pos < m_collections_ids.size()); coll_t cid = m_collections_ids[pos]; coll_entry_t *entry = m_collections[cid]; if (entry == NULL) { dout(5) << "get_coll_at pos " << pos << " non-existent" << dendl; return NULL; } if (erase) { m_collections.erase(cid); rebuild_id_vec(); } dout(5) << "get_coll_at pos " << pos << ": " << entry->m_cid << "(removed: " << erase << ")" << dendl; return entry; } TestObjectStoreState::coll_entry_t::~coll_entry_t() { if (m_objects.size() > 0) { map<int, 
hobject_t*>::iterator it = m_objects.begin(); for (; it != m_objects.end(); ++it) { hobject_t *obj = it->second; if (obj) { delete obj; } } m_objects.clear(); } } bool TestObjectStoreState::coll_entry_t::check_for_obj(int id) { if (m_objects.count(id)) return true; return false; } hobject_t *TestObjectStoreState::coll_entry_t::touch_obj(int id) { map<int, hobject_t*>::iterator it = m_objects.find(id); if (it != m_objects.end()) { dout(5) << "touch_obj coll id " << m_cid << " name " << it->second->oid.name << dendl; return it->second; } char buf[100]; memset(buf, 0, 100); snprintf(buf, 100, "obj%d", id); hobject_t *obj = new hobject_t(sobject_t(object_t(buf), CEPH_NOSNAP)); obj->set_hash(m_pgid.ps()); obj->pool = m_pgid.pool(); m_objects.insert(make_pair(id, obj)); dout(5) << "touch_obj coll id " << m_cid << " name " << buf << dendl; return obj; } hobject_t *TestObjectStoreState::coll_entry_t::get_obj(int id) { return get_obj(id, false); } /** * remove_obj - Removes object without freeing it. * @param id Object's id in the map. * @return The object or NULL in case of error. */ hobject_t *TestObjectStoreState::coll_entry_t::remove_obj(int id) { return get_obj(id, true); } hobject_t *TestObjectStoreState::coll_entry_t::get_obj(int id, bool remove) { map<int, hobject_t*>::iterator it = m_objects.find(id); if (it == m_objects.end()) { dout(5) << "get_obj coll " << m_cid << " obj #" << id << " non-existent" << dendl; return NULL; } hobject_t *obj = it->second; if (remove) m_objects.erase(it); dout(5) << "get_obj coll " << m_cid << " id " << id << ": " << obj->oid.name << "(removed: " << remove << ")" << dendl; return obj; } hobject_t *TestObjectStoreState::coll_entry_t::get_obj_at(int pos, int *key) { return get_obj_at(pos, false, key); } /** * remove_obj_at - Removes object without freeing it. * @param pos The map's position in which the object lies. * @return The object or NULL in case of error. 
*/ hobject_t *TestObjectStoreState::coll_entry_t::remove_obj_at(int pos, int *key) { return get_obj_at(pos, true, key); } hobject_t *TestObjectStoreState::coll_entry_t::get_obj_at(int pos, bool remove, int *key) { if (m_objects.empty()) { dout(5) << "get_obj_at coll " << m_cid << " pos " << pos << " in an empty collection" << dendl; return NULL; } hobject_t *ret = NULL; map<int, hobject_t*>::iterator it = m_objects.begin(); for (int i = 0; it != m_objects.end(); ++it, i++) { if (i == pos) { ret = it->second; break; } } if (ret == NULL) { dout(5) << "get_obj_at coll " << m_cid << " pos " << pos << " non-existent" << dendl; return NULL; } if (key != NULL) *key = it->first; if (remove) m_objects.erase(it); dout(5) << "get_obj_at coll id " << m_cid << " pos " << pos << ": " << ret->oid.name << "(removed: " << remove << ")" << dendl; return ret; } hobject_t* TestObjectStoreState::coll_entry_t::replace_obj(int id, hobject_t *obj) { hobject_t *old_obj = remove_obj(id); m_objects.insert(make_pair(id, obj)); return old_obj; } int TestObjectStoreState::coll_entry_t::get_random_obj_id(rngen_t& gen) { ceph_assert(!m_objects.empty()); boost::uniform_int<> orig_obj_rng(0, m_objects.size()-1); int pos = orig_obj_rng(gen); map<int, hobject_t*>::iterator it = m_objects.begin(); for (int i = 0; it != m_objects.end(); ++it, i++) { if (i == pos) { return it->first; } } ceph_abort_msg("INTERNAL ERROR"); }
7,909
25.366667
101
cc
null
ceph-main/src/test/objectstore/TestObjectStoreState.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2012 New Dream Network * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. */ #ifndef TEST_OBJECTSTORE_STATE_H_ #define TEST_OBJECTSTORE_STATE_H_ #include <boost/scoped_ptr.hpp> #include <boost/random/mersenne_twister.hpp> #include <boost/random/uniform_int.hpp> #include <map> #include <vector> #include "os/ObjectStore.h" #include "common/Cond.h" typedef boost::mt11213b rngen_t; class TestObjectStoreState { public: struct coll_entry_t { spg_t m_pgid; coll_t m_cid; ghobject_t m_meta_obj; ObjectStore::CollectionHandle m_ch; std::map<int, hobject_t*> m_objects; int m_next_object_id; coll_entry_t(spg_t pgid, ObjectStore::CollectionHandle& ch, char *meta_obj_buf) : m_pgid(pgid), m_cid(m_pgid), m_meta_obj(hobject_t(sobject_t(object_t(meta_obj_buf), CEPH_NOSNAP))), m_ch(ch), m_next_object_id(0) { m_meta_obj.hobj.pool = m_pgid.pool(); m_meta_obj.hobj.set_hash(m_pgid.ps()); } ~coll_entry_t(); hobject_t *touch_obj(int id); bool check_for_obj(int id); hobject_t *get_obj(int id); hobject_t *remove_obj(int id); hobject_t *get_obj_at(int pos, int *key = NULL); hobject_t *remove_obj_at(int pos, int *key = NULL); hobject_t *replace_obj(int id, hobject_t *obj); int get_random_obj_id(rngen_t& gen); private: hobject_t *get_obj(int id, bool remove); hobject_t *get_obj_at(int pos, bool remove, int *key = NULL); }; protected: boost::shared_ptr<ObjectStore> m_store; std::map<coll_t, coll_entry_t*> m_collections; std::vector<coll_t> m_collections_ids; int m_next_coll_nr; int m_num_objs_per_coll; int m_num_objects; int m_max_in_flight; std::atomic<int> m_in_flight = { 0 }; ceph::mutex m_finished_lock = ceph::make_mutex("Finished Lock"); ceph::condition_variable m_finished_cond; 
void rebuild_id_vec() { m_collections_ids.clear(); m_collections_ids.reserve(m_collections.size()); for (auto& i : m_collections) { m_collections_ids.push_back(i.first); } } void wait_for_ready() { std::unique_lock locker{m_finished_lock}; m_finished_cond.wait(locker, [this] { return m_max_in_flight <= 0 || m_in_flight < m_max_in_flight; }); } void wait_for_done() { std::unique_lock locker{m_finished_lock}; m_finished_cond.wait(locker, [this] { return m_in_flight == 0; }); } void set_max_in_flight(int max) { m_max_in_flight = max; } void set_num_objs_per_coll(int val) { m_num_objs_per_coll = val; } coll_entry_t *get_coll(coll_t cid, bool erase = false); coll_entry_t *get_coll_at(int pos, bool erase = false); int get_next_pool_id() { return m_next_pool++; } private: static const int m_default_num_colls = 30; // The pool ID used for collection creation, ID 0 is preserve for other tests int m_next_pool; public: explicit TestObjectStoreState(ObjectStore *store) : m_next_coll_nr(0), m_num_objs_per_coll(10), m_num_objects(0), m_max_in_flight(0), m_next_pool(2) { m_store.reset(store); } ~TestObjectStoreState() { auto it = m_collections.begin(); while (it != m_collections.end()) { if (it->second) delete it->second; m_collections.erase(it++); } } void init(int colls, int objs); void init() { init(m_default_num_colls, 0); } int inc_in_flight() { return ++m_in_flight; } int dec_in_flight() { return --m_in_flight; } coll_entry_t *coll_create(spg_t pgid, ObjectStore::CollectionHandle ch); class C_OnFinished: public Context { protected: TestObjectStoreState *m_state; public: explicit C_OnFinished(TestObjectStoreState *state) : m_state(state) { } void finish(int r) override { std::lock_guard locker{m_state->m_finished_lock}; m_state->dec_in_flight(); m_state->m_finished_cond.notify_all(); } }; }; #endif /* TEST_OBJECTSTORE_STATE_H_ */
4,213
25.503145
79
h
null
ceph-main/src/test/objectstore/TestRocksdbOptionParse.cc
#include <gtest/gtest.h> #include "include/Context.h" #include "rocksdb/db.h" #include "rocksdb/env.h" #include "rocksdb/thread_status.h" #include "kv/RocksDBStore.h" #include <iostream> using namespace std; const string dir("rocksdb.test_temp_dir"); TEST(RocksDBOption, simple) { rocksdb::Options options; rocksdb::Status status; map<string,string> kvoptions; RocksDBStore *db = new RocksDBStore(g_ceph_context, dir, kvoptions, NULL); string options_string = "" "write_buffer_size=536870912;" "create_if_missing=true;" "max_write_buffer_number=4;" "max_background_compactions=4;" "stats_dump_period_sec = 5;" "min_write_buffer_number_to_merge = 2;" "level0_file_num_compaction_trigger = 4;" "max_bytes_for_level_base = 104857600;" "target_file_size_base = 10485760;" "num_levels = 3;" "compression = kNoCompression;" "compaction_options_universal = {min_merge_width=4;size_ratio=2;max_size_amplification_percent=500}"; int r = db->ParseOptionsFromString(options_string, options); ASSERT_EQ(0, r); ASSERT_EQ(536870912u, options.write_buffer_size); ASSERT_EQ(4, options.max_write_buffer_number); ASSERT_EQ(4, options.max_background_compactions); ASSERT_EQ(5u, options.stats_dump_period_sec); ASSERT_EQ(2, options.min_write_buffer_number_to_merge); ASSERT_EQ(4, options.level0_file_num_compaction_trigger); ASSERT_EQ(104857600u, options.max_bytes_for_level_base); ASSERT_EQ(10485760u, options.target_file_size_base); ASSERT_EQ(3, options.num_levels); ASSERT_EQ(rocksdb::kNoCompression, options.compression); ASSERT_EQ(2, options.compaction_options_universal.size_ratio); ASSERT_EQ(4, options.compaction_options_universal.min_merge_width); ASSERT_EQ(500, options.compaction_options_universal.max_size_amplification_percent); } TEST(RocksDBOption, interpret) { rocksdb::Options options; rocksdb::Status status; map<string,string> kvoptions; RocksDBStore *db = new RocksDBStore(g_ceph_context, dir, kvoptions, NULL); string options_string = "compact_on_mount = true; 
compaction_threads=10;flusher_threads=5;"; int r = db->ParseOptionsFromString(options_string, options); ASSERT_EQ(0, r); ASSERT_TRUE(db->compact_on_mount); //check thread pool setting options.env->SleepForMicroseconds(100000); std::vector<rocksdb::ThreadStatus> thread_list; status = options.env->GetThreadList(&thread_list); ASSERT_TRUE(status.ok()); int num_high_pri_threads = 0; int num_low_pri_threads = 0; for (vector<rocksdb::ThreadStatus>::iterator it = thread_list.begin(); it!= thread_list.end(); ++it) { if (it->thread_type == rocksdb::ThreadStatus::HIGH_PRIORITY) num_high_pri_threads++; if (it->thread_type == rocksdb::ThreadStatus::LOW_PRIORITY) num_low_pri_threads++; } ASSERT_EQ(15u, thread_list.size()); //low pri threads is compaction_threads ASSERT_EQ(10, num_low_pri_threads); //high pri threads is flusher_threads ASSERT_EQ(5, num_high_pri_threads); }
3,007
37.075949
106
cc
null
ceph-main/src/test/objectstore/allocator_replay_test.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Allocator replay tool. * Author: Igor Fedotov, ifedotov@suse.com */ #include <iostream> #include <vector> #include "common/ceph_argparse.h" #include "common/debug.h" #include "common/Cycles.h" #include "common/errno.h" #include "common/ceph_json.h" #include "common/admin_socket.h" #include "include/denc.h" #include "global/global_init.h" #include "os/bluestore/Allocator.h" using namespace std; void usage(const string &name) { cerr << "Usage: " << name << " <log_to_replay> <raw_duplicates|duplicates|free_dump|try_alloc count want alloc_unit|replay_alloc alloc_list_file|export_binary out_file>" << std::endl; } void usage_replay_alloc(const string &name) { cerr << "Detailed replay_alloc usage: " << name << " <allocator_dump_JSON> replay_alloc <alloc_list_file> [number of replays]" << std::endl; cerr << "The number of replays defaults to 1." << std::endl; cerr << "The \"alloc_list_file\" parameter should be a file with allocation requests, one per line." 
<< std::endl; cerr << "Allocation request format (space separated, optional parameters are 0 if not given): want unit [max] [hint]" << std::endl; } struct binary_alloc_map_t { std::vector<std::pair<uint64_t, uint64_t>> free_extents; DENC(binary_alloc_map_t, v, p) { DENC_START(1, 1, p); denc(v.free_extents, p); DENC_FINISH(p); } }; WRITE_CLASS_DENC(binary_alloc_map_t) int replay_and_check_for_duplicate(char* fname) { unique_ptr<Allocator> alloc; FILE* f = fopen(fname, "r"); if (!f) { std::cerr << "error: unable to open " << fname << std::endl; return -1; } PExtentVector tmp; bool init_done = false; char s[4096]; char* sp, *token; interval_set<uint64_t> owned_by_app; while (true) { if (fgets(s, sizeof(s), f) == nullptr) { break; } sp = strstr(s, "init_add_free"); if (!sp) { sp = strstr(s, "release"); } if (sp) { //2019-05-30 03:23:46.780 7f889a5edf00 10 fbmap_alloc 0x5642ed370600 init_add_free 0x100000~680000000 // or //2019-05-30 03:23:46.780 7f889a5edf00 10 fbmap_alloc 0x5642ed370600 init_add_free done // or // 2019 - 10 - 08T16:19 : 32.257 + 0300 7f5679f3fe80 10 fbmap_alloc 0x564fab96f100 release 0x450000~10000 // or // 2019 - 10 - 08T16 : 19 : 32.257 + 0300 7f5679f3fe80 10 fbmap_alloc 0x564fab96f100 release done if (strstr(sp, "done") != nullptr) { continue; } std::cout << s << std::endl; if (!init_done) { std::cerr << "error: no allocator init before: " << s << std::endl; return -1; } uint64_t offs, len; strtok(sp, " ~"); token = strtok(nullptr, " ~"); ceph_assert(token); offs = strtoul(token, nullptr, 16); token = strtok(nullptr, " ~"); ceph_assert(token); len = strtoul(token, nullptr, 16); if (len == 0) { std::cerr << "error: " << sp <<": " << s << std::endl; return -1; } if (!owned_by_app.contains(offs, len)) { std::cerr << "error: unexpected return to allocator, not owned by app: " << s << std::endl; return -1; } owned_by_app.erase(offs, len); if (strstr(sp, "init_add_free") != nullptr) { alloc->init_add_free(offs, len); } else { PExtentVector release_set; 
release_set.emplace_back(offs, len); alloc->release(release_set); } continue; } sp = strstr(s, "init_rm_free"); if (sp) { //2019-05-30 03:23:46.912 7f889a5edf00 10 fbmap_alloc 0x5642ed370600 init_rm_free 0x100000~680000000 // or // 2019-05-30 03:23:46.916 7f889a5edf00 10 fbmap_alloc 0x5642ed370600 init_rm_free done if (strstr(sp, "done") != nullptr) { continue; } std::cout << s << std::endl; if (!init_done) { std::cerr << "error: no allocator init before: " << s << std::endl; return -1; } uint64_t offs, len; strtok(sp, " ~"); token = strtok(nullptr, " ~"); ceph_assert(token); offs = strtoul(token, nullptr, 16); token = strtok(nullptr, " ~"); ceph_assert(token); len = strtoul(token, nullptr, 16); if (len == 0) { std::cerr << "error: " << sp <<": " << s << std::endl; return -1; } alloc->init_rm_free(offs, len); if (owned_by_app.intersects(offs, len)) { std::cerr << "error: unexpected takeover from allocator, already owned by app: " << s << std::endl; return -1; } else { owned_by_app.insert(offs, len); } continue; } sp = strstr(s, "allocate"); if (sp) { //2019-05-30 03:23:48.780 7f889a5edf00 10 fbmap_alloc 0x5642ed370600 allocate 0x80000000/100000,0,0 // and need to bypass // 2019-05-30 03:23:48.780 7f889a5edf00 10 fbmap_alloc 0x5642ed370600 allocate 0x69d400000~200000/100000,0,0 // Very simple and stupid check to bypass actual allocations if (strstr(sp, "~") != nullptr) { continue; } std::cout << s << std::endl; if (!init_done) { std::cerr << "error: no allocator init before: " << s << std::endl; return -1; } uint64_t want, alloc_unit; strtok(sp, " /"); token = strtok(nullptr, " /"); ceph_assert(token); want = strtoul(token, nullptr, 16); token = strtok(nullptr, " ~"); ceph_assert(token); alloc_unit = strtoul(token, nullptr, 16); if (want == 0 || alloc_unit == 0) { std::cerr << "error: allocate: " << s << std::endl; return -1; } tmp.clear(); auto allocated = alloc->allocate(want, alloc_unit, 0, 0, &tmp); std::cout << "allocated TOTAL: " << allocated << std::endl; for 
(auto& ee : tmp) { std::cerr << "dump extent: " << std::hex << ee.offset << "~" << ee.length << std::dec << std::endl; } std::cerr << "dump completed." << std::endl; for (auto& e : tmp) { if (owned_by_app.intersects(e.offset, e.length)) { std::cerr << "error: unexpected allocated extent: " << std::hex << e.offset << "~" << e.length << " dumping all allocations:" << std::dec << std::endl; for (auto& ee : tmp) { std::cerr <<"dump extent: " << std::hex << ee.offset << "~" << ee.length << std::dec << std::endl; } std::cerr <<"dump completed." << std::endl; return -1; } else { owned_by_app.insert(e.offset, e.length); } } continue; } string alloc_type = "bitmap"; sp = strstr(s, "BitmapAllocator"); if (!sp) { alloc_type = "avl"; sp = strstr(s, "AvlAllocator"); } if (!sp) { alloc_type = "hybrid"; sp = strstr(s, "HybridAllocator"); } if (!sp) { alloc_type = "stupid"; sp = strstr(s, "StupidAllocator"); } if (sp) { // 2019-05-30 03:23:43.460 7f889a5edf00 10 fbmap_alloc 0x5642ed36e900 BitmapAllocator 0x15940000000/100000 std::cout << s << std::endl; if (init_done) { std::cerr << "error: duplicate init: " << s << std::endl; return -1; } uint64_t total, alloc_unit; strtok(sp, " /"); token = strtok(nullptr, " /"); ceph_assert(token); total = strtoul(token, nullptr, 16); token = strtok(nullptr, " /"); ceph_assert(token); alloc_unit = strtoul(token, nullptr, 16); if (total == 0 || alloc_unit == 0) { std::cerr << "error: invalid init: " << s << std::endl; return -1; } alloc.reset(Allocator::create(g_ceph_context, alloc_type, total, alloc_unit)); owned_by_app.insert(0, total); init_done = true; continue; } } fclose(f); return 0; } int replay_free_dump_and_apply_raw( char* fname, std::function<void ( std::string_view, int64_t, int64_t, std::string_view)> create, std::function<void (uint64_t, uint64_t)> add_ext) { string alloc_type; string alloc_name; uint64_t capacity = 0; uint64_t alloc_unit = 0; JSONParser p; std::cout << "parsing..." 
<< std::endl; bool b = p.parse(fname); if (!b) { std::cerr << "Failed to parse json: " << fname << std::endl; return -1; } JSONObj::data_val v; ceph_assert(p.is_object()); auto *o = p.find_obj("alloc_type"); ceph_assert(o); alloc_type = o->get_data_val().str; o = p.find_obj("alloc_name"); ceph_assert(o); alloc_name = o->get_data_val().str; o = p.find_obj("capacity"); ceph_assert(o); decode_json_obj(capacity, o); o = p.find_obj("alloc_unit"); ceph_assert(o); decode_json_obj(alloc_unit, o); int fd = -1; o = p.find_obj("extents_file"); if (o) { string filename = o->get_data_val().str; fd = open(filename.c_str(), O_RDONLY); if (fd < 0) { std::cerr << "error: unable to open extents file: " << filename << ", " << cpp_strerror(-errno) << std::endl; return -1; } } else { o = p.find_obj("extents"); ceph_assert(o); ceph_assert(o->is_array()); } std::cout << "parsing completed!" << std::endl; create(alloc_type, capacity, alloc_unit, alloc_name); int r = 0; if (fd < 0) { auto it = o->find_first(); while (!it.end()) { auto *item_obj = *it; uint64_t offset = 0; uint64_t length = 0; string offset_str, length_str; bool b = JSONDecoder::decode_json("offset", offset_str, item_obj); ceph_assert(b); b = JSONDecoder::decode_json("length", length_str, item_obj); ceph_assert(b); char* p; offset = strtol(offset_str.c_str(), &p, 16); length = strtol(length_str.c_str(), &p, 16); // intentionally skip/trim entries that are above the capacity, // just to be able to "shrink" allocator by editing that field if (offset < capacity) { if (offset + length > capacity) { length = offset + length - capacity; } add_ext(offset, length); } ++it; } } else { bufferlist bl; char buf[4096]; do { r = read(fd, buf, sizeof(buf)); if (r > 0) { bl.append(buf, r); } } while(r > 0); if (r < 0) { std::cerr << "error: error reading from extents file: " << cpp_strerror(-errno) << std::endl; } else { auto p = bl.cbegin(); binary_alloc_map_t amap; try { decode(amap, p); for (auto p : amap.free_extents) { 
add_ext(p.first, p.second); } } catch (ceph::buffer::error& e) { std::cerr << __func__ << " unable to decode extents " << ": " << e.what() << std::endl; r = -1; } } close(fd); } return r; } /* * This replays allocator dump (in JSON) reported by "ceph daemon <osd> bluestore allocator dump <name>" command and applies custom method to it */ int replay_free_dump_and_apply(char* fname, std::function<int (Allocator*, const string& aname)> fn) { unique_ptr<Allocator> alloc; auto create_fn = [&](std::string_view alloc_type, int64_t capacity, int64_t alloc_unit, std::string_view alloc_name) { alloc.reset( Allocator::create( g_ceph_context, alloc_type, capacity, alloc_unit, 0, 0, alloc_name)); }; auto add_fn = [&](uint64_t offset, uint64_t len) { alloc->init_add_free(offset, len); }; int r = replay_free_dump_and_apply_raw( fname, create_fn, add_fn); if (r == 0) { r = fn(alloc.get(), alloc->get_name()); } return r; } void dump_alloc(Allocator* alloc, const string& aname) { AdminSocket* admin_socket = g_ceph_context->get_admin_socket(); ceph_assert(admin_socket); ceph::bufferlist in, out; ostringstream err; string cmd = "{\"prefix\": \"bluestore allocator dump " + aname + "\"}"; auto r = admin_socket->execute_command( { cmd }, in, err, &out); if (r != 0) { cerr << "failure querying: " << cpp_strerror(r) << std::endl; } else { std::cout << std::string(out.c_str(), out.length()) << std::endl; } } int export_as_binary(char* fname, char* target_fname) { int fd = creat(target_fname, 0); if (fd < 0) { std::cerr << "error: unable to open target file: " << target_fname << ", " << cpp_strerror(-errno) << std::endl; return -1; } binary_alloc_map_t amap; auto dummy_create_fn = [&](std::string_view alloc_type, int64_t capacity, int64_t alloc_unit, std::string_view alloc_name) { }; auto add_fn = [&](uint64_t offset, uint64_t len) { amap.free_extents.emplace_back(offset, len); }; int r = replay_free_dump_and_apply_raw( fname, dummy_create_fn, add_fn); if (r == 0) { bufferlist out; 
ceph::encode(amap, out); auto w = write(fd, out.c_str(), out.length()); if (w < 1) { std::cerr << "error: unable to open target file: " << target_fname << ", " << cpp_strerror(-errno) << std::endl; } } close(fd); return r; } int check_duplicates(char* fname) { interval_set<uint64_t> free_extents; interval_set<uint64_t> invalid_extentsA; interval_set<uint64_t> invalid_extentsB; auto dummy_create_fn = [&](std::string_view alloc_type, int64_t capacity, int64_t alloc_unit, std::string_view alloc_name) { }; size_t errors = 0; size_t pos = 0; size_t first_err_pos = 0; auto add_fn = [&](uint64_t offset, uint64_t len) { ++pos; if (free_extents.intersects(offset, len)) { invalid_extentsB.insert(offset, len); ++errors; if (first_err_pos == 0) { first_err_pos = pos; } } else { free_extents.insert(offset, len); } }; int r = replay_free_dump_and_apply_raw( fname, dummy_create_fn, add_fn); if (r < 0) { return r; } pos = 0; auto add_fn2 = [&](uint64_t offset, uint64_t len) { ++pos; if (pos < first_err_pos) { if (invalid_extentsB.intersects(offset, len)) { invalid_extentsA.insert(offset, len); } } }; r = replay_free_dump_and_apply_raw( fname, dummy_create_fn, add_fn2); ceph_assert(r >= 0); auto itA = invalid_extentsA.begin(); auto itB = invalid_extentsB.begin(); while (itA != invalid_extentsA.end()) { std::cerr << "error: overlapping extents: " << std::hex << itA.get_start() << "~" << itA.get_end() - itA.get_start() << " vs."; while (itB != invalid_extentsB.end() && itB.get_start() >= itA.get_start() && itB.get_end() <= itA.get_end()) { std::cerr << " " << itB.get_start() << "~" << itB.get_end() - itB.get_start(); ++itB; } std::cerr << std::dec << std::endl; ++itA; } return r >= 0 ? 
errors != 0 : r; } int main(int argc, char **argv) { vector<const char*> args; auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); common_init_finish(g_ceph_context); g_ceph_context->_conf.apply_changes(nullptr); if (argc < 3) { usage(argv[0]); return 1; } if (strcmp(argv[2], "raw_duplicates") == 0) { return replay_and_check_for_duplicate(argv[1]); } else if (strcmp(argv[2], "free_dump") == 0) { return replay_free_dump_and_apply(argv[1], [&](Allocator* a, const string& aname) { ceph_assert(a); std::cout << "Fragmentation:" << a->get_fragmentation() << std::endl; std::cout << "Fragmentation score:" << a->get_fragmentation_score() << std::endl; std::cout << "Free:" << std::hex << a->get_free() << std::dec << std::endl; { // stub to implement various testing stuff on properly initialized allocator // e.g. one can dump allocator back via dump_alloc(a, aname); } return 0; }); } else if (strcmp(argv[2], "try_alloc") == 0) { if (argc < 6) { std::cerr << "Error: insufficient arguments for \"try_alloc\" operation." 
<< std::endl; usage(argv[0]); return 1; } auto count = strtoul(argv[3], nullptr, 10); auto want = strtoul(argv[4], nullptr, 10); auto alloc_unit = strtoul(argv[5], nullptr, 10); return replay_free_dump_and_apply(argv[1], [&](Allocator* a, const string& aname) { ceph_assert(a); std::cout << "Fragmentation:" << a->get_fragmentation() << std::endl; std::cout << "Fragmentation score:" << a->get_fragmentation_score() << std::endl; std::cout << "Free:" << std::hex << a->get_free() << std::dec << std::endl; { PExtentVector extents; for(size_t i = 0; i < count; i++) { extents.clear(); auto r = a->allocate(want, alloc_unit, 0, &extents); if (r < 0) { std::cerr << "Error: allocation failure at step:" << i + 1 << ", ret = " << r << std::endl; return -1; } } } std::cout << "Successfully allocated: " << count << " * " << want << ", unit:" << alloc_unit << std::endl; return 0; }); } else if (strcmp(argv[2], "replay_alloc") == 0) { if (argc < 4) { std::cerr << "Error: insufficient arguments for \"replay_alloc\" option." << std::endl; usage_replay_alloc(argv[0]); return 1; } return replay_free_dump_and_apply(argv[1], [&](Allocator *a, const string &aname) { ceph_assert(a); std::cout << "Fragmentation:" << a->get_fragmentation() << std::endl; std::cout << "Fragmentation score:" << a->get_fragmentation_score() << std::endl; std::cout << "Free:" << std::hex << a->get_free() << std::dec << std::endl; { /* replay a set of allocation requests */ char s[4096]; FILE *f_alloc_list = fopen(argv[3], "r"); if (!f_alloc_list) { std::cerr << "error: unable to open " << argv[3] << std::endl; return -1; } /* Replay user specified number of times to simulate extended activity * Defaults to 1 replay. 
*/ auto replay_count = 1; if (argc == 5) { replay_count = atoi(argv[4]); } for (auto i = 0; i < replay_count; ++i) { while (fgets(s, sizeof(s), f_alloc_list) != nullptr) { /* parse allocation request */ uint64_t want = 0, unit = 0, max = 0, hint = 0; if (std::sscanf(s, "%ji %ji %ji %ji", &want, &unit, &max, &hint) < 2) { cerr << "Error: malformed allocation request:" << std::endl; cerr << s << std::endl; /* do not attempt to allocate a malformed request */ continue; } /* timestamp for allocation start */ auto t0 = ceph::mono_clock::now(); /* allocate */ PExtentVector extents; auto r = a->allocate(want, unit, max, hint, &extents); if (r < 0) { /* blind replays of allocations may run out of space, provide info for easy confirmation */ std::cerr << "Error: allocation failure code: " << r << " requested want/unit/max/hint (hex): " << std::hex << want << "/" << unit << "/" << max << "/" << hint << std::dec << std::endl; std::cerr << "Fragmentation:" << a->get_fragmentation() << std::endl; std::cerr << "Fragmentation score:" << a->get_fragmentation_score() << std::endl; std::cerr << "Free:" << std::hex << a->get_free() << std::dec << std::endl; /* return 0 if the allocator ran out of space */ if (r == -ENOSPC) { return 0; } return -1; } /* Outputs the allocation's duration in nanoseconds and the allocation request parameters */ std::cout << "Duration (ns): " << (ceph::mono_clock::now() - t0).count() << " want/unit/max/hint (hex): " << std::hex << want << "/" << unit << "/" << max << "/" << hint << std::dec << std::endl; /* Do not release. 
*/ //alloc->release(extents); extents.clear(); } fseek(f_alloc_list, 0, SEEK_SET); } fclose(f_alloc_list); std::cout << "Fragmentation:" << a->get_fragmentation() << std::endl; std::cout << "Fragmentation score:" << a->get_fragmentation_score() << std::endl; std::cout << "Free:" << std::hex << a->get_free() << std::dec << std::endl; } return 0; }); } else if (strcmp(argv[2], "export_binary") == 0) { return export_as_binary(argv[1], argv[3]); } else if (strcmp(argv[2], "duplicates") == 0) { return check_duplicates(argv[1]); } }
21,521
29.966906
185
cc
null
ceph-main/src/test/objectstore/fastbmap_allocator_test.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include <iostream> #include <gtest/gtest.h> #include "os/bluestore/fastbmap_allocator_impl.h" class TestAllocatorLevel01 : public AllocatorLevel01Loose { public: void init(uint64_t capacity, uint64_t alloc_unit) { _init(capacity, alloc_unit); } interval_t allocate_l1_cont(uint64_t length, uint64_t min_length, uint64_t pos_start, uint64_t pos_end) { return _allocate_l1_contiguous(length, min_length, 0, pos_start, pos_end); } void free_l1(const interval_t& r) { _free_l1(r.offset, r.length); } }; class TestAllocatorLevel02 : public AllocatorLevel02<AllocatorLevel01Loose> { public: void init(uint64_t capacity, uint64_t alloc_unit) { _init(capacity, alloc_unit); } void allocate_l2(uint64_t length, uint64_t min_length, uint64_t* allocated0, interval_vector_t* res) { uint64_t allocated = 0; uint64_t hint = 0; // trigger internal l2 hint support _allocate_l2(length, min_length, 0, hint, &allocated, res); *allocated0 += allocated; } void free_l2(const interval_vector_t& r) { _free_l2(r); } void mark_free(uint64_t o, uint64_t len) { _mark_free(o, len); } void mark_allocated(uint64_t o, uint64_t len) { _mark_allocated(o, len); } }; const uint64_t _1m = 1024 * 1024; const uint64_t _2m = 2 * 1024 * 1024; TEST(TestAllocatorLevel01, test_l1) { TestAllocatorLevel01 al1; uint64_t num_l1_entries = 3 * 256; uint64_t capacity = num_l1_entries * 512 * 4096; al1.init(capacity, 0x1000); ASSERT_EQ(capacity, al1.debug_get_free()); auto i1 = al1.allocate_l1_cont(0x1000, 0x1000, 0, num_l1_entries); ASSERT_EQ(i1.offset, 0u); ASSERT_EQ(i1.length, 0x1000u); ASSERT_EQ(capacity - 0x1000, al1.debug_get_free()); auto i2 = al1.allocate_l1_cont(0x1000, 0x1000, 0, num_l1_entries); ASSERT_EQ(i2.offset, 0x1000u); ASSERT_EQ(i2.length, 0x1000u); al1.free_l1(i2); al1.free_l1(i1); i1 = al1.allocate_l1_cont(0x1000, 0x1000, 0, num_l1_entries); ASSERT_EQ(i1.offset, 0u); ASSERT_EQ(i1.length, 0x1000u); i2 = 
al1.allocate_l1_cont(0x1000, 0x1000, 0, num_l1_entries); ASSERT_EQ(i2.offset, 0x1000u); ASSERT_EQ(i2.length, 0x1000u); al1.free_l1(i1); al1.free_l1(i2); i1 = al1.allocate_l1_cont(0x2000, 0x1000, 0, num_l1_entries); ASSERT_EQ(i1.offset, 0u); ASSERT_EQ(i1.length, 0x2000u); i2 = al1.allocate_l1_cont(0x3000, 0x1000, 0, num_l1_entries); ASSERT_EQ(i2.offset, 0x2000u); ASSERT_EQ(i2.length, 0x3000u); al1.free_l1(i1); al1.free_l1(i2); i1 = al1.allocate_l1_cont(0x2000, 0x1000, 0, num_l1_entries); ASSERT_EQ(i1.offset, 0u); ASSERT_EQ(i1.length, 0x2000u); i2 = al1.allocate_l1_cont(2 * 1024 * 1024, 0x1000, 0, num_l1_entries); ASSERT_EQ(i2.offset, 2u * 1024u * 1024u); ASSERT_EQ(i2.length, 2u * 1024u * 1024u); al1.free_l1(i1); i1 = al1.allocate_l1_cont(1024 * 1024, 0x1000, 0, num_l1_entries); ASSERT_EQ(i1.offset, 0u); ASSERT_EQ(i1.length, 1024u * 1024u); auto i3 = al1.allocate_l1_cont(1024 * 1024 + 0x1000, 0x1000, 0, num_l1_entries); ASSERT_EQ(i3.offset, 2u * 2u * 1024u * 1024u); ASSERT_EQ(i3.length, 1024u * 1024u + 0x1000u); // here we have the following layout: // Alloc: 0~1M, 2M~2M, 4M~1M+4K // Free: 1M~1M, 4M+4K ~ 2M-4K, 6M ~... 
// auto i4 = al1.allocate_l1_cont(1024 * 1024, 0x1000, 0, num_l1_entries); ASSERT_EQ(1 * 1024 * 1024u, i4.offset); ASSERT_EQ(1024 * 1024u, i4.length); al1.free_l1(i4); i4 = al1.allocate_l1_cont(1024 * 1024 - 0x1000, 0x1000, 0, num_l1_entries); ASSERT_EQ(i4.offset, 5u * 1024u * 1024u + 0x1000u); ASSERT_EQ(i4.length, 1024u * 1024u - 0x1000u); al1.free_l1(i4); i4 = al1.allocate_l1_cont(1024 * 1024 + 0x1000, 0x1000, 0, num_l1_entries); ASSERT_EQ(i4.offset, 6u * 1024u * 1024u); //ASSERT_EQ(i4.offset, 5 * 1024 * 1024 + 0x1000); ASSERT_EQ(i4.length, 1024u * 1024u + 0x1000u); al1.free_l1(i1); al1.free_l1(i2); al1.free_l1(i3); al1.free_l1(i4); i1 = al1.allocate_l1_cont(1024 * 1024, 0x1000, 0, num_l1_entries); ASSERT_EQ(i1.offset, 0u); ASSERT_EQ(i1.length, 1024u * 1024u); i2 = al1.allocate_l1_cont(1024 * 1024, 0x1000, 0, num_l1_entries); ASSERT_EQ(i2.offset, 1u * 1024u * 1024u); ASSERT_EQ(i2.length, 1024u * 1024u); i3 = al1.allocate_l1_cont(512 * 1024, 0x1000, 0, num_l1_entries); ASSERT_EQ(i3.offset, 2u * 1024u * 1024u); ASSERT_EQ(i3.length, 512u * 1024u); i4 = al1.allocate_l1_cont(1536 * 1024, 0x1000, 0, num_l1_entries); ASSERT_EQ(i4.offset, (2u * 1024u + 512u) * 1024u); ASSERT_EQ(i4.length, 1536u * 1024u); // making a hole 1.5 Mb length al1.free_l1(i2); al1.free_l1(i3); // and trying to fill it i2 = al1.allocate_l1_cont(1536 * 1024, 0x1000, 0, num_l1_entries); ASSERT_EQ(i2.offset, 1024u * 1024u); ASSERT_EQ(i2.length, 1536u * 1024u); al1.free_l1(i2); // and trying to fill it partially i2 = al1.allocate_l1_cont(1528 * 1024, 0x1000, 0, num_l1_entries); ASSERT_EQ(i2.offset, 1024u * 1024u); ASSERT_EQ(i2.length, 1528u * 1024u); i3 = al1.allocate_l1_cont(8 * 1024, 0x1000, 0, num_l1_entries); ASSERT_EQ(i3.offset, 2552u * 1024u); ASSERT_EQ(i3.length, 8u * 1024u); al1.free_l1(i2); // here we have the following layout: // Alloc: 0~1M, 2552K~8K, num_l1_entries0K~1.5M // Free: 1M~1528K, 4M ~... 
// i2 = al1.allocate_l1_cont(1536 * 1024, 0x1000, 0, num_l1_entries); ASSERT_EQ(i2.offset, 4u * 1024u * 1024u); ASSERT_EQ(i2.length, 1536u * 1024u); al1.free_l1(i1); al1.free_l1(i2); al1.free_l1(i3); al1.free_l1(i4); ASSERT_EQ(capacity, al1.debug_get_free()); for (uint64_t i = 0; i < capacity; i += _2m) { i1 = al1.allocate_l1_cont(_2m, _2m, 0, num_l1_entries); ASSERT_EQ(i1.offset, i); ASSERT_EQ(i1.length, _2m); } ASSERT_EQ(0u, al1.debug_get_free()); i2 = al1.allocate_l1_cont(_2m, _2m, 0, num_l1_entries); ASSERT_EQ(i2.length, 0u); ASSERT_EQ(0u, al1.debug_get_free()); al1.free_l1(i1); i2 = al1.allocate_l1_cont(_2m, _2m, 0, num_l1_entries); ASSERT_EQ(i2, i1); al1.free_l1(i2); i2 = al1.allocate_l1_cont(_1m, _1m, 0, num_l1_entries); ASSERT_EQ(i2.offset, i1.offset); ASSERT_EQ(i2.length, _1m); i3 = al1.allocate_l1_cont(_2m, _2m, 0, num_l1_entries); ASSERT_EQ(i3.length, 0u); i3 = al1.allocate_l1_cont(_2m, _1m, 0, num_l1_entries); ASSERT_EQ(i3.length, _1m); i4 = al1.allocate_l1_cont(_2m, _1m, 0, num_l1_entries); ASSERT_EQ(i4.length, 0u); al1.free_l1(i2); i2 = al1.allocate_l1_cont(_2m, _2m, 0, num_l1_entries); ASSERT_EQ(i2.length, 0u); i2 = al1.allocate_l1_cont(_2m, 0x1000, 0, num_l1_entries); ASSERT_EQ(i2.length, _1m); al1.free_l1(i2); al1.free_l1(i3); ASSERT_EQ(_2m, al1.debug_get_free()); i1 = al1.allocate_l1_cont(_2m - 3 * 0x1000, 0x1000, 0, num_l1_entries); i2 = al1.allocate_l1_cont(0x1000, 0x1000, 0, num_l1_entries); i3 = al1.allocate_l1_cont(0x1000, 0x1000, 0, num_l1_entries); i4 = al1.allocate_l1_cont(0x1000, 0x1000, 0, num_l1_entries); ASSERT_EQ(0u, al1.debug_get_free()); al1.free_l1(i2); al1.free_l1(i4); i2 = al1.allocate_l1_cont(0x4000, 0x2000, 0, num_l1_entries); ASSERT_EQ(i2.length, 0u); i2 = al1.allocate_l1_cont(0x4000, 0x1000, 0, num_l1_entries); ASSERT_EQ(i2.length, 0x1000u); al1.free_l1(i3); i3 = al1.allocate_l1_cont(0x6000, 0x3000, 0, num_l1_entries); ASSERT_EQ(i3.length, 0u); i3 = al1.allocate_l1_cont(0x6000, 0x1000, 0, num_l1_entries); ASSERT_EQ(i3.length, 
0x2000u); ASSERT_EQ(0u, al1.debug_get_free()); std::cout << "Done L1" << std::endl; } TEST(TestAllocatorLevel01, test_l2) { TestAllocatorLevel02 al2; uint64_t num_l2_entries = 64;// *512; uint64_t capacity = num_l2_entries * 256 * 512 * 4096; al2.init(capacity, 0x1000); std::cout << "Init L2" << std::endl; uint64_t allocated1 = 0; interval_vector_t a1; al2.allocate_l2(0x2000, 0x2000, &allocated1, &a1); ASSERT_EQ(allocated1, 0x2000u); ASSERT_EQ(a1[0].offset, 0u); ASSERT_EQ(a1[0].length, 0x2000u); // limit query range in debug_get_free for the sake of performance ASSERT_EQ(0x2000u, al2.debug_get_allocated(0, 1)); ASSERT_EQ(0u, al2.debug_get_allocated(1, 2)); uint64_t allocated2 = 0; interval_vector_t a2; al2.allocate_l2(0x2000, 0x2000, &allocated2, &a2); ASSERT_EQ(allocated2, 0x2000u); ASSERT_EQ(a2[0].offset, 0x2000u); ASSERT_EQ(a2[0].length, 0x2000u); // limit query range in debug_get_free for the sake of performance ASSERT_EQ(0x4000u, al2.debug_get_allocated(0, 1)); ASSERT_EQ(0u, al2.debug_get_allocated(1, 2)); al2.free_l2(a1); allocated2 = 0; a2.clear(); al2.allocate_l2(0x1000, 0x1000, &allocated2, &a2); ASSERT_EQ(allocated2, 0x1000u); ASSERT_EQ(a2[0].offset, 0x0000u); ASSERT_EQ(a2[0].length, 0x1000u); // limit query range in debug_get_free for the sake of performance ASSERT_EQ(0x3000u, al2.debug_get_allocated(0, 1)); ASSERT_EQ(0u, al2.debug_get_allocated(1, 2)); uint64_t allocated3 = 0; interval_vector_t a3; al2.allocate_l2(0x2000, 0x1000, &allocated3, &a3); ASSERT_EQ(allocated3, 0x2000u); ASSERT_EQ(a3.size(), 2u); ASSERT_EQ(a3[0].offset, 0x1000u); ASSERT_EQ(a3[0].length, 0x1000u); ASSERT_EQ(a3[1].offset, 0x4000u); ASSERT_EQ(a3[1].length, 0x1000u); // limit query range in debug_get_free for the sake of performance ASSERT_EQ(0x5000u, al2.debug_get_allocated(0, 1)); ASSERT_EQ(0u, al2.debug_get_allocated(1, 2)); { interval_vector_t r; r.emplace_back(0x0, 0x5000); al2.free_l2(r); } a3.clear(); allocated3 = 0; al2.allocate_l2(_1m, _1m, &allocated3, &a3); 
ASSERT_EQ(a3.size(), 1u); ASSERT_EQ(a3[0].offset, 0u); ASSERT_EQ(a3[0].length, _1m); al2.free_l2(a3); a3.clear(); allocated3 = 0; al2.allocate_l2(4 * _1m, _1m, &allocated3, &a3); ASSERT_EQ(a3.size(), 1u); ASSERT_EQ(a3[0].offset, 0u); ASSERT_EQ(a3[0].length, 4 * _1m); al2.free_l2(a3); #ifndef _DEBUG for (uint64_t i = 0; i < capacity; i += 0x1000) { uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(0x1000, 0x1000, &allocated4, &a4); ASSERT_EQ(a4.size(), 1u); ASSERT_EQ(a4[0].offset, i); ASSERT_EQ(a4[0].length, 0x1000u); if (0 == (i % (1 * 1024 * _1m))) { std::cout << "alloc1 " << i / 1024 / 1024 << " mb of " << capacity / 1024 / 1024 << std::endl; } } #else for (uint64_t i = 0; i < capacity; i += _2m) { uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(_2m, _2m, &allocated4, &a4); ASSERT_EQ(a4.size(), 1); ASSERT_EQ(a4[0].offset, i); ASSERT_EQ(a4[0].length, _2m); if (0 == (i % (1 * 1024 * _1m))) { std::cout << "alloc1 " << i / 1024 / 1024 << " mb of " << capacity / 1024 / 1024 << std::endl; } } #endif ASSERT_EQ(0u, al2.debug_get_free()); for (uint64_t i = 0; i < capacity; i += _1m) { interval_vector_t r; r.emplace_back(i, _1m); al2.free_l2(r); if (0 == (i % (1 * 1024 * _1m))) { std::cout << "free1 " << i / 1024 / 1024 << " mb of " << capacity / 1024 / 1024 << std::endl; } } ASSERT_EQ(capacity, al2.debug_get_free()); for (uint64_t i = 0; i < capacity; i += _1m) { uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(_1m, _1m, &allocated4, &a4); ASSERT_EQ(a4.size(), 1u); ASSERT_EQ(allocated4, _1m); ASSERT_EQ(a4[0].offset, i); ASSERT_EQ(a4[0].length, _1m); if (0 == (i % (1 * 1024 * _1m))) { std::cout << "alloc2 " << i / 1024 / 1024 << " mb of " << capacity / 1024 / 1024 << std::endl; } } ASSERT_EQ(0u, al2.debug_get_free()); uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(_1m, _1m, &allocated4, &a4); ASSERT_EQ(a4.size(), 0u); al2.allocate_l2(0x1000, 0x1000, &allocated4, &a4); ASSERT_EQ(a4.size(), 0u); for (uint64_t i = 0; i 
< capacity; i += 0x2000) { interval_vector_t r; r.emplace_back(i, 0x1000); al2.free_l2(r); if (0 == (i % (1 * 1024 * _1m))) { std::cout << "free2 " << i / 1024 / 1024 << " mb of " << capacity / 1024 / 1024 << std::endl; } } ASSERT_EQ(capacity / 2, al2.debug_get_free()); // unable to allocate due to fragmentation al2.allocate_l2(_1m, _1m, &allocated4, &a4); ASSERT_EQ(a4.size(), 0u); for (uint64_t i = 0; i < capacity; i += 2 * _1m) { a4.clear(); allocated4 = 0; al2.allocate_l2(_1m, 0x1000, &allocated4, &a4); ASSERT_EQ(a4.size(), _1m / 0x1000); ASSERT_EQ(allocated4, _1m); ASSERT_EQ(a4[0].offset, i); ASSERT_EQ(a4[0].length, 0x1000u); if (0 == (i % (1 * 1024 * _1m))) { std::cout << "alloc3 " << i / 1024 / 1024 << " mb of " << capacity / 1024 / 1024 << std::endl; } } ASSERT_EQ(0u, al2.debug_get_free()); std::cout << "Done L2" << std::endl; } TEST(TestAllocatorLevel01, test_l2_huge) { TestAllocatorLevel02 al2; uint64_t num_l2_entries = 4 * 512; uint64_t capacity = num_l2_entries * 256 * 512 * 4096; // 1 TB al2.init(capacity, 0x1000); std::cout << "Init L2 Huge" << std::endl; for (uint64_t i = 0; i < capacity; i += _1m) { uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(0x1000, 0x1000, &allocated4, &a4); ASSERT_EQ(a4.size(), 1u); ASSERT_EQ(allocated4, 0x1000u); ASSERT_EQ(a4[0].offset, i); ASSERT_EQ(a4[0].length, 0x1000u); allocated4 = 0; a4.clear(); al2.allocate_l2(_1m - 0x1000, 0x1000, &allocated4, &a4); ASSERT_EQ(a4.size(), 1u); ASSERT_EQ(allocated4, _1m - 0x1000); ASSERT_EQ(a4[0].offset, i + 0x1000); ASSERT_EQ(a4[0].length, _1m - 0x1000); if (0 == (i % (1 * 1024 * _1m))) { std::cout << "allocH " << i / 1024 / 1024 << " mb of " << capacity / 1024 / 1024 << std::endl; } } for (uint64_t i = 0; i < capacity; i += _1m) { interval_vector_t a4; a4.emplace_back(i, 0x1000); al2.free_l2(a4); if (0 == (i % (1 * 1024 * _1m))) { std::cout << "freeH1 " << i / 1024 / 1024 << " mb of " << capacity / 1024 / 1024 << std::endl; } } { std::cout << "Try" << std::endl; time_t t 
= time(NULL); for (int i = 0; i < 10; ++i) { uint64_t allocated = 0; interval_vector_t a; al2.allocate_l2(0x2000, 0x2000, &allocated, &a); ASSERT_EQ(a.size(), 0u); } std::cout << "End try in " << time(NULL) - t << " seconds" << std::endl; } { std::cout << "Try" << std::endl; time_t t = time(NULL); for (int i = 0; i < 10; ++i) { uint64_t allocated = 0; interval_vector_t a; al2.allocate_l2(_2m, _2m, &allocated, &a); ASSERT_EQ(a.size(), 0u); } std::cout << "End try in " << time(NULL) - t << " seconds" << std::endl; } ASSERT_EQ((capacity / _1m) * 0x1000, al2.debug_get_free()); std::cout << "Done L2 Huge" << std::endl; } TEST(TestAllocatorLevel01, test_l2_unaligned) { { TestAllocatorLevel02 al2; uint64_t num_l2_entries = 3; uint64_t capacity = num_l2_entries * 256 * 512 * 4096; // 3x512 MB al2.init(capacity, 0x1000); std::cout << "Init L2 Unaligned" << std::endl; for (uint64_t i = 0; i < capacity; i += _1m / 2) { uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(_1m / 2, _1m / 2, &allocated4, &a4); ASSERT_EQ(a4.size(), 1u); ASSERT_EQ(allocated4, _1m / 2); ASSERT_EQ(a4[0].offset, i); ASSERT_EQ(a4[0].length, _1m / 2); if (0 == (i % (1 * 1024 * _1m))) { std::cout << "allocU " << i / 1024 / 1024 << " mb of " << capacity / 1024 / 1024 << std::endl; } } ASSERT_EQ(0u, al2.debug_get_free()); { // no space to allocate uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(0x1000, 0x1000, &allocated4, &a4); ASSERT_EQ(a4.size(), 0u); } } { TestAllocatorLevel02 al2; uint64_t capacity = 500 * 512 * 4096; // 500x2 MB al2.init(capacity, 0x1000); std::cout << ("Init L2 Unaligned2\n"); for (uint64_t i = 0; i < capacity; i += _1m / 2) { uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(_1m / 2, _1m / 2, &allocated4, &a4); ASSERT_EQ(a4.size(), 1u); ASSERT_EQ(allocated4, _1m / 2); ASSERT_EQ(a4[0].offset, i); ASSERT_EQ(a4[0].length, _1m / 2); if (0 == (i % (1 * 1024 * _1m))) { std::cout << "allocU2 " << i / 1024 / 1024 << " mb of " << capacity / 1024 / 1024 
<< std::endl; } } ASSERT_EQ(0u, al2.debug_get_free()); { // no space to allocate uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(0x1000, 0x1000, &allocated4, &a4); ASSERT_EQ(a4.size(), 0u); } } { TestAllocatorLevel02 al2; uint64_t capacity = 100 * 512 * 4096 + 127 * 4096; al2.init(capacity, 0x1000); std::cout << "Init L2 Unaligned2" << std::endl; for (uint64_t i = 0; i < capacity; i += 0x1000) { uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(0x1000, 0x1000, &allocated4, &a4); ASSERT_EQ(a4.size(), 1u); ASSERT_EQ(allocated4, 0x1000u); ASSERT_EQ(a4[0].offset, i); ASSERT_EQ(a4[0].length, 0x1000u); } ASSERT_EQ(0u, al2.debug_get_free()); { // no space to allocate uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(0x1000, 0x1000, &allocated4, &a4); ASSERT_EQ(a4.size(), 0u); } } { TestAllocatorLevel02 al2; uint64_t capacity = 3 * 4096; al2.init(capacity, 0x1000); std::cout << "Init L2 Unaligned2" << std::endl; for (uint64_t i = 0; i < capacity; i += 0x1000) { uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(0x1000, 0x1000, &allocated4, &a4); ASSERT_EQ(a4.size(), 1u); ASSERT_EQ(allocated4, 0x1000u); ASSERT_EQ(a4[0].offset, i); ASSERT_EQ(a4[0].length, 0x1000u); } ASSERT_EQ(0u, al2.debug_get_free()); { // no space to allocate uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(0x1000, 0x1000, &allocated4, &a4); ASSERT_EQ(a4.size(), 0u); } } std::cout << "Done L2 Unaligned" << std::endl; } TEST(TestAllocatorLevel01, test_l2_contiguous_alignment) { { TestAllocatorLevel02 al2; uint64_t num_l2_entries = 3; uint64_t capacity = num_l2_entries * 256 * 512 * 4096; // 3x512 MB uint64_t num_chunks = capacity / 4096; al2.init(capacity, 4096); std::cout << "Init L2 cont aligned" << std::endl; std::map<size_t, size_t> bins_overall; al2.collect_stats(bins_overall); ASSERT_EQ(bins_overall.size(), 1u); // std::cout<<bins_overall.begin()->first << std::endl; ASSERT_EQ(bins_overall[cbits(num_chunks) - 1], 1u); for (uint64_t i 
= 0; i < capacity / 2; i += _1m) { uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(_1m, _1m, &allocated4, &a4); ASSERT_EQ(a4.size(), 1u); ASSERT_EQ(allocated4, _1m); ASSERT_EQ(a4[0].offset, i); ASSERT_EQ(a4[0].length, _1m); } ASSERT_EQ(capacity / 2, al2.debug_get_free()); bins_overall.clear(); al2.collect_stats(bins_overall); ASSERT_EQ(bins_overall.size(), 1u); ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u); { size_t to_release = 2 * _1m + 0x1000; // release 2M + 4K at the beginning interval_vector_t r; r.emplace_back(0, to_release); al2.free_l2(r); bins_overall.clear(); al2.collect_stats(bins_overall); ASSERT_EQ(bins_overall.size(), 2u); ASSERT_EQ(bins_overall[cbits(to_release / 0x1000) - 1], 1u); ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u); } { // allocate 4K within the deallocated range uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(0x1000, 0x1000, &allocated4, &a4); ASSERT_EQ(a4.size(), 1u); ASSERT_EQ(allocated4, 0x1000u); ASSERT_EQ(a4[0].offset, 0u); ASSERT_EQ(a4[0].length, 0x1000u); bins_overall.clear(); al2.collect_stats(bins_overall); ASSERT_EQ(bins_overall.size(), 2u); ASSERT_EQ(bins_overall[cbits(2 * _1m / 0x1000) - 1], 1u); ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u); } { // allocate 1M - should go to the second 1M chunk uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(_1m, _1m, &allocated4, &a4); ASSERT_EQ(a4.size(), 1u); ASSERT_EQ(allocated4, _1m); ASSERT_EQ(a4[0].offset, _1m); ASSERT_EQ(a4[0].length, _1m); bins_overall.clear(); al2.collect_stats(bins_overall); ASSERT_EQ(bins_overall.size(), 3u); ASSERT_EQ(bins_overall[0], 1u); ASSERT_EQ(bins_overall[cbits((_1m - 0x1000) / 0x1000) - 1], 1u); ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u); } { // and allocate yet another 8K within the deallocated range uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(0x2000, 0x1000, &allocated4, &a4); ASSERT_EQ(a4.size(), 1u); ASSERT_EQ(allocated4, 0x2000u); 
ASSERT_EQ(a4[0].offset, 0x1000u); ASSERT_EQ(a4[0].length, 0x2000u); bins_overall.clear(); al2.collect_stats(bins_overall); ASSERT_EQ(bins_overall[0], 1u); ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u); ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u); } { // release just allocated 1M interval_vector_t r; r.emplace_back(_1m, _1m); al2.free_l2(r); bins_overall.clear(); al2.collect_stats(bins_overall); ASSERT_EQ(bins_overall.size(), 2u); ASSERT_EQ(bins_overall[cbits((2 * _1m - 0x3000) / 0x1000) - 1], 1u); ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u); } { // allocate 3M - should go to the second 1M chunk and @capacity/2 uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(3 * _1m, _1m, &allocated4, &a4); ASSERT_EQ(a4.size(), 2u); ASSERT_EQ(allocated4, 3 * _1m); ASSERT_EQ(a4[0].offset, _1m); ASSERT_EQ(a4[0].length, _1m); ASSERT_EQ(a4[1].offset, capacity / 2); ASSERT_EQ(a4[1].length, 2 * _1m); bins_overall.clear(); al2.collect_stats(bins_overall); ASSERT_EQ(bins_overall.size(), 3u); ASSERT_EQ(bins_overall[0], 1u); ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u); ASSERT_EQ(bins_overall[cbits((num_chunks - 512) / 2) - 1], 1u); } { // release allocated 1M in the second meg chunk except // the first 4K chunk interval_vector_t r; r.emplace_back(_1m + 0x1000, _1m); al2.free_l2(r); bins_overall.clear(); al2.collect_stats(bins_overall); ASSERT_EQ(bins_overall.size(), 3u); ASSERT_EQ(bins_overall[cbits(_1m / 0x1000) - 1], 1u); ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u); ASSERT_EQ(bins_overall[cbits((num_chunks - 512) / 2) - 1], 1u); } { // release 2M @(capacity / 2) interval_vector_t r; r.emplace_back(capacity / 2, 2 * _1m); al2.free_l2(r); bins_overall.clear(); al2.collect_stats(bins_overall); ASSERT_EQ(bins_overall.size(), 3u); ASSERT_EQ(bins_overall[cbits(_1m / 0x1000) - 1], 1u); ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u); ASSERT_EQ(bins_overall[cbits((num_chunks) / 2) - 1], 
1u); } { // allocate 4x512K - should go to the second halves of // the first and second 1M chunks and @(capacity / 2) uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(2 * _1m, _1m / 2, &allocated4, &a4); ASSERT_EQ(a4.size(), 3u); ASSERT_EQ(allocated4, 2 * _1m); ASSERT_EQ(a4[0].offset, _1m / 2); ASSERT_EQ(a4[0].length, _1m / 2); ASSERT_EQ(a4[1].offset, _1m + _1m / 2); ASSERT_EQ(a4[1].length, _1m / 2); ASSERT_EQ(a4[2].offset, capacity / 2); ASSERT_EQ(a4[2].length, _1m); bins_overall.clear(); al2.collect_stats(bins_overall); ASSERT_EQ(bins_overall.size(), 3u); ASSERT_EQ(bins_overall[0], 1u); // below we have 512K - 4K & 512K - 12K chunks which both fit into // the same bin = 6 ASSERT_EQ(bins_overall[6], 2u); ASSERT_EQ(bins_overall[cbits((num_chunks - 256) / 2) - 1], 1u); } { // cleanup first 2M except except the last 4K chunk interval_vector_t r; r.emplace_back(0, 2 * _1m - 0x1000); al2.free_l2(r); bins_overall.clear(); al2.collect_stats(bins_overall); ASSERT_EQ(bins_overall.size(), 3u); ASSERT_EQ(bins_overall[0], 1u); ASSERT_EQ(bins_overall[cbits((_2m - 0x1000) / 0x1000) - 1], 1u); ASSERT_EQ(bins_overall[cbits((num_chunks - 256) / 2) - 1], 1u); } { // release 2M @(capacity / 2) interval_vector_t r; r.emplace_back(capacity / 2, 2 * _1m); al2.free_l2(r); bins_overall.clear(); al2.collect_stats(bins_overall); ASSERT_EQ(bins_overall.size(), 3u); ASSERT_EQ(bins_overall[0], 1u); ASSERT_EQ(bins_overall[cbits((_2m - 0x1000) / 0x1000) - 1], 1u); ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u); } { // allocate 132M using 4M granularity should go to (capacity / 2) uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(132 * _1m, 4 * _1m , &allocated4, &a4); ASSERT_EQ(a4.size(), 1u); ASSERT_EQ(a4[0].offset, capacity / 2); ASSERT_EQ(a4[0].length, 132 * _1m); bins_overall.clear(); al2.collect_stats(bins_overall); ASSERT_EQ(bins_overall.size(), 3u); } { // cleanup left 4K chunk in the first 2M interval_vector_t r; r.emplace_back(2 * _1m - 0x1000, 
0x1000); al2.free_l2(r); bins_overall.clear(); al2.collect_stats(bins_overall); ASSERT_EQ(bins_overall.size(), 2u); } { // release 132M @(capacity / 2) interval_vector_t r; r.emplace_back(capacity / 2, 132 * _1m); al2.free_l2(r); } { // allocate 132M using 2M granularity should go to the first chunk and to // (capacity / 2) uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(132 * _1m, 2 * _1m , &allocated4, &a4); ASSERT_EQ(a4.size(), 2u); ASSERT_EQ(a4[0].offset, 0u); ASSERT_EQ(a4[0].length, 2 * _1m); ASSERT_EQ(a4[1].offset, capacity / 2); ASSERT_EQ(a4[1].length, 130 * _1m); } { // release 130M @(capacity / 2) interval_vector_t r; r.emplace_back(capacity / 2, 132 * _1m); al2.free_l2(r); } { // release 4K~16K // release 28K~32K // release 68K~24K interval_vector_t r; r.emplace_back(0x1000, 0x4000); r.emplace_back(0x7000, 0x8000); r.emplace_back(0x11000, 0x6000); al2.free_l2(r); } { // allocate 32K using 16K granularity - should bypass the first // unaligned extent, use the second free extent partially given // the 16K alignment and then fallback to capacity / 2 uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(0x8000, 0x4000, &allocated4, &a4); ASSERT_EQ(a4.size(), 2u); ASSERT_EQ(a4[0].offset, 0x8000u); ASSERT_EQ(a4[0].length, 0x4000u); ASSERT_EQ(a4[1].offset, capacity / 2); ASSERT_EQ(a4[1].length, 0x4000u); } } std::cout << "Done L2 cont aligned" << std::endl; } TEST(TestAllocatorLevel01, test_4G_alloc_bug) { { TestAllocatorLevel02 al2; uint64_t capacity = 0x8000 * _1m; // = 32GB al2.init(capacity, 0x10000); std::cout << "Init L2 cont aligned" << std::endl; uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(_1m, _1m, &allocated4, &a4); ASSERT_EQ(a4.size(), 1u); // the bug caused no allocations here ASSERT_EQ(allocated4, _1m); ASSERT_EQ(a4[0].offset, 0u); ASSERT_EQ(a4[0].length, _1m); } } TEST(TestAllocatorLevel01, test_4G_alloc_bug2) { { TestAllocatorLevel02 al2; uint64_t capacity = 0x8000 * _1m; // = 32GB al2.init(capacity, 
0x10000); for (uint64_t i = 0; i < capacity; i += _1m) { uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(_1m, _1m, &allocated4, &a4); ASSERT_EQ(a4.size(), 1u); ASSERT_EQ(allocated4, _1m); ASSERT_EQ(a4[0].offset, i); ASSERT_EQ(a4[0].length, _1m); } ASSERT_EQ(0u , al2.debug_get_free()); interval_vector_t r; r.emplace_back(0x5fec30000, 0x13d0000); r.emplace_back(0x628000000, 0x80000000); r.emplace_back(0x6a8000000, 0x80000000); r.emplace_back(0x728100000, 0x70000); al2.free_l2(r); std::map<size_t, size_t> bins_overall; al2.collect_stats(bins_overall); uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(0x3e000000, _1m, &allocated4, &a4); ASSERT_EQ(a4.size(), 2u); ASSERT_EQ(allocated4, 0x3e000000u); ASSERT_EQ(a4[0].offset, 0x5fed00000u); ASSERT_EQ(a4[0].length, 0x1300000u); ASSERT_EQ(a4[1].offset, 0x628000000u); ASSERT_EQ(a4[1].length, 0x3cd00000u); } } TEST(TestAllocatorLevel01, test_4G_alloc_bug3) { { TestAllocatorLevel02 al2; uint64_t capacity = 0x8000 * _1m; // = 32GB al2.init(capacity, 0x10000); std::cout << "Init L2 cont aligned" << std::endl; uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(4096ull * _1m, _1m, &allocated4, &a4); ASSERT_EQ(a4.size(), 2u); // allocator has to split into 2 allocations ASSERT_EQ(allocated4, 4096ull * _1m); ASSERT_EQ(a4[0].offset, 0u); ASSERT_EQ(a4[0].length, 2048ull * _1m); ASSERT_EQ(a4[1].offset, 2048ull * _1m); ASSERT_EQ(a4[1].length, 2048ull * _1m); } } TEST(TestAllocatorLevel01, test_claim_free_l2) { TestAllocatorLevel02 al2; uint64_t num_l2_entries = 64;// *512; uint64_t capacity = num_l2_entries * 256 * 512 * 4096; al2.init(capacity, 0x1000); std::cout << "Init L2" << std::endl; uint64_t max_available = 0x20000; al2.mark_allocated(max_available, capacity - max_available); uint64_t allocated1 = 0; interval_vector_t a1; al2.allocate_l2(0x2000, 0x2000, &allocated1, &a1); ASSERT_EQ(allocated1, 0x2000u); ASSERT_EQ(a1[0].offset, 0u); ASSERT_EQ(a1[0].length, 0x2000u); uint64_t allocated2 = 0; 
interval_vector_t a2; al2.allocate_l2(0x2000, 0x2000, &allocated2, &a2); ASSERT_EQ(allocated2, 0x2000u); ASSERT_EQ(a2[0].offset, 0x2000u); ASSERT_EQ(a2[0].length, 0x2000u); uint64_t allocated3 = 0; interval_vector_t a3; al2.allocate_l2(0x3000, 0x3000, &allocated3, &a3); ASSERT_EQ(allocated3, 0x3000u); ASSERT_EQ(a3[0].offset, 0x4000u); ASSERT_EQ(a3[0].length, 0x3000u); al2.free_l2(a1); al2.free_l2(a3); ASSERT_EQ(max_available - 0x2000, al2.debug_get_free()); auto claimed = al2.claim_free_to_right(0x4000); ASSERT_EQ(max_available - 0x4000u, claimed); ASSERT_EQ(0x2000, al2.debug_get_free()); claimed = al2.claim_free_to_right(0x4000); ASSERT_EQ(0, claimed); ASSERT_EQ(0x2000, al2.debug_get_free()); claimed = al2.claim_free_to_left(0x2000); ASSERT_EQ(0x2000u, claimed); ASSERT_EQ(0, al2.debug_get_free()); claimed = al2.claim_free_to_left(0x2000); ASSERT_EQ(0, claimed); ASSERT_EQ(0, al2.debug_get_free()); al2.mark_free(0x3000, 0x4000); ASSERT_EQ(0x4000, al2.debug_get_free()); claimed = al2.claim_free_to_right(0x7000); ASSERT_EQ(0, claimed); ASSERT_EQ(0x4000, al2.debug_get_free()); claimed = al2.claim_free_to_right(0x6000); ASSERT_EQ(0x1000, claimed); ASSERT_EQ(0x3000, al2.debug_get_free()); claimed = al2.claim_free_to_right(0x6000); ASSERT_EQ(0, claimed); ASSERT_EQ(0x3000, al2.debug_get_free()); claimed = al2.claim_free_to_left(0x3000); ASSERT_EQ(0u, claimed); ASSERT_EQ(0x3000, al2.debug_get_free()); claimed = al2.claim_free_to_left(0x4000); ASSERT_EQ(0x1000, claimed); ASSERT_EQ(0x2000, al2.debug_get_free()); // claiming on the right boundary claimed = al2.claim_free_to_right(capacity); ASSERT_EQ(0x0, claimed); ASSERT_EQ(0x2000, al2.debug_get_free()); // extend allocator space up to 64M auto max_available2 = 64 * 1024 * 1024; al2.mark_free(max_available, max_available2 - max_available); ASSERT_EQ(max_available2 - max_available + 0x2000, al2.debug_get_free()); // pin some allocations al2.mark_allocated(0x400000 + 0x2000, 1000); al2.mark_allocated(0x400000 + 0x5000, 1000); 
al2.mark_allocated(0x400000 + 0x20000, 1000); ASSERT_EQ(max_available2 - max_available - 0x1000, al2.debug_get_free()); claimed = al2.claim_free_to_left(0x403000); ASSERT_EQ(0x0, claimed); claimed = al2.claim_free_to_left(0x404000); ASSERT_EQ(0x1000, claimed); ASSERT_EQ(max_available2 - max_available - 0x2000, al2.debug_get_free()); claimed = al2.claim_free_to_left(max_available); ASSERT_EQ(0, claimed); claimed = al2.claim_free_to_left(0x400000); ASSERT_EQ(0x3e0000, claimed); ASSERT_EQ(max_available2 - max_available - 0x3e2000, al2.get_available()); ASSERT_EQ(max_available2 - max_available - 0x3e2000, al2.debug_get_free()); claimed = al2.claim_free_to_right(0x407000); ASSERT_EQ(0x19000, claimed); ASSERT_EQ(max_available2 - max_available - 0x3e2000 - 0x19000, al2.get_available()); ASSERT_EQ(max_available2 - max_available - 0x3e2000 - 0x19000, al2.debug_get_free()); claimed = al2.claim_free_to_right(0x407000); ASSERT_EQ(0, claimed); claimed = al2.claim_free_to_right(0x430000); ASSERT_EQ(max_available2 - 0x430000, claimed); ASSERT_EQ(0x15000, al2.get_available()); ASSERT_EQ(0x15000, al2.debug_get_free()); }
33,713
30.597001
82
cc
null
ceph-main/src/test/objectstore/hybrid_allocator_test.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include <iostream> #include <gtest/gtest.h> #include "os/bluestore/HybridAllocator.h" class TestHybridAllocator : public HybridAllocator { public: TestHybridAllocator(CephContext* cct, int64_t device_size, int64_t _block_size, uint64_t max_entries, const std::string& name) : HybridAllocator(cct, device_size, _block_size, max_entries, name) { } uint64_t get_bmap_free() { return get_bmap() ? get_bmap()->get_free() : 0; } uint64_t get_avl_free() { return AvlAllocator::get_free(); } }; const uint64_t _1m = 1024 * 1024; const uint64_t _4m = 4 * 1024 * 1024; TEST(HybridAllocator, basic) { { uint64_t block_size = 0x1000; uint64_t capacity = 0x10000 * _1m; // = 64GB TestHybridAllocator ha(g_ceph_context, capacity, block_size, 4 * sizeof(range_seg_t), "test_hybrid_allocator"); ASSERT_EQ(0, ha.get_free()); ASSERT_EQ(0, ha.get_avl_free()); ASSERT_EQ(0, ha.get_bmap_free()); ha.init_add_free(0, _4m); ASSERT_EQ(_4m, ha.get_free()); ASSERT_EQ(_4m, ha.get_avl_free()); ASSERT_EQ(0, ha.get_bmap_free()); ha.init_add_free(2 * _4m, _4m); ASSERT_EQ(_4m * 2, ha.get_free()); ASSERT_EQ(_4m * 2, ha.get_avl_free()); ASSERT_EQ(0, ha.get_bmap_free()); ha.init_add_free(100 * _4m, _4m); ha.init_add_free(102 * _4m, _4m); ASSERT_EQ(_4m * 4, ha.get_free()); ASSERT_EQ(_4m * 4, ha.get_avl_free()); ASSERT_EQ(0, ha.get_bmap_free()); // next allocs will go to bitmap ha.init_add_free(4 * _4m, _4m); ASSERT_EQ(_4m * 5, ha.get_free()); ASSERT_EQ(_4m * 4, ha.get_avl_free()); ASSERT_EQ(_4m * 1, ha.get_bmap_free()); ha.init_add_free(6 * _4m, _4m); ASSERT_EQ(_4m * 6, ha.get_free()); ASSERT_EQ(_4m * 4, ha.get_avl_free()); ASSERT_EQ(_4m * 2, ha.get_bmap_free()); // so we have 6x4M chunks, 4 chunks at AVL and 2 at bitmap ha.init_rm_free(_1m, _1m); // take 1M from AVL ASSERT_EQ(_1m * 23, ha.get_free()); ASSERT_EQ(_1m * 14, ha.get_avl_free()); ASSERT_EQ(_1m * 9, ha.get_bmap_free()); ha.init_rm_free(6 * _4m + _1m, _1m); 
// take 1M from bmap ASSERT_EQ(_1m * 22, ha.get_free()); ASSERT_EQ(_1m * 14, ha.get_avl_free()); ASSERT_EQ(_1m * 8, ha.get_bmap_free()); // so we have at avl: 2M~2M, 8M~4M, 400M~4M , 408M~4M // and at bmap: 0~1M, 16M~1M, 18M~2M, 24~4M PExtentVector extents; // allocate 4K, to be served from bitmap EXPECT_EQ(block_size, ha.allocate(block_size, block_size, 0, (int64_t)0, &extents)); ASSERT_EQ(1, extents.size()); ASSERT_EQ(0, extents[0].offset); ASSERT_EQ(_1m * 14, ha.get_avl_free()); ASSERT_EQ(_1m * 8 - block_size, ha.get_bmap_free()); interval_set<uint64_t> release_set; // release 4K, to be returned to bitmap release_set.insert(extents[0].offset, extents[0].length); ha.release(release_set); ASSERT_EQ(_1m * 14, ha.get_avl_free()); ASSERT_EQ(_1m * 8, ha.get_bmap_free()); extents.clear(); release_set.clear(); // again we have at avl: 2M~2M, 8M~4M, 400M~4M , 408M~4M // and at bmap: 0~1M, 16M~1M, 18M~2M, 24~4M // add 12M~3M which will go to avl ha.init_add_free(3 * _4m, 3 * _1m); ASSERT_EQ(_1m * 17, ha.get_avl_free()); ASSERT_EQ(_1m * 8, ha.get_bmap_free()); // add 15M~4K which will be appended to existing slot ha.init_add_free(15 * _1m, 0x1000); ASSERT_EQ(_1m * 17 + 0x1000, ha.get_avl_free()); ASSERT_EQ(_1m * 8, ha.get_bmap_free()); // again we have at avl: 2M~2M, 8M~(7M+4K), 400M~4M , 408M~4M // and at bmap: 0~1M, 16M~1M, 18M~2M, 24~4M //some removals from bmap ha.init_rm_free(28 * _1m - 0x1000, 0x1000); ASSERT_EQ(_1m * 17 + 0x1000, ha.get_avl_free()); ASSERT_EQ(_1m * 8 - 0x1000, ha.get_bmap_free()); ha.init_rm_free(24 * _1m + 0x1000, 0x1000); ASSERT_EQ(_1m * 17 + 0x1000, ha.get_avl_free()); ASSERT_EQ(_1m * 8 - 0x2000, ha.get_bmap_free()); ha.init_rm_free(24 * _1m + 0x1000, _4m - 0x2000); ASSERT_EQ(_1m * 17 + 0x1000, ha.get_avl_free()); ASSERT_EQ(_1m * 4, ha.get_bmap_free()); //4K removal from avl ha.init_rm_free(15 * _1m, 0x1000); ASSERT_EQ(_1m * 17, ha.get_avl_free()); ASSERT_EQ(_1m * 4, ha.get_bmap_free()); //remove highest 4Ms from avl ha.init_rm_free(_1m * 400, 
_4m); ha.init_rm_free(_1m * 408, _4m); ASSERT_EQ(_1m * 9, ha.get_avl_free()); ASSERT_EQ(_1m * 4, ha.get_bmap_free()); // we have at avl: 2M~2M, 8M~7M // and at bmap: 0~1M, 16M~1M, 18M~2M // this will be merged with neighbors from bmap and go to avl ha.init_add_free(17 * _1m, _1m); ASSERT_EQ(_1m * 1, ha.get_bmap_free()); ASSERT_EQ(_1m * 13, ha.get_avl_free()); // we have at avl: 2M~2M, 8M~7M, 16M~4M // and at bmap: 0~1M // and now do some cutoffs from 0~1M span //cut off 4K from bmap ha.init_rm_free(0 * _1m, 0x1000); ASSERT_EQ(_1m * 13, ha.get_avl_free()); ASSERT_EQ(_1m * 1 - 0x1000, ha.get_bmap_free()); //cut off 1M-4K from bmap ha.init_rm_free(0 * _1m + 0x1000, _1m - 0x1000); ASSERT_EQ(_1m * 13, ha.get_avl_free()); ASSERT_EQ(0, ha.get_bmap_free()); //cut off 512K avl ha.init_rm_free(17 * _1m + 0x1000, _1m / 2); ASSERT_EQ(_1m * 13 - _1m / 2, ha.get_avl_free()); ASSERT_EQ(0, ha.get_bmap_free()); //cut off the rest from avl ha.init_rm_free(17 * _1m + 0x1000 + _1m / 2, _1m / 2); ASSERT_EQ(_1m * 12, ha.get_avl_free()); ASSERT_EQ(0, ha.get_bmap_free()); } { uint64_t block_size = 0x1000; uint64_t capacity = 0x10000 * _1m; // = 64GB TestHybridAllocator ha(g_ceph_context, capacity, block_size, 4 * sizeof(range_seg_t), "test_hybrid_allocator"); ha.init_add_free(_1m, _1m); ha.init_add_free(_1m * 3, _1m); ha.init_add_free(_1m * 5, _1m); ha.init_add_free(0x4000, 0x1000); ASSERT_EQ(_1m * 3 + 0x1000, ha.get_free()); ASSERT_EQ(_1m * 3 + 0x1000, ha.get_avl_free()); ASSERT_EQ(0, ha.get_bmap_free()); // This will substitute chunk 0x4000~1000. // Since new chunk insertion into into AvlAllocator:range_tree // happens immediately before 0x4000~1000 chunk care should be taken // to order operations properly and do not use already disposed iterator. 
ha.init_add_free(0, 0x2000); ASSERT_EQ(_1m * 3 + 0x3000, ha.get_free()); ASSERT_EQ(_1m * 3 + 0x2000, ha.get_avl_free()); ASSERT_EQ(0x1000, ha.get_bmap_free()); } } TEST(HybridAllocator, fragmentation) { { uint64_t block_size = 0x1000; uint64_t capacity = 0x1000 * 0x1000; // = 16M TestHybridAllocator ha(g_ceph_context, capacity, block_size, 4 * sizeof(range_seg_t), "test_hybrid_allocator"); ha.init_add_free(0, 0x2000); ha.init_add_free(0x4000, 0x2000); ha.init_add_free(0x8000, 0x2000); ha.init_add_free(0xc000, 0x1000); ASSERT_EQ(0.5, ha.get_fragmentation()); // this will got to bmap with fragmentation = 1 ha.init_add_free(0x10000, 0x1000); // which results in the following total fragmentation ASSERT_EQ(0.5 * 7 / 8 + 1.0 / 8, ha.get_fragmentation()); } }
7,271
30.344828
77
cc
null
ceph-main/src/test/objectstore/run_seed_to.sh
#!/usr/bin/env bash # vim: ts=8 sw=2 smarttab # # run_seed_to.sh - Run ceph_test_filestore_idempotent_sequence up until an # injection point, generating a sequence of operations based on a # provided seed. # # We also perform three additional tests, focused on assessing if # replaying a larger chunck of the journal affects the expected store # behavior. These tests will be performed by increasing the store's # journal sync interval to a very large value, allowing the store to # finish execution before the first sync (unless the store runs for # over 10 hours, case on which the interval variables must be changed # to an appropriate value). Unless the '--no-journal-test' option is # specified, we will run the 3 following scenarios: # # 1) journal sync'ing for both stores is good as disabled # (we call it '00', for store naming purposes) # 2) journal sync'ing for store A is as good as disabled # (we call it '01', for store naming purposes) # 3) journal sync'ing for store B is as good as disabled # (we call it '10', for store naming purposes) # # All log files are also appropriately named accordingly (i.e., a.00.fail, # a.10.recover, or b.01.clean). # # By default, the test will not exit on error, although it will show the # fail message. This behavior is so defined so we run the whole battery of # tests, and obtain as many mismatches as possible in one go. We may force # the test to exit on error by specifying the '--exit-on-error' option. # # set -e test_opts="" usage() { echo "usage: $1 [options..] 
<seed> <kill-at>" echo echo "options:" echo " -c, --colls <VAL> # of collections" echo " -o, --objs <VAL> # of objects" echo " -b, --btrfs <VAL> seq number for btrfs stores" echo " --no-journal-test don't perform journal replay tests" echo " -e, --exit-on-error exit with 1 on error" echo " -v, --valgrind run commands through valgrind" echo echo "env vars:" echo " OPTS_STORE additional opts for both stores" echo " OPTS_STORE_A additional opts for store A" echo " OPTS_STORE_B additional opts for store B" echo } echo $0 $* die_on_missing_arg() { if [[ "$2" == "" ]]; then echo "$1: missing required parameter" exit 1 fi } required_args=2 obtained_args=0 seed="" killat="" on_btrfs=0 on_btrfs_seq=0 journal_test=1 min_sync_interval="36000" # ten hours, yes. max_sync_interval="36001" exit_on_error=0 v="" do_rm() { if [[ $on_btrfs -eq 0 ]]; then rm -fr $* fi } set_arg() { if [[ $1 -eq 1 ]]; then seed=$2 elif [[ $1 -eq 2 ]]; then killat=$2 else echo "error: unknown purpose for '$2'" usage $0 exit 1 fi } while [[ $# -gt 0 ]]; do case "$1" in -c | --colls) die_on_missing_arg "$1" "$2" test_opts="$test_opts --test-num-colls $2" shift 2 ;; -o | --objs) die_on_missing_arg "$1" "$2" test_opts="$test_opts --test-num-objs $2" shift 2 ;; -h | --help) usage $0 ; exit 0 ;; -b | --btrfs) die_on_missing_arg "$1" "$2" on_btrfs=1 on_btrfs_seq=$2 shift 2 ;; --no-journal-test) journal_test=0 shift ;; -e | --exit-on-error) exit_on_error=1 shift ;; -v | --valgrind) v="valgrind --leak-check=full" shift ;; --) shift break ;; -*) echo "$1: unknown option" >&2 usage $0 exit 1 ;; *) obtained_args=$(($obtained_args+1)) set_arg $obtained_args $1 shift ;; esac done if [[ $obtained_args -ne $required_args ]]; then echo "error: missing argument" usage $0 ; exit 1 fi if [[ "$OPTS_STORE" != "" ]]; then test_opts="$test_opts $OPTS_STORE" fi test_opts_a="$test_opts" test_opts_b="$test_opts" if [[ "$OPTS_STORE_A" != "" ]]; then test_opts_a="$test_opts_a $OPTS_STORE_A" fi if [[ "$OPTS_STORE_B" != "" ]]; then 
test_opts_b="$test_opts_b $OPTS_STORE_B" fi echo seed $seed echo kill at $killat # run forever, until $killat... to=1000000000 # # store names # # We need these for two reasons: # 1) if we are running the tests on a btrfs volume, then we need to use # a seq number for each run. Being on btrfs means we will fail when # removing the store's directories and it's far more simple to just # specify differente store names such as 'a.$seq' or 'b.$seq'. # # 2) unless the '--no-journal-test' option is specified, we will run # three additional tests for each store, and we will reuse the same # command for each one of the runs, but varying the store's name and # arguments. # store_a="a" store_b="b" if [[ $on_btrfs -eq 1 ]]; then store_a="$store_a.$on_btrfs_seq" store_b="$store_b.$on_btrfs_seq" fi total_runs=1 if [[ $journal_test -eq 1 ]]; then total_runs=$(($total_runs + 3)) fi num_runs=0 opt_min_sync="--filestore-min-sync-interval $min_sync_interval" opt_max_sync="--filestore-max-sync-interval $max_sync_interval" ret=0 while [[ $num_runs -lt $total_runs ]]; do tmp_name_a=$store_a tmp_name_b=$store_b tmp_opts_a=$test_opts_a tmp_opts_b=$test_opts_b # # We have already tested whether there are diffs when both journals # are properly working. 
Now let's try on three other scenarios: # 1) journal sync'ing for both stores is good as disabled # (we call it '00') # 2) journal sync'ing for store A is as good as disabled # (we call it '01') # 3) journal sync'ing for store B is as good as disabled # (we call it '10') # if [[ $num_runs -gt 0 && $journal_test -eq 1 ]]; then echo "run #$num_runs" case $num_runs in 1) tmp_name_a="$tmp_name_a.00" tmp_name_b="$tmp_name_b.00" tmp_opts_a="$tmp_opts_a $opt_min_sync $opt_max_sync" tmp_opts_b="$tmp_opts_b $opt_min_sync $opt_max_sync" ;; 2) tmp_name_a="$tmp_name_a.01" tmp_name_b="$tmp_name_b.01" tmp_opts_a="$tmp_opts_a $opt_min_sync $opt_max_sync" ;; 3) tmp_name_a="$tmp_name_a.10" tmp_name_b="$tmp_name_b.10" tmp_opts_b="$tmp_opts_b $opt_min_sync $opt_max_sync" ;; esac fi do_rm $tmp_name_a $tmp_name_a.fail $tmp_name_a.recover $v ceph_test_filestore_idempotent_sequence run-sequence-to $to \ $tmp_name_a $tmp_name_a/journal \ --test-seed $seed --osd-journal-size 100 \ --filestore-kill-at $killat $tmp_opts_a \ --log-file $tmp_name_a.fail --debug-filestore 20 --no-log-to-stderr || true stop_at=`ceph_test_filestore_idempotent_sequence get-last-op \ $tmp_name_a $tmp_name_a/journal \ --log-file $tmp_name_a.recover \ --debug-filestore 20 --debug-journal 20 --no-log-to-stderr` if [[ "`expr $stop_at - $stop_at 2>/dev/null`" != "0" ]]; then echo "error: get-last-op returned '$stop_at'" exit 1 fi echo stopped at $stop_at do_rm $tmp_name_b $tmp_name_b.clean $v ceph_test_filestore_idempotent_sequence run-sequence-to \ $stop_at $tmp_name_b $tmp_name_b/journal \ --test-seed $seed --osd-journal-size 100 \ --log-file $tmp_name_b.clean --debug-filestore 20 --no-log-to-stderr \ $tmp_opts_b if $v ceph_test_filestore_idempotent_sequence diff \ $tmp_name_a $tmp_name_a/journal $tmp_name_b $tmp_name_b/journal --no-log-to-stderr --log-file $tmp_name_a.diff.log --debug-filestore 20 ; then echo OK else echo "FAIL" echo " see:" echo " $tmp_name_a.fail -- leading up to failure" echo " $tmp_name_a.recover 
-- journal replay" echo " $tmp_name_b.clean -- the clean reference" ret=1 if [[ $exit_on_error -eq 1 ]]; then exit 1 fi fi num_runs=$(($num_runs+1)) done exit $ret
7,659
25.054422
146
sh
null
ceph-main/src/test/objectstore/run_seed_to_range.sh
#!/bin/sh set -x set -e seed=$1 from=$2 to=$3 dir=$4 mydir=`dirname $0` for f in `seq $from $to` do if ! $mydir/run_seed_to.sh -o 10 -e $seed $f; then if [ -d "$dir" ]; then echo copying evidence to $dir cp -a . $dir else echo no dir provided for evidence disposal fi exit 1 fi done
314
11.6
54
sh
null
ceph-main/src/test/objectstore/run_smr_bluestore_test.sh
#!/bin/bash -ex # 1) run_smr_bluestore_test.sh # Setup smr device, run all tests # 2) run_smr_bluestore_test.sh --smr # Setup smr device but skip tests failing on smr before_creation=$(mktemp) lsscsi > $before_creation echo "cd /backstores/user:zbc create name=zbc0 size=20G cfgstring=model-HM/zsize-256/conv-10@zbc0.raw /loopback create cd /loopback create naa.50014055e5f25aa0 cd naa.50014055e5f25aa0/luns create /backstores/user:zbc/zbc0 0 " | sudo targetcli sleep 1 #if too fast device does not show up after_creation=$(mktemp) lsscsi > $after_creation if [[ $(diff $before_creation $after_creation | wc -l ) != 2 ]] then echo New zbc device not created false fi function cleanup() { echo "cd /loopback delete naa.50014055e5f25aa0 cd /backstores/user:zbc delete zbc0" | sudo targetcli sudo rm -f zbc0.raw rm -f $before_creation $after_creation } trap cleanup EXIT DEV=$(diff $before_creation $after_creation |grep zbc |sed "s@.* /@/@") sudo chmod 666 $DEV # Need sudo # https://patchwork.kernel.org/project/linux-block/patch/20210811110505.29649-3-Niklas.Cassel@wdc.com/ sudo ceph_test_objectstore \ --bluestore-block-path $DEV \ --gtest_filter=*/2 \ $*
1,198
23.469388
102
sh
null
ceph-main/src/test/objectstore/run_test_deferred.sh
#!/bin/bash if [[ ! (-x ./bin/unittest_deferred) || ! (-x ./bin/ceph-kvstore-tool) || ! (-x ./bin/ceph-bluestore-tool)]] then echo Test must be run from ceph build directory echo with unittest_deferred, ceph-kvstore-tool and ceph-bluestore-tool compiled exit 1 fi # Create BlueStore, only main block device, 4K AU, forced deferred 4K, 64K AU for BlueFS # Create file zapchajdziura, that is 0xe000 in size. # This adds to 0x0000 - 0x1000 of BlueStore superblock and 0x1000 - 0x2000 of BlueFS superblock, # making 0x00000 - 0x10000 filled, nicely aligning for 64K BlueFS requirements # Prefill 10 objects Object-0 .. Object-9, each 64K. Sync to disk. # Do transactions like: # - fill Object-x+1 16 times at offsets 0x0000, 0x1000, ... 0xf000 with 8bytes, trigerring deferred writes # - fill Object-x with 64K data # Repeat for Object-0 to Object-8. # Right after getting notification on_complete for all 9 transactions, immediately exit(1). ./bin/unittest_deferred --log-to-stderr=false # Now we should have a considerable amount of pending deferred writes. # They do refer disk regions that do not belong to any object. # Perform compaction on RocksDB # This initializes BlueFS, but does not replay deferred writes. # It jiggles RocksDB files around. CURRENT and MANIFEST are recreated, with some .sst files too. # The hope here is that newly created RocksDB files will occupy space that is free, # but targetted by pending deferred writes. ./bin/ceph-kvstore-tool bluestore-kv bluestore.test_temp_dir/ compact --log-to-stderr=false # It this step we (hopefully) get RocksDB files overwritten # We initialize BlueFS and RocksDB, there should be no problem here. # Then we apply deferred writes. Now some of RocksDB files might get corrupted. # It is very likely that this will not cause any problems, since CURRENT and MANIFEST are only read at bootup. 
./bin/ceph-bluestore-tool --path bluestore.test_temp_dir/ --command fsck --deep 1 --debug-bluestore=30/30 --debug-bdev=30/30 --log-file=log-bs-corrupts.txt --log-to-file --log-to-stderr=false # If we were lucky, this command now fails ./bin/ceph-bluestore-tool --path bluestore.test_temp_dir/ --command fsck --deep 1 --debug-bluestore=30/30 --debug-bdev=30/30 --log-file=log-bs-crash.txt --log-to-file --log-to-stderr=false if [[ $? != 0 ]] then echo "Deferred writes corruption successfully created !" else echo "No deferred write problems detected." fi #cleanup rm -rf bluestore.test_temp_dir/
2,479
45.792453
191
sh