Search is not available for this dataset
repo stringlengths 2 152 ⌀ | file stringlengths 15 239 | code stringlengths 0 58.4M | file_length int64 0 58.4M | avg_line_length float64 0 1.81M | max_line_length int64 0 12.7M | extension_type stringclasses 364 values |
|---|---|---|---|---|---|---|
null | ceph-main/src/test/rgw/amqp_mock.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "amqp_mock.h"
#include <amqp.h>
#include <amqp_ssl_socket.h>
#include <amqp_tcp_socket.h>
#include <string>
#include <stdarg.h>
#include <mutex>
#include <boost/lockfree/queue.hpp>
#include <openssl/ssl.h>
namespace amqp_mock {

// Guards all VALID_* globals below while they are being (re)configured
// by the setters.  NOTE(review): the mocked C API reads some of these
// (e.g. VALID_VHOST in amqp_login) without taking this lock — presumably
// tests configure them before any connection activity; verify if that
// assumption changes.
std::mutex set_valid_lock;

// Connection parameters the mock will accept; anything else is rejected
// by amqp_socket_open()/amqp_login().
int VALID_PORT(5672);
std::string VALID_HOST("localhost");
std::string VALID_VHOST("/");
std::string VALID_USER("guest");
std::string VALID_PASSWORD("guest");

// Set the only port the mock treats as reachable.
void set_valid_port(int port) {
  std::lock_guard<std::mutex> lock(set_valid_lock);
  VALID_PORT = port;
}

// Set the only host the mock treats as reachable.
void set_valid_host(const std::string& host) {
  std::lock_guard<std::mutex> lock(set_valid_lock);
  VALID_HOST = host;
}

// Set the only vhost for which login succeeds.
void set_valid_vhost(const std::string& vhost) {
  std::lock_guard<std::mutex> lock(set_valid_lock);
  VALID_VHOST = vhost;
}

// Set the only user/password pair for which login succeeds.
void set_valid_user(const std::string& user, const std::string& password) {
  std::lock_guard<std::mutex> lock(set_valid_lock);
  VALID_USER = user;
  VALID_PASSWORD = password;
}

// When g_multiple is set, amqp_simple_wait_frame_noblock() pops
// g_tag_skip pending (n)acks and replies with a single "multiple" frame.
std::atomic<unsigned> g_tag_skip = 0;
std::atomic<int> g_multiple = 0;

// Arm "multiple ack" mode: the next wait-frame call will acknowledge
// tag_skip deliveries at once.
void set_multiple(unsigned tag_skip) {
  g_multiple = 1;
  g_tag_skip = tag_skip;
}

// Disarm "multiple ack" mode (also done automatically after it fires).
void reset_multiple() {
  g_multiple = 0;
  g_tag_skip = 0;
}

// Test knobs: force the next publish/read to fail, and choose whether
// confirmed publishes are answered with acks or nacks.
bool FAIL_NEXT_WRITE(false);
bool FAIL_NEXT_READ(false);
bool REPLY_ACK(true);
}
using namespace amqp_mock;
// Mock replacement for rabbitmq-c's opaque connection state.  Each setup
// step of the real protocol (socket, login, channels, exchange, queue,
// confirm, consume) records itself here so publish/consume paths can
// verify the full handshake happened before doing any "work".
struct amqp_connection_state_t_ {
  amqp_socket_t* socket;
  amqp_channel_open_ok_t* channel1;      // first opened channel
  amqp_channel_open_ok_t* channel2;      // second opened channel
  amqp_exchange_declare_ok_t* exchange;
  amqp_queue_declare_ok_t* queue;
  amqp_confirm_select_ok_t* confirm;
  amqp_basic_consume_ok_t* consume;
  bool login_called;
  // pending broker replies waiting to be "delivered" by wait_frame
  boost::lockfree::queue<amqp_basic_ack_t> ack_list;
  boost::lockfree::queue<amqp_basic_nack_t> nack_list;
  std::atomic<uint64_t> delivery_tag;    // next tag handed out on publish
  amqp_rpc_reply_t reply;                // result of the last mocked RPC
  amqp_basic_ack_t ack;                  // scratch: last popped ack
  amqp_basic_nack_t nack;                // scratch: last popped nack
  bool use_ssl;
  // ctor
  amqp_connection_state_t_() :
    socket(nullptr),
    channel1(nullptr),
    channel2(nullptr),
    exchange(nullptr),
    queue(nullptr),
    confirm(nullptr),
    consume(nullptr),
    login_called(false),
    ack_list(1024),
    nack_list(1024),
    delivery_tag(1),
    use_ssl(false) {
      reply.reply_type = AMQP_RESPONSE_NONE;
  }
};

// Mock replacement for rabbitmq-c's socket object; only remembers
// whether open() was invoked.
struct amqp_socket_t_ {
  void *klass;
  void *ssl_ctx;
  bool open_called;
  // ctor
  amqp_socket_t_() : klass(nullptr), ssl_ctx(nullptr), open_called(false) {
  }
};
extern "C" {
// Allocate a fresh mock connection state.
amqp_connection_state_t AMQP_CALL amqp_new_connection(void) {
  auto s = new amqp_connection_state_t_;
  return s;
}

// Free the connection state and every *_ok_t object handed out for it.
int amqp_destroy_connection(amqp_connection_state_t state) {
  delete state->socket;
  delete state->channel1;
  delete state->channel2;
  delete state->exchange;
  delete state->queue;
  delete state->confirm;
  delete state->consume;
  delete state;
  return 0;
}

// Create a plain TCP mock socket and attach it to the connection.
amqp_socket_t* amqp_tcp_socket_new(amqp_connection_state_t state) {
  state->socket = new amqp_socket_t;
  return state->socket;
}

// Create an "SSL" mock socket; only flips the use_ssl flag.
amqp_socket_t* amqp_ssl_socket_new(amqp_connection_state_t state) {
  state->socket = new amqp_socket_t;
  state->use_ssl = true;
  return state->socket;
}

// SSL configuration stubs: accepted and ignored.
int amqp_ssl_socket_set_cacert(amqp_socket_t *self, const char *cacert) {
  // do nothing
  return AMQP_STATUS_OK;
}

void amqp_ssl_socket_set_verify_peer(amqp_socket_t *self, amqp_boolean_t verify) {
  // do nothing
}

void amqp_ssl_socket_set_verify_hostname(amqp_socket_t *self, amqp_boolean_t verify) {
  // do nothing
}

#if AMQP_VERSION >= AMQP_VERSION_CODE(0, 10, 0, 1)
// Newer librabbitmq exposes the SSL context; the mock has none.
void* amqp_ssl_socket_get_context(amqp_socket_t *self) {
  return nullptr;
}
#endif

// Override the OpenSSL symbol so no real trust store is loaded in tests.
int SSL_CTX_set_default_verify_paths(SSL_CTX *ctx) {
  return 1;
}
// Mock socket open: succeeds only against the configured valid
// host/port pair; returns a distinct negative code per failure so
// tests can tell what was wrong.
int amqp_socket_open(amqp_socket_t *self, const char *host, int port) {
  if (self == nullptr) {
    return -1; // no socket object was created
  }
  {
    std::lock_guard<std::mutex> lock(set_valid_lock);
    if (VALID_HOST != host) {
      return -2; // unknown host
    }
    if (VALID_PORT != port) {
      return -3; // unknown port
    }
  }
  self->open_called = true;
  return 0;
}
// Mock login: only AMQP_SASL_METHOD_PLAIN against the configured
// vhost/user/password succeeds.  The reply is pre-set to a server
// exception so every early return reports failure.
// NOTE(review): VALID_* are read here without set_valid_lock — assumes
// tests configure them before connecting; confirm if that ever changes.
amqp_rpc_reply_t amqp_login(
    amqp_connection_state_t state,
    char const *vhost,
    int channel_max,
    int frame_max,
    int heartbeat,
    amqp_sasl_method_enum sasl_method, ...) {
  // assume failure until every check passes
  state->reply.reply_type = AMQP_RESPONSE_SERVER_EXCEPTION;
  state->reply.library_error = 0;
  state->reply.reply.decoded = nullptr;
  state->reply.reply.id = 0;
  if (std::string(vhost) != VALID_VHOST) {
    return state->reply;
  }
  if (sasl_method != AMQP_SASL_METHOD_PLAIN) {
    return state->reply;
  }
  // with PLAIN auth the user and password arrive as trailing varargs
  va_list args;
  va_start(args, sasl_method);
  char* user = va_arg(args, char*);
  char* password = va_arg(args, char*);
  va_end(args);
  if (std::string(user) != VALID_USER) {
    return state->reply;
  }
  if (std::string(password) != VALID_PASSWORD) {
    return state->reply;
  }
  state->reply.reply_type = AMQP_RESPONSE_NORMAL;
  state->login_called = true;
  return state->reply;
}
// Mock channel open.  The mock supports exactly two channels: the first
// call fills channel1, every later call (re)fills channel2.
// NOTE(review): a third call leaks the previous channel2 allocation —
// the tests presumably open at most two channels per connection.
amqp_channel_open_ok_t* amqp_channel_open(amqp_connection_state_t state, amqp_channel_t channel) {
  state->reply.reply_type = AMQP_RESPONSE_NORMAL;
  if (state->channel1 == nullptr) {
    state->channel1 = new amqp_channel_open_ok_t;
    return state->channel1;
  }
  state->channel2 = new amqp_channel_open_ok_t;
  return state->channel2;
}

// Mock exchange declare: always succeeds, parameters are ignored.
amqp_exchange_declare_ok_t* amqp_exchange_declare(
    amqp_connection_state_t state,
    amqp_channel_t channel,
    amqp_bytes_t exchange,
    amqp_bytes_t type,
    amqp_boolean_t passive,
    amqp_boolean_t durable,
    amqp_boolean_t auto_delete,
    amqp_boolean_t internal,
    amqp_table_t arguments) {
  state->exchange = new amqp_exchange_declare_ok_t;
  state->reply.reply_type = AMQP_RESPONSE_NORMAL;
  return state->exchange;
}

// Return the result of the last mocked RPC on this connection.
amqp_rpc_reply_t amqp_get_rpc_reply(amqp_connection_state_t state) {
  return state->reply;
}
// Mock publish.  Succeeds only after the full handshake (socket open,
// login, two channels, exchange declare) and when FAIL_NEXT_WRITE is
// not armed.  A non-null properties pointer marks the message as
// "confirmed", so a matching ack or nack (per REPLY_ACK) is queued for
// amqp_simple_wait_frame_noblock() to deliver later.
int amqp_basic_publish(
    amqp_connection_state_t state,
    amqp_channel_t channel,
    amqp_bytes_t exchange,
    amqp_bytes_t routing_key,
    amqp_boolean_t mandatory,
    amqp_boolean_t immediate,
    struct amqp_basic_properties_t_ const *properties,
    amqp_bytes_t body) {
  // make sure that all calls happened before publish
  if (state->socket && state->socket->open_called &&
      state->login_called && state->channel1 && state->channel2 && state->exchange &&
      !FAIL_NEXT_WRITE) {
    state->reply.reply_type = AMQP_RESPONSE_NORMAL;
    if (properties) {
      if (REPLY_ACK) {
        state->ack_list.push(amqp_basic_ack_t{state->delivery_tag++, 0});
      } else {
        state->nack_list.push(amqp_basic_nack_t{state->delivery_tag++, 0});
      }
    }
    return AMQP_STATUS_OK;
  }
  return AMQP_STATUS_CONNECTION_CLOSED;
}
// Library constants normally provided by librabbitmq.
const amqp_table_t amqp_empty_table = {0, NULL};
const amqp_bytes_t amqp_empty_bytes = {0, NULL};

// Fixed strings so error-reporting code paths can be exercised.
const char* amqp_error_string2(int code) {
  static const char* str = "mock error";
  return str;
}

char const* amqp_method_name(amqp_method_number_t methodNumber) {
  static const char* str = "mock method";
  return str;
}

// Mock queue declare: always succeeds with a fixed queue name.
amqp_queue_declare_ok_t* amqp_queue_declare(
    amqp_connection_state_t state, amqp_channel_t channel, amqp_bytes_t queue,
    amqp_boolean_t passive, amqp_boolean_t durable, amqp_boolean_t exclusive,
    amqp_boolean_t auto_delete, amqp_table_t arguments) {
  state->queue = new amqp_queue_declare_ok_t;
  static const char* str = "tmp-queue";
  state->queue->queue = amqp_cstring_bytes(str);
  state->reply.reply_type = AMQP_RESPONSE_NORMAL;
  return state->queue;
}

// Mock confirm-select: always succeeds.
amqp_confirm_select_ok_t* amqp_confirm_select(amqp_connection_state_t state, amqp_channel_t channel) {
  state->confirm = new amqp_confirm_select_ok_t;
  state->reply.reply_type = AMQP_RESPONSE_NORMAL;
  return state->confirm;
}
// Mock frame wait.  Requires the complete handshake (including queue,
// confirm and consume) and FAIL_NEXT_READ unset, then delivers queued
// acks/nacks as method frames.  In "multiple" mode (see set_multiple)
// g_tag_skip entries are consumed and answered with one frame whose
// `multiple` flag is set; otherwise entries are delivered one at a time.
// Returns AMQP_STATUS_TIMEOUT when the (n)ack queue is empty.
// The const-ness of the timeval parameter changed in librabbitmq 0.11.
#if AMQP_VERSION >= AMQP_VERSION_CODE(0, 11, 0, 1)
int amqp_simple_wait_frame_noblock(amqp_connection_state_t state, amqp_frame_t *decoded_frame, const struct timeval* tv) {
#else
int amqp_simple_wait_frame_noblock(amqp_connection_state_t state, amqp_frame_t *decoded_frame, struct timeval* tv) {
#endif
  if (state->socket && state->socket->open_called &&
      state->login_called && state->channel1 && state->channel2 && state->exchange &&
      state->queue && state->consume && state->confirm && !FAIL_NEXT_READ) {
    // "wait" for queue
    usleep(tv->tv_sec*1000000+tv->tv_usec);
    // read from queue
    if (g_multiple) {
      // pop multiples and reply once at the end
      for (auto i = 0U; i < g_tag_skip; ++i) {
        if (REPLY_ACK && !state->ack_list.pop(state->ack)) {
          // queue is empty
          return AMQP_STATUS_TIMEOUT;
        } else if (!REPLY_ACK && !state->nack_list.pop(state->nack)) {
          // queue is empty
          return AMQP_STATUS_TIMEOUT;
        }
      }
      // the last popped entry (left in state->ack/nack) carries the
      // cumulative acknowledgement
      if (REPLY_ACK) {
        state->ack.multiple = g_multiple;
        decoded_frame->payload.method.id = AMQP_BASIC_ACK_METHOD;
        decoded_frame->payload.method.decoded = &state->ack;
      } else {
        state->nack.multiple = g_multiple;
        decoded_frame->payload.method.id = AMQP_BASIC_NACK_METHOD;
        decoded_frame->payload.method.decoded = &state->nack;
      }
      decoded_frame->frame_type = AMQP_FRAME_METHOD;
      state->reply.reply_type = AMQP_RESPONSE_NORMAL;
      reset_multiple();
      return AMQP_STATUS_OK;
    }
    // pop replies one by one
    if (REPLY_ACK && state->ack_list.pop(state->ack)) {
      state->ack.multiple = g_multiple;
      decoded_frame->frame_type = AMQP_FRAME_METHOD;
      decoded_frame->payload.method.id = AMQP_BASIC_ACK_METHOD;
      decoded_frame->payload.method.decoded = &state->ack;
      state->reply.reply_type = AMQP_RESPONSE_NORMAL;
      return AMQP_STATUS_OK;
    } else if (!REPLY_ACK && state->nack_list.pop(state->nack)) {
      state->nack.multiple = g_multiple;
      decoded_frame->frame_type = AMQP_FRAME_METHOD;
      decoded_frame->payload.method.id = AMQP_BASIC_NACK_METHOD;
      decoded_frame->payload.method.decoded = &state->nack;
      state->reply.reply_type = AMQP_RESPONSE_NORMAL;
      return AMQP_STATUS_OK;
    } else {
      // queue is empty
      return AMQP_STATUS_TIMEOUT;
    }
  }
  return AMQP_STATUS_CONNECTION_CLOSED;
}
// Mock basic consume: always succeeds, parameters are ignored.
amqp_basic_consume_ok_t* amqp_basic_consume(
    amqp_connection_state_t state, amqp_channel_t channel, amqp_bytes_t queue,
    amqp_bytes_t consumer_tag, amqp_boolean_t no_local, amqp_boolean_t no_ack,
    amqp_boolean_t exclusive, amqp_table_t arguments) {
  state->consume = new amqp_basic_consume_ok_t;
  state->reply.reply_type = AMQP_RESPONSE_NORMAL;
  return state->consume;
}
} // extern "C"
// amqp_parse_url() is linked via the actual rabbitmq-c library code. see: amqp_url.c
// following functions are the actual implementation copied from rabbitmq-c library
#include <string.h>
// Wrap a NUL-terminated string as a non-owning amqp_bytes_t view.
amqp_bytes_t amqp_cstring_bytes(const char* cstr) {
  amqp_bytes_t result;
  result.len = strlen(cstr);
  result.bytes = (void *)cstr;
  return result;
}

// Release bytes previously allocated by amqp_bytes_malloc_dup().
void amqp_bytes_free(amqp_bytes_t bytes) { free(bytes.bytes); }

// Deep-copy src; on allocation failure result.bytes is NULL (callers
// must check), while result.len still mirrors src.len.
amqp_bytes_t amqp_bytes_malloc_dup(amqp_bytes_t src) {
  amqp_bytes_t result;
  result.len = src.len;
  result.bytes = malloc(src.len);
  if (result.bytes != NULL) {
    memcpy(result.bytes, src.bytes, src.len);
  }
  return result;
}
| 11,183 | 27.530612 | 122 | cc |
null | ceph-main/src/test/rgw/amqp_mock.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <string>
// Test-only control surface of the AMQP mock (see amqp_mock.cc).
namespace amqp_mock {
// Configure the only endpoint/credentials the mock will accept.
void set_valid_port(int port);
void set_valid_host(const std::string& host);
void set_valid_vhost(const std::string& vhost);
void set_valid_user(const std::string& user, const std::string& password);
// Arm/disarm "multiple ack" delivery for the next wait-frame call.
void set_multiple(unsigned tag);
void reset_multiple();
extern bool FAIL_NEXT_WRITE; // default "false"
extern bool FAIL_NEXT_READ;  // default "false"
extern bool REPLY_ACK;       // default "true"
}
| 579 | 28 | 74 | h |
null | ceph-main/src/test/rgw/amqp_url.c | /*
* ***** BEGIN LICENSE BLOCK *****
* Version: MIT
*
* Portions created by Alan Antonuk are Copyright (c) 2012-2013
* Alan Antonuk. All Rights Reserved.
*
* Portions created by VMware are Copyright (c) 2007-2012 VMware, Inc.
* All Rights Reserved.
*
* Portions created by Tony Garnock-Jones are Copyright (c) 2009-2010
* VMware, Inc. and Tony Garnock-Jones. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
* ***** END LICENSE BLOCK *****
*/
// this version of the file is slightly modified from the original one
// as it is only used to mock amqp libraries
#ifdef _MSC_VER
#define _CRT_SECURE_NO_WARNINGS
#endif
#include "amqp.h"
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* Reset *ci to the stock broker defaults: guest/guest on
 * localhost:5672, vhost "/", no SSL. */
void amqp_default_connection_info(struct amqp_connection_info *ci) {
  ci->host = "localhost";
  ci->port = 5672;
  ci->vhost = "/";
  ci->user = "guest";
  ci->password = "guest";
  ci->ssl = 0;
}
/* Scan for the next delimiter, handling percent-encodings on the way. */
/* Scan for the next delimiter, handling percent-encodings on the way.
 *
 * Decodes %XX escapes in place (writing through `to` while reading
 * through `from`, so the decoded component shrinks within the same
 * buffer), NUL-terminates the component, advances *pp past the
 * delimiter, and returns the delimiter character.  When
 * colon_and_at_sign_are_delims is 0, ':' and '@' are treated as
 * ordinary characters (used inside IPv6 bracket literals). */
static char find_delim(char **pp, int colon_and_at_sign_are_delims) {
  char *from = *pp;
  char *to = from;

  for (;;) {
    char ch = *from++;

    switch (ch) {
      case ':':
      case '@':
        if (!colon_and_at_sign_are_delims) {
          *to++ = ch;
          break;
        }

      /* fall through */
      case 0:
      case '/':
      case '?':
      case '#':
      case '[':
      case ']':
        *to = 0;
        *pp = from;
        return ch;

      case '%': {
        unsigned int val;
        int chars;
        /* %n captures how many digits sscanf consumed; a valid escape
         * is exactly two hex digits that fit in a char */
        int res = sscanf(from, "%2x%n", &val, &chars);

        if (res == EOF || res < 1 || chars != 2 || val > CHAR_MAX)
        /* Return a surprising delimiter to
           force an error. */
        {
          return '%';
        }

        *to++ = (char)val;
        from += 2;
        break;
      }

      default:
        *to++ = ch;
        break;
    }
  }
}
/* Parse an AMQP URL into its component parts. */
/* Parse an AMQP URL into its component parts.
 *
 * Destructively tokenizes `url` in place (find_delim NUL-terminates
 * each component), so `parsed` ends up pointing into the caller's
 * buffer.  Accepts amqp:// and amqps:// schemes, optional
 * user:password@, host or [ipv6], optional :port and /vhost.
 * Returns AMQP_STATUS_OK or AMQP_STATUS_BAD_URL. */
int amqp_parse_url(char *url, struct amqp_connection_info *parsed) {
  int res = AMQP_STATUS_BAD_URL;
  char delim;
  char *start;
  char *host;
  char *port = NULL;

  amqp_default_connection_info(parsed);

  parsed->port = 5672;
  parsed->ssl = 0;
  /* check the prefix */
  if (!strncmp(url, "amqp://", 7)) {
    /* do nothing */
  } else if (!strncmp(url, "amqps://", 8)) {
    parsed->port = 5671;
    parsed->ssl = 1;
  } else {
    goto out;
  }

  host = start = url += (parsed->ssl ? 8 : 7);
  delim = find_delim(&url, 1);

  if (delim == ':') {
    /* The colon could be introducing the port or the
       password part of the userinfo.  We don't know yet,
       so stash the preceding component. */
    port = start = url;
    delim = find_delim(&url, 1);
  }

  if (delim == '@') {
    /* What might have been the host and port were in fact
       the username and password */
    parsed->user = host;
    if (port) {
      parsed->password = port;
    }

    port = NULL;
    host = start = url;
    delim = find_delim(&url, 1);
  }

  if (delim == '[') {
    /* IPv6 address.  The bracket should be the first
       character in the host. */
    if (host != start || *host != 0) {
      goto out;
    }

    start = url;
    delim = find_delim(&url, 0);

    if (delim != ']') {
      goto out;
    }

    parsed->host = start;
    start = url;
    delim = find_delim(&url, 1);

    /* Closing bracket should be the last character in the
       host. */
    if (*start != 0) {
      goto out;
    }
  } else {
    /* If we haven't seen the host yet, this is it. */
    if (*host != 0) {
      parsed->host = host;
    }
  }

  if (delim == ':') {
    port = start = url;
    delim = find_delim(&url, 1);
  }

  if (port) {
    char *end;
    long portnum = strtol(port, &end, 10);

    if (port == end || *end != 0 || portnum < 0 || portnum > 65535) {
      goto out;
    }

    parsed->port = portnum;
  }

  if (delim == '/') {
    start = url;
    delim = find_delim(&url, 1);

    if (delim != 0) {
      goto out;
    }

    parsed->vhost = start;
    res = AMQP_STATUS_OK;
  } else if (delim == 0) {
    res = AMQP_STATUS_OK;
  }

/* Any other delimiter is bad, and we will return AMQP_STATUS_BAD_AMQP_URL. */

out:
  return res;
}
| 5,304 | 22.896396 | 78 | c |
null | ceph-main/src/test/rgw/bench_rgw_ratelimit.cc | #include "rgw_ratelimit.h"
#include "rgw_common.h"
#include "random"
#include <cstdlib>
#include <string>
#include <boost/asio.hpp>
#include <spawn/spawn.hpp>
#include <boost/asio/steady_timer.hpp>
#include <chrono>
#include <mutex>
#include <unordered_map>
#include <atomic>
#include <boost/program_options.hpp>
using Executor = boost::asio::io_context::executor_type;
std::uniform_int_distribution<unsigned int> dist(0, 1);
std::random_device rd;
std::default_random_engine rng{rd()};
std::uniform_int_distribution<unsigned long long> disttenant(2, 100000000);
struct client_info {
uint64_t accepted = 0;
uint64_t rejected = 0;
uint64_t ops = 0;
uint64_t bytes = 0;
uint64_t num_retries = 0;
std::string tenant;
};
struct parameters {
int64_t req_size = 1;
int64_t backend_bandwidth = 1;
size_t wait_between_retries_ms = 1;
int num_clients = 1;
};
std::shared_ptr<std::vector<client_info>> ds = std::make_shared<std::vector<client_info>>(std::vector<client_info>());
std::string method[2] = {"PUT", "GET"};
// Simulate transferring a request of params.req_size bytes through the
// rate limiter, charging bandwidth in RGW-typical 4MiB chunks and
// sleeping one simulated second whenever the configured backend
// bandwidth for that second is exhausted.
//
// Fixes over the previous version:
//  - the fast path (req_size <= backend_bandwidth) never added the
//    final partial chunk to it.bytes, under-counting per-client byte
//    stats relative to the slow path, which did;
//  - the repeated 4*1024*1024 literal is named once;
//  - an unused DoutPrefix local was removed.
void simulate_transfer(client_info& it, const RGWRateLimitInfo* info, std::shared_ptr<RateLimiter> ratelimit, const parameters& params, spawn::yield_context& yield, boost::asio::io_context& ioctx)
{
  boost::asio::steady_timer timer(ioctx);
  int rw = 0; // will always use PUT method as there is no difference
  std::string methodop(method[rw]);
  auto req_size = params.req_size;
  auto backend_bandwidth = params.backend_bandwidth;
  // 4MiB is the RGW default chunk size in a typical environment
  constexpr int64_t chunk = 4 * 1024 * 1024;
  while (req_size) {
    if (req_size <= backend_bandwidth) {
      // whole request fits into one second of backend bandwidth: no waits
      while (req_size > 0) {
        if (req_size > chunk) {
          ratelimit->decrease_bytes(methodop.c_str(), it.tenant, chunk, info);
          it.bytes += chunk;
          req_size -= chunk;
        } else {
          ratelimit->decrease_bytes(methodop.c_str(), it.tenant, req_size, info);
          it.bytes += req_size; // fix: tail chunk was previously not counted
          req_size = 0;
        }
      }
    } else {
      // request exceeds one second of backend bandwidth: sleep whenever
      // the per-second budget is used up
      int64_t total_bytes = 0;
      while (req_size > 0) {
        if (req_size >= chunk) {
          if (total_bytes >= backend_bandwidth) {
            timer.expires_after(std::chrono::seconds(1));
            timer.async_wait(yield);
            total_bytes = 0;
          }
          ratelimit->decrease_bytes(methodop.c_str(), it.tenant, chunk, info);
          it.bytes += chunk;
          req_size -= chunk;
          total_bytes += chunk;
        } else {
          ratelimit->decrease_bytes(methodop.c_str(), it.tenant, req_size, info);
          it.bytes += req_size;
          total_bytes += req_size;
          req_size = 0;
        }
      }
    }
  }
}
// Simulate a single request attempt against the ops rate limiter.
// Returns true when the request was rate limited (caller should retry).
//
// Fixes over the previous version:
//  - a boost::asio::io_context was constructed and discarded on every
//    call (never used) — removed, along with an unused DoutPrefix;
//  - it.ops previously counted only rejected attempts; it now counts
//    every attempt so ops == accepted + rejected.
bool simulate_request(client_info& it, const RGWRateLimitInfo& info, std::shared_ptr<RateLimiter> ratelimit)
{
  auto time = ceph::coarse_real_clock::now();
  int rw = 0; // will always use PUT method as there is no difference
  std::string methodop = method[rw];
  it.ops++;
  bool to_fail = ratelimit->should_rate_limit(methodop.c_str(), it.tenant, time, &info);
  if (to_fail) {
    it.rejected++;
    return true;
  }
  it.accepted++;
  return false;
}
// Coroutine body for one simulated client: issue a request, retry
// (optionally with a backoff sleep) while rate limited, then simulate
// the data transfer; loop forever until to_run is cleared by main().
void simulate_client(client_info& it, const RGWRateLimitInfo& info, std::shared_ptr<RateLimiter> ratelimit, const parameters& params, spawn::yield_context& ctx, bool& to_run, boost::asio::io_context& ioctx)
{
  for (;;)
  {
    bool to_retry = simulate_request(it, info, ratelimit);
    while (to_retry && to_run)
    {
      if (params.wait_between_retries_ms)
      {
        // yield to the io_context instead of blocking the thread
        boost::asio::steady_timer timer(ioctx);
        timer.expires_after(std::chrono::milliseconds(params.wait_between_retries_ms));
        timer.async_wait(ctx);
      }
      to_retry = simulate_request(it, info, ratelimit);
    }
    if (!to_run)
    {
      return;
    }
    simulate_transfer(it, &info, ratelimit, params, ctx, ioctx);
  }
}
// Spawn params.num_clients coroutine clients for one tenant.  Each
// client's stats live in the shared vector *ds; the coroutine captures
// the element's index (not a reference) and re-resolves it at run time.
// NOTE(review): this still assumes ds never reallocates while clients
// run — main() reserves the full capacity up front; confirm if the
// reserve is ever removed.
void simulate_clients(boost::asio::io_context& context, std::string tenant, const RGWRateLimitInfo& info, std::shared_ptr<RateLimiter> ratelimit, const parameters& params, bool& to_run)
{
  for (int i = 0; i < params.num_clients; i++)
  {
    auto& it = ds->emplace_back(client_info());
    it.tenant = tenant;
    int x = ds->size() - 1;
    spawn::spawn(context,
      [&to_run ,x, ratelimit, info, params, &context](spawn::yield_context ctx)
      {
        auto& it = ds.get()->operator[](x);
        simulate_client(it, info, ratelimit, params, ctx, to_run, context);
      });
  }
}
// Benchmark driver: parses CLI options, spins up a thread pool running
// one io_context, spawns simulated clients per rate-limit tenant, lets
// them run for `runtime` seconds, then stops everything and prints
// accepted/rejected totals per tenant.
int main(int argc, char **argv)
{
  int num_ratelimit_classes = 1;
  int64_t ops_limit = 1;
  int64_t bw_limit = 1;
  int thread_count = 512;
  int runtime = 60;
  parameters params;
  try
  {
    using namespace boost::program_options;
    options_description desc{"Options"};
    desc.add_options()
      ("help,h", "Help screen")
      ("num_ratelimit_classes", value<int>()->default_value(1), "how many ratelimit tenants")
      ("request_size", value<int64_t>()->default_value(1), "what is the request size we are testing if 0, it will be randomized")
      ("backend_bandwidth", value<int64_t>()->default_value(1), "what is the backend bandwidth, so there will be wait between decrease_bytes")
      ("wait_between_retries_ms", value<size_t>()->default_value(1), "time in seconds to wait between retries")
      ("ops_limit", value<int64_t>()->default_value(1), "ops limit for the tenants")
      ("bw_limit", value<int64_t>()->default_value(1), "bytes per second limit")
      ("threads", value<int>()->default_value(512), "server's threads count")
      ("runtime", value<int>()->default_value(60), "For how many seconds the test will run")
      ("num_clients", value<int>()->default_value(1), "number of clients per tenant to run");
    variables_map vm;
    store(parse_command_line(argc, argv, desc), vm);
    if (vm.count("help")) {
      std::cout << desc << std::endl;
      return EXIT_SUCCESS;
    }
    num_ratelimit_classes = vm["num_ratelimit_classes"].as<int>();
    params.req_size = vm["request_size"].as<int64_t>();
    params.backend_bandwidth = vm["backend_bandwidth"].as<int64_t>();
    params.wait_between_retries_ms = vm["wait_between_retries_ms"].as<size_t>();
    params.num_clients = vm["num_clients"].as<int>();
    ops_limit = vm["ops_limit"].as<int64_t>();
    bw_limit = vm["bw_limit"].as<int64_t>();
    thread_count = vm["threads"].as<int>();
    runtime = vm["runtime"].as<int>();
  }
  catch (const boost::program_options::error &ex)
  {
    std::cerr << ex.what() << std::endl;
    return EXIT_FAILURE;
  }
  // identical read/write limits for every simulated tenant
  RGWRateLimitInfo info;
  info.enabled = true;
  info.max_read_bytes = bw_limit;
  info.max_write_bytes = bw_limit;
  info.max_read_ops = ops_limit;
  info.max_write_ops = ops_limit;
  std::unique_ptr<CephContext> cct = std::make_unique<CephContext>(CEPH_ENTITY_TYPE_ANY);
  if (!g_ceph_context)
  {
    g_ceph_context = cct.get();
  }
  std::shared_ptr<ActiveRateLimiter> ratelimit(new ActiveRateLimiter(g_ceph_context));
  ratelimit->start();
  std::vector<std::thread> threads;
  using Executor = boost::asio::io_context::executor_type;
  // work guard keeps the io_context alive while coroutines are idle
  std::optional<boost::asio::executor_work_guard<Executor>> work;
  threads.reserve(thread_count);
  boost::asio::io_context context;
  boost::asio::io_context stopme;
  work.emplace(boost::asio::make_work_guard(context));
  // server execution
  for (int i = 0; i < thread_count; i++) {
    threads.emplace_back([&]() noexcept {
      context.run();
    });
  }
  //client execution
  bool to_run = true;
  // reserve up front so coroutines indexing into *ds never see a
  // reallocation (see simulate_clients)
  ds->reserve(num_ratelimit_classes*params.num_clients);
  for (int i = 0; i < num_ratelimit_classes; i++)
  {
    unsigned long long tenantid = disttenant(rng);
    std::string tenantuser = "uuser" + std::to_string(tenantid);
    simulate_clients(context, tenantuser, info, ratelimit->get_active(), params, to_run);
  }
  // block the main thread for the configured runtime, then shut down
  boost::asio::steady_timer timer_runtime(stopme);
  timer_runtime.expires_after(std::chrono::seconds(runtime));
  timer_runtime.wait();
  work.reset();
  context.stop();
  to_run = false;
  for (auto& i : threads)
  {
    i.join();
  }
  // aggregate per-client stats by tenant name
  std::unordered_map<std::string,client_info> metrics_by_tenant;
  for(auto& i : *ds.get())
  {
    auto it = metrics_by_tenant.emplace(i.tenant, client_info()).first;
    std::cout << i.accepted << std::endl;
    it->second.accepted += i.accepted;
    it->second.rejected += i.rejected;
  }
  // TODO sum the results by tenant
  for(auto& i : metrics_by_tenant)
  {
    std::cout << "Tenant is: " << i.first << std::endl;
    std::cout << "Simulator finished accepted sum : " << i.second.accepted << std::endl;
    std::cout << "Simulator finished rejected sum : " << i.second.rejected << std::endl;
  }
  return 0;
}
null | ceph-main/src/test/rgw/bench_rgw_ratelimit_gc.cc | #include "rgw_ratelimit.h"
#include "rgw_common.h"
#include "random"
#include <cstdlib>
#include <string>
#include <chrono>
#include <boost/program_options.hpp>
// Micro-benchmark for the rate limiter's garbage collection: creates
// one rate-limit entry per tenant (limits all zero, i.e. unlimited)
// and exits, exercising entry creation/GC paths only.
int main(int argc, char **argv)
{
  int num_qos_classes = 1;
  try
  {
    using namespace boost::program_options;
    options_description desc{"Options"};
    desc.add_options()
      ("help,h", "Help screen")
      ("num_qos_classes", value<int>()->default_value(1), "how many qos tenants");
    variables_map vm;
    store(parse_command_line(argc, argv, desc), vm);
    if (vm.count("help")) {
      std::cout << desc << std::endl;
      return EXIT_SUCCESS;
    }
    num_qos_classes = vm["num_qos_classes"].as<int>();
  }
  catch (const boost::program_options::error &ex)
  {
    std::cerr << ex.what() << std::endl;
    return EXIT_FAILURE;
  }
  // zero limits: should_rate_limit never actually limits, but each call
  // still materializes a per-tenant entry
  RGWRateLimitInfo info;
  info.enabled = true;
  info.max_read_bytes = 0;
  info.max_write_bytes = 0;
  info.max_read_ops = 0;
  info.max_write_ops = 0;
  std::unique_ptr<CephContext> cct = std::make_unique<CephContext>(CEPH_ENTITY_TYPE_ANY);
  if (!g_ceph_context)
  {
    g_ceph_context = cct.get();
  }
  std::shared_ptr<ActiveRateLimiter> ratelimit(new ActiveRateLimiter(g_ceph_context));
  ratelimit->start();
  auto dout = DoutPrefix(g_ceph_context, ceph_subsys_rgw, "rate limiter: ");
  for(int i = 0; i < num_qos_classes; i++)
  {
    std::string tenant = "uuser" + std::to_string(i);
    auto time = ceph::coarse_real_clock::now();
    ratelimit->get_active()->should_rate_limit("PUT", tenant, time, &info);
  }
}
| 1,655 | 30.245283 | 91 | cc |
null | ceph-main/src/test/rgw/kafka_stub.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <librdkafka/rdkafka.h>
// Stub implementations of the librdkafka C API, used only to satisfy
// the linker for unit tests that never talk to a real Kafka broker.
// Every function is a no-op returning an empty/default value.
const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt) {
  return "";
}

rd_kafka_resp_err_t rd_kafka_last_error() {
  return rd_kafka_resp_err_t();
}

const char *rd_kafka_err2str(rd_kafka_resp_err_t err) {
  return "";
}

rd_kafka_conf_t *rd_kafka_conf_new() {
  return nullptr;
}

rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf,
                  const char *name,
                  const char *value,
                  char *errstr, size_t errstr_size) {
  return rd_kafka_conf_res_t();
}

void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf,
                                  void (*dr_msg_cb) (rd_kafka_t *rk,
                                                     const rd_kafka_message_t *
                                                     rkmessage,
                                                     void *opaque)) {}

void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque) {}

rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf,
              char *errstr, size_t errstr_size) {
  return nullptr;
}

void rd_kafka_conf_destroy(rd_kafka_conf_t *conf) {}

rd_kafka_resp_err_t rd_kafka_flush (rd_kafka_t *rk, int timeout_ms) {
  return rd_kafka_resp_err_t();
}

void rd_kafka_destroy(rd_kafka_t *rk) {}

rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic,
                    rd_kafka_topic_conf_t *conf) {
  return nullptr;
}

int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition,
          int msgflags,
          void *payload, size_t len,
          const void *key, size_t keylen,
          void *msg_opaque) {
  return 0;
}

int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms) {
  return 0;
}

void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt) {}
| 1,885 | 26.333333 | 79 | cc |
null | ceph-main/src/test/rgw/rgw_cr_test.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include <cerrno>
#include <iostream>
#include <sstream>
#include <string>
#include <fmt/format.h>
#include "include/rados/librados.hpp"
#include "rgw_tools.h"
#include "common/common_init.h"
#include "common/config.h"
#include "common/ceph_argparse.h"
#include "common/debug.h"
#include "rgw_coroutine.h"
#include "rgw_cr_rados.h"
#include "rgw_sal.h"
#include "rgw_sal_rados.h"
#include "gtest/gtest.h"
using namespace std::literals;
static constexpr auto dout_subsys = ceph_subsys_rgw;
// the rados store under test; initialized once before RUN_ALL_TESTS
static rgw::sal::RadosStore* store = nullptr;

// Process-wide DoutPrefixProvider for logging from test code.
static const DoutPrefixProvider* dpp() {
  struct GlobalPrefix : public DoutPrefixProvider {
    CephContext *get_cct() const override { return g_ceph_context; }
    unsigned get_subsys() const override { return dout_subsys; }
    std::ostream& gen_prefix(std::ostream& out) const override { return out; }
  };
  static GlobalPrefix global_dpp;
  return &global_dpp;
}
class StoreDestructor {
rgw::sal::Driver* driver;
public:
explicit StoreDestructor(rgw::sal::RadosStore* _s) : driver(_s) {}
~StoreDestructor() {
DriverManager::close_storage(store);
}
};
// RAII temporary rados pool with a unique name; created on
// construction, deleted on destruction.  Converts implicitly to
// rgw_pool and to an open librados::IoCtx for convenience in tests.
struct TempPool {
  inline static uint64_t num = 0;
  // unique-enough name: timestamp-pid-counter
  std::string name =
    fmt::format("{}-{}-{}", ::time(nullptr), ::getpid(),num++);

  TempPool() {
    auto r = store->getRados()->get_rados_handle()->pool_create(name.c_str());
    assert(r == 0);
  }

  ~TempPool() {
    auto r = store->getRados()->get_rados_handle()->pool_delete(name.c_str());
    assert(r == 0);
  }

  operator rgw_pool() {
    return { name };
  }

  operator librados::IoCtx() {
    librados::IoCtx ioctx;
    auto r = store->getRados()->get_rados_handle()->ioctx_create(name.c_str(),
                                                                 ioctx);
    assert(r == 0);
    return ioctx;
  }
};
// Run a single coroutine to completion on a fresh coroutine manager
// and return its result code.
int run(RGWCoroutine* cr) {
  RGWCoroutinesManager cr_mgr{store->ctx(),
                              store->getRados()->get_cr_registry()};
  std::list<RGWCoroutinesStack *> stacks;
  auto stack = new RGWCoroutinesStack(store->ctx(), &cr_mgr);
  stack->call(cr);
  stacks.push_back(stack);
  return cr_mgr.run(dpp(), stacks);
}
// Reading attrs with raw_attrs=true returns every xattr on the object.
TEST(ReadAttrs, Unfiltered) {
  TempPool pool;
  ceph::bufferlist bl;
  auto dummy = "Dummy attribute value"s;
  encode(dummy, bl);
  const std::map<std::string, ceph::bufferlist> ref_attrs{
    { "foo"s, bl }, { "bar"s, bl }, { "baz"s, bl }
  };
  auto oid = "object"s;
  {
    librados::IoCtx ioctx(pool);
    librados::ObjectWriteOperation op;
    op.setxattr("foo", bl);
    op.setxattr("bar", bl);
    op.setxattr("baz", bl);
    auto r = ioctx.operate(oid, &op);
    ASSERT_EQ(0, r);
  }
  std::map<std::string, ceph::bufferlist> attrs;
  auto r = run(new RGWSimpleRadosReadAttrsCR(dpp(), store, {pool, oid}, &attrs,
                                             true));
  ASSERT_EQ(0, r);
  ASSERT_EQ(ref_attrs, attrs);
}

// Reading attrs with raw_attrs=false keeps only RGW_ATTR_PREFIX xattrs.
TEST(ReadAttrs, Filtered) {
  TempPool pool;
  ceph::bufferlist bl;
  auto dummy = "Dummy attribute value"s;
  encode(dummy, bl);
  const std::map<std::string, ceph::bufferlist> ref_attrs{
    { RGW_ATTR_PREFIX "foo"s, bl },
    { RGW_ATTR_PREFIX "bar"s, bl },
    { RGW_ATTR_PREFIX "baz"s, bl }
  };
  auto oid = "object"s;
  {
    librados::IoCtx ioctx(pool);
    librados::ObjectWriteOperation op;
    op.setxattr(RGW_ATTR_PREFIX "foo", bl);
    op.setxattr(RGW_ATTR_PREFIX "bar", bl);
    op.setxattr(RGW_ATTR_PREFIX "baz", bl);
    // this one lacks the prefix and must be filtered out
    op.setxattr("oneOfTheseThingsIsNotLikeTheOthers", bl);
    auto r = ioctx.operate(oid, &op);
    ASSERT_EQ(0, r);
  }
  std::map<std::string, ceph::bufferlist> attrs;
  auto r = run(new RGWSimpleRadosReadAttrsCR(dpp(), store, {pool, oid}, &attrs,
                                             false));
  ASSERT_EQ(0, r);
  ASSERT_EQ(ref_attrs, attrs);
}

// Reading a nonexistent object reports -ENOENT.
TEST(Read, Dne) {
  TempPool pool;
  std::string result;
  auto r = run(new RGWSimpleRadosReadCR(dpp(), store, {pool, "doesnotexist"},
                                        &result, false));
  ASSERT_EQ(-ENOENT, r);
}

// A raw write is readable back through the read coroutine.
TEST(Read, Read) {
  TempPool pool;
  auto data = "I am test data!"sv;
  auto oid = "object"s;
  {
    bufferlist bl;
    encode(data, bl);
    librados::IoCtx ioctx(pool);
    auto r = ioctx.write_full(oid, bl);
    ASSERT_EQ(0, r);
  }
  std::string result;
  auto r = run(new RGWSimpleRadosReadCR(dpp(), store, {pool, oid}, &result,
                                        false));
  ASSERT_EQ(0, r);
  ASSERT_EQ(result, data);
}
// Version-tracked reads: a read with a stale objv fails with -ECANCELED
// until the tracker is cleared, after which the new data and version
// are observed.
TEST(Read, ReadVersion) {
  TempPool pool;
  auto data = "I am test data!"sv;
  auto oid = "object"s;
  RGWObjVersionTracker wobjv;
  {
    bufferlist bl;
    encode(data, bl);
    librados::IoCtx ioctx(pool);
    librados::ObjectWriteOperation op;
    wobjv.generate_new_write_ver(store->ctx());
    wobjv.prepare_op_for_write(&op);
    op.write_full(bl);
    auto r = ioctx.operate(oid, &op);
    EXPECT_EQ(0, r);
    wobjv.apply_write();
  }
  RGWObjVersionTracker robjv;
  std::string result;
  // first read learns the current version
  auto r = run(new RGWSimpleRadosReadCR(dpp(), store, {pool, oid}, &result,
                                        false, &robjv));
  ASSERT_EQ(0, r);
  ASSERT_EQ(result, data);
  // overwrite the object under a new version
  data = "I am NEW test data!";
  {
    bufferlist bl;
    encode(data, bl);
    librados::IoCtx ioctx(pool);
    librados::ObjectWriteOperation op;
    wobjv.generate_new_write_ver(store->ctx());
    wobjv.prepare_op_for_write(&op);
    op.write_full(bl);
    r = ioctx.operate(oid, &op);
    EXPECT_EQ(0, r);
    wobjv.apply_write();
  }
  result.clear();
  // reading with the now-stale tracked version must be rejected
  r = run(new RGWSimpleRadosReadCR(dpp(), store, {pool, oid}, &result, false,
                                   &robjv));
  ASSERT_EQ(-ECANCELED, r);
  ASSERT_TRUE(result.empty());
  // clearing the tracker allows the read and refreshes the version
  robjv.clear();
  r = run(new RGWSimpleRadosReadCR(dpp(), store, {pool, oid}, &result, false,
                                   &robjv));
  ASSERT_EQ(0, r);
  ASSERT_EQ(result, data);
  ASSERT_EQ(wobjv.read_version, robjv.read_version);
}
// An exclusive write against an existing object fails with -EEXIST.
TEST(Write, Exclusive) {
  TempPool pool;
  auto oid = "object"s;
  {
    bufferlist bl;
    bl.append("I'm some data!"s);
    librados::IoCtx ioctx(pool);
    auto r = ioctx.write_full(oid, bl);
    ASSERT_EQ(0, r);
  }
  auto r = run(new RGWSimpleRadosWriteCR(dpp(), store, {pool, oid},
                                         "I am some DIFFERENT data!"s, nullptr,
                                         true));
  ASSERT_EQ(-EEXIST, r);
}
// Write a string through the write coroutine, then read the raw object
// back via librados and verify it decodes to the original data.
TEST(Write, Write) {
  TempPool pool;
  auto oid = "object"s;
  auto data = "I'm some data!"s;
  auto r = run(new RGWSimpleRadosWriteCR(dpp(), store, {pool, oid},
                                         data, nullptr, true));
  ASSERT_EQ(0, r);
  bufferlist bl;
  librados::IoCtx ioctx(pool);
  // read() returns the number of bytes read on success or a negative
  // error code. The original discarded this result and re-asserted the
  // stale write return value, so a failed read went unnoticed.
  r = ioctx.read(oid, bl, 0, 0);
  ASSERT_GE(r, 0);
  std::string result;
  decode(result, bl);
  ASSERT_EQ(data, result);
}
// Version-tracker conflict on write: a writer holding a stale version
// must fail with -ECANCELED after another writer bumps the version.
TEST(Write, ObjV) {
  TempPool pool;
  auto oid = "object"s;
  RGWObjVersionTracker objv;
  objv.generate_new_write_ver(store->ctx());
  auto r = run(new RGWSimpleRadosWriteCR(dpp(), store, {pool, oid},
                                         "I'm some data!"s, &objv,
                                         true));
  // The original discarded this result; assert the setup write worked.
  ASSERT_EQ(0, r);
  RGWObjVersionTracker interfering_objv(objv);
  // A second writer starting from the same version succeeds and bumps
  // the stored version.
  r = run(new RGWSimpleRadosWriteCR(dpp(), store, {pool, oid},
                                    "I'm some newer, better data!"s,
                                    &interfering_objv, false));
  ASSERT_EQ(0, r);
  // The first writer's tracker is now stale, so its write is rejected.
  r = run(new RGWSimpleRadosWriteCR(dpp(), store, {pool, oid},
                                    "I'm some treacherous, obsolete data!"s,
                                    &objv, false));
  ASSERT_EQ(-ECANCELED, r);
}
// Write a set of xattrs through the coroutine and verify they land on
// the object exactly as given.
TEST(WriteAttrs, Attrs) {
  TempPool pool;
  const auto oid = "object"s;
  bufferlist val;
  val.append("I'm some data.");
  std::map<std::string, bufferlist> expected;
  for (const auto* key : {"foo", "bar", "baz"}) {
    expected.emplace(key, val);
  }
  auto ret = run(new RGWSimpleRadosWriteAttrsCR(dpp(), store, {pool, oid},
                                                expected, nullptr, true));
  ASSERT_EQ(0, ret);
  librados::IoCtx ioctx(pool);
  std::map<std::string, bufferlist> actual;
  ret = ioctx.getxattrs(oid, actual);
  ASSERT_EQ(0, ret);
  ASSERT_EQ(expected, actual);
}
// Attributes whose values are empty bufferlists are skipped entirely,
// so the object ends up with no xattrs at all.
TEST(WriteAttrs, Empty) {
  TempPool pool;
  const auto oid = "object"s;
  bufferlist blank;
  std::map<std::string, bufferlist> requested;
  for (const auto* key : {"foo", "bar", "baz"}) {
    requested.emplace(key, blank);
  }
  // With an empty bufferlist all attributes should be skipped.
  auto ret = run(new RGWSimpleRadosWriteAttrsCR(dpp(), store, {pool, oid},
                                                requested, nullptr, true));
  ASSERT_EQ(0, ret);
  librados::IoCtx ioctx(pool);
  std::map<std::string, bufferlist> actual;
  ret = ioctx.getxattrs(oid, actual);
  ASSERT_EQ(0, ret);
  ASSERT_TRUE(actual.empty());
}
// Test driver: brings up a RadosStore against a running cluster, creates
// the test pool, and runs all registered gtest cases.
int main(int argc, const char **argv)
{
  auto args = argv_to_vec(argc, argv);
  auto cct = rgw_global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
                             CODE_ENVIRONMENT_UTILITY, 0);
  // for region -> zonegroup conversion (must happen before common_init_finish())
  if (!g_conf()->rgw_region.empty() && g_conf()->rgw_zonegroup.empty()) {
    g_conf().set_val_or_die("rgw_zonegroup", g_conf()->rgw_region.c_str());
  }
  /* common_init_finish needs to be called after g_conf().set_val() */
  common_init_finish(g_ceph_context);
  DriverManager::Config cfg = DriverManager::get_config(true, g_ceph_context);
  // NOTE(review): the boolean flags appear to disable background
  // machinery (gc/lc/quota/sync threads etc.) not needed by these
  // tests — confirm against DriverManager::get_storage's signature.
  store = static_cast<rgw::sal::RadosStore*>(
    DriverManager::get_storage(dpp(),
                               g_ceph_context,
                               cfg,
                               false,
                               false,
                               false,
                               false,
                               false,
                               true, null_yield,
                               false));
  if (!store) {
    std::cerr << "couldn't init storage provider" << std::endl;
    return 5; //EIO
  }
  // Ensures the store is shut down no matter how RUN_ALL_TESTS exits.
  StoreDestructor store_destructor(static_cast<rgw::sal::RadosStore*>(store));
  std::string pool{"rgw_cr_test"};
  // Best-effort: result ignored, presumably because the pool may
  // already exist from a previous run — TODO confirm.
  store->getRados()->create_pool(dpp(), pool);
  testing::InitGoogleTest();
  return RUN_ALL_TESTS();
}
| 9,298 | 25.875723 | 81 | cc |
null | ceph-main/src/test/rgw/run-d4n-unit-tests.sh | #!/bin/bash
# If a redis-server is already running, clear all of its keys so the
# tests below start from a clean slate.
ps cax | grep redis-server > /dev/null
if [ $? -eq 0 ];
then
  echo "Redis process found; flushing!"
  redis-cli FLUSHALL
fi
# Start a fresh daemonized server for the D4N unit tests.
redis-server --daemonize yes
echo "-----------Redis Server Started-----------"
# Run each test binary, flushing Redis between them so state from one
# binary cannot leak into the next.
../../../build/bin/ceph_test_rgw_d4n_directory
printf "\n-----------Directory Test Executed-----------\n"
redis-cli FLUSHALL
echo "-----------Redis Server Flushed-----------"
../../../build/bin/ceph_test_rgw_d4n_filter
printf "\n-----------Filter Test Executed-----------\n"
redis-cli FLUSHALL
echo "-----------Redis Server Flushed-----------"
# Find the process listening on the default Redis port and stop it.
REDIS_PID=$(lsof -i4TCP:6379 -sTCP:LISTEN -t)
kill $REDIS_PID
echo "-----------Redis Server Stopped-----------"
| 671 | 31 | 58 | sh |
null | ceph-main/src/test/rgw/test-ceph-diff-sorted.sh | #!/usr/bin/env bash
# set -e -x

# Exercises the ceph-diff-sorted utility: exit 0 on identical inputs,
# 1 on differing sorted inputs, 2 on bad usage, 3 on a missing file,
# and 4 on malformed (blank-line or unsorted) input.

. "`dirname $0`/test-rgw-common.sh"

temp_prefix="/tmp/`basename $0`-$$"

short=${temp_prefix}-short
short_w_blank=${temp_prefix}-short-w-blank
long=${temp_prefix}-long
unsorted=${temp_prefix}-unsorted
empty=${temp_prefix}-empty
fake=${temp_prefix}-fake

out1=${temp_prefix}-out1
out2=${temp_prefix}-out2

cat >"${short}" <<EOF
bear
fox
hippo
zebra
EOF

# NOTE: the blank line between "fox" and "hippo" is the point of this
# fixture — the blank-line tests below expect exit code 4 for it.
cat >"${short_w_blank}" <<EOF
bear
fox

hippo
zebra
EOF

cat >"${long}" <<EOF
badger
cuttlefish
fox
llama
octopus
penguine
seal
squid
whale
yak
zebra
EOF

cat >"${unsorted}" <<EOF
bear
hippo
fox
zebra
EOF

touch $empty

#### testing ####

# test perfect match
ceph-diff-sorted $long $long >"${out1}"
$assert $? -eq 0
$assert $(cat $out1 | wc -l) -eq 0

# test non-match; use /bin/diff to verify
/bin/diff $short $long >"${out2}"
ceph-diff-sorted $short $long >"${out1}"
$assert $? -eq 1
$assert $(cat $out1 | grep '^<' | wc -l) -eq $(cat $out2 | grep '^<' | wc -l)
$assert $(cat $out1 | grep '^>' | wc -l) -eq $(cat $out2 | grep '^>' | wc -l)

/bin/diff $long $short >"${out2}"
ceph-diff-sorted $long $short >"${out1}"
$assert $? -eq 1
$assert $(cat $out1 | grep '^<' | wc -l) -eq $(cat $out2 | grep '^<' | wc -l)
$assert $(cat $out1 | grep '^>' | wc -l) -eq $(cat $out2 | grep '^>' | wc -l)

# test w blank line
ceph-diff-sorted $short $short_w_blank 2>/dev/null
$assert $? -eq 4
ceph-diff-sorted $short_w_blank $short 2>/dev/null
$assert $? -eq 4

# test unsorted input
ceph-diff-sorted $short $unsorted >"${out2}" 2>/dev/null
$assert $? -eq 4
ceph-diff-sorted $unsorted $short >"${out2}" 2>/dev/null
$assert $? -eq 4

# test bad # of args
ceph-diff-sorted 2>/dev/null
$assert $? -eq 2
ceph-diff-sorted $short 2>/dev/null
$assert $? -eq 2

# test bad file path
ceph-diff-sorted $short $fake 2>/dev/null
$assert $? -eq 3
ceph-diff-sorted $fake $short 2>/dev/null
$assert $? -eq 3

#### clean-up ####
/bin/rm -f $short $short_w_blank $long $unsorted $empty $out1 $out2
| 1,963 | 17.018349 | 77 | sh |
null | ceph-main/src/test/rgw/test-rgw-call.sh | #!/usr/bin/env bash
# Dispatch helper: sources the shared multisite test libraries, then
# invokes the shell function named by the command-line arguments, e.g.
#   test-rgw-call.sh init_first_zone c1 earth zg1 ...
. "`dirname $0`/test-rgw-common.sh"
. "`dirname $0`/test-rgw-meta-sync.sh"
# Do not use eval here. We have eval in test-rgw-common.sh:x(), so adding
# one here creates a double-eval situation. Passing arguments with spaces
# becomes impossible when double-eval strips escaping and quotes.
$@
| 314 | 30.5 | 73 | sh |
null | ceph-main/src/test/rgw/test-rgw-common.sh | #!/usr/bin/env bash
rgw_flags="--debug-rgw=20 --debug-ms=1"
function _assert {
src=$1; shift
lineno=$1; shift
[ "$@" ] || echo "$src: $lineno: assert failed: $@" || exit 1
}
assert="eval _assert \$BASH_SOURCE \$LINENO"
function var_to_python_json_index {
echo "['$1']" | sed "s/\./'\]\['/g"
}
function json_extract {
var=""
[ "$1" != "" ] && var=$(var_to_python_json_index $1)
shift
python3 - <<END
import json
s='$@'
data = json.loads(s)
print(data$var)
END
}
function python_array_len {
python3 - <<END
arr=$@
print(len(arr))
END
}
function project_python_array_field {
var=$(var_to_python_json_index $1)
shift
python3 - <<END
arr=$@
s='( '
for x in arr:
s += '"' + str(x$var) + '" '
s += ')'
print(s)
END
}
x() {
# echo "x " "$@" >&2
eval "$@"
}
script_dir=`dirname $0`
root_path=`(cd $script_dir/../..; pwd)`
mstart=$root_path/mstart.sh
mstop=$root_path/mstop.sh
mrun=$root_path/mrun
mrgw=$root_path/mrgw.sh
url=http://localhost
function start_ceph_cluster {
[ $# -ne 1 ] && echo "start_ceph_cluster() needs 1 param" && exit 1
echo "$mstart $1"
}
function rgw_admin {
[ $# -lt 1 ] && echo "rgw_admin() needs 1 param" && exit 1
echo "$mrun $1 radosgw-admin"
}
function rgw {
[ $# -lt 2 ] && echo "rgw() needs at least 2 params" && exit 1
name=$1
port=$2
ssl_port=0 #ssl port not used
shift 2
echo "$mrgw $name $port $ssl_port $rgw_flags $@"
}
function init_first_zone {
[ $# -ne 7 ] && echo "init_first_zone() needs 7 params" && exit 1
cid=$1
realm=$2
zg=$3
zone=$4
endpoints=$5
access_key=$6
secret=$7
# initialize realm
x $(rgw_admin $cid) realm create --rgw-realm=$realm
# create zonegroup, zone
x $(rgw_admin $cid) zonegroup create --rgw-zonegroup=$zg --master --default
x $(rgw_admin $cid) zone create --rgw-zonegroup=$zg --rgw-zone=$zone --access-key=${access_key} --secret=${secret} --endpoints=$endpoints --default
x $(rgw_admin $cid) user create --uid=zone.user --display-name=ZoneUser --access-key=${access_key} --secret=${secret} --system
x $(rgw_admin $cid) period update --commit
}
function init_zone_in_existing_zg {
[ $# -ne 8 ] && echo "init_zone_in_existing_zg() needs 8 params" && exit 1
cid=$1
realm=$2
zg=$3
zone=$4
master_zg_zone1_port=$5
endpoints=$6
access_key=$7
secret=$8
x $(rgw_admin $cid) realm pull --url=$url:$master_zg_zone1_port --access-key=${access_key} --secret=${secret} --default
x $(rgw_admin $cid) zonegroup default --rgw-zonegroup=$zg
x $(rgw_admin $cid) zone create --rgw-zonegroup=$zg --rgw-zone=$zone --access-key=${access_key} --secret=${secret} --endpoints=$endpoints
x $(rgw_admin $cid) period update --commit
}
function init_first_zone_in_slave_zg {
[ $# -ne 8 ] && echo "init_first_zone_in_slave_zg() needs 8 params" && exit 1
cid=$1
realm=$2
zg=$3
zone=$4
master_zg_zone1_port=$5
endpoints=$6
access_key=$7
secret=$8
# create zonegroup, zone
x $(rgw_admin $cid) realm pull --url=$url:$master_zg_zone1_port --access-key=${access_key} --secret=${secret}
x $(rgw_admin $cid) realm default --rgw-realm=$realm
x $(rgw_admin $cid) zonegroup create --rgw-realm=$realm --rgw-zonegroup=$zg --endpoints=$endpoints --default
x $(rgw_admin $cid) zonegroup default --rgw-zonegroup=$zg
x $(rgw_admin $cid) zone create --rgw-zonegroup=$zg --rgw-zone=$zone --access-key=${access_key} --secret=${secret} --endpoints=$endpoints
x $(rgw_admin $cid) zone default --rgw-zone=$zone
x $(rgw_admin $cid) zonegroup add --rgw-zonegroup=$zg --rgw-zone=$zone
x $(rgw_admin $cid) period update --commit
}
function call_rgw_admin {
cid=$1
shift 1
x $(rgw_admin $cid) "$@"
}
function get_mstart_parameters {
[ $# -ne 1 ] && echo "get_mstart_parameters() needs 1 param" && exit 1
# bash arrays start from zero
index="$1"
index=$((index-1))
if [ -n "$DEV_LIST" ]; then
IFS=', ' read -r -a dev_list <<< "$DEV_LIST"
if [ ${#dev_list[@]} -gt "$index" ]; then
local dev_name=${dev_list["$index"]}
parameters="--bluestore-devs $dev_name"
fi
fi
if [ -n "$DB_DEV_LIST" ]; then
IFS=', ' read -r -a db_dev_list <<< "$DB_DEV_LIST"
if [ ${#db_dev_list[@]} -gt "$index" ]; then
local dev_name=${db_dev_list["$index"]}
parameters="$parameters"" -o bluestore_block_db_path=$dev_name"
fi
fi
if [ -n "$WAL_DEV_LIST" ]; then
IFS=', ' read -r -a wal_dev_list <<< "$WAL_DEV_LIST"
if [ ${#wal_dev_list[@]} -gt "$index" ]; then
local dev_name=${wal_dev_list["$index"]}
parameters="$parameters"" -o bluestore_block_wal_path=$dev_name"
fi
fi
echo "$parameters"
}
| 4,626 | 22.607143 | 149 | sh |
null | ceph-main/src/test/rgw/test-rgw-meta-sync.sh | #!/usr/bin/env bash
# Helpers for watching RGW metadata sync between two clusters. The
# status functions populate shell arrays (master_status,
# secondary_status) as side effects for wait_for_meta_sync to compare.
. "`dirname $0`/test-rgw-common.sh"

set -e

# Query the secondary cluster's metadata sync status; fills
# num_shards, sync_states and the secondary_status marker array.
function get_metadata_sync_status {
  cid=$1
  realm=$2
  meta_sync_status_json=`$(rgw_admin $cid) --rgw-realm=$realm metadata sync status`
  global_sync_status=$(json_extract sync_status.info.status $meta_sync_status_json)
  num_shards=$(json_extract sync_status.info.num_shards $meta_sync_status_json)
  echo "sync_status: $global_sync_status"
  sync_markers=$(json_extract sync_status.markers $meta_sync_status_json)
  num_shards2=$(python_array_len $sync_markers)
  # Once in the "sync" state, every shard should have a marker.
  [ "$global_sync_status" == "sync" ] && $assert $num_shards2 -eq $num_shards
  sync_states=$(project_python_array_field val.state $sync_markers)
  eval secondary_status=$(project_python_array_field val.marker $sync_markers)
}

# Query the master cluster's mdlog; fills the master_status marker array.
function get_metadata_log_status {
  cid=$1
  realm=$2
  master_mdlog_status_json=`$(rgw_admin $cid) --rgw-realm=$realm mdlog status`
  master_meta_status=$(json_extract "" $master_mdlog_status_json)
  eval master_status=$(project_python_array_field marker $master_meta_status)
}

# Poll until every shard of <cid> has caught up with <master_id>'s
# mdlog markers (string comparison per shard), retrying every 5s.
function wait_for_meta_sync {
  master_id=$1
  cid=$2
  realm=$3
  get_metadata_log_status $master_id $realm
  echo "master_status=${master_status[*]}"
  while true; do
    get_metadata_sync_status $cid $realm
    echo "secondary_status=${secondary_status[*]}"
    fail=0
    for i in `seq 0 $((num_shards-1))`; do
      if [ "${master_status[$i]}" \> "${secondary_status[$i]}" ]; then
        echo "shard $i not done syncing (${master_status[$i]} > ${secondary_status[$i]})"
        fail=1
        break
      fi
    done
    [ $fail -eq 0 ] && echo "Success" && return || echo "Sync not complete"
    sleep 5
  done
}
| 1,679 | 24.454545 | 89 | sh |
null | ceph-main/src/test/rgw/test-rgw-multisite.sh | #!/usr/bin/env bash
# Bring up <num-clusters> vstart clusters in one realm/zonegroup: the
# first cluster hosts the master zone, each subsequent cluster adds a
# secondary zone and is waited on until metadata sync catches up.
# RGW_PER_ZONE controls how many rgw instances each zone runs.
[ $# -lt 1 ] && echo "usage: $0 <num-clusters> [rgw parameters...]" && exit 1
num_clusters=$1
shift

[ $num_clusters -lt 1 ] && echo "clusters num must be at least 1" && exit 1

. "`dirname $0`/test-rgw-common.sh"
. "`dirname $0`/test-rgw-meta-sync.sh"

set -e

realm_name=earth
zg=zg1

system_access_key="1234567890"
system_secret="pencil"

# bring up first cluster
x $(start_ceph_cluster c1) -n $(get_mstart_parameters 1)

if [ -n "$RGW_PER_ZONE" ]; then
  rgws="$RGW_PER_ZONE"
else
  rgws=1
fi

url=http://localhost

# Master zone gateways listen on 8101, 8102, ...
i=1
while [ $i -le $rgws ]; do
  port=$((8100+i))
  endpoints="$endpoints""$url:$port,"
  i=$((i+1))
done

# create realm, zonegroup, zone, start rgws
init_first_zone c1 $realm_name $zg ${zg}-1 $endpoints $system_access_key $system_secret
i=1
while [ $i -le $rgws ]; do
  port=$((8100+i))
  x $(rgw c1 "$port" "$@")
  i="$((i+1))"
done

output=`$(rgw_admin c1) realm get`

echo realm_status=$output

# bring up next clusters
endpoints=""
i=2
while [ $i -le $num_clusters ]; do
  x $(start_ceph_cluster c$i) -n $(get_mstart_parameters $i)
  j=1
  endpoints=""
  # Cluster i's gateways listen on 8i01, 8i02, ...
  while [ $j -le $rgws ]; do
    port=$((8000+i*100+j))
    endpoints="$endpoints""$url:$port,"
    j=$((j+1))
  done
  # create new zone, start rgw
  # NOTE(review): $zone_port is never assigned in this script, so it
  # expands empty here — confirm the intended argument list of
  # init_zone_in_existing_zg (which takes 8 params).
  init_zone_in_existing_zg c$i $realm_name $zg ${zg}-${i} 8101 $endpoints $zone_port $system_access_key $system_secret
  j=1
  while [ $j -le $rgws ]; do
    port=$((8000+i*100+j))
    x $(rgw c$i "$port" "$@")
    j="$((j+1))"
  done
  i=$((i+1))
done

# Wait for each secondary cluster's metadata sync to catch up with c1.
i=2
while [ $i -le $num_clusters ]; do
  wait_for_meta_sync c1 c$i $realm_name
  i=$((i+1))
done
| 1,605 | 18.119048 | 118 | sh |
null | ceph-main/src/test/rgw/test_cls_fifo_legacy.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <cerrno>
#include <iostream>
#include <string_view>
#include "include/scope_guard.h"
#include "include/types.h"
#include "include/rados/librados.hpp"
#include "common/ceph_context.h"
#include "cls/fifo/cls_fifo_ops.h"
#include "test/librados/test_cxx.h"
#include "global/global_context.h"
#include "rgw_tools.h"
#include "cls_fifo_legacy.h"
#include "gtest/gtest.h"
using namespace std::literals;
using namespace std::string_literals;
namespace R = librados;
namespace cb = ceph::buffer;
namespace fifo = rados::cls::fifo;
namespace RCf = rgw::cls::fifo;
auto cct = new CephContext(CEPH_ENTITY_TYPE_CLIENT);
const DoutPrefix dp(cct, 1, "test legacy cls fifo: ");
namespace {
// Create a FIFO meta object named `oid` with FIFO id `id` by issuing a
// single create_meta write op; returns the rados operation result
// (0 on success, negative error code otherwise).
int fifo_create(const DoutPrefixProvider *dpp, R::IoCtx& ioctx,
		const std::string& oid,
		std::string_view id,
		optional_yield y,
		std::optional<fifo::objv> objv = std::nullopt,
		std::optional<std::string_view> oid_prefix = std::nullopt,
		bool exclusive = false,
		std::uint64_t max_part_size = RCf::default_max_part_size,
		std::uint64_t max_entry_size = RCf::default_max_entry_size)
{
  R::ObjectWriteOperation op;
  RCf::create_meta(&op, id, objv, oid_prefix, exclusive, max_part_size,
		   max_entry_size);
  return rgw_rados_operate(dpp, ioctx, oid, &op, y);
}
}
// Test fixture: creates a fresh uniquely-named RADOS pool before each
// test and destroys it afterwards, exposing an IoCtx on that pool.
class LegacyFIFO : public testing::Test {
protected:
  const std::string pool_name = get_temp_pool_name();
  const std::string fifo_id = "fifo";
  R::Rados rados;
  librados::IoCtx ioctx;

  void SetUp() override {
    ASSERT_EQ("", create_one_pool_pp(pool_name, rados));
    ASSERT_EQ(0, rados.ioctx_create(pool_name.c_str(), ioctx));
  }
  void TearDown() override {
    destroy_one_pool_pp(pool_name, rados);
  }
};
using LegacyClsFIFO = LegacyFIFO;
using AioLegacyFIFO = LegacyFIFO;
// Creation parameter validation: bad ids/sizes are rejected, creation
// is idempotent with identical parameters, and mismatched parameters
// on an existing FIFO fail.
TEST_F(LegacyClsFIFO, TestCreate)
{
  // An empty FIFO id is invalid.
  auto r = fifo_create(&dp, ioctx, fifo_id, ""s, null_yield);
  EXPECT_EQ(-EINVAL, r);
  // A zero max_part_size is invalid.
  r = fifo_create(&dp, ioctx, fifo_id, fifo_id, null_yield, std::nullopt,
		  std::nullopt, false, 0);
  EXPECT_EQ(-EINVAL, r);
  // A zero max_entry_size is invalid.
  r = fifo_create(&dp, ioctx, fifo_id, {}, null_yield,
		  std::nullopt, std::nullopt,
		  false, RCf::default_max_part_size, 0);
  EXPECT_EQ(-EINVAL, r);
  // A valid create writes a non-empty meta object.
  r = fifo_create(&dp, ioctx, fifo_id, fifo_id, null_yield);
  EXPECT_EQ(0, r);
  std::uint64_t size = 0;
  // Check the stat result; the original ignored it and could then
  // compare an uninitialized size.
  r = ioctx.stat(fifo_id, &size, nullptr);
  EXPECT_EQ(0, r);
  EXPECT_GT(size, 0);
  /* test idempotency */
  r = fifo_create(&dp, ioctx, fifo_id, fifo_id, null_yield);
  EXPECT_EQ(0, r);
  // Recreating with different parameters must fail.
  r = fifo_create(&dp, ioctx, fifo_id, {}, null_yield, std::nullopt,
		  std::nullopt, false);
  EXPECT_EQ(-EINVAL, r);
  r = fifo_create(&dp, ioctx, fifo_id, {}, null_yield, std::nullopt,
		  "myprefix"sv, false);
  EXPECT_EQ(-EINVAL, r);
  r = fifo_create(&dp, ioctx, fifo_id, "foo"sv, null_yield,
		  std::nullopt, std::nullopt, false);
  EXPECT_EQ(-EEXIST, r);
}
// get_meta returns layout constants and the FIFO's version; a
// conditional read with a mismatched version fails with -ECANCELED.
TEST_F(LegacyClsFIFO, TestGetInfo)
{
  auto r = fifo_create(&dp, ioctx, fifo_id, fifo_id, null_yield);
  // The original discarded this result; a failed create would make the
  // rest of the test meaningless.
  ASSERT_EQ(0, r);
  fifo::info info;
  std::uint32_t part_header_size;
  std::uint32_t part_entry_overhead;
  // Unconditional read of the meta object.
  r = RCf::get_meta(&dp, ioctx, fifo_id, std::nullopt, &info, &part_header_size,
		    &part_entry_overhead, 0, null_yield);
  EXPECT_EQ(0, r);
  EXPECT_GT(part_header_size, 0);
  EXPECT_GT(part_entry_overhead, 0);
  EXPECT_FALSE(info.version.instance.empty());
  // Conditional read with the matching version still succeeds.
  r = RCf::get_meta(&dp, ioctx, fifo_id, info.version, &info, &part_header_size,
		    &part_entry_overhead, 0, null_yield);
  EXPECT_EQ(0, r);
  // A mismatched version must be rejected.
  fifo::objv objv;
  objv.instance = "foo";
  objv.ver = 12;
  r = RCf::get_meta(&dp, ioctx, fifo_id, objv, &info, &part_header_size,
		    &part_entry_overhead, 0, null_yield);
  EXPECT_EQ(-ECANCELED, r);
}
// Creating a FIFO with all defaults and re-reading its metadata from
// the backend must yield the id it was created with.
TEST_F(LegacyFIFO, TestOpenDefault)
{
  std::unique_ptr<RCf::FIFO> handle;
  ASSERT_EQ(0, RCf::FIFO::create(&dp, ioctx, fifo_id, &handle, null_yield));
  // Re-read the metadata from RADOS rather than trusting the cached copy.
  EXPECT_EQ(0, handle->read_meta(&dp, null_yield));
  const auto& info = handle->meta();
  EXPECT_EQ(info.id, fifo_id);
}
// Non-default creation parameters (part/entry sizes, oid prefix,
// explicit version) must survive a round trip through the backend.
TEST_F(LegacyFIFO, TestOpenParams)
{
  const std::uint64_t max_part_size = 10 * 1024;
  const std::uint64_t max_entry_size = 128;
  auto oid_prefix = "foo.123."sv;
  fifo::objv objv;
  objv.instance = "fooz"s;
  objv.ver = 10;

  /* first successful create */
  std::unique_ptr<RCf::FIFO> f;
  auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, objv, oid_prefix,
			     false, max_part_size, max_entry_size);
  ASSERT_EQ(0, r);

  /* force reading from backend */
  r = f->read_meta(&dp, null_yield);
  // The original discarded this result; if the re-read failed, the
  // checks below would pass against stale in-memory metadata.
  ASSERT_EQ(0, r);
  auto info = f->meta();
  EXPECT_EQ(info.id, fifo_id);
  EXPECT_EQ(info.params.max_part_size, max_part_size);
  EXPECT_EQ(info.params.max_entry_size, max_entry_size);
  EXPECT_EQ(info.version, objv);
}
namespace {
// Decode a list_entry's payload as T and return it together with the
// entry's marker, for convenient structured-binding use in tests.
template<class T>
std::pair<T, std::string> decode_entry(const RCf::list_entry& entry)
{
  T val;
  auto iter = entry.data.cbegin();
  decode(val, iter);
  return std::make_pair(std::move(val), entry.marker);
}
}
// Push ten entries, list them one-by-one and in bulk, then trim one
// entry and verify it disappears from subsequent listings.
TEST_F(LegacyFIFO, TestPushListTrim)
{
  std::unique_ptr<RCf::FIFO> f;
  auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield);
  ASSERT_EQ(0, r);
  static constexpr auto max_entries = 10u;
  for (uint32_t i = 0; i < max_entries; ++i) {
    cb::list bl;
    encode(i, bl);
    r = f->push(&dp, bl, null_yield);
    ASSERT_EQ(0, r);
  }
  std::optional<std::string> marker;
  /* get entries one by one */
  std::vector<RCf::list_entry> result;
  bool more = false;
  for (auto i = 0u; i < max_entries; ++i) {
    r = f->list(&dp, 1, marker, &result, &more, null_yield);
    ASSERT_EQ(0, r);
    // `more` is only false once the last entry has been returned.
    bool expected_more = (i != (max_entries - 1));
    ASSERT_EQ(expected_more, more);
    ASSERT_EQ(1, result.size());
    std::uint32_t val;
    std::tie(val, marker) = decode_entry<std::uint32_t>(result.front());
    ASSERT_EQ(i, val);
    result.clear();
  }
  /* get all entries at once */
  std::string markers[max_entries];
  std::uint32_t min_entry = 0;
  r = f->list(&dp, max_entries * 10, std::nullopt, &result, &more, null_yield);
  ASSERT_EQ(0, r);
  ASSERT_FALSE(more);
  ASSERT_EQ(max_entries, result.size());
  for (auto i = 0u; i < max_entries; ++i) {
    std::uint32_t val;
    std::tie(val, markers[i]) = decode_entry<std::uint32_t>(result[i]);
    ASSERT_EQ(i, val);
  }
  /* trim one entry */
  // Non-exclusive trim removes the marked entry itself as well.
  r = f->trim(&dp, markers[min_entry], false, null_yield);
  ASSERT_EQ(0, r);
  ++min_entry;
  r = f->list(&dp, max_entries * 10, std::nullopt, &result, &more, null_yield);
  ASSERT_EQ(0, r);
  ASSERT_FALSE(more);
  ASSERT_EQ(max_entries - min_entry, result.size());
  for (auto i = min_entry; i < max_entries; ++i) {
    std::uint32_t val;
    std::tie(val, markers[i - min_entry]) =
      decode_entry<std::uint32_t>(result[i - min_entry]);
    EXPECT_EQ(i, val);
  }
}
// An entry one byte larger than max_entry_size must be rejected with
// -E2BIG.
TEST_F(LegacyFIFO, TestPushTooBig)
{
  static constexpr auto max_part_size = 2048ull;
  static constexpr auto max_entry_size = 128ull;
  std::unique_ptr<RCf::FIFO> fifo;
  ASSERT_EQ(0, RCf::FIFO::create(&dp, ioctx, fifo_id, &fifo, null_yield,
				 std::nullopt, std::nullopt, false,
				 max_part_size, max_entry_size));
  // Zero-filled payload exceeding the entry limit by exactly one byte.
  char buf[max_entry_size + 1];
  memset(buf, 0, sizeof(buf));
  cb::list payload;
  payload.append(buf, sizeof(buf));
  EXPECT_EQ(-E2BIG, fifo->push(&dp, payload, null_yield));
}
// Push enough entries to span several parts, verify listing across
// part boundaries, then trim entry-by-entry and check that the tail
// part advances and fully-trimmed part objects are deleted.
TEST_F(LegacyFIFO, TestMultipleParts)
{
  static constexpr auto max_part_size = 2048ull;
  static constexpr auto max_entry_size = 128ull;
  std::unique_ptr<RCf::FIFO> f;
  auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt,
			     std::nullopt, false, max_part_size,
			     max_entry_size);
  ASSERT_EQ(0, r);
  char buf[max_entry_size];
  memset(buf, 0, sizeof(buf));
  const auto [part_header_size, part_entry_overhead] =
    f->get_part_layout_info();
  // Entries that fit into one part, given header and per-entry overhead;
  // 4 parts plus one extra entry forces a fifth part.
  const auto entries_per_part = ((max_part_size - part_header_size) /
				 (max_entry_size + part_entry_overhead));
  const auto max_entries = entries_per_part * 4 + 1;
  /* push enough entries */
  for (auto i = 0u; i < max_entries; ++i) {
    cb::list bl;
    *(int *)buf = i;
    bl.append(buf, sizeof(buf));
    r = f->push(&dp, bl, null_yield);
    ASSERT_EQ(0, r);
  }
  auto info = f->meta();
  ASSERT_EQ(info.id, fifo_id);
  /* head should have advanced */
  ASSERT_GT(info.head_part_num, 0);
  /* list all at once */
  std::vector<RCf::list_entry> result;
  bool more = false;
  r = f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield);
  ASSERT_EQ(0, r);
  EXPECT_EQ(false, more);
  ASSERT_EQ(max_entries, result.size());
  for (auto i = 0u; i < max_entries; ++i) {
    auto& bl = result[i].data;
    ASSERT_EQ(i, *(int *)bl.c_str());
  }
  std::optional<std::string> marker;
  /* get entries one by one */
  for (auto i = 0u; i < max_entries; ++i) {
    r = f->list(&dp, 1, marker, &result, &more, null_yield);
    ASSERT_EQ(0, r);
    ASSERT_EQ(result.size(), 1);
    const bool expected_more = (i != (max_entries - 1));
    ASSERT_EQ(expected_more, more);
    std::uint32_t val;
    std::tie(val, marker) = decode_entry<std::uint32_t>(result.front());
    auto& entry = result.front();
    auto& bl = entry.data;
    ASSERT_EQ(i, *(int *)bl.c_str());
    marker = entry.marker;
  }
  /* trim one at a time */
  marker.reset();
  for (auto i = 0u; i < max_entries; ++i) {
    /* read single entry */
    r = f->list(&dp, 1, marker, &result, &more, null_yield);
    ASSERT_EQ(0, r);
    ASSERT_EQ(result.size(), 1);
    const bool expected_more = (i != (max_entries - 1));
    ASSERT_EQ(expected_more, more);
    marker = result.front().marker;
    r = f->trim(&dp, *marker, false, null_yield);
    ASSERT_EQ(0, r);
    /* check tail */
    // The tail part only advances once a whole part has been trimmed.
    info = f->meta();
    ASSERT_EQ(info.tail_part_num, i / entries_per_part);
    /* try to read all again, see how many entries left */
    r = f->list(&dp, max_entries, marker, &result, &more, null_yield);
    ASSERT_EQ(max_entries - i - 1, result.size());
    ASSERT_EQ(false, more);
  }
  /* tail now should point at head */
  info = f->meta();
  ASSERT_EQ(info.head_part_num, info.tail_part_num);
  RCf::part_info partinfo;
  /* check old tails are removed */
  for (auto i = 0; i < info.tail_part_num; ++i) {
    r = f->get_part_info(&dp, i, &partinfo, null_yield);
    ASSERT_EQ(-ENOENT, r);
  }
  /* check current tail exists */
  r = f->get_part_info(&dp, info.tail_part_num, &partinfo, null_yield);
  ASSERT_EQ(0, r);
}
// Two handles on the same FIFO alternate pushes; all entries must be
// visible, in order, when listing through either handle.
TEST_F(LegacyFIFO, TestTwoPushers)
{
  static constexpr auto max_part_size = 2048ull;
  static constexpr auto max_entry_size = 128ull;
  std::unique_ptr<RCf::FIFO> f;
  auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt,
			     std::nullopt, false, max_part_size,
			     max_entry_size);
  ASSERT_EQ(0, r);
  char buf[max_entry_size];
  memset(buf, 0, sizeof(buf));
  auto [part_header_size, part_entry_overhead] = f->get_part_layout_info();
  const auto entries_per_part = ((max_part_size - part_header_size) /
				 (max_entry_size + part_entry_overhead));
  const auto max_entries = entries_per_part * 4 + 1;
  std::unique_ptr<RCf::FIFO> f2;
  r = RCf::FIFO::open(&dp, ioctx, fifo_id, &f2, null_yield);
  // The original never checked the open result.
  ASSERT_EQ(0, r);
  // Alternate pushes between the two handles.
  std::vector fifos{&f, &f2};
  for (auto i = 0u; i < max_entries; ++i) {
    cb::list bl;
    *(int *)buf = i;
    bl.append(buf, sizeof(buf));
    auto& f = *fifos[i % fifos.size()];
    r = f->push(&dp, bl, null_yield);
    ASSERT_EQ(0, r);
  }
  /* list all by both */
  // The original listed through f2 twice, contradicting the comment;
  // list once through each handle instead.
  std::vector<RCf::list_entry> result;
  bool more = false;
  r = f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield);
  ASSERT_EQ(0, r);
  ASSERT_EQ(false, more);
  ASSERT_EQ(max_entries, result.size());
  r = f2->list(&dp, max_entries, std::nullopt, &result, &more, null_yield);
  ASSERT_EQ(0, r);
  ASSERT_EQ(false, more);
  ASSERT_EQ(max_entries, result.size());
  for (auto i = 0u; i < max_entries; ++i) {
    auto& bl = result[i].data;
    ASSERT_EQ(i, *(int *)bl.c_str());
  }
}
// One handle trims half the entries; a second handle must see only the
// remaining half when listing from the trim marker.
TEST_F(LegacyFIFO, TestTwoPushersTrim)
{
  static constexpr auto max_part_size = 2048ull;
  static constexpr auto max_entry_size = 128ull;
  std::unique_ptr<RCf::FIFO> f1;
  auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f1, null_yield, std::nullopt,
			     std::nullopt, false, max_part_size,
			     max_entry_size);
  ASSERT_EQ(0, r);
  char buf[max_entry_size];
  memset(buf, 0, sizeof(buf));
  auto [part_header_size, part_entry_overhead] = f1->get_part_layout_info();
  const auto entries_per_part = ((max_part_size - part_header_size) /
				 (max_entry_size + part_entry_overhead));
  const auto max_entries = entries_per_part * 4 + 1;
  std::unique_ptr<RCf::FIFO> f2;
  r = RCf::FIFO::open(&dp, ioctx, fifo_id, &f2, null_yield);
  ASSERT_EQ(0, r);
  /* push one entry to f2 and the rest to f1 */
  for (auto i = 0u; i < max_entries; ++i) {
    cb::list bl;
    *(int *)buf = i;
    bl.append(buf, sizeof(buf));
    auto& f = (i < 1 ? f2 : f1);
    r = f->push(&dp, bl, null_yield);
    ASSERT_EQ(0, r);
  }
  /* trim half by fifo1 */
  auto num = max_entries / 2;
  std::string marker;
  std::vector<RCf::list_entry> result;
  bool more = false;
  r = f1->list(&dp, num, std::nullopt, &result, &more, null_yield);
  ASSERT_EQ(0, r);
  ASSERT_EQ(true, more);
  ASSERT_EQ(num, result.size());
  for (auto i = 0u; i < num; ++i) {
    auto& bl = result[i].data;
    ASSERT_EQ(i, *(int *)bl.c_str());
  }
  auto& entry = result[num - 1];
  marker = entry.marker;
  r = f1->trim(&dp, marker, false, null_yield);
  // The original discarded the trim result.
  ASSERT_EQ(0, r);
  /* list what's left by fifo2 */
  const auto left = max_entries - num;
  r = f2->list(&dp, left, marker, &result, &more, null_yield);
  // The original discarded the list result too.
  ASSERT_EQ(0, r);
  ASSERT_EQ(left, result.size());
  ASSERT_EQ(false, more);
  for (auto i = num; i < max_entries; ++i) {
    auto& bl = result[i - num].data;
    ASSERT_EQ(i, *(int *)bl.c_str());
  }
}
// A single batched push of enough entries to span multiple parts must
// succeed, be fully listable in order, and advance the head part.
TEST_F(LegacyFIFO, TestPushBatch)
{
  static constexpr auto max_part_size = 2048ull;
  static constexpr auto max_entry_size = 128ull;
  std::unique_ptr<RCf::FIFO> f;
  auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt,
			     std::nullopt, false, max_part_size,
			     max_entry_size);
  ASSERT_EQ(0, r);
  char buf[max_entry_size];
  memset(buf, 0, sizeof(buf));
  auto [part_header_size, part_entry_overhead] = f->get_part_layout_info();
  auto entries_per_part = ((max_part_size - part_header_size) /
			   (max_entry_size + part_entry_overhead));
  auto max_entries = entries_per_part * 4 + 1; /* enough entries to span multiple parts */
  // Build the whole batch up front, each entry tagged with its index.
  std::vector<cb::list> bufs;
  for (auto i = 0u; i < max_entries; ++i) {
    cb::list bl;
    *(int *)buf = i;
    bl.append(buf, sizeof(buf));
    bufs.push_back(bl);
  }
  ASSERT_EQ(max_entries, bufs.size());
  // One push call for the entire batch.
  r = f->push(&dp, bufs, null_yield);
  ASSERT_EQ(0, r);
  /* list all */
  std::vector<RCf::list_entry> result;
  bool more = false;
  r = f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield);
  ASSERT_EQ(0, r);
  ASSERT_EQ(false, more);
  ASSERT_EQ(max_entries, result.size());
  for (auto i = 0u; i < max_entries; ++i) {
    auto& bl = result[i].data;
    ASSERT_EQ(i, *(int *)bl.c_str());
  }
  // Four full parts plus one entry puts the head at part 4.
  auto& info = f->meta();
  ASSERT_EQ(info.head_part_num, 4);
}
// Same trim-one-at-a-time scenario as TestMultipleParts, but each trim
// is issued through the asynchronous AioCompletion interface.
TEST_F(LegacyFIFO, TestAioTrim)
{
  static constexpr auto max_part_size = 2048ull;
  static constexpr auto max_entry_size = 128ull;
  std::unique_ptr<RCf::FIFO> f;
  auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt,
			     std::nullopt, false, max_part_size,
			     max_entry_size);
  ASSERT_EQ(0, r);
  char buf[max_entry_size];
  memset(buf, 0, sizeof(buf));
  const auto [part_header_size, part_entry_overhead] =
    f->get_part_layout_info();
  const auto entries_per_part = ((max_part_size - part_header_size) /
				 (max_entry_size + part_entry_overhead));
  const auto max_entries = entries_per_part * 4 + 1;
  /* push enough entries */
  std::vector<cb::list> bufs;
  for (auto i = 0u; i < max_entries; ++i) {
    cb::list bl;
    *(int *)buf = i;
    bl.append(buf, sizeof(buf));
    bufs.push_back(std::move(bl));
  }
  ASSERT_EQ(max_entries, bufs.size());
  r = f->push(&dp, bufs, null_yield);
  ASSERT_EQ(0, r);
  auto info = f->meta();
  ASSERT_EQ(info.id, fifo_id);
  /* head should have advanced */
  ASSERT_GT(info.head_part_num, 0);
  /* list all at once */
  std::vector<RCf::list_entry> result;
  bool more = false;
  r = f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield);
  ASSERT_EQ(0, r);
  ASSERT_EQ(false, more);
  ASSERT_EQ(max_entries, result.size());
  std::optional<std::string> marker;
  /* trim one at a time */
  result.clear();
  more = false;
  marker.reset();
  for (auto i = 0u; i < max_entries; ++i) {
    /* read single entry */
    r = f->list(&dp, 1, marker, &result, &more, null_yield);
    ASSERT_EQ(0, r);
    ASSERT_EQ(result.size(), 1);
    const bool expected_more = (i != (max_entries - 1));
    ASSERT_EQ(expected_more, more);
    marker = result.front().marker;
    // Issue the trim asynchronously and wait for the completion.
    std::unique_ptr<R::AioCompletion> c(rados.aio_create_completion(nullptr,
								    nullptr));
    f->trim(&dp, *marker, false, c.get());
    c->wait_for_complete();
    r = c->get_return_value();
    ASSERT_EQ(0, r);
    /* check tail */
    info = f->meta();
    ASSERT_EQ(info.tail_part_num, i / entries_per_part);
    /* try to read all again, see how many entries left */
    r = f->list(&dp, max_entries, marker, &result, &more, null_yield);
    ASSERT_EQ(max_entries - i - 1, result.size());
    ASSERT_EQ(false, more);
  }
  /* tail now should point at head */
  info = f->meta();
  ASSERT_EQ(info.head_part_num, info.tail_part_num);
  RCf::part_info partinfo;
  /* check old tails are removed */
  for (auto i = 0; i < info.tail_part_num; ++i) {
    r = f->get_part_info(&dp, i, &partinfo, null_yield);
    ASSERT_EQ(-ENOENT, r);
  }
  /* check current tail exists */
  r = f->get_part_info(&dp, info.tail_part_num, &partinfo, null_yield);
  ASSERT_EQ(0, r);
}
// Exclusive trim removes everything strictly *before* the marker; the
// marked entry itself survives. The original version of this test
// discarded every push/list/trim return code, so failures there could
// only surface as confusing downstream assertion failures.
TEST_F(LegacyFIFO, TestTrimExclusive) {
  std::unique_ptr<RCf::FIFO> f;
  auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield);
  ASSERT_EQ(0, r);
  std::vector<RCf::list_entry> result;
  bool more = false;
  static constexpr auto max_entries = 10u;
  for (uint32_t i = 0; i < max_entries; ++i) {
    cb::list bl;
    encode(i, bl);
    r = f->push(&dp, bl, null_yield);
    ASSERT_EQ(0, r);
  }
  r = f->list(&dp, 1, std::nullopt, &result, &more, null_yield);
  ASSERT_EQ(0, r);
  auto [val, marker] = decode_entry<std::uint32_t>(result.front());
  ASSERT_EQ(0, val);
  // Trimming exclusively at entry 0 removes nothing.
  r = f->trim(&dp, marker, true, null_yield);
  ASSERT_EQ(0, r);
  result.clear();
  r = f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield);
  ASSERT_EQ(0, r);
  std::tie(val, marker) = decode_entry<std::uint32_t>(result.front());
  ASSERT_EQ(0, val);
  // Trim up to (but not including) the entry at index 4.
  r = f->trim(&dp, result[4].marker, true, null_yield);
  ASSERT_EQ(0, r);
  result.clear();
  r = f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield);
  ASSERT_EQ(0, r);
  std::tie(val, marker) = decode_entry<std::uint32_t>(result.front());
  ASSERT_EQ(4, val);
  // Trim at the last entry: only that entry remains.
  r = f->trim(&dp, result.back().marker, true, null_yield);
  ASSERT_EQ(0, r);
  result.clear();
  r = f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield);
  ASSERT_EQ(0, r);
  std::tie(val, marker) = decode_entry<std::uint32_t>(result.front());
  ASSERT_EQ(result.size(), 1);
  ASSERT_EQ(max_entries - 1, val);
}
/* Async mirror of the basic push/list/trim round-trip: every FIFO call goes
 * through a librados AioCompletion that is waited on synchronously, so the
 * assertions are the same as the blocking variant. */
TEST_F(AioLegacyFIFO, TestPushListTrim)
{
  std::unique_ptr<RCf::FIFO> f;
  auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield);
  ASSERT_EQ(0, r);
  static constexpr auto max_entries = 10u;
  /* push max_entries values, one aio completion per push */
  for (uint32_t i = 0; i < max_entries; ++i) {
    cb::list bl;
    encode(i, bl);
    auto c = R::Rados::aio_create_completion();
    f->push(&dp, bl, c);
    c->wait_for_complete();
    r = c->get_return_value();
    c->release();
    ASSERT_EQ(0, r);
  }
  std::optional<std::string> marker;
  /* get entries one by one */
  std::vector<RCf::list_entry> result;
  bool more = false;
  for (auto i = 0u; i < max_entries; ++i) {
    auto c = R::Rados::aio_create_completion();
    f->list(&dp, 1, marker, &result, &more, c);
    c->wait_for_complete();
    r = c->get_return_value();
    c->release();
    ASSERT_EQ(0, r);
    /* 'more' is true until the very last entry has been returned */
    bool expected_more = (i != (max_entries - 1));
    ASSERT_EQ(expected_more, more);
    ASSERT_EQ(1, result.size());
    std::uint32_t val;
    std::tie(val, marker) = decode_entry<std::uint32_t>(result.front());
    ASSERT_EQ(i, val);
    result.clear();
  }
  /* get all entries at once */
  std::string markers[max_entries];
  std::uint32_t min_entry = 0;
  auto c = R::Rados::aio_create_completion();
  f->list(&dp, max_entries * 10, std::nullopt, &result, &more, c);
  c->wait_for_complete();
  r = c->get_return_value();
  c->release();
  ASSERT_EQ(0, r);
  ASSERT_FALSE(more);
  ASSERT_EQ(max_entries, result.size());
  for (auto i = 0u; i < max_entries; ++i) {
    std::uint32_t val;
    std::tie(val, markers[i]) = decode_entry<std::uint32_t>(result[i]);
    ASSERT_EQ(i, val);
  }
  /* trim one entry */
  c = R::Rados::aio_create_completion();
  f->trim(&dp, markers[min_entry], false, c);
  c->wait_for_complete();
  r = c->get_return_value();
  c->release();
  ASSERT_EQ(0, r);
  ++min_entry;
  /* after the inclusive trim, exactly max_entries - 1 entries remain */
  c = R::Rados::aio_create_completion();
  f->list(&dp, max_entries * 10, std::nullopt, &result, &more, c);
  c->wait_for_complete();
  r = c->get_return_value();
  c->release();
  ASSERT_EQ(0, r);
  ASSERT_FALSE(more);
  ASSERT_EQ(max_entries - min_entry, result.size());
  for (auto i = min_entry; i < max_entries; ++i) {
    std::uint32_t val;
    std::tie(val, markers[i - min_entry]) =
      decode_entry<std::uint32_t>(result[i - min_entry]);
    EXPECT_EQ(i, val);
  }
}
/* Oversized entries must be rejected with -E2BIG, and pushing an empty batch
 * must succeed as a no-op (both via the aio path). */
TEST_F(AioLegacyFIFO, TestPushTooBig)
{
  static constexpr auto max_part_size = 2048ull;
  static constexpr auto max_entry_size = 128ull;
  std::unique_ptr<RCf::FIFO> f;
  auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt,
			     std::nullopt, false, max_part_size, max_entry_size);
  ASSERT_EQ(0, r);
  /* one byte over the configured per-entry limit */
  char buf[max_entry_size + 1];
  memset(buf, 0, sizeof(buf));
  cb::list bl;
  bl.append(buf, sizeof(buf));
  auto c = R::Rados::aio_create_completion();
  f->push(&dp, bl, c);
  c->wait_for_complete();
  r = c->get_return_value();
  ASSERT_EQ(-E2BIG, r);
  c->release();
  /* an empty batch is accepted and returns success */
  c = R::Rados::aio_create_completion();
  f->push(&dp, std::vector<cb::list>{}, c);
  c->wait_for_complete();
  r = c->get_return_value();
  c->release();
  EXPECT_EQ(0, r);
}
/* Exercise a FIFO that spans several parts via the aio interface: verify the
 * empty head, fill five parts, list in bulk and one-by-one, trim one-by-one,
 * and verify old tail parts are removed while the current tail/head survive.
 * Fix: the return value of the first get_head_info completion was fetched but
 * never asserted; add the missing ASSERT_EQ(0, r). */
TEST_F(AioLegacyFIFO, TestMultipleParts)
{
  static constexpr auto max_part_size = 2048ull;
  static constexpr auto max_entry_size = 128ull;
  std::unique_ptr<RCf::FIFO> f;
  auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt,
			     std::nullopt, false, max_part_size,
			     max_entry_size);
  ASSERT_EQ(0, r);
  {
    /* a brand-new FIFO head must report all-zero part info */
    auto c = R::Rados::aio_create_completion();
    f->get_head_info(&dp, [&](int r, RCf::part_info&& p) {
      ASSERT_EQ(0, p.magic);
      ASSERT_EQ(0, p.min_ofs);
      ASSERT_EQ(0, p.last_ofs);
      ASSERT_EQ(0, p.next_ofs);
      ASSERT_EQ(0, p.min_index);
      ASSERT_EQ(0, p.max_index);
      ASSERT_EQ(ceph::real_time{}, p.max_time);
    }, c);
    c->wait_for_complete();
    r = c->get_return_value();
    c->release();
    ASSERT_EQ(0, r);
  }
  char buf[max_entry_size];
  memset(buf, 0, sizeof(buf));
  const auto [part_header_size, part_entry_overhead] =
    f->get_part_layout_info();
  const auto entries_per_part = ((max_part_size - part_header_size) /
				 (max_entry_size + part_entry_overhead));
  /* one more than four full parts, so a fifth part must be created */
  const auto max_entries = entries_per_part * 4 + 1;
  /* push enough entries */
  for (auto i = 0u; i < max_entries; ++i) {
    cb::list bl;
    *(int *)buf = i;
    bl.append(buf, sizeof(buf));
    auto c = R::Rados::aio_create_completion();
    f->push(&dp, bl, c);
    c->wait_for_complete();
    r = c->get_return_value();
    c->release();
    EXPECT_EQ(0, r);
  }
  auto info = f->meta();
  ASSERT_EQ(info.id, fifo_id);
  /* head should have advanced */
  ASSERT_GT(info.head_part_num, 0);
  /* list all at once */
  std::vector<RCf::list_entry> result;
  bool more = false;
  auto c = R::Rados::aio_create_completion();
  f->list(&dp, max_entries, std::nullopt, &result, &more, c);
  c->wait_for_complete();
  r = c->get_return_value();
  c->release();
  EXPECT_EQ(0, r);
  EXPECT_EQ(false, more);
  ASSERT_EQ(max_entries, result.size());
  for (auto i = 0u; i < max_entries; ++i) {
    auto& bl = result[i].data;
    ASSERT_EQ(i, *(int *)bl.c_str());
  }
  std::optional<std::string> marker;
  /* get entries one by one */
  for (auto i = 0u; i < max_entries; ++i) {
    c = R::Rados::aio_create_completion();
    f->list(&dp, 1, marker, &result, &more, c);
    c->wait_for_complete();
    r = c->get_return_value();
    c->release();
    EXPECT_EQ(0, r);
    ASSERT_EQ(result.size(), 1);
    const bool expected_more = (i != (max_entries - 1));
    ASSERT_EQ(expected_more, more);
    std::uint32_t val;
    std::tie(val, marker) = decode_entry<std::uint32_t>(result.front());
    auto& entry = result.front();
    auto& bl = entry.data;
    ASSERT_EQ(i, *(int *)bl.c_str());
    marker = entry.marker;
  }
  /* trim one at a time */
  marker.reset();
  for (auto i = 0u; i < max_entries; ++i) {
    /* read single entry */
    c = R::Rados::aio_create_completion();
    f->list(&dp, 1, marker, &result, &more, c);
    c->wait_for_complete();
    r = c->get_return_value();
    c->release();
    EXPECT_EQ(0, r);
    ASSERT_EQ(result.size(), 1);
    const bool expected_more = (i != (max_entries - 1));
    ASSERT_EQ(expected_more, more);
    marker = result.front().marker;
    c = R::Rados::aio_create_completion();
    f->trim(&dp, *marker, false, c);
    c->wait_for_complete();
    r = c->get_return_value();
    c->release();
    EXPECT_EQ(0, r);
    ASSERT_EQ(result.size(), 1);
    /* check tail */
    info = f->meta();
    ASSERT_EQ(info.tail_part_num, i / entries_per_part);
    /* try to read all again, see how many entries left */
    c = R::Rados::aio_create_completion();
    f->list(&dp, max_entries, marker, &result, &more, c);
    c->wait_for_complete();
    r = c->get_return_value();
    c->release();
    EXPECT_EQ(0, r);
    ASSERT_EQ(max_entries - i - 1, result.size());
    ASSERT_EQ(false, more);
  }
  /* tail now should point at head */
  info = f->meta();
  ASSERT_EQ(info.head_part_num, info.tail_part_num);
  /* check old tails are removed */
  for (auto i = 0; i < info.tail_part_num; ++i) {
    c = R::Rados::aio_create_completion();
    RCf::part_info partinfo;
    f->get_part_info(i, &partinfo, c);
    c->wait_for_complete();
    r = c->get_return_value();
    c->release();
    ASSERT_EQ(-ENOENT, r);
  }
  /* check current tail exists */
  std::uint64_t next_ofs;
  {
    c = R::Rados::aio_create_completion();
    RCf::part_info partinfo;
    f->get_part_info(info.tail_part_num, &partinfo, c);
    c->wait_for_complete();
    r = c->get_return_value();
    c->release();
    next_ofs = partinfo.next_ofs;
  }
  ASSERT_EQ(0, r);
  /* head info must agree with the tail part's next offset */
  c = R::Rados::aio_create_completion();
  f->get_head_info(&dp, [&](int r, RCf::part_info&& p) {
    ASSERT_EQ(next_ofs, p.next_ofs);
  }, c);
  c->wait_for_complete();
  r = c->get_return_value();
  c->release();
  ASSERT_EQ(0, r);
}
/* Two FIFO handles (one from create, one from open) alternate pushes into the
 * same underlying FIFO; listing through either handle must see all entries in
 * order. Fix: the return value of RCf::FIFO::open was ignored — assert it,
 * matching TestTwoPushersTrim. */
TEST_F(AioLegacyFIFO, TestTwoPushers)
{
  static constexpr auto max_part_size = 2048ull;
  static constexpr auto max_entry_size = 128ull;
  std::unique_ptr<RCf::FIFO> f;
  auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt,
			     std::nullopt, false, max_part_size,
			     max_entry_size);
  ASSERT_EQ(0, r);
  char buf[max_entry_size];
  memset(buf, 0, sizeof(buf));
  auto [part_header_size, part_entry_overhead] = f->get_part_layout_info();
  const auto entries_per_part = ((max_part_size - part_header_size) /
				 (max_entry_size + part_entry_overhead));
  const auto max_entries = entries_per_part * 4 + 1;
  /* second, independent handle onto the same FIFO object */
  std::unique_ptr<RCf::FIFO> f2;
  r = RCf::FIFO::open(&dp, ioctx, fifo_id, &f2, null_yield);
  ASSERT_EQ(0, r);
  std::vector fifos{&f, &f2};
  for (auto i = 0u; i < max_entries; ++i) {
    cb::list bl;
    *(int *)buf = i;
    bl.append(buf, sizeof(buf));
    /* alternate the pushing handle; the local f shadows the outer one */
    auto& f = *fifos[i % fifos.size()];
    auto c = R::Rados::aio_create_completion();
    f->push(&dp, bl, c);
    c->wait_for_complete();
    r = c->get_return_value();
    c->release();
    ASSERT_EQ(0, r);
  }
  /* list all by both */
  std::vector<RCf::list_entry> result;
  bool more = false;
  auto c = R::Rados::aio_create_completion();
  f2->list(&dp, max_entries, std::nullopt, &result, &more, c);
  c->wait_for_complete();
  r = c->get_return_value();
  c->release();
  ASSERT_EQ(0, r);
  ASSERT_EQ(false, more);
  ASSERT_EQ(max_entries, result.size());
  c = R::Rados::aio_create_completion();
  f2->list(&dp, max_entries, std::nullopt, &result, &more, c);
  c->wait_for_complete();
  r = c->get_return_value();
  c->release();
  ASSERT_EQ(0, r);
  ASSERT_EQ(false, more);
  ASSERT_EQ(max_entries, result.size());
  for (auto i = 0u; i < max_entries; ++i) {
    auto& bl = result[i].data;
    ASSERT_EQ(i, *(int *)bl.c_str());
  }
}
/* Two handles on the same FIFO: trim half the entries through handle 1, then
 * verify handle 2 observes only the remaining half, in order. */
TEST_F(AioLegacyFIFO, TestTwoPushersTrim)
{
  static constexpr auto max_part_size = 2048ull;
  static constexpr auto max_entry_size = 128ull;
  std::unique_ptr<RCf::FIFO> f1;
  auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f1, null_yield, std::nullopt,
			     std::nullopt, false, max_part_size,
			     max_entry_size);
  ASSERT_EQ(0, r);
  char buf[max_entry_size];
  memset(buf, 0, sizeof(buf));
  auto [part_header_size, part_entry_overhead] = f1->get_part_layout_info();
  const auto entries_per_part = ((max_part_size - part_header_size) /
				 (max_entry_size + part_entry_overhead));
  const auto max_entries = entries_per_part * 4 + 1;
  std::unique_ptr<RCf::FIFO> f2;
  r = RCf::FIFO::open(&dp, ioctx, fifo_id, &f2, null_yield);
  ASSERT_EQ(0, r);
  /* push one entry to f2 and the rest to f1 */
  for (auto i = 0u; i < max_entries; ++i) {
    cb::list bl;
    *(int *)buf = i;
    bl.append(buf, sizeof(buf));
    auto& f = (i < 1 ? f2 : f1);
    auto c = R::Rados::aio_create_completion();
    f->push(&dp, bl, c);
    c->wait_for_complete();
    r = c->get_return_value();
    c->release();
    ASSERT_EQ(0, r);
  }
  /* trim half by fifo1 */
  auto num = max_entries / 2;
  std::string marker;
  std::vector<RCf::list_entry> result;
  bool more = false;
  auto c = R::Rados::aio_create_completion();
  f1->list(&dp, num, std::nullopt, &result, &more, c);
  c->wait_for_complete();
  r = c->get_return_value();
  c->release();
  ASSERT_EQ(0, r);
  ASSERT_EQ(true, more);
  ASSERT_EQ(num, result.size());
  for (auto i = 0u; i < num; ++i) {
    auto& bl = result[i].data;
    ASSERT_EQ(i, *(int *)bl.c_str());
  }
  /* inclusive trim up to (and including) the last listed entry */
  auto& entry = result[num - 1];
  marker = entry.marker;
  c = R::Rados::aio_create_completion();
  f1->trim(&dp, marker, false, c);
  c->wait_for_complete();
  r = c->get_return_value();
  c->release();
  ASSERT_EQ(0, r);
  /* list what's left by fifo2 */
  const auto left = max_entries - num;
  c = R::Rados::aio_create_completion();
  f2->list(&dp, left, marker, &result, &more, c);
  c->wait_for_complete();
  r = c->get_return_value();
  c->release();
  ASSERT_EQ(0, r);
  ASSERT_EQ(left, result.size());
  ASSERT_EQ(false, more);
  for (auto i = num; i < max_entries; ++i) {
    auto& bl = result[i - num].data;
    ASSERT_EQ(i, *(int *)bl.c_str());
  }
}
/* A single batched push of enough entries to span five parts must succeed,
 * list back in order, and advance the head to part 4. */
TEST_F(AioLegacyFIFO, TestPushBatch)
{
  static constexpr auto max_part_size = 2048ull;
  static constexpr auto max_entry_size = 128ull;
  std::unique_ptr<RCf::FIFO> f;
  auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt,
			     std::nullopt, false, max_part_size,
			     max_entry_size);
  ASSERT_EQ(0, r);
  char buf[max_entry_size];
  memset(buf, 0, sizeof(buf));
  auto [part_header_size, part_entry_overhead] = f->get_part_layout_info();
  auto entries_per_part = ((max_part_size - part_header_size) /
			   (max_entry_size + part_entry_overhead));
  auto max_entries = entries_per_part * 4 + 1; /* enough entries to span multiple parts */
  /* build the whole batch up front, each entry tagged with its index */
  std::vector<cb::list> bufs;
  for (auto i = 0u; i < max_entries; ++i) {
    cb::list bl;
    *(int *)buf = i;
    bl.append(buf, sizeof(buf));
    bufs.push_back(bl);
  }
  ASSERT_EQ(max_entries, bufs.size());
  auto c = R::Rados::aio_create_completion();
  f->push(&dp, bufs, c);
  c->wait_for_complete();
  r = c->get_return_value();
  c->release();
  ASSERT_EQ(0, r);
  /* list all */
  std::vector<RCf::list_entry> result;
  bool more = false;
  c = R::Rados::aio_create_completion();
  f->list(&dp, max_entries, std::nullopt, &result, &more, c);
  c->wait_for_complete();
  r = c->get_return_value();
  c->release();
  ASSERT_EQ(0, r);
  ASSERT_EQ(false, more);
  ASSERT_EQ(max_entries, result.size());
  for (auto i = 0u; i < max_entries; ++i) {
    auto& bl = result[i].data;
    ASSERT_EQ(i, *(int *)bl.c_str());
  }
  /* the batch crossed four part boundaries, so the head is at part 4 */
  auto& info = f->meta();
  ASSERT_EQ(info.head_part_num, 4);
}
/* Trimming at marker::max() removes everything and reports -ENODATA; a
 * subsequent list must come back empty.
 * Fixes: 'bool more' was read-back-capable but uninitialized (list may leave
 * it untouched on error), and the "trim one entry" comment was wrong — this
 * trims the whole FIFO. */
TEST_F(LegacyFIFO, TrimAll)
{
  std::unique_ptr<RCf::FIFO> f;
  auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield);
  ASSERT_EQ(0, r);
  static constexpr auto max_entries = 10u;
  for (uint32_t i = 0; i < max_entries; ++i) {
    cb::list bl;
    encode(i, bl);
    r = f->push(&dp, bl, null_yield);
    ASSERT_EQ(0, r);
  }
  /* trim everything by using the maximum possible marker */
  r = f->trim(&dp, RCf::marker::max().to_string(), false, null_yield);
  ASSERT_EQ(-ENODATA, r);
  std::vector<RCf::list_entry> result;
  bool more = false;
  r = f->list(&dp, 1, std::nullopt, &result, &more, null_yield);
  ASSERT_EQ(0, r);
  ASSERT_TRUE(result.empty());
}
/* Same as TrimAll, but the trim is issued through an aio completion: the
 * completion's return value carries the -ENODATA that signals "nothing left".
 * Fix: initialize 'bool more' — it was uninitialized before being passed to
 * list() as an out-parameter. */
TEST_F(LegacyFIFO, AioTrimAll)
{
  std::unique_ptr<RCf::FIFO> f;
  auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield);
  ASSERT_EQ(0, r);
  static constexpr auto max_entries = 10u;
  for (uint32_t i = 0; i < max_entries; ++i) {
    cb::list bl;
    encode(i, bl);
    r = f->push(&dp, bl, null_yield);
    ASSERT_EQ(0, r);
  }
  auto c = R::Rados::aio_create_completion();
  f->trim(&dp, RCf::marker::max().to_string(), false, c);
  c->wait_for_complete();
  r = c->get_return_value();
  c->release();
  ASSERT_EQ(-ENODATA, r);
  std::vector<RCf::list_entry> result;
  bool more = false;
  r = f->list(&dp, 1, std::nullopt, &result, &more, null_yield);
  ASSERT_EQ(0, r);
  ASSERT_TRUE(result.empty());
}
| 34,352 | 27.989873 | 90 | cc |
null | ceph-main/src/test/rgw/test_d4n_directory.cc | #include "d4n_directory.h"
#include "rgw_process_env.h"
#include <cpp_redis/cpp_redis>
#include <iostream>
#include <string>
#include "gtest/gtest.h"
using namespace std;
/* Test-wide configuration. portStr/hostStr are filled in by main() from
 * argv (defaulting to 127.0.0.1:6379); redisHost is derived from them. */
string portStr;                       // redis port, as text
string hostStr;                       // redis host
string redisHost = "";                // "host:port", set in main()
string oid = "samoid";                // object name used by every test
string bucketName = "testBucket";     // bucket name used by every test
int blkSize = 123;                    // cache block size, in bytes
/* Per-test fixture: builds a fresh RGWBlockDirectory client pointed at the
 * redis instance and a cache_block populated from the globals above. */
class DirectoryFixture: public ::testing::Test {
  protected:
    virtual void SetUp() {
      /* raw new/delete pairs; TearDown is the matching cleanup */
      blk_dir = new RGWBlockDirectory(hostStr, stoi(portStr));
      c_blk = new cache_block();
      c_blk->hosts_list.push_back(redisHost);
      c_blk->size_in_bytes = blkSize;
      c_blk->c_obj.bucket_name = bucketName;
      c_blk->c_obj.obj_name = oid;
    }
    virtual void TearDown() {
      delete blk_dir;
      blk_dir = nullptr;
      delete c_blk;
      c_blk = nullptr;
    }
    RGWBlockDirectory* blk_dir;   // directory client under test
    cache_block* c_blk;           // sample block written/read by the tests
};
/* Successful initialization */
TEST_F(DirectoryFixture, DirectoryInit) {
  /* The fixture must have produced a directory client, a sample block,
   * and a non-empty redis endpoint string. */
  ASSERT_TRUE(blk_dir != nullptr);
  ASSERT_TRUE(c_blk != nullptr);
  ASSERT_FALSE(redisHost.empty());
}
/* Successful setValue Call and Redis Check */
/* setValue() must write a redis hash at rgw-object:<oid>:directory whose
 * fields echo the cache_block contents; verified with a raw HMGET. */
TEST_F(DirectoryFixture, SetValueTest) {
  cpp_redis::client client;
  int key_exist = -1;   // flips to 0 once the hash is found
  string key;
  string hosts;
  string size;
  string bucket_name;
  string obj_name;
  std::vector<std::string> fields;
  int setReturn = blk_dir->setValue(c_blk);
  ASSERT_EQ(setReturn, 0);
  fields.push_back("key");
  fields.push_back("hosts");
  fields.push_back("size");
  fields.push_back("bucket_name");
  fields.push_back("obj_name");
  client.connect(hostStr, stoi(portStr), nullptr, 0, 5, 1000);
  ASSERT_EQ((bool)client.is_connected(), (bool)1);
  /* read the fields back; a null first element means the hash is missing */
  client.hmget("rgw-object:" + oid + ":directory", fields, [&key, &hosts, &size, &bucket_name, &obj_name, &key_exist](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      key_exist = 0;
      key = arr[0].as_string();
      hosts = arr[1].as_string();
      size = arr[2].as_string();
      bucket_name = arr[3].as_string();
      obj_name = arr[4].as_string();
    }
  });
  client.sync_commit();
  EXPECT_EQ(key_exist, 0);
  EXPECT_EQ(key, "rgw-object:" + oid + ":directory");
  EXPECT_EQ(hosts, redisHost);
  EXPECT_EQ(size, to_string(blkSize));
  EXPECT_EQ(bucket_name, bucketName);
  EXPECT_EQ(obj_name, oid);
  client.flushall();
}
/* Successful getValue Calls and Redis Check */
/* After setValue(), mutate the obj_name field directly in redis and confirm
 * getValue() refreshes the in-memory cache_block from the directory. */
TEST_F(DirectoryFixture, GetValueTest) {
  cpp_redis::client client;
  int key_exist = -1;
  string key;
  string hosts;
  string size;
  string bucket_name;
  string obj_name;
  std::vector<std::string> fields;
  int setReturn = blk_dir->setValue(c_blk);
  ASSERT_EQ(setReturn, 0);
  fields.push_back("key");
  fields.push_back("hosts");
  fields.push_back("size");
  fields.push_back("bucket_name");
  fields.push_back("obj_name");
  client.connect(hostStr, stoi(portStr), nullptr, 0, 5, 1000);
  ASSERT_EQ((bool)client.is_connected(), (bool)1);
  client.hmget("rgw-object:" + oid + ":directory", fields, [&key, &hosts, &size, &bucket_name, &obj_name, &key_exist](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      key_exist = 0;
      key = arr[0].as_string();
      hosts = arr[1].as_string();
      size = arr[2].as_string();
      bucket_name = arr[3].as_string();
      obj_name = arr[4].as_string();
    }
  });
  client.sync_commit();
  EXPECT_EQ(key_exist, 0);
  EXPECT_EQ(key, "rgw-object:" + oid + ":directory");
  EXPECT_EQ(hosts, redisHost);
  EXPECT_EQ(size, to_string(blkSize));
  EXPECT_EQ(bucket_name, bucketName);
  EXPECT_EQ(obj_name, oid);
  /* Check if object name in directory instance matches redis update */
  client.hset("rgw-object:" + oid + ":directory", "obj_name", "newoid", [](cpp_redis::reply& reply) {
    if (reply.is_integer()) {
      ASSERT_EQ(reply.as_integer(), 0); /* HSET returns 0: field already existed and was overwritten */
    }
  });
  client.sync_commit();
  int getReturn = blk_dir->getValue(c_blk);
  ASSERT_EQ(getReturn, 0);
  EXPECT_EQ(c_blk->c_obj.obj_name, "newoid");
  client.flushall();
}
/* Successful delValue Call and Redis Check */
/* delValue() must remove the directory hash for the block.
 * Fixes: (1) the redis client was never connect()ed, so every queued command
 * was a no-op; (2) the EXISTS commands were queued but never flushed with
 * sync_commit(), so the assertions inside their callbacks never ran. Both
 * follow the pattern the sibling tests in this file already use. */
TEST_F(DirectoryFixture, DelValueTest) {
  cpp_redis::client client;
  vector<string> keys;
  int setReturn = blk_dir->setValue(c_blk);
  ASSERT_EQ(setReturn, 0);
  client.connect(hostStr, stoi(portStr), nullptr, 0, 5, 1000);
  ASSERT_EQ((bool)client.is_connected(), (bool)1);
  /* Ensure cache entry exists in cache before deletion */
  keys.push_back("rgw-object:" + oid + ":directory");
  client.exists(keys, [](cpp_redis::reply& reply) {
    if (reply.is_integer()) {
      ASSERT_EQ(reply.as_integer(), 1);
    }
  });
  client.sync_commit();
  int delReturn = blk_dir->delValue(c_blk);
  ASSERT_EQ(delReturn, 0);
  client.exists(keys, [](cpp_redis::reply& reply) {
    if (reply.is_integer()) {
      ASSERT_EQ(reply.as_integer(), 0); /* Zero keys exist */
    }
  });
  client.sync_commit();
  client.flushall();
}
/* Entry point: optional "<host> <port>" arguments select the redis instance
 * (default 127.0.0.1:6379), then all directory tests run.
 * Fix: the usage error now goes to stderr (matching the error-reporting
 * convention used elsewhere in these tests) instead of stdout. */
int main(int argc, char *argv[]) {
  ::testing::InitGoogleTest(&argc, argv);
  /* Other ports can be passed to the program */
  if (argc == 1) {
    portStr = "6379";
    hostStr = "127.0.0.1";
  } else if (argc == 3) {
    hostStr = argv[1];
    portStr = argv[2];
  } else {
    std::cerr << "Incorrect number of arguments." << std::endl;
    return -1;
  }
  redisHost = hostStr + ":" + portStr;
  return RUN_ALL_TESTS();
}
| 5,176 | 24.009662 | 144 | cc |
null | ceph-main/src/test/rgw/test_d4n_filter.cc | #include "gtest/gtest.h"
#include "common/ceph_context.h"
#include <iostream>
#include <string>
#include "rgw_process_env.h"
#include <cpp_redis/cpp_redis>
#include "driver/dbstore/common/dbstore.h"
#include "rgw_sal_store.h"
#include "driver/d4n/rgw_sal_d4n.h"
#include "rgw_sal.h"
#include "rgw_auth.h"
#include "rgw_auth_registry.h"
#define dout_subsys ceph_subsys_rgw
#define METADATA_LENGTH 22
using namespace std;
/* Shared test state. portStr/hostStr/redisHost select the redis endpoint;
 * env and dpp are presumably assigned by a main()/AddGlobalTestEnvironment
 * outside this chunk — TODO confirm. */
string portStr;
string hostStr;
string redisHost = "";
vector<const char*> args;          // passed to global_init() in Environment::SetUp
class Environment* env;            // global test environment (driver owner)
const DoutPrefixProvider* dpp;     // debug-output prefix used by all calls
/* Test shim: exposes rgw::sal::StoreObject's protected internals to the
 * fixture and to the StoreGetMetadata test via friendship. */
class StoreObject : public rgw::sal::StoreObject {
  friend class D4NFilterFixture;
  FRIEND_TEST(D4NFilterFixture, StoreGetMetadata);
};
/* Global test environment: verifies redis is reachable, initializes a Ceph
 * context, and constructs the dbstore driver wrapped by the d4n filter.
 * TearDown releases the driver/dpp only if redis was actually connected. */
class Environment : public ::testing::Environment {
  public:
    Environment() {}
    virtual ~Environment() {}
    void SetUp() override {
      /* Ensure redis instance is running */
      try {
        env_client.connect(hostStr, stoi(portStr), nullptr, 0, 5, 1000);
      } catch (std::exception &e) {
        std::cerr << "[ ] ERROR: Redis instance not running." << std::endl;
      }
      ASSERT_EQ((bool)env_client.is_connected(), (bool)1);
      /* Proceed with environment setup */
      cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_CLIENT,
                        CODE_ENVIRONMENT_UTILITY,
                        CINIT_FLAG_NO_MON_CONFIG);
      dpp = new DoutPrefix(cct->get(), dout_subsys, "d4n test: ");
      DriverManager::Config cfg;
      cfg.store_name = "dbstore";   // base store
      cfg.filter_name = "d4n";      // cache filter layered on top
      driver = DriverManager::get_storage(dpp, dpp->get_cct(),
              cfg,
              false,
              false,
              false,
              false,
              false,
              false, null_yield,
              false);
      ASSERT_NE(driver, nullptr);
    }
    void TearDown() override {
      if (env_client.is_connected()) {
        delete driver;
        delete dpp;
        env_client.disconnect();
      }
    }
    boost::intrusive_ptr<CephContext> cct;
    rgw::sal::Driver* driver;       // d4n-filtered driver shared by all tests
    cpp_redis::client env_client;   // used only for the reachability probe
};
/* Per-test fixture with helpers to create a user, a bucket, and a small
 * object through the d4n filter driver, plus redis setup/reset helpers.
 * NOTE(review): several out-parameters below (if_match/if_nomatch, etc.) are
 * passed uninitialized by address — presumably only written by the callee;
 * confirm against the SAL API. */
class D4NFilterFixture : public ::testing::Test {
  protected:
    rgw::sal::Driver* driver;
    unique_ptr<rgw::sal::User> testUser = nullptr;
    unique_ptr<rgw::sal::Bucket> testBucket = nullptr;
    unique_ptr<rgw::sal::Writer> testWriter = nullptr;
  public:
    D4NFilterFixture() {}
    void SetUp() {
      /* reuse the driver built once by the global Environment */
      driver = env->driver;
    }
    void TearDown() {}
    /* Create and persist a fixed test user; returns store_user's rc. */
    int createUser() {
      rgw_user u("test_tenant", "test_user", "ns");
      testUser = driver->get_user(u);
      testUser->get_info().user_id = u;
      int ret = testUser->store_user(dpp, null_yield, false);
      return ret;
    }
    /* Create "test_bucket" owned by testUser; returns create_bucket's rc. */
    int createBucket() {
      rgw_bucket b;
      string zonegroup_id = "test_id";
      rgw_placement_rule placement_rule;
      string swift_ver_location = "test_location";
      const RGWAccessControlPolicy policy;
      rgw::sal::Attrs attrs;
      RGWBucketInfo info;
      obj_version ep_objv;
      bool bucket_exists;
      int ret;
      CephContext* cct = get_pointer(env->cct);
      RGWProcessEnv penv;
      RGWEnv rgw_env;
      req_state s(cct->get(), penv, &rgw_env, 0);
      req_info _req_info = s.info;
      b.name = "test_bucket";
      placement_rule.storage_class = "test_sc";
      ret = testUser->create_bucket(dpp, b,
				    zonegroup_id,
				    placement_rule,
				    swift_ver_location,
				    nullptr,
				    policy,
				    attrs,
				    info,
				    ep_objv,
				    false,
				    false,
				    &bucket_exists,
				    _req_info,
				    &testBucket,
				    null_yield);
      return ret;
    }
    /* Write "test_object_<name>" with one attr ("test_attrs_key_<name>" ->
     * "test_attrs_value_<name>") through an atomic writer; returns rc. */
    int putObject(string name) {
      string object_name = "test_object_" + name;
      unique_ptr<rgw::sal::Object> obj = testBucket->get_object(rgw_obj_key(object_name));
      rgw_user owner;
      rgw_placement_rule ptail_placement_rule;
      uint64_t olh_epoch = 123;
      string unique_tag;
      obj->get_obj_attrs(null_yield, dpp);
      testWriter = driver->get_atomic_writer(dpp,
					     null_yield,
					     obj.get(),
					     owner,
					     &ptail_placement_rule,
					     olh_epoch,
					     unique_tag);
      size_t accounted_size = 4;
      string etag("test_etag");
      ceph::real_time mtime;
      ceph::real_time set_mtime;
      buffer::list bl;
      string tmp = "test_attrs_value_" + name;
      bl.append("test_attrs_value_" + name);
      map<string, bufferlist> attrs{{"test_attrs_key_" + name, bl}};
      ceph::real_time delete_at;
      char if_match;
      char if_nomatch;
      string user_data;
      rgw_zone_set zones_trace;
      bool canceled;
      int ret = testWriter->complete(accounted_size, etag,
				     &mtime, set_mtime,
				     attrs,
				     delete_at,
				     &if_match, &if_nomatch,
				     &user_data,
				     &zones_trace, &canceled,
				     null_yield);
      return ret;
    }
    /* Connect the given redis client and wipe the current database. */
    void clientSetUp(cpp_redis::client* client) {
      client->connect(hostStr, stoi(portStr), nullptr, 0, 5, 1000);
      ASSERT_EQ((bool)client->is_connected(), (bool)1);
      client->flushdb([](cpp_redis::reply& reply) {});
      client->sync_commit();
    }
    /* Wipe the redis database at the end of a test. */
    void clientReset(cpp_redis::client* client) {
      client->flushdb([](cpp_redis::reply& reply) {});
      client->sync_commit();
    }
};
/* General operation-related tests */
TEST_F(D4NFilterFixture, CreateUser) {
  /* Storing a fresh user must succeed and leave the handle populated. */
  const int ret = createUser();
  EXPECT_EQ(0, ret);
  EXPECT_NE(nullptr, testUser);
}
TEST_F(D4NFilterFixture, CreateBucket) {
  /* A bucket needs a stored owner, so the user must exist first. */
  const int userRet = createUser();
  ASSERT_EQ(0, userRet);
  ASSERT_NE(nullptr, testUser);
  const int bucketRet = createBucket();
  EXPECT_EQ(0, bucketRet);
  EXPECT_NE(nullptr, testBucket);
}
/* Writing an object through the filter must populate the redis cache hash
 * rgw-object:<name>:cache with METADATA_LENGTH metadata fields plus the
 * object's one attribute pair. */
TEST_F(D4NFilterFixture, PutObject) {
  cpp_redis::client client;
  vector<string> fields;
  fields.push_back("test_attrs_key_0");
  clientSetUp(&client);
  ASSERT_EQ(createUser(), 0);
  ASSERT_NE(testUser, nullptr);
  ASSERT_EQ(createBucket(), 0);
  ASSERT_NE(testBucket, nullptr);
  EXPECT_EQ(putObject("PutObject"), 0);
  EXPECT_NE(testWriter, nullptr);
  /* metadata fields + one attr (key and value) */
  client.hgetall("rgw-object:test_object_PutObject:cache", [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ((int)arr.size(), 2 + METADATA_LENGTH);
    }
  });
  client.sync_commit();
  client.hmget("rgw-object:test_object_PutObject:cache", fields, [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ(arr[0].as_string(), "test_attrs_value_PutObject");
    }
  });
  client.sync_commit();
  clientReset(&client);
}
/* Preparing a read op through the filter must leave the cached entry (and
 * its attribute) intact in redis. */
TEST_F(D4NFilterFixture, GetObject) {
  cpp_redis::client client;
  vector<string> fields;
  fields.push_back("test_attrs_key_GetObject");
  clientSetUp(&client);
  ASSERT_EQ(createUser(), 0);
  ASSERT_NE(testUser, nullptr);
  ASSERT_EQ(createBucket(), 0);
  ASSERT_NE(testBucket, nullptr);
  ASSERT_EQ(putObject("GetObject"), 0);
  ASSERT_NE(testWriter, nullptr);
  unique_ptr<rgw::sal::Object> testObject_GetObject = testBucket->get_object(rgw_obj_key("test_object_GetObject"));
  EXPECT_NE(testObject_GetObject, nullptr);
  /* reach past the filter to the wrapped store object to load its attrs */
  static rgw::sal::Object* nextObject = dynamic_cast<rgw::sal::FilterObject*>(testObject_GetObject.get())->get_next();
  ASSERT_EQ(nextObject->get_obj_attrs(null_yield, dpp, NULL), 0);
  unique_ptr<rgw::sal::Object::ReadOp> testROp = testObject_GetObject->get_read_op();
  EXPECT_NE(testROp, nullptr);
  EXPECT_EQ(testROp->prepare(null_yield, dpp), 0);
  client.hgetall("rgw-object:test_object_GetObject:cache", [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ((int)arr.size(), 2 + METADATA_LENGTH);
    }
  });
  client.sync_commit();
  client.hmget("rgw-object:test_object_GetObject:cache", fields, [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ(arr[0].as_string(), "test_attrs_value_GetObject");
    }
  });
  client.sync_commit();
  clientReset(&client);
}
/* Copy an object onto itself with ATTRSMOD_NONE: the cached entry and its
 * attribute must be unchanged afterwards.
 * Fix: this test called clientSetUp() but never clientReset(), unlike every
 * other client-using test in this file, leaking redis state into whichever
 * test runs next — add the missing reset. */
TEST_F(D4NFilterFixture, CopyObjectNone) {
  cpp_redis::client client;
  vector<string> fields;
  fields.push_back("test_attrs_key_CopyObjectNone");
  clientSetUp(&client);
  createUser();
  createBucket();
  putObject("CopyObjectNone");
  unique_ptr<rgw::sal::Object> testObject_CopyObjectNone = testBucket->get_object(rgw_obj_key("test_object_CopyObjectNone"));
  ASSERT_NE(testObject_CopyObjectNone, nullptr);
  static rgw::sal::Object* nextObject = dynamic_cast<rgw::sal::FilterObject*>(testObject_CopyObjectNone.get())->get_next();
  ASSERT_EQ(nextObject->get_obj_attrs(null_yield, dpp, NULL), 0);
  ASSERT_NE(nextObject->get_attrs().empty(), true);
  /* Update object */
  RGWEnv rgw_env;
  req_info info(get_pointer(env->cct), &rgw_env);
  rgw_zone_id source_zone;
  rgw_placement_rule dest_placement;
  ceph::real_time src_mtime;
  ceph::real_time mtime;
  ceph::real_time mod_ptr;
  ceph::real_time unmod_ptr;
  char if_match;
  char if_nomatch;
  rgw::sal::AttrsMod attrs_mod = rgw::sal::ATTRSMOD_NONE;
  rgw::sal::Attrs attrs;
  RGWObjCategory category = RGWObjCategory::Main;
  uint64_t olh_epoch = 0;
  ceph::real_time delete_at;
  string tag;
  string etag;
  EXPECT_EQ(testObject_CopyObjectNone->copy_object(testUser.get(),
		   &info, source_zone, testObject_CopyObjectNone.get(),
		   testBucket.get(), testBucket.get(),
		   dest_placement, &src_mtime, &mtime,
		   &mod_ptr, &unmod_ptr, false,
		   &if_match, &if_nomatch, attrs_mod,
		   false, attrs, category, olh_epoch,
		   delete_at, NULL, &tag, &etag,
		   NULL, NULL, dpp, null_yield), 0);
  /* cache entry still has metadata + the single attr pair */
  client.hgetall("rgw-object:test_object_CopyObjectNone:cache", [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ((int)arr.size(), 2 + METADATA_LENGTH);
    }
  });
  client.sync_commit();
  client.hmget("rgw-object:test_object_CopyObjectNone:cache", fields, [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ(arr[0].as_string(), "test_attrs_value_CopyObjectNone");
    }
  });
  client.sync_commit();
  clientReset(&client);
}
/* Copy to a second object with ATTRSMOD_REPLACE: the destination's cached
 * attribute must be the replacement value, while the source entry survives. */
TEST_F(D4NFilterFixture, CopyObjectReplace) {
  cpp_redis::client client;
  vector<string> fields;
  clientSetUp(&client);
  createUser();
  createBucket();
  putObject("CopyObjectReplace");
  unique_ptr<rgw::sal::Object> testObject_CopyObjectReplace = testBucket->get_object(rgw_obj_key("test_object_CopyObjectReplace"));
  ASSERT_NE(testObject_CopyObjectReplace, nullptr);
  static rgw::sal::Object* nextObject = dynamic_cast<rgw::sal::FilterObject*>(testObject_CopyObjectReplace.get())->get_next();
  ASSERT_EQ(nextObject->get_obj_attrs(null_yield, dpp, NULL), 0);
  ASSERT_NE(nextObject->get_attrs().empty(), true);
  /* Copy to new object */
  unique_ptr<rgw::sal::Writer> testWriterCopy = nullptr;
  unique_ptr<rgw::sal::Object> obj = testBucket->get_object(rgw_obj_key("test_object_copy"));
  rgw_user owner;
  rgw_placement_rule ptail_placement_rule;
  uint64_t olh_epoch_copy = 123;
  string unique_tag;
  obj->get_obj_attrs(null_yield, dpp);
  testWriterCopy = driver->get_atomic_writer(dpp,
					     null_yield,
					     obj.get(),
					     owner,
					     &ptail_placement_rule,
					     olh_epoch_copy,
					     unique_tag);
  RGWEnv rgw_env;
  size_t accounted_size = 0;
  req_info info(get_pointer(env->cct), &rgw_env);
  rgw_zone_id source_zone;
  rgw_placement_rule dest_placement;
  ceph::real_time src_mtime;
  ceph::real_time mtime;
  ceph::real_time set_mtime;
  ceph::real_time mod_ptr;
  ceph::real_time unmod_ptr;
  rgw::sal::AttrsMod attrs_mod = rgw::sal::ATTRSMOD_REPLACE;
  char if_match;
  char if_nomatch;
  RGWObjCategory category = RGWObjCategory::Main;
  uint64_t olh_epoch = 0;
  ceph::real_time delete_at;
  string tag;
  string etag("test_etag_copy");
  /* Attribute to replace */
  buffer::list bl;
  bl.append("test_attrs_copy_value");
  rgw::sal::Attrs attrs{{"test_attrs_key_CopyObjectReplace", bl}};
  string user_data;
  rgw_zone_set zones_trace;
  bool canceled;
  /* materialize the destination object first */
  ASSERT_EQ(testWriterCopy->complete(accounted_size, etag,
				     &mtime, set_mtime,
				     attrs,
				     delete_at,
				     &if_match, &if_nomatch,
				     &user_data,
				     &zones_trace, &canceled,
				     null_yield), 0);
  unique_ptr<rgw::sal::Object> testObject_copy = testBucket->get_object(rgw_obj_key("test_object_copy"));
  EXPECT_EQ(testObject_CopyObjectReplace->copy_object(testUser.get(),
		   &info, source_zone, testObject_copy.get(),
		   testBucket.get(), testBucket.get(),
		   dest_placement, &src_mtime, &mtime,
		   &mod_ptr, &unmod_ptr, false,
		   &if_match, &if_nomatch, attrs_mod,
		   false, attrs, category, olh_epoch,
		   delete_at, NULL, &tag, &etag,
		   NULL, NULL, dpp, null_yield), 0);
  /* Ensure the original object is still in the cache */
  vector<string> keys;
  keys.push_back("rgw-object:test_object_CopyObjectReplace:cache");
  client.exists(keys, [](cpp_redis::reply& reply) {
    if (reply.is_integer()) {
      EXPECT_EQ(reply.as_integer(), 1);
    }
  });
  client.sync_commit();
  /* Check copy */
  client.hgetall("rgw-object:test_object_copy:cache", [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ((int)arr.size(), 4 + METADATA_LENGTH); /* With etag */
    }
  });
  client.sync_commit();
  fields.push_back("test_attrs_key_CopyObjectReplace");
  client.hmget("rgw-object:test_object_copy:cache", fields, [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ(arr[0].as_string(), "test_attrs_copy_value");
    }
  });
  client.sync_commit();
  clientReset(&client);
}
/* Copy with ATTRSMOD_MERGE: the source's existing attribute value must win
 * over the supplied "bad_value", and the brand-new extra attribute must be
 * added — so the destination cache entry carries both pairs. */
TEST_F(D4NFilterFixture, CopyObjectMerge) {
  cpp_redis::client client;
  vector<string> fields;
  clientSetUp(&client);
  createUser();
  createBucket();
  putObject("CopyObjectMerge");
  unique_ptr<rgw::sal::Object> testObject_CopyObjectMerge = testBucket->get_object(rgw_obj_key("test_object_CopyObjectMerge"));
  ASSERT_NE(testObject_CopyObjectMerge, nullptr);
  static rgw::sal::Object* nextObject = dynamic_cast<rgw::sal::FilterObject*>(testObject_CopyObjectMerge.get())->get_next();
  ASSERT_EQ(nextObject->get_obj_attrs(null_yield, dpp, NULL), 0);
  ASSERT_NE(nextObject->get_attrs().empty(), true);
  /* Copy to new object */
  unique_ptr<rgw::sal::Writer> testWriterCopy = nullptr;
  string object_name = "test_object_copy";
  unique_ptr<rgw::sal::Object> obj = testBucket->get_object(rgw_obj_key(object_name));
  rgw_user owner;
  rgw_placement_rule ptail_placement_rule;
  uint64_t olh_epoch_copy = 123;
  string unique_tag;
  obj->get_obj_attrs(null_yield, dpp);
  testWriterCopy = driver->get_atomic_writer(dpp,
					     null_yield,
					     obj.get(),
					     owner,
					     &ptail_placement_rule,
					     olh_epoch_copy,
					     unique_tag);
  RGWEnv rgw_env;
  size_t accounted_size = 4;
  req_info info(get_pointer(env->cct), &rgw_env);
  rgw_zone_id source_zone;
  rgw_placement_rule dest_placement;
  ceph::real_time src_mtime;
  ceph::real_time mtime;
  ceph::real_time set_mtime;
  ceph::real_time mod_ptr;
  ceph::real_time unmod_ptr;
  rgw::sal::AttrsMod attrs_mod = rgw::sal::ATTRSMOD_MERGE;
  char if_match;
  char if_nomatch;
  RGWObjCategory category = RGWObjCategory::Main;
  uint64_t olh_epoch = 0;
  ceph::real_time delete_at;
  string tag;
  string etag("test_etag_copy");
  buffer::list bl;
  bl.append("bad_value");
  rgw::sal::Attrs attrs{{"test_attrs_key_CopyObjectMerge", bl}}; /* Existing attr */
  bl.clear();
  bl.append("test_attrs_copy_extra_value");
  attrs.insert({"test_attrs_copy_extra_key", bl}); /* New attr */
  string user_data;
  rgw_zone_set zones_trace;
  bool canceled;
  /* materialize the destination object first */
  ASSERT_EQ(testWriterCopy->complete(accounted_size, etag,
				     &mtime, set_mtime,
				     attrs,
				     delete_at,
				     &if_match, &if_nomatch,
				     &user_data,
				     &zones_trace, &canceled,
				     null_yield), 0);
  unique_ptr<rgw::sal::Object> testObject_copy = testBucket->get_object(rgw_obj_key("test_object_copy"));
  EXPECT_EQ(testObject_CopyObjectMerge->copy_object(testUser.get(),
		   &info, source_zone, testObject_copy.get(),
		   testBucket.get(), testBucket.get(),
		   dest_placement, &src_mtime, &mtime,
		   &mod_ptr, &unmod_ptr, false,
		   &if_match, &if_nomatch, attrs_mod,
		   false, attrs, category, olh_epoch,
		   delete_at, NULL, &tag, &etag,
		   NULL, NULL, dpp, null_yield), 0);
  /* Ensure the original object is still in the cache */
  vector<string> keys;
  keys.push_back("rgw-object:test_object_CopyObjectMerge:cache");
  client.exists(keys, [](cpp_redis::reply& reply) {
    if (reply.is_integer()) {
      EXPECT_EQ(reply.as_integer(), 1);
    }
  });
  client.sync_commit();
  /* Check copy */
  client.hgetall("rgw-object:test_object_copy:cache", [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ((int)arr.size(), 6 + METADATA_LENGTH); /* With etag */
    }
  });
  client.sync_commit();
  fields.push_back("test_attrs_key_CopyObjectMerge");
  fields.push_back("test_attrs_copy_extra_key");
  client.hmget("rgw-object:test_object_copy:cache", fields, [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ(arr[0].as_string(), "test_attrs_value_CopyObjectMerge");
      EXPECT_EQ(arr[1].as_string(), "test_attrs_copy_extra_value");
    }
  });
  client.sync_commit();
  clientReset(&client);
}
/* Verify that deleting an object removes its entry from the cache. */
TEST_F(D4NFilterFixture, DelObject) {
  cpp_redis::client client;
  clientSetUp(&client);
  const vector<string> keys{"rgw-object:test_object_DelObject:cache"};
  ASSERT_EQ(createUser(), 0);
  ASSERT_NE(testUser, nullptr);
  ASSERT_EQ(createBucket(), 0);
  ASSERT_NE(testBucket, nullptr);
  ASSERT_EQ(putObject("DelObject"), 0);
  ASSERT_NE(testWriter, nullptr);
  /* The cache entry must exist before the delete op runs */
  client.exists(keys, [](cpp_redis::reply& r) {
    if (r.is_integer())
      EXPECT_EQ(r.as_integer(), 1);
  });
  client.sync_commit();
  unique_ptr<rgw::sal::Object> objToDelete = testBucket->get_object(rgw_obj_key("test_object_DelObject"));
  EXPECT_NE(objToDelete, nullptr);
  unique_ptr<rgw::sal::Object::DeleteOp> delOp = objToDelete->get_delete_op();
  EXPECT_NE(delOp, nullptr);
  EXPECT_EQ(delOp->delete_obj(dpp, null_yield), 0);
  /* ...and must be gone afterwards */
  client.exists(keys, [](cpp_redis::reply& r) {
    if (r.is_integer())
      EXPECT_EQ(r.as_integer(), 0); /* Zero keys exist */
  });
  client.sync_commit();
  clientReset(&client);
}
/* Attribute-related tests */
/* Set an extra attribute on an object and verify both the base attribute and
 * the new one land in the cache entry. */
TEST_F(D4NFilterFixture, SetObjectAttrs) {
  cpp_redis::client client;
  vector<string> fields;
  fields.push_back("test_attrs_key_SetObjectAttrs");
  clientSetUp(&client);
  /* Check setup return codes (consistent with DelObject) */
  ASSERT_EQ(createUser(), 0);
  ASSERT_EQ(createBucket(), 0);
  ASSERT_EQ(putObject("SetObjectAttrs"), 0);
  unique_ptr<rgw::sal::Object> testObject_SetObjectAttrs = testBucket->get_object(rgw_obj_key("test_object_SetObjectAttrs"));
  ASSERT_NE(testObject_SetObjectAttrs, nullptr);
  buffer::list bl;
  bl.append("test_attrs_value_extra");
  map<string, bufferlist> test_attrs{{"test_attrs_key_extra", bl}};
  fields.push_back("test_attrs_key_extra");
  EXPECT_EQ(testObject_SetObjectAttrs->set_obj_attrs(dpp, &test_attrs, NULL, null_yield), 0);
  /* Two attrs -> four hash fields (key + value each) plus metadata */
  client.hgetall("rgw-object:test_object_SetObjectAttrs:cache", [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ((int)arr.size(), 4 + METADATA_LENGTH);
    }
  });
  client.sync_commit();
  client.hmget("rgw-object:test_object_SetObjectAttrs:cache", fields, [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ(arr[0].as_string(), "test_attrs_value_SetObjectAttrs");
      EXPECT_EQ(arr[1].as_string(), "test_attrs_value_extra");
    }
  });
  client.sync_commit();
  clientReset(&client);
}
/* Set an extra attribute, fetch attrs through the filter, and verify the
 * cache entry reflects both attributes. */
TEST_F(D4NFilterFixture, GetObjectAttrs) {
  cpp_redis::client client;
  vector<string> fields;
  fields.push_back("test_attrs_key_GetObjectAttrs");
  clientSetUp(&client);
  /* Check setup return codes (consistent with DelObject) */
  ASSERT_EQ(createUser(), 0);
  ASSERT_EQ(createBucket(), 0);
  ASSERT_EQ(putObject("GetObjectAttrs"), 0);
  unique_ptr<rgw::sal::Object> testObject_GetObjectAttrs = testBucket->get_object(rgw_obj_key("test_object_GetObjectAttrs"));
  ASSERT_NE(testObject_GetObjectAttrs, nullptr);
  buffer::list bl;
  bl.append("test_attrs_value_extra");
  map<string, bufferlist> test_attrs{{"test_attrs_key_extra", bl}};
  fields.push_back("test_attrs_key_extra");
  /* Non-static: a static pointer would dangle on repeated runs (--gtest_repeat) */
  rgw::sal::Object* nextObject = dynamic_cast<rgw::sal::FilterObject*>(testObject_GetObjectAttrs.get())->get_next();
  ASSERT_EQ(testObject_GetObjectAttrs->set_obj_attrs(dpp, &test_attrs, NULL, null_yield), 0);
  ASSERT_EQ(nextObject->get_obj_attrs(null_yield, dpp, NULL), 0);
  ASSERT_NE(nextObject->get_attrs().empty(), true);
  EXPECT_EQ(testObject_GetObjectAttrs->get_obj_attrs(null_yield, dpp, NULL), 0);
  /* Two attrs -> four hash fields (key + value each) plus metadata */
  client.hgetall("rgw-object:test_object_GetObjectAttrs:cache", [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ((int)arr.size(), 4 + METADATA_LENGTH);
    }
  });
  client.sync_commit();
  client.hmget("rgw-object:test_object_GetObjectAttrs:cache", fields, [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ(arr[0].as_string(), "test_attrs_value_GetObjectAttrs");
      EXPECT_EQ(arr[1].as_string(), "test_attrs_value_extra");
    }
  });
  client.sync_commit();
  clientReset(&client);
}
/* Delete an attribute via set_obj_attrs' delete map and verify the cache
 * entry shrinks and no longer contains the deleted key. */
TEST_F(D4NFilterFixture, DelObjectAttrs) {
  cpp_redis::client client;
  clientSetUp(&client);
  /* Check setup return codes (consistent with DelObject) */
  ASSERT_EQ(createUser(), 0);
  ASSERT_EQ(createBucket(), 0);
  ASSERT_EQ(putObject("DelObjectAttrs"), 0);
  unique_ptr<rgw::sal::Object> testObject_DelObjectAttrs = testBucket->get_object(rgw_obj_key("test_object_DelObjectAttrs"));
  ASSERT_NE(testObject_DelObjectAttrs, nullptr);
  buffer::list bl;
  bl.append("test_attrs_value_extra");
  map<string, bufferlist> test_attrs{{"test_attrs_key_extra", bl}};
  /* Non-static: a static pointer would dangle on repeated runs (--gtest_repeat) */
  rgw::sal::Object* nextObject = dynamic_cast<rgw::sal::FilterObject*>(testObject_DelObjectAttrs.get())->get_next();
  ASSERT_EQ(testObject_DelObjectAttrs->set_obj_attrs(dpp, &test_attrs, NULL, null_yield), 0);
  ASSERT_EQ(nextObject->get_obj_attrs(null_yield, dpp, NULL), 0);
  ASSERT_NE(nextObject->get_attrs().empty(), true);
  /* Check that the attributes exist before deletion */
  client.hgetall("rgw-object:test_object_DelObjectAttrs:cache", [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ((int)arr.size(), 4 + METADATA_LENGTH);
    }
  });
  client.sync_commit();
  /* Passing the map as the delete argument removes those attrs */
  EXPECT_EQ(testObject_DelObjectAttrs->set_obj_attrs(dpp, NULL, &test_attrs, null_yield), 0);
  /* Check that the attribute does not exist after deletion */
  client.hgetall("rgw-object:test_object_DelObjectAttrs:cache", [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ((int)arr.size(), 2 + METADATA_LENGTH);
    }
  });
  client.sync_commit();
  client.hexists("rgw-object:test_object_DelObjectAttrs:cache", "test_attrs_key_extra", [](cpp_redis::reply& reply) {
    if (reply.is_integer()) {
      EXPECT_EQ(reply.as_integer(), 0);
    }
  });
  client.sync_commit();
  clientReset(&client);
}
/* Set ten extra attributes in one call and verify all of them, plus the base
 * attribute, appear in the cache entry. */
TEST_F(D4NFilterFixture, SetLongObjectAttrs) {
  cpp_redis::client client;
  map<string, bufferlist> test_attrs_long;
  vector<string> fields;
  fields.push_back("test_attrs_key_SetLongObjectAttrs");
  clientSetUp(&client);
  /* Check setup return codes (consistent with DelObject) */
  ASSERT_EQ(createUser(), 0);
  ASSERT_EQ(createBucket(), 0);
  ASSERT_EQ(putObject("SetLongObjectAttrs"), 0);
  unique_ptr<rgw::sal::Object> testObject_SetLongObjectAttrs = testBucket->get_object(rgw_obj_key("test_object_SetLongObjectAttrs"));
  ASSERT_NE(testObject_SetLongObjectAttrs, nullptr);
  for (int i = 0; i < 10; ++i) {
    buffer::list bl_tmp;
    string tmp_value = "test_attrs_value_extra_" + to_string(i);
    bl_tmp.append(tmp_value.data(), strlen(tmp_value.data()));
    string tmp_key = "test_attrs_key_extra_" + to_string(i);
    test_attrs_long.insert({tmp_key, bl_tmp});
    fields.push_back(tmp_key);
  }
  EXPECT_EQ(testObject_SetLongObjectAttrs->set_obj_attrs(dpp, &test_attrs_long, NULL, null_yield), 0);
  /* Eleven attrs -> 22 hash fields (key + value each) plus metadata */
  client.hgetall("rgw-object:test_object_SetLongObjectAttrs:cache", [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ((int)arr.size(), 22 + METADATA_LENGTH);
    }
  });
  client.sync_commit();
  client.hmget("rgw-object:test_object_SetLongObjectAttrs:cache", fields, [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ(arr[0].as_string(), "test_attrs_value_SetLongObjectAttrs");
      for (int i = 1; i < 11; ++i) {
        EXPECT_EQ(arr[i].as_string(), "test_attrs_value_extra_" + to_string(i - 1));
      }
    }
  });
  client.sync_commit();
  clientReset(&client);
}
/* Set ten extra attributes, read them back through the filter, and verify
 * the cache holds all eleven attributes with the expected values. */
TEST_F(D4NFilterFixture, GetLongObjectAttrs) {
  cpp_redis::client client;
  map<string, bufferlist> test_attrs_long;
  vector<string> fields;
  fields.push_back("test_attrs_key_GetLongObjectAttrs");
  clientSetUp(&client);
  /* Check setup return codes (consistent with DelObject) */
  ASSERT_EQ(createUser(), 0);
  ASSERT_EQ(createBucket(), 0);
  ASSERT_EQ(putObject("GetLongObjectAttrs"), 0);
  unique_ptr<rgw::sal::Object> testObject_GetLongObjectAttrs = testBucket->get_object(rgw_obj_key("test_object_GetLongObjectAttrs"));
  ASSERT_NE(testObject_GetLongObjectAttrs, nullptr);
  for (int i = 0; i < 10; ++i) {
    buffer::list bl_tmp;
    string tmp_value = "test_attrs_value_extra_" + to_string(i);
    bl_tmp.append(tmp_value.data(), strlen(tmp_value.data()));
    string tmp_key = "test_attrs_key_extra_" + to_string(i);
    test_attrs_long.insert({tmp_key, bl_tmp});
    fields.push_back(tmp_key);
  }
  /* Non-static: a static pointer would dangle on repeated runs (--gtest_repeat) */
  rgw::sal::Object* nextObject = dynamic_cast<rgw::sal::FilterObject*>(testObject_GetLongObjectAttrs.get())->get_next();
  ASSERT_EQ(testObject_GetLongObjectAttrs->set_obj_attrs(dpp, &test_attrs_long, NULL, null_yield), 0);
  ASSERT_EQ(nextObject->get_obj_attrs(null_yield, dpp, NULL), 0);
  ASSERT_NE(nextObject->get_attrs().empty(), true);
  EXPECT_EQ(testObject_GetLongObjectAttrs->get_obj_attrs(null_yield, dpp, NULL), 0);
  /* Eleven attrs -> 22 hash fields (key + value each) plus metadata */
  client.hgetall("rgw-object:test_object_GetLongObjectAttrs:cache", [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ((int)arr.size(), 22 + METADATA_LENGTH);
    }
  });
  client.sync_commit();
  client.hmget("rgw-object:test_object_GetLongObjectAttrs:cache", fields, [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ(arr[0].as_string(), "test_attrs_value_GetLongObjectAttrs");
      for (int i = 1; i < 11; ++i) {
        EXPECT_EQ(arr[i].as_string(), "test_attrs_value_extra_" + to_string(i - 1));
      }
    }
  });
  client.sync_commit();
  clientReset(&client);
}
/* Modify one of ten extra attributes and verify only that attribute's value
 * changed in the cache while the total attribute count stays the same. */
TEST_F(D4NFilterFixture, ModifyObjectAttr) {
  cpp_redis::client client;
  map<string, bufferlist> test_attrs_long;
  vector<string> fields;
  fields.push_back("test_attrs_key_ModifyObjectAttr");
  clientSetUp(&client);
  /* Check setup return codes (consistent with DelObject) */
  ASSERT_EQ(createUser(), 0);
  ASSERT_EQ(createBucket(), 0);
  ASSERT_EQ(putObject("ModifyObjectAttr"), 0);
  unique_ptr<rgw::sal::Object> testObject_ModifyObjectAttr = testBucket->get_object(rgw_obj_key("test_object_ModifyObjectAttr"));
  ASSERT_NE(testObject_ModifyObjectAttr, nullptr);
  for (int i = 0; i < 10; ++i) {
    buffer::list bl_tmp;
    string tmp_value = "test_attrs_value_extra_" + to_string(i);
    bl_tmp.append(tmp_value.data(), strlen(tmp_value.data()));
    string tmp_key = "test_attrs_key_extra_" + to_string(i);
    test_attrs_long.insert({tmp_key, bl_tmp});
    fields.push_back(tmp_key);
  }
  /* Non-static: a static pointer would dangle on repeated runs (--gtest_repeat) */
  rgw::sal::Object* nextObject = dynamic_cast<rgw::sal::FilterObject*>(testObject_ModifyObjectAttr.get())->get_next();
  ASSERT_EQ(testObject_ModifyObjectAttr->set_obj_attrs(dpp, &test_attrs_long, NULL, null_yield), 0);
  ASSERT_EQ(nextObject->get_obj_attrs(null_yield, dpp, NULL), 0);
  ASSERT_NE(nextObject->get_attrs().empty(), true);
  buffer::list bl_tmp;
  string tmp_value = "new_test_attrs_value_extra_5";
  bl_tmp.append(tmp_value.data(), strlen(tmp_value.data()));
  EXPECT_EQ(testObject_ModifyObjectAttr->modify_obj_attrs("test_attrs_key_extra_5", bl_tmp, null_yield, dpp), 0);
  /* Count unchanged: eleven attrs -> 22 hash fields plus metadata */
  client.hgetall("rgw-object:test_object_ModifyObjectAttr:cache", [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ((int)arr.size(), 22 + METADATA_LENGTH);
    }
  });
  client.sync_commit();
  client.hmget("rgw-object:test_object_ModifyObjectAttr:cache", fields, [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ(arr[0].as_string(), "test_attrs_value_ModifyObjectAttr");
      for (int i = 1; i < 11; ++i) {
        /* arr[6] corresponds to key "..._extra_5" (base attr occupies arr[0]) */
        if (i == 6) {
          EXPECT_EQ(arr[i].as_string(), "new_test_attrs_value_extra_" + to_string(i - 1));
        } else {
          EXPECT_EQ(arr[i].as_string(), "test_attrs_value_extra_" + to_string(i - 1));
        }
      }
    }
  });
  client.sync_commit();
  clientReset(&client);
}
/* Delete all ten extra attributes in one call and verify the cache entry
 * shrinks back to the base attribute with no "extra" keys remaining. */
TEST_F(D4NFilterFixture, DelLongObjectAttrs) {
  cpp_redis::client client;
  map<string, bufferlist> test_attrs_long;
  vector<string> fields;
  fields.push_back("test_attrs_key_DelLongObjectAttrs");
  clientSetUp(&client);
  /* Check setup return codes (consistent with DelObject) */
  ASSERT_EQ(createUser(), 0);
  ASSERT_EQ(createBucket(), 0);
  ASSERT_EQ(putObject("DelLongObjectAttrs"), 0);
  unique_ptr<rgw::sal::Object> testObject_DelLongObjectAttrs = testBucket->get_object(rgw_obj_key("test_object_DelLongObjectAttrs"));
  ASSERT_NE(testObject_DelLongObjectAttrs, nullptr);
  for (int i = 0; i < 10; ++i) {
    buffer::list bl_tmp;
    string tmp_value = "test_attrs_value_extra_" + to_string(i);
    bl_tmp.append(tmp_value.data(), strlen(tmp_value.data()));
    string tmp_key = "test_attrs_key_extra_" + to_string(i);
    test_attrs_long.insert({tmp_key, bl_tmp});
    fields.push_back(tmp_key);
  }
  /* Non-static: a static pointer would dangle on repeated runs (--gtest_repeat) */
  rgw::sal::Object* nextObject = dynamic_cast<rgw::sal::FilterObject*>(testObject_DelLongObjectAttrs.get())->get_next();
  ASSERT_EQ(testObject_DelLongObjectAttrs->set_obj_attrs(dpp, &test_attrs_long, NULL, null_yield), 0);
  ASSERT_EQ(nextObject->get_obj_attrs(null_yield, dpp, NULL), 0);
  ASSERT_NE(nextObject->get_attrs().empty(), true);
  /* Check that the attributes exist before deletion */
  client.hgetall("rgw-object:test_object_DelLongObjectAttrs:cache", [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ((int)arr.size(), 22 + METADATA_LENGTH);
    }
  });
  client.sync_commit();
  /* Passing the map as the delete argument removes those attrs */
  EXPECT_EQ(testObject_DelLongObjectAttrs->set_obj_attrs(dpp, NULL, &test_attrs_long, null_yield), 0);
  /* Check that the attributes do not exist after deletion */
  client.hgetall("rgw-object:test_object_DelLongObjectAttrs:cache", [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ((int)arr.size(), 2 + METADATA_LENGTH);
      for (int i = 0; i < (int)arr.size(); ++i) {
        EXPECT_EQ((int)arr[i].as_string().find("extra"), -1);
      }
    }
  });
  client.sync_commit();
  clientReset(&client);
}
/* Delete a single attribute via delete_obj_attrs and verify the cache entry
 * shrinks by one attribute and the deleted key no longer exists. */
TEST_F(D4NFilterFixture, DelObjectAttr) {
  cpp_redis::client client;
  map<string, bufferlist> test_attrs_long;
  vector<string> fields;
  fields.push_back("test_attrs_key_DelObjectAttr");
  clientSetUp(&client);
  /* Check setup return codes (consistent with DelObject) */
  ASSERT_EQ(createUser(), 0);
  ASSERT_EQ(createBucket(), 0);
  ASSERT_EQ(putObject("DelObjectAttr"), 0);
  unique_ptr<rgw::sal::Object> testObject_DelObjectAttr = testBucket->get_object(rgw_obj_key("test_object_DelObjectAttr"));
  ASSERT_NE(testObject_DelObjectAttr, nullptr);
  for (int i = 0; i < 10; ++i) {
    buffer::list bl_tmp;
    string tmp_value = "test_attrs_value_extra_" + to_string(i);
    bl_tmp.append(tmp_value.data(), strlen(tmp_value.data()));
    string tmp_key = "test_attrs_key_extra_" + to_string(i);
    test_attrs_long.insert({tmp_key, bl_tmp});
    fields.push_back(tmp_key);
  }
  /* Non-static: a static pointer would dangle on repeated runs (--gtest_repeat) */
  rgw::sal::Object* nextObject = dynamic_cast<rgw::sal::FilterObject*>(testObject_DelObjectAttr.get())->get_next();
  ASSERT_EQ(testObject_DelObjectAttr->set_obj_attrs(dpp, &test_attrs_long, NULL, null_yield), 0);
  ASSERT_EQ(nextObject->get_obj_attrs(null_yield, dpp, NULL), 0);
  ASSERT_NE(nextObject->get_attrs().empty(), true);
  /* Check that the attribute exists before deletion */
  client.hgetall("rgw-object:test_object_DelObjectAttr:cache", [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ((int)arr.size(), 22 + METADATA_LENGTH);
    }
  });
  client.sync_commit();
  EXPECT_EQ(testObject_DelObjectAttr->delete_obj_attrs(dpp, "test_attrs_key_extra_5", null_yield), 0);
  /* Check that the attribute does not exist after deletion */
  client.hgetall("rgw-object:test_object_DelObjectAttr:cache", [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ((int)arr.size(), 20 + METADATA_LENGTH);
    }
  });
  client.sync_commit();
  client.hexists("rgw-object:test_object_DelObjectAttr:cache", "test_attrs_key_extra_5", [](cpp_redis::reply& reply) {
    if (reply.is_integer()) {
      EXPECT_EQ(reply.as_integer(), 0);
    }
  });
  client.sync_commit();
  clientReset(&client);
}
/* Edge cases */
/* Prepare a read op, then copy the object onto itself with ATTRSMOD_NONE,
 * and verify the cache entry still holds only the base attribute. */
TEST_F(D4NFilterFixture, PrepareCopyObject) {
  cpp_redis::client client;
  vector<string> fields;
  fields.push_back("test_attrs_key_PrepareCopyObject");
  clientSetUp(&client);
  /* Check setup return codes (consistent with DelObject) */
  ASSERT_EQ(createUser(), 0);
  ASSERT_EQ(createBucket(), 0);
  ASSERT_EQ(putObject("PrepareCopyObject"), 0);
  unique_ptr<rgw::sal::Object> testObject_PrepareCopyObject = testBucket->get_object(rgw_obj_key("test_object_PrepareCopyObject"));
  ASSERT_NE(testObject_PrepareCopyObject, nullptr);
  /* Non-static: a static pointer would dangle on repeated runs (--gtest_repeat) */
  rgw::sal::Object* nextObject = dynamic_cast<rgw::sal::FilterObject*>(testObject_PrepareCopyObject.get())->get_next();
  ASSERT_EQ(nextObject->get_obj_attrs(null_yield, dpp, NULL), 0);
  ASSERT_NE(nextObject->get_attrs().empty(), true);
  unique_ptr<rgw::sal::Object::ReadOp> testROp = testObject_PrepareCopyObject->get_read_op();
  ASSERT_NE(testROp, nullptr);
  ASSERT_EQ(testROp->prepare(null_yield, dpp), 0);
  /* Update object */
  RGWEnv rgw_env;
  req_info info(get_pointer(env->cct), &rgw_env);
  rgw_zone_id source_zone;
  rgw_placement_rule dest_placement;
  ceph::real_time src_mtime;
  ceph::real_time mtime;
  ceph::real_time mod_ptr;
  ceph::real_time unmod_ptr;
  /* Zero-initialize so the addresses below form valid empty C strings
     (previously these were uninitialized, which is UB when read) */
  char if_match = '\0';
  char if_nomatch = '\0';
  rgw::sal::AttrsMod attrs_mod = rgw::sal::ATTRSMOD_NONE;
  rgw::sal::Attrs attrs;
  RGWObjCategory category = RGWObjCategory::Main;
  uint64_t olh_epoch = 0;
  ceph::real_time delete_at;
  string tag;
  string etag;
  /* Source and destination are the same object */
  EXPECT_EQ(testObject_PrepareCopyObject->copy_object(testUser.get(),
                &info, source_zone, testObject_PrepareCopyObject.get(),
                testBucket.get(), testBucket.get(),
                dest_placement, &src_mtime, &mtime,
                &mod_ptr, &unmod_ptr, false,
                &if_match, &if_nomatch, attrs_mod,
                false, attrs, category, olh_epoch,
                delete_at, NULL, &tag, &etag,
                NULL, NULL, dpp, null_yield), 0);
  client.hgetall("rgw-object:test_object_PrepareCopyObject:cache", [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ((int)arr.size(), 2 + METADATA_LENGTH);
    }
  });
  client.sync_commit();
  client.hmget("rgw-object:test_object_PrepareCopyObject:cache", fields, [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ(arr[0].as_string(), "test_attrs_value_PrepareCopyObject");
    }
  });
  client.sync_commit();
  clientReset(&client);
}
/* Set one new attribute and delete ten existing ones in the same
 * set_obj_attrs call; verify the cache ends up with exactly the base
 * attribute plus the new one. */
TEST_F(D4NFilterFixture, SetDelAttrs) {
  cpp_redis::client client;
  map<string, bufferlist> test_attrs_base;
  vector<string> fields;
  fields.push_back("test_attrs_key_SetDelAttrs");
  clientSetUp(&client);
  /* Check setup return codes (consistent with DelObject) */
  ASSERT_EQ(createUser(), 0);
  ASSERT_EQ(createBucket(), 0);
  ASSERT_EQ(putObject("SetDelAttrs"), 0);
  unique_ptr<rgw::sal::Object> testObject_SetDelAttrs = testBucket->get_object(rgw_obj_key("test_object_SetDelAttrs"));
  ASSERT_NE(testObject_SetDelAttrs, nullptr);
  for (int i = 0; i < 10; ++i) {
    buffer::list bl_tmp;
    string tmp_value = "test_attrs_value_extra_" + to_string(i);
    bl_tmp.append(tmp_value.data(), strlen(tmp_value.data()));
    string tmp_key = "test_attrs_key_extra_" + to_string(i);
    test_attrs_base.insert({tmp_key, bl_tmp});
  }
  /* Non-static: a static pointer would dangle on repeated runs (--gtest_repeat) */
  rgw::sal::Object* nextObject = dynamic_cast<rgw::sal::FilterObject*>(testObject_SetDelAttrs.get())->get_next();
  ASSERT_EQ(testObject_SetDelAttrs->set_obj_attrs(dpp, &test_attrs_base, NULL, null_yield), 0);
  ASSERT_EQ(nextObject->get_obj_attrs(null_yield, dpp, NULL), 0);
  ASSERT_NE(nextObject->get_attrs().empty(), true);
  /* Attempt to set and delete attrs with the same API call */
  buffer::list bl;
  bl.append("test_attrs_value_extra");
  map<string, bufferlist> test_attrs_new{{"test_attrs_key_extra", bl}};
  fields.push_back("test_attrs_key_extra");
  EXPECT_EQ(testObject_SetDelAttrs->set_obj_attrs(dpp, &test_attrs_new, &test_attrs_base, null_yield), 0);
  /* Two attrs remain -> four hash fields plus metadata */
  client.hgetall("rgw-object:test_object_SetDelAttrs:cache", [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ((int)arr.size(), 4 + METADATA_LENGTH);
    }
  });
  client.sync_commit();
  client.hmget("rgw-object:test_object_SetDelAttrs:cache", fields, [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ(arr[0].as_string(), "test_attrs_value_SetDelAttrs");
      EXPECT_EQ(arr[1].as_string(), "test_attrs_value_extra");
    }
  });
  client.sync_commit();
  clientReset(&client);
}
/* Call modify_obj_attrs on a key that does not exist and verify it is
 * created and stored alongside the existing attributes. */
TEST_F(D4NFilterFixture, ModifyNonexistentAttr) {
  cpp_redis::client client;
  map<string, bufferlist> test_attrs_base;
  vector<string> fields;
  fields.push_back("test_attrs_key_ModifyNonexistentAttr");
  clientSetUp(&client);
  /* Check setup return codes (consistent with DelObject) */
  ASSERT_EQ(createUser(), 0);
  ASSERT_EQ(createBucket(), 0);
  ASSERT_EQ(putObject("ModifyNonexistentAttr"), 0);
  unique_ptr<rgw::sal::Object> testObject_ModifyNonexistentAttr = testBucket->get_object(rgw_obj_key("test_object_ModifyNonexistentAttr"));
  ASSERT_NE(testObject_ModifyNonexistentAttr, nullptr);
  for (int i = 0; i < 10; ++i) {
    buffer::list bl_tmp;
    string tmp_value = "test_attrs_value_extra_" + to_string(i);
    bl_tmp.append(tmp_value.data(), strlen(tmp_value.data()));
    string tmp_key = "test_attrs_key_extra_" + to_string(i);
    test_attrs_base.insert({tmp_key, bl_tmp});
    fields.push_back(tmp_key);
  }
  /* Non-static: a static pointer would dangle on repeated runs (--gtest_repeat) */
  rgw::sal::Object* nextObject = dynamic_cast<rgw::sal::FilterObject*>(testObject_ModifyNonexistentAttr.get())->get_next();
  ASSERT_EQ(testObject_ModifyNonexistentAttr->set_obj_attrs(dpp, &test_attrs_base, NULL, null_yield), 0);
  ASSERT_EQ(nextObject->get_obj_attrs(null_yield, dpp, NULL), 0);
  ASSERT_NE(nextObject->get_attrs().empty(), true);
  buffer::list bl_tmp;
  bl_tmp.append("new_test_attrs_value_extra_ModifyNonexistentAttr");
  EXPECT_EQ(testObject_ModifyNonexistentAttr->modify_obj_attrs("test_attrs_key_extra_ModifyNonexistentAttr", bl_tmp, null_yield, dpp), 0);
  fields.push_back("test_attrs_key_extra_ModifyNonexistentAttr");
  /* Twelve attrs -> 24 hash fields (key + value each) plus metadata */
  client.hgetall("rgw-object:test_object_ModifyNonexistentAttr:cache", [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ((int)arr.size(), 24 + METADATA_LENGTH);
    }
  });
  client.sync_commit();
  client.hmget("rgw-object:test_object_ModifyNonexistentAttr:cache", fields, [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ(arr[0].as_string(), "test_attrs_value_ModifyNonexistentAttr");
      for (int i = 1; i < 11; ++i) {
        EXPECT_EQ(arr[i].as_string(), "test_attrs_value_extra_" + to_string(i - 1));
      }
      /* New attribute will be created and stored since it was not found in the existing attributes */
      EXPECT_EQ(arr[11].as_string(), "new_test_attrs_value_extra_ModifyNonexistentAttr");
    }
  });
  client.sync_commit();
  clientReset(&client);
}
/* Fetch attributes immediately after a modification and verify the cache
 * reflects the new value for the modified key only. */
TEST_F(D4NFilterFixture, ModifyGetAttrs) {
  cpp_redis::client client;
  map<string, bufferlist> test_attrs_base;
  vector<string> fields;
  fields.push_back("test_attrs_key_ModifyGetAttrs");
  clientSetUp(&client);
  /* Check setup return codes (consistent with DelObject) */
  ASSERT_EQ(createUser(), 0);
  ASSERT_EQ(createBucket(), 0);
  ASSERT_EQ(putObject("ModifyGetAttrs"), 0);
  unique_ptr<rgw::sal::Object> testObject_ModifyGetAttrs = testBucket->get_object(rgw_obj_key("test_object_ModifyGetAttrs"));
  ASSERT_NE(testObject_ModifyGetAttrs, nullptr);
  for (int i = 0; i < 10; ++i) {
    buffer::list bl_tmp;
    string tmp_value = "test_attrs_value_extra_" + to_string(i);
    bl_tmp.append(tmp_value.data(), strlen(tmp_value.data()));
    string tmp_key = "test_attrs_key_extra_" + to_string(i);
    test_attrs_base.insert({tmp_key, bl_tmp});
    fields.push_back(tmp_key);
  }
  /* Non-static: a static pointer would dangle on repeated runs (--gtest_repeat) */
  rgw::sal::Object* nextObject = dynamic_cast<rgw::sal::FilterObject*>(testObject_ModifyGetAttrs.get())->get_next();
  ASSERT_EQ(testObject_ModifyGetAttrs->set_obj_attrs(dpp, &test_attrs_base, NULL, null_yield), 0);
  ASSERT_EQ(nextObject->get_obj_attrs(null_yield, dpp, NULL), 0);
  ASSERT_NE(nextObject->get_attrs().empty(), true);
  /* Attempt to get immediately after a modification */
  buffer::list bl_tmp;
  bl_tmp.append("new_test_attrs_value_extra_5");
  ASSERT_EQ(testObject_ModifyGetAttrs->modify_obj_attrs("test_attrs_key_extra_5", bl_tmp, null_yield, dpp), 0);
  ASSERT_EQ(nextObject->get_obj_attrs(null_yield, dpp, NULL), 0);
  EXPECT_EQ(testObject_ModifyGetAttrs->get_obj_attrs(null_yield, dpp, NULL), 0);
  /* Count unchanged: eleven attrs -> 22 hash fields plus metadata */
  client.hgetall("rgw-object:test_object_ModifyGetAttrs:cache", [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ((int)arr.size(), 22 + METADATA_LENGTH);
    }
  });
  client.sync_commit();
  client.hmget("rgw-object:test_object_ModifyGetAttrs:cache", fields, [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ(arr[0].as_string(), "test_attrs_value_ModifyGetAttrs");
      for (int i = 1; i < 11; ++i) {
        /* arr[6] corresponds to key "..._extra_5" (base attr occupies arr[0]) */
        if (i == 6) {
          EXPECT_EQ(arr[i].as_string(), "new_test_attrs_value_extra_5");
        } else {
          EXPECT_EQ(arr[i].as_string(), "test_attrs_value_extra_" + to_string(i - 1));
        }
      }
    }
  });
  client.sync_commit();
  clientReset(&client);
}
/* Delete an attribute that does not exist and verify the operation succeeds
 * without disturbing the existing attributes. */
TEST_F(D4NFilterFixture, DelNonexistentAttr) {
  cpp_redis::client client;
  map<string, bufferlist> test_attrs_base;
  vector<string> fields;
  fields.push_back("test_attrs_key_DelNonexistentAttr");
  clientSetUp(&client);
  /* Check setup return codes (consistent with DelObject) */
  ASSERT_EQ(createUser(), 0);
  ASSERT_EQ(createBucket(), 0);
  ASSERT_EQ(putObject("DelNonexistentAttr"), 0);
  unique_ptr<rgw::sal::Object> testObject_DelNonexistentAttr = testBucket->get_object(rgw_obj_key("test_object_DelNonexistentAttr"));
  ASSERT_NE(testObject_DelNonexistentAttr, nullptr);
  for (int i = 0; i < 10; ++i) {
    buffer::list bl_tmp;
    string tmp_value = "test_attrs_value_extra_" + to_string(i);
    bl_tmp.append(tmp_value.data(), strlen(tmp_value.data()));
    string tmp_key = "test_attrs_key_extra_" + to_string(i);
    test_attrs_base.insert({tmp_key, bl_tmp});
    fields.push_back(tmp_key);
  }
  /* Non-static: a static pointer would dangle on repeated runs (--gtest_repeat) */
  rgw::sal::Object* nextObject = dynamic_cast<rgw::sal::FilterObject*>(testObject_DelNonexistentAttr.get())->get_next();
  ASSERT_EQ(testObject_DelNonexistentAttr->set_obj_attrs(dpp, &test_attrs_base, NULL, null_yield), 0);
  ASSERT_EQ(nextObject->get_obj_attrs(null_yield, dpp, NULL), 0);
  ASSERT_NE(nextObject->get_attrs().empty(), true);
  /* Attempt to delete an attribute that does not exist */
  ASSERT_EQ(testObject_DelNonexistentAttr->delete_obj_attrs(dpp, "test_attrs_key_extra_12", null_yield), 0);
  /* Count unchanged: eleven attrs -> 22 hash fields plus metadata */
  client.hgetall("rgw-object:test_object_DelNonexistentAttr:cache", [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ((int)arr.size(), 22 + METADATA_LENGTH);
    }
  });
  client.sync_commit();
  client.hmget("rgw-object:test_object_DelNonexistentAttr:cache", fields, [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ(arr[0].as_string(), "test_attrs_value_DelNonexistentAttr");
      for (int i = 1; i < 11; ++i) {
        EXPECT_EQ(arr[i].as_string(), "test_attrs_value_extra_" + to_string(i - 1));
      }
    }
  });
  client.sync_commit();
  clientReset(&client);
}
/* Delete a set of attributes that includes one already-deleted (nonexistent)
 * key and verify the bulk delete still succeeds, leaving only the base attr.
 * NOTE: the registered test name intentionally keeps its historical typo
 * ("Nonexisent") so existing --gtest_filter patterns keep matching. */
TEST_F(D4NFilterFixture, DelSetWithNonexisentAttr) {
  cpp_redis::client client;
  map<string, bufferlist> test_attrs_base;
  vector<string> fields;
  fields.push_back("test_attrs_key_DelSetWithNonexistentAttr");
  clientSetUp(&client);
  /* Check setup return codes (consistent with DelObject) */
  ASSERT_EQ(createUser(), 0);
  ASSERT_EQ(createBucket(), 0);
  ASSERT_EQ(putObject("DelSetWithNonexistentAttr"), 0);
  unique_ptr<rgw::sal::Object> testObject_DelSetWithNonexistentAttr = testBucket->get_object(rgw_obj_key("test_object_DelSetWithNonexistentAttr"));
  ASSERT_NE(testObject_DelSetWithNonexistentAttr, nullptr);
  for (int i = 0; i < 10; ++i) {
    buffer::list bl_tmp;
    string tmp_value = "test_attrs_value_extra_" + to_string(i);
    bl_tmp.append(tmp_value.data(), strlen(tmp_value.data()));
    string tmp_key = "test_attrs_key_extra_" + to_string(i);
    test_attrs_base.insert({tmp_key, bl_tmp});
  }
  ASSERT_EQ(testObject_DelSetWithNonexistentAttr->set_obj_attrs(dpp, &test_attrs_base, NULL, null_yield), 0);
  /* Non-static: a static pointer would dangle on repeated runs (--gtest_repeat) */
  rgw::sal::Object* nextObject = dynamic_cast<rgw::sal::FilterObject*>(testObject_DelSetWithNonexistentAttr.get())->get_next();
  ASSERT_EQ(nextObject->get_obj_attrs(null_yield, dpp, NULL), 0);
  ASSERT_NE(nextObject->get_attrs().empty(), true);
  /* Remove one attr first so the bulk delete below contains a missing key */
  EXPECT_EQ(testObject_DelSetWithNonexistentAttr->delete_obj_attrs(dpp, "test_attrs_key_extra_5", null_yield), 0);
  /* Attempt to delete a set of attrs, including one that does not exist */
  EXPECT_EQ(testObject_DelSetWithNonexistentAttr->set_obj_attrs(dpp, NULL, &test_attrs_base, null_yield), 0);
  client.hgetall("rgw-object:test_object_DelSetWithNonexistentAttr:cache", [](cpp_redis::reply& reply) {
    auto arr = reply.as_array();
    if (!arr[0].is_null()) {
      EXPECT_EQ((int)arr.size(), 2 + METADATA_LENGTH);
    }
  });
  client.sync_commit();
  clientReset(&client);
}
/* Underlying store attribute check */
/* Verify the base attribute written by putObject is visible on the
 * underlying store layer (the filter's next object). */
TEST_F(D4NFilterFixture, StoreSetAttr) {
  /* Check setup return codes (consistent with DelObject) */
  ASSERT_EQ(createUser(), 0);
  ASSERT_EQ(createBucket(), 0);
  ASSERT_EQ(putObject("StoreSetAttr"), 0);
  unique_ptr<rgw::sal::Object> testObject_StoreSetAttr = testBucket->get_object(rgw_obj_key("test_object_StoreSetAttr"));
  /* Fail cleanly instead of dereferencing a null pointer below */
  ASSERT_NE(testObject_StoreSetAttr, nullptr);
  /* Get the underlying store */
  /* Non-static: a static pointer would dangle on repeated runs (--gtest_repeat) */
  rgw::sal::Object* nextObject = dynamic_cast<rgw::sal::FilterObject*>(testObject_StoreSetAttr.get())->get_next();
  ASSERT_NE(nextObject, nullptr);
  /* Set one attribute */
  ASSERT_EQ(nextObject->get_obj_attrs(null_yield, dpp, NULL), 0);
  ASSERT_NE(nextObject->get_attrs().empty(), true);
  /* Check the attribute */
  rgw::sal::Attrs driverAttrs = nextObject->get_attrs();
  pair<string, string> value(driverAttrs.begin()->first, driverAttrs.begin()->second.to_str());
  EXPECT_EQ(value, make_pair(string("test_attrs_key_StoreSetAttr"), string("test_attrs_value_StoreSetAttr")));
}
/* Set ten attributes through the filter and verify all of them, in key
 * order, are visible on the underlying store layer. */
TEST_F(D4NFilterFixture, StoreSetAttrs) {
  /* Check setup return codes (consistent with DelObject) */
  ASSERT_EQ(createUser(), 0);
  ASSERT_EQ(createBucket(), 0);
  ASSERT_EQ(putObject("StoreSetAttrs"), 0);
  unique_ptr<rgw::sal::Object> testObject_StoreSetAttrs = testBucket->get_object(rgw_obj_key("test_object_StoreSetAttrs"));
  /* Fail cleanly instead of dereferencing a null pointer below */
  ASSERT_NE(testObject_StoreSetAttrs, nullptr);
  /* Get the underlying store */
  /* Non-static: a static pointer would dangle on repeated runs (--gtest_repeat) */
  rgw::sal::Object* nextObject = dynamic_cast<rgw::sal::FilterObject*>(testObject_StoreSetAttrs.get())->get_next();
  ASSERT_NE(nextObject, nullptr);
  /* Delete base attribute for easier comparison */
  EXPECT_EQ(testObject_StoreSetAttrs->delete_obj_attrs(dpp, "test_attrs_key_StoreSetAttrs", null_yield), 0);
  /* Set more attributes */
  map<string, bufferlist> test_attrs_base;
  for (int i = 0; i < 10; ++i) {
    buffer::list bl_tmp;
    string tmp_value = "test_attrs_value_extra_" + to_string(i);
    bl_tmp.append(tmp_value.data(), strlen(tmp_value.data()));
    string tmp_key = "test_attrs_key_extra_" + to_string(i);
    test_attrs_base.insert({tmp_key, bl_tmp});
  }
  EXPECT_EQ(testObject_StoreSetAttrs->set_obj_attrs(dpp, &test_attrs_base, NULL, null_yield), 0);
  ASSERT_EQ(nextObject->get_obj_attrs(null_yield, dpp, NULL), 0);
  /* Check the attributes */
  rgw::sal::Attrs driverAttrs = nextObject->get_attrs();
  rgw::sal::Attrs::iterator attrs;
  vector< pair<string, string> > values;
  for (attrs = driverAttrs.begin(); attrs != driverAttrs.end(); ++attrs) {
    values.push_back(make_pair(attrs->first, attrs->second.to_str()));
  }
  /* Attrs iterate in sorted key order, matching the insertion indices */
  int i = 0;
  for (const auto& pair : values) {
    string tmp_key = "test_attrs_key_extra_" + to_string(i);
    string tmp_value = "test_attrs_value_extra_" + to_string(i);
    EXPECT_EQ(pair, make_pair(tmp_key, tmp_value));
    ++i;
  }
}
/* Exercise the D4N filter's attribute caching: an attribute rewritten
 * directly in redis (behind the filter's back) should be visible after the
 * filter caches attributes, and a refresh from the store should restore the
 * original values. */
TEST_F(D4NFilterFixture, StoreGetAttrs) {
  cpp_redis::client client;
  map<string, bufferlist> test_attrs_base;
  clientSetUp(&client);
  createUser();
  createBucket();
  putObject("StoreGetAttrs");
  unique_ptr<rgw::sal::Object> testObject_StoreGetAttrs = testBucket->get_object(rgw_obj_key("test_object_StoreGetAttrs"));
  /* Get the underlying store object wrapped by the filter */
  static rgw::sal::Object* nextObject = dynamic_cast<rgw::sal::FilterObject*>(testObject_StoreGetAttrs.get())->get_next();
  EXPECT_NE(nextObject, nullptr);
  ASSERT_EQ(nextObject->get_obj_attrs(null_yield, dpp, NULL), 0);
  /* Delete base attribute for easier comparison */
  testObject_StoreGetAttrs->delete_obj_attrs(dpp, "test_attrs_key_StoreGetAttrs", null_yield);
  ASSERT_EQ(nextObject->get_obj_attrs(null_yield, dpp, NULL), 0);
  /* Set ten extra attributes through the filter */
  for (int i = 0; i < 10; ++i) {
    buffer::list bl_tmp;
    string tmp_value = "test_attrs_value_extra_" + to_string(i);
    bl_tmp.append(tmp_value.data(), strlen(tmp_value.data()));
    string tmp_key = "test_attrs_key_extra_" + to_string(i);
    test_attrs_base.insert({tmp_key, bl_tmp});
  }
  testObject_StoreGetAttrs->set_obj_attrs(dpp, &test_attrs_base, NULL, null_yield);
  nextObject->get_obj_attrs(null_yield, dpp, NULL);
  /* Change an attribute directly through redis, bypassing the filter */
  vector< pair<string, string> > value;
  value.push_back(make_pair("test_attrs_key_extra_5", "new_test_attrs_value_extra_5"));
  client.hmset("rgw-object:test_object_StoreGetAttrs:cache", value, [&](cpp_redis::reply& reply) {
    if (!reply.is_null()) {
      EXPECT_EQ(reply.as_string(), "OK");
    }
  });
  client.sync_commit();
  /* Artificially adding the data field so getObject will succeed
  for the purposes of this test */
  value.clear();
  value.push_back(make_pair("data", ""));
  client.hmset("rgw-object:test_object_StoreGetAttrs:cache", value, [&](cpp_redis::reply& reply) {
    if (!reply.is_null()) {
      ASSERT_EQ(reply.as_string(), "OK");
    }
  });
  client.sync_commit();
  ASSERT_EQ(testObject_StoreGetAttrs->get_obj_attrs(null_yield, dpp, NULL), 0); /* Cache attributes */
  /* Check the attributes on the store layer; the assertions below imply the
     filter's get_obj_attrs() propagates cached values onto the wrapped store
     object -- NOTE(review): confirm against the D4N filter implementation */
  rgw::sal::Attrs driverAttrs = nextObject->get_attrs();
  rgw::sal::Attrs::iterator driverattrs;
  vector< pair<string, string> > driverValues;
  for (driverattrs = driverAttrs.begin(); driverattrs != driverAttrs.end(); ++driverattrs) {
    driverValues.push_back(make_pair(driverattrs->first, driverattrs->second.to_str()));
  }
  EXPECT_EQ((int)driverValues.size(), 10);
  int i = 0;
  for (const auto& pair : driverValues) {
    string tmp_key = "test_attrs_key_extra_" + to_string(i);
    string tmp_value = "test_attrs_value_extra_" + to_string(i);
    if (i == 5) {
      /* attribute 5 is the one rewritten directly in redis above */
      tmp_value = "new_" + tmp_value;
    }
    EXPECT_EQ(pair, make_pair(tmp_key, tmp_value));
    ++i;
  }
  /* Restore from the store and check the original attributes come back */
  nextObject->get_obj_attrs(null_yield, dpp, NULL);
  driverAttrs = nextObject->get_attrs();
  driverValues.clear();
  for (driverattrs = driverAttrs.begin(); driverattrs != driverAttrs.end(); ++driverattrs) {
    driverValues.push_back(make_pair(driverattrs->first, driverattrs->second.to_str()));
  }
  EXPECT_EQ((int)driverValues.size(), 10);
  i = 0;
  for (const auto& pair : driverValues) {
    string tmp_key = "test_attrs_key_extra_" + to_string(i);
    string tmp_value = "test_attrs_value_extra_" + to_string(i);
    EXPECT_EQ(pair, make_pair(tmp_key, tmp_value));
    ++i;
  }
  clientReset(&client);
}
/* Verify that metadata fields injected directly into the object's redis
 * cache entry are surfaced through the filter after ReadOp::prepare().
 * NOTE(review): unlike the other redis-backed tests here, this one never
 * calls clientReset(&client) at the end -- possibly an oversight; confirm. */
TEST_F(D4NFilterFixture, StoreGetMetadata) {
  cpp_redis::client client;
  clientSetUp(&client);
  createUser();
  createBucket();
  putObject("StoreGetMetadata");
  unique_ptr<rgw::sal::Object> testObject_StoreGetMetadata = testBucket->get_object(rgw_obj_key("test_object_StoreGetMetadata"));
  /* Get the underlying store object wrapped by the filter */
  static rgw::sal::Object* nextObject = dynamic_cast<rgw::sal::FilterObject*>(testObject_StoreGetMetadata.get())->get_next();
  EXPECT_NE(nextObject, nullptr);
  ASSERT_EQ(nextObject->get_obj_attrs(null_yield, dpp, NULL), 0);
  /* Change metadata values directly through redis */
  vector< pair<string, string> > value;
  value.push_back(make_pair("mtime", "2021-11-08T21:13:38.334696731Z"));
  value.push_back(make_pair("object_size", "100"));
  value.push_back(make_pair("accounted_size", "200"));
  value.push_back(make_pair("epoch", "3")); /* version_id is not tested because the object does not have an instance */
  value.push_back(make_pair("source_zone_short_id", "300"));
  value.push_back(make_pair("bucket_count", "10"));
  value.push_back(make_pair("bucket_size", "20"));
  value.push_back(make_pair("user_quota.max_size", "0"));
  value.push_back(make_pair("user_quota.max_objects", "0"));
  value.push_back(make_pair("max_buckets", "2000"));
  client.hmset("rgw-object:test_object_StoreGetMetadata:cache", value, [](cpp_redis::reply& reply) {
    if (!reply.is_null()) {
      EXPECT_EQ(reply.as_string(), "OK");
    }
  });
  client.sync_commit();
  /* Artificially adding the data field so getObject will succeed
  for the purposes of this test */
  value.clear();
  value.push_back(make_pair("data", ""));
  client.hmset("rgw-object:test_object_StoreGetMetadata:cache", value, [](cpp_redis::reply& reply) {
    if (!reply.is_null()) {
      ASSERT_EQ(reply.as_string(), "OK");
    }
  });
  client.sync_commit();
  /* prepare() is expected to pull the cached metadata into the object */
  unique_ptr<rgw::sal::Object::ReadOp> testROp = testObject_StoreGetMetadata->get_read_op();
  ASSERT_NE(testROp, nullptr);
  ASSERT_EQ(testROp->prepare(null_yield, dpp), 0);
  /* Check updated metadata values; raw state fields are read by casting the
     wrapped object down to StoreObject */
  RGWUserInfo info = testObject_StoreGetMetadata->get_bucket()->get_owner()->get_info();
  static StoreObject* storeObject = static_cast<StoreObject*>(dynamic_cast<rgw::sal::FilterObject*>(testObject_StoreGetMetadata.get())->get_next());
  EXPECT_EQ(to_iso_8601(storeObject->state.mtime), "2021-11-08T21:13:38.334696731Z");
  EXPECT_EQ(testObject_StoreGetMetadata->get_obj_size(), (uint64_t)100);
  EXPECT_EQ(storeObject->state.accounted_size, (uint64_t)200);
  EXPECT_EQ(storeObject->state.epoch, (uint64_t)3);
  EXPECT_EQ(storeObject->state.zone_short_id, (uint32_t)300);
  EXPECT_EQ(testObject_StoreGetMetadata->get_bucket()->get_count(), (uint64_t)10);
  EXPECT_EQ(testObject_StoreGetMetadata->get_bucket()->get_size(), (uint64_t)20);
  EXPECT_EQ(info.quota.user_quota.max_size, (int64_t)0);
  EXPECT_EQ(info.quota.user_quota.max_objects, (int64_t)0);
  EXPECT_EQ(testObject_StoreGetMetadata->get_bucket()->get_owner()->get_max_buckets(), (int32_t)2000);
}
/* Modifying an existing attribute through the filter must update the store. */
TEST_F(D4NFilterFixture, StoreModifyAttr) {
  createUser();
  createBucket();
  putObject("StoreModifyAttr");
  unique_ptr<rgw::sal::Object> testObject_StoreModifyAttr =
      testBucket->get_object(rgw_obj_key("test_object_StoreModifyAttr"));

  /* Underlying store object behind the filter. */
  static rgw::sal::Object* nextObject =
      dynamic_cast<rgw::sal::FilterObject*>(testObject_StoreModifyAttr.get())->get_next();
  ASSERT_NE(nextObject, nullptr);

  /* Overwrite the attribute that putObject() created. */
  const string updated = "new_test_attrs_value_StoreModifyAttr";
  buffer::list payload;
  payload.append(updated.data(), strlen(updated.data()));
  testObject_StoreModifyAttr->modify_obj_attrs("test_attrs_key_StoreModifyAttr", payload, null_yield, dpp);

  /* The store copy must reflect the new value. */
  ASSERT_EQ(nextObject->get_obj_attrs(null_yield, dpp, NULL), 0);
  rgw::sal::Attrs storedAttrs = nextObject->get_attrs();
  pair<string, string> observed(storedAttrs.begin()->first, storedAttrs.begin()->second.to_str());
  EXPECT_EQ(observed, make_pair(string("test_attrs_key_StoreModifyAttr"),
                                string("new_test_attrs_value_StoreModifyAttr")));
}
/* Deleting attributes through the filter must remove them from the store
 * while leaving unrelated attributes intact. */
TEST_F(D4NFilterFixture, StoreDelAttrs) {
  createUser();
  createBucket();
  putObject("StoreDelAttrs");
  unique_ptr<rgw::sal::Object> testObject_StoreDelAttrs =
      testBucket->get_object(rgw_obj_key("test_object_StoreDelAttrs"));

  /* Underlying store object behind the filter. */
  static rgw::sal::Object* nextObject =
      dynamic_cast<rgw::sal::FilterObject*>(testObject_StoreDelAttrs.get())->get_next();
  ASSERT_NE(nextObject, nullptr);

  /* Add ten extra attributes on top of the one putObject() created. */
  map<string, bufferlist> extra_attrs;
  for (int idx = 0; idx < 10; ++idx) {
    const string val = "test_attrs_value_extra_" + to_string(idx);
    buffer::list payload;
    payload.append(val.data(), strlen(val.data()));
    extra_attrs.insert({"test_attrs_key_extra_" + to_string(idx), payload});
  }
  testObject_StoreDelAttrs->set_obj_attrs(dpp, &extra_attrs, NULL, null_yield);
  ASSERT_EQ(nextObject->get_obj_attrs(null_yield, dpp, NULL), 0);

  /* All eleven attributes (base + 10 extras) should exist before deletion. */
  rgw::sal::Attrs storedAttrs = nextObject->get_attrs();
  EXPECT_EQ(storedAttrs.size(), (long unsigned int)11);
  vector< pair<string, string> > observed;
  auto it = storedAttrs.begin();
  for (++it; it != storedAttrs.end(); ++it) {  /* skip the base attribute */
    observed.push_back(make_pair(it->first, it->second.to_str()));
  }
  int idx = 0;
  for (const auto& entry : observed) {
    EXPECT_EQ(entry, make_pair("test_attrs_key_extra_" + to_string(idx),
                               "test_attrs_value_extra_" + to_string(idx)));
    ++idx;
  }

  /* Delete the extras; only the base attribute should survive. */
  testObject_StoreDelAttrs->set_obj_attrs(dpp, NULL, &extra_attrs, null_yield);
  ASSERT_EQ(nextObject->get_obj_attrs(null_yield, dpp, NULL), 0);
  storedAttrs = nextObject->get_attrs();
  EXPECT_EQ(storedAttrs.size(), (long unsigned int)1);
  pair<string, string> remaining(storedAttrs.begin()->first, storedAttrs.begin()->second.to_str());
  EXPECT_EQ(remaining, make_pair(string("test_attrs_key_StoreDelAttrs"),
                                 string("test_attrs_value_StoreDelAttrs")));
}
/* SAL object data storage check: write an object through an atomic writer
 * and verify the payload lands in the redis "data" field; then overwrite and
 * verify the cache holds the new bytes. */
TEST_F(D4NFilterFixture, DataCheck) {
  cpp_redis::client client;
  clientSetUp(&client);
  createUser();
  createBucket();
  /* Prepare, process, and complete object write */
  unique_ptr<rgw::sal::Object> obj = testBucket->get_object(rgw_obj_key("test_object_DataCheck"));
  rgw_user owner;
  rgw_placement_rule ptail_placement_rule;
  uint64_t olh_epoch = 123;
  string unique_tag;
  obj->get_obj_attrs(null_yield, dpp);
  /* testWriter is a fixture member, reused for the second write below */
  testWriter = driver->get_atomic_writer(dpp,
                          null_yield,
                          obj.get(),
                          owner,
                          &ptail_placement_rule,
                          olh_epoch,
                          unique_tag);
  size_t accounted_size = 4;
  string etag("test_etag");
  ceph::real_time mtime;
  ceph::real_time set_mtime;
  buffer::list bl;
  string tmp = "test_attrs_value_DataCheck";
  bl.append("test_attrs_value_DataCheck");
  map<string, bufferlist> attrs{{"test_attrs_key_DataCheck", bl}};
  buffer::list data;
  data.append("test data");
  ceph::real_time delete_at;
  /* NOTE(review): if_match/if_nomatch are passed to complete() without being
     initialized -- presumably only their addresses matter here; confirm */
  char if_match;
  char if_nomatch;
  string user_data;
  rgw_zone_set zones_trace;
  bool canceled;
  ASSERT_EQ(testWriter->prepare(null_yield), 0);
  ASSERT_EQ(testWriter->process(move(data), 0), 0);
  ASSERT_EQ(testWriter->complete(accounted_size, etag,
                          &mtime, set_mtime,
                          attrs,
                          delete_at,
                          &if_match, &if_nomatch,
                          &user_data,
                          &zones_trace, &canceled,
                          null_yield), 0);
  /* NOTE(review): `data` was std::move'd into process() above; comparing
     against it afterwards assumes the writer left it readable -- confirm */
  client.hget("rgw-object:test_object_DataCheck:cache", "data", [&data](cpp_redis::reply& reply) {
    if (reply.is_string()) {
      EXPECT_EQ(reply.as_string(), data.to_str());
    }
  });
  client.sync_commit();
  /* Change data and ensure redis stores the new value */
  buffer::list dataNew;
  dataNew.append("new test data");
  ASSERT_EQ(testWriter->prepare(null_yield), 0);
  ASSERT_EQ(testWriter->process(move(dataNew), 0), 0);
  ASSERT_EQ(testWriter->complete(accounted_size, etag,
                          &mtime, set_mtime,
                          attrs,
                          delete_at,
                          &if_match, &if_nomatch,
                          &user_data,
                          &zones_trace, &canceled,
                          null_yield), 0);
  client.hget("rgw-object:test_object_DataCheck:cache", "data", [&dataNew](cpp_redis::reply& reply) {
    if (reply.is_string()) {
      EXPECT_EQ(reply.as_string(), dataNew.to_str());
    }
  });
  client.sync_commit();
  clientReset(&client);
}
/* Entry point; optionally accepts "<host> <port>" for the redis endpoint. */
int main(int argc, char *argv[]) {
  ::testing::InitGoogleTest(&argc, argv);

  if (argc == 1) {
    /* No arguments: default to a local redis instance. */
    hostStr = "127.0.0.1";
    portStr = "6379";
  } else if (argc == 3) {
    hostStr = argv[1];
    portStr = argv[2];
  } else {
    std::cout << "Incorrect number of arguments." << std::endl;
    return -1;
  }
  redisHost = hostStr + ":" + portStr;

  env = new Environment();
  ::testing::AddGlobalTestEnvironment(env);
  return RUN_ALL_TESTS();
}
| 61,670 | 30.178463 | 148 | cc |
null | ceph-main/src/test/rgw/test_http_manager.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "rgw_rados.h"
#include "rgw_http_client.h"
#include "global/global_init.h"
#include "common/ceph_argparse.h"
#include <unistd.h>
#include <curl/curl.h>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/write.hpp>
#include <thread>
#include <gtest/gtest.h>
using namespace std;
namespace {
using tcp = boost::asio::ip::tcp;
// If another thread/process manages to bind and listen on the ephemeral port
// picked by this acceptor, retry with a fresh port, up to this many times.
static constexpr int MAX_BIND_RETRIES = 60;

// Open a v4 acceptor and bind it to an ephemeral port, retrying on
// address-in-use races.  Throws on any other error or if retries run out.
tcp::acceptor try_bind(boost::asio::io_context& ioctx) {
  using tcp = boost::asio::ip::tcp;
  tcp::endpoint endpoint(tcp::v4(), 0);
  tcp::acceptor acceptor(ioctx);
  acceptor.open(endpoint.protocol());
  int retries = 0;
  while (true) {
    try {
      acceptor.bind(endpoint);
      break;  // bound successfully
    } catch (const boost::system::system_error& e) {
      // Give up once the retry budget is spent, or on any error other
      // than address-in-use.
      if (retries++ == MAX_BIND_RETRIES ||
          e.code() != boost::system::errc::address_in_use) {
        throw;
      }
    }
    // back off a little before retrying
    sleep(1);
  }
  return acceptor;
}
} // anonymous namespace
// A body shorter than the advertised Content-Length must fail with -EAGAIN.
TEST(HTTPManager, ReadTruncated)
{
  using tcp = boost::asio::ip::tcp;
  boost::asio::io_context ioctx;
  auto acceptor = try_bind(ioctx);
  acceptor.listen();

  // One-shot server thread: accept a single connection and reply with a
  // truncated body.
  std::thread server{[&] {
    tcp::socket socket{ioctx};
    acceptor.accept(socket);
    std::string_view response =
      "HTTP/1.1 200 OK\r\n"
      "Content-Length: 1024\r\n"
      "\r\n"
      "short body";
    boost::asio::write(socket, boost::asio::buffer(response));
  }};

  const auto port = acceptor.local_endpoint().port();
  const std::string url = "http://127.0.0.1:" + std::to_string(port);
  RGWHTTPClient client{g_ceph_context, "GET", url};
  EXPECT_EQ(-EAGAIN, RGWHTTP::process(&client, null_yield));
  server.join();
}
// A HEAD request must succeed even though no body follows Content-Length.
TEST(HTTPManager, Head)
{
  using tcp = boost::asio::ip::tcp;
  boost::asio::io_context ioctx;
  auto acceptor = try_bind(ioctx);
  acceptor.listen();

  // One-shot server thread: headers only, no body.
  std::thread server{[&] {
    tcp::socket socket{ioctx};
    acceptor.accept(socket);
    std::string_view response =
      "HTTP/1.1 200 OK\r\n"
      "Content-Length: 1024\r\n"
      "\r\n";
    boost::asio::write(socket, boost::asio::buffer(response));
  }};

  const auto port = acceptor.local_endpoint().port();
  const std::string url = "http://127.0.0.1:" + std::to_string(port);
  RGWHTTPClient client{g_ceph_context, "HEAD", url};
  EXPECT_EQ(0, RGWHTTP::process(&client, null_yield));
  server.join();
}
// Flood the manager's signalling pipe and make sure request registration
// never deadlocks once the pipe buffer is full.
TEST(HTTPManager, SignalThread)
{
  auto cct = g_ceph_context;
  RGWHTTPManager http(cct);
  ASSERT_EQ(0, http.start());
  // A pipe buffers 65536 bytes by default (see pipe(7)); every signal is a
  // 4-byte write, and each request signals twice (add_request and
  // unregister_request).  Issue enough requests to fill the buffer, plus one
  // extra to prove we don't deadlock.
  constexpr size_t pipe_capacity_bytes = 65536;
  constexpr size_t signals_that_fit = pipe_capacity_bytes / sizeof(uint32_t);
  constexpr size_t signals_per_request = 2;
  constexpr size_t num_requests = signals_that_fit / signals_per_request + 1;
  for (size_t i = 0; i < num_requests; i++) {
    RGWHTTPClient client{cct, "PUT", "http://127.0.0.1:80"};
    http.add_request(&client);
  }
}
/* Initialize ceph globals and curl, run the suite, then tear down curl. */
int main(int argc, char** argv)
{
  auto args = argv_to_vec(argc, argv);
  auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
                         CODE_ENVIRONMENT_UTILITY,
                         CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
  common_init_finish(g_ceph_context);

  rgw_http_client_init(cct->get());
  rgw_setup_saved_curl_handles();
  ::testing::InitGoogleTest(&argc, argv);
  const int rc = RUN_ALL_TESTS();
  /* release curl state in reverse order of setup */
  rgw_release_all_curl_handles();
  rgw_http_client_cleanup();
  return rc;
}
| 4,109 | 26.583893 | 103 | cc |
null | ceph-main/src/test/rgw/test_log_backing.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "rgw_log_backing.h"
#include <cerrno>
#include <iostream>
#include <string_view>
#include <fmt/format.h>
#include "include/types.h"
#include "include/rados/librados.hpp"
#include "test/librados/test_cxx.h"
#include "global/global_context.h"
#include "cls/log/cls_log_client.h"
#include "rgw_tools.h"
#include "cls_fifo_legacy.h"
#include "gtest/gtest.h"
namespace lr = librados;
namespace cb = ceph::buffer;
namespace fifo = rados::cls::fifo;
namespace RCf = rgw::cls::fifo;
auto cct = new CephContext(CEPH_ENTITY_TYPE_CLIENT);
const DoutPrefix dp(cct, 1, "test log backing: ");
/* Fixture for log-backing tests: creates a temporary pool with two
 * independent rados connections (so tests can simulate two clients) and
 * provides helpers to populate and drain omap- and FIFO-backed log shards. */
class LogBacking : public testing::Test {
protected:
  static constexpr int SHARDS = 3;
  const std::string pool_name = get_temp_pool_name();
  lr::Rados rados;
  lr::IoCtx ioctx;
  lr::Rados rados2;  // second connection, used e.g. by GenerationWN
  lr::IoCtx ioctx2;

  void SetUp() override {
    ASSERT_EQ("", create_one_pool_pp(pool_name, rados));
    ASSERT_EQ(0, rados.ioctx_create(pool_name.c_str(), ioctx));
    connect_cluster_pp(rados2);
    ASSERT_EQ(0, rados2.ioctx_create(pool_name.c_str(), ioctx2));
  }

  void TearDown() override {
    destroy_one_pool_pp(pool_name, rados);
  }

  /* Name of shard i in generation gen_id; generation 0 keeps the legacy,
   * unadorned name. */
  std::string get_oid(uint64_t gen_id, int i) const {
    return (gen_id > 0 ?
            fmt::format("shard@G{}.{}", gen_id, i) :
            fmt::format("shard.{}", i));
  }

  /* Write one cls_log (omap) entry to every generation-0 shard. */
  void make_omap() {
    for (int i = 0; i < SHARDS; ++i) {
      using ceph::encode;
      lr::ObjectWriteOperation op;
      cb::list bl;
      encode(i, bl);
      cls_log_add(op, ceph_clock_now(), {}, "meow", bl);
      auto r = rgw_rados_operate(&dp, ioctx, get_oid(0, i), &op, null_yield);
      ASSERT_GE(r, 0);
    }
  }

  /* Append one cls_log entry to generation-0 shard i. */
  void add_omap(int i) {
    using ceph::encode;
    lr::ObjectWriteOperation op;
    cb::list bl;
    encode(i, bl);
    cls_log_add(op, ceph_clock_now(), {}, "meow", bl);
    auto r = rgw_rados_operate(&dp, ioctx, get_oid(0, i), &op, null_yield);
    ASSERT_GE(r, 0);
  }

  /* Trim every entry from each generation-0 shard, then verify a fresh
   * listing comes back empty. */
  void empty_omap() {
    for (int i = 0; i < SHARDS; ++i) {
      auto oid = get_oid(0, i);
      std::string to_marker;
      {
        lr::ObjectReadOperation op;
        std::list<cls_log_entry> entries;
        bool truncated = false;
        cls_log_list(op, {}, {}, {}, 1, entries, &to_marker, &truncated);
        auto r = rgw_rados_operate(&dp, ioctx, oid, &op, nullptr, null_yield);
        ASSERT_GE(r, 0);
        ASSERT_FALSE(entries.empty());
      }
      {
        lr::ObjectWriteOperation op;
        cls_log_trim(op, {}, {}, {}, to_marker);
        auto r = rgw_rados_operate(&dp, ioctx, oid, &op, null_yield);
        ASSERT_GE(r, 0);
      }
      {
        lr::ObjectReadOperation op;
        std::list<cls_log_entry> entries;
        bool truncated = false;
        cls_log_list(op, {}, {}, {}, 1, entries, &to_marker, &truncated);
        auto r = rgw_rados_operate(&dp, ioctx, oid, &op, nullptr, null_yield);
        ASSERT_GE(r, 0);
        ASSERT_TRUE(entries.empty());
      }
    }
  }

  /* Create a FIFO for every generation-0 shard. */
  void make_fifo()
  {
    for (int i = 0; i < SHARDS; ++i) {
      std::unique_ptr<RCf::FIFO> fifo;
      auto r = RCf::FIFO::create(&dp, ioctx, get_oid(0, i), &fifo, null_yield);
      ASSERT_EQ(0, r);
      ASSERT_TRUE(fifo);
    }
  }

  /* Push one entry onto the FIFO backing generation-0 shard i. */
  void add_fifo(int i)
  {
    using ceph::encode;
    std::unique_ptr<RCf::FIFO> fifo;
    auto r = RCf::FIFO::open(&dp, ioctx, get_oid(0, i), &fifo, null_yield);
    /* Fixed: was ASSERT_GE(0, r), i.e. "r <= 0", which also passed on
     * negative error codes.  make_fifo() already uses ASSERT_EQ(0, r). */
    ASSERT_EQ(0, r);
    ASSERT_TRUE(fifo);
    cb::list bl;
    encode(i, bl);
    r = fifo->push(&dp, bl, null_yield);
    /* Fixed: same inverted assertion as above. */
    ASSERT_EQ(0, r);
  }

  /* Assert that the pool contains no objects at all. */
  void assert_empty() {
    std::vector<lr::ObjectItem> result;
    lr::ObjectCursor next;
    auto r = ioctx.object_list(ioctx.object_list_begin(), ioctx.object_list_end(),
                               100, {}, &result, &next);
    ASSERT_GE(r, 0);
    ASSERT_TRUE(result.empty());
  }
};
// Shards already written in omap format are detected as omap even though
// the caller asks for fifo.
TEST_F(LogBacking, TestOmap)
{
  make_omap();
  const auto oid_of = [this](int shard) { return get_oid(0, shard); };
  auto backing = log_backing_type(&dp, ioctx, log_type::fifo, SHARDS,
                                  oid_of, null_yield);
  ASSERT_EQ(log_type::omap, *backing);
}
// With no shard objects at all, the requested type (omap) is chosen.
TEST_F(LogBacking, TestOmapEmpty)
{
  const auto oid_of = [this](int shard) { return get_oid(0, shard); };
  auto backing = log_backing_type(&dp, ioctx, log_type::omap, SHARDS,
                                  oid_of, null_yield);
  ASSERT_EQ(log_type::omap, *backing);
}
// Shards already created as FIFOs are detected as fifo.
TEST_F(LogBacking, TestFIFO)
{
  make_fifo();
  const auto oid_of = [this](int shard) { return get_oid(0, shard); };
  auto backing = log_backing_type(&dp, ioctx, log_type::fifo, SHARDS,
                                  oid_of, null_yield);
  ASSERT_EQ(log_type::fifo, *backing);
}
// With no shard objects at all, the requested type (fifo) is chosen.
TEST_F(LogBacking, TestFIFOEmpty)
{
  const auto oid_of = [this](int shard) { return get_oid(0, shard); };
  auto backing = log_backing_type(&dp, ioctx, log_type::fifo, SHARDS,
                                  oid_of, null_yield);
  ASSERT_EQ(log_type::fifo, *backing);
}
// gencursor/cursorgen must round-trip for both the zero and a non-zero
// generation; generation 0 encodes as the bare cursor.
TEST(CursorGen, RoundTrip) {
  const std::string_view pcurs = "fded";

  const auto check = [&](uint64_t generation, bool bare) {
    auto gc = gencursor(generation, pcurs);
    if (bare) {
      ASSERT_EQ(pcurs, gc);
    } else {
      ASSERT_NE(pcurs, gc);
    }
    auto [gen, cursor] = cursorgen(gc);
    ASSERT_EQ(generation, gen);
    ASSERT_EQ(pcurs, cursor);
  };

  check(0, true);    // generation 0: encoded form equals the plain cursor
  check(53, false);  // non-zero generation: encoded form differs
}
/* logback_generations subclass that records the callbacks it receives so
 * tests can assert on generation membership and tail advancement. */
class generations final : public logback_generations {
public:
  entries_t got_entries;           // last entry map delivered by a callback
  std::optional<uint64_t> tail;    // last tail passed to handle_empty_to()
  using logback_generations::logback_generations;

  /* Invoked with the initial set of generations. */
  bs::error_code handle_init(entries_t e) noexcept {
    got_entries = e;
    return {};
  }
  /* Invoked when new generations appear. */
  bs::error_code handle_new_gens(entries_t e) noexcept {
    got_entries = e;
    return {};
  }
  /* Invoked when generations up to new_tail have been emptied. */
  bs::error_code handle_empty_to(uint64_t new_tail) noexcept {
    tail = new_tail;
    return {};
  }
};
/* Generation lifecycle with a single client: init with one fifo generation,
 * fail to empty the sole generation, add an omap generation, reopen and see
 * both, empty away generation 0, reopen and see only the survivor. */
TEST_F(LogBacking, GenerationSingle)
{
  auto lgr = logback_generations::init<generations>(
    &dp, ioctx, "foobar", [this](uint64_t gen_id, int shard) {
      return get_oid(gen_id, shard);
    }, SHARDS, log_type::fifo, null_yield);
  ASSERT_TRUE(lgr);
  auto lg = std::move(*lgr);
  /* A fresh log starts with a single, unpruned fifo generation, id 0. */
  ASSERT_EQ(0, lg->got_entries.begin()->first);
  ASSERT_EQ(0, lg->got_entries[0].gen_id);
  ASSERT_EQ(log_type::fifo, lg->got_entries[0].type);
  ASSERT_FALSE(lg->got_entries[0].pruned);
  /* Emptying up to the only (newest) generation must fail; a truthy error
     code signals the failure. */
  auto ec = lg->empty_to(&dp, 0, null_yield);
  ASSERT_TRUE(ec);
  /* Reopen: the same single generation is reported again. */
  lg.reset();
  lg = *logback_generations::init<generations>(
    &dp, ioctx, "foobar", [this](uint64_t gen_id, int shard) {
      return get_oid(gen_id, shard);
    }, SHARDS, log_type::fifo, null_yield);
  ASSERT_EQ(0, lg->got_entries.begin()->first);
  ASSERT_EQ(0, lg->got_entries[0].gen_id);
  ASSERT_EQ(log_type::fifo, lg->got_entries[0].type);
  ASSERT_FALSE(lg->got_entries[0].pruned);
  /* Adding a new backing reports only the new generation (id 1, omap). */
  lg->got_entries.clear();
  ec = lg->new_backing(&dp, log_type::omap, null_yield);
  ASSERT_FALSE(ec);
  ASSERT_EQ(1, lg->got_entries.size());
  ASSERT_EQ(1, lg->got_entries[1].gen_id);
  ASSERT_EQ(log_type::omap, lg->got_entries[1].type);
  ASSERT_FALSE(lg->got_entries[1].pruned);
  /* Reopen: both generations are now visible. */
  lg.reset();
  lg = *logback_generations::init<generations>(
    &dp, ioctx, "foobar", [this](uint64_t gen_id, int shard) {
      return get_oid(gen_id, shard);
    }, SHARDS, log_type::fifo, null_yield);
  ASSERT_EQ(2, lg->got_entries.size());
  ASSERT_EQ(0, lg->got_entries[0].gen_id);
  ASSERT_EQ(log_type::fifo, lg->got_entries[0].type);
  ASSERT_FALSE(lg->got_entries[0].pruned);
  ASSERT_EQ(1, lg->got_entries[1].gen_id);
  ASSERT_EQ(log_type::omap, lg->got_entries[1].type);
  ASSERT_FALSE(lg->got_entries[1].pruned);
  /* With a newer generation present, emptying through gen 0 succeeds and
     the tail callback fires. */
  ec = lg->empty_to(&dp, 0, null_yield);
  ASSERT_FALSE(ec);
  ASSERT_EQ(0, *lg->tail);
  /* Reopen: only the omap generation (id 1) survives. */
  lg.reset();
  lg = *logback_generations::init<generations>(
    &dp, ioctx, "foobar", [this](uint64_t gen_id, int shard) {
      return get_oid(gen_id, shard);
    }, SHARDS, log_type::fifo, null_yield);
  ASSERT_EQ(1, lg->got_entries.size());
  ASSERT_EQ(1, lg->got_entries[1].gen_id);
  ASSERT_EQ(log_type::omap, lg->got_entries[1].type);
  ASSERT_FALSE(lg->got_entries[1].pruned);
}
/* Two clients on separate connections: generation changes made through one
 * logback_generations instance must be observed by the other. */
TEST_F(LogBacking, GenerationWN)
{
  auto lg1 = *logback_generations::init<generations>(
    &dp, ioctx, "foobar", [this](uint64_t gen_id, int shard) {
      return get_oid(gen_id, shard);
    }, SHARDS, log_type::fifo, null_yield);
  /* Client 1 adds an omap generation; only the new entry is reported. */
  auto ec = lg1->new_backing(&dp, log_type::omap, null_yield);
  ASSERT_FALSE(ec);
  ASSERT_EQ(1, lg1->got_entries.size());
  ASSERT_EQ(1, lg1->got_entries[1].gen_id);
  ASSERT_EQ(log_type::omap, lg1->got_entries[1].type);
  ASSERT_FALSE(lg1->got_entries[1].pruned);
  lg1->got_entries.clear();
  /* Client 2 opens on the second connection and sees both generations. */
  auto lg2 = *logback_generations::init<generations>(
    &dp, ioctx2, "foobar", [this](uint64_t gen_id, int shard) {
      return get_oid(gen_id, shard);
    }, SHARDS, log_type::fifo, null_yield);
  ASSERT_EQ(2, lg2->got_entries.size());
  ASSERT_EQ(0, lg2->got_entries[0].gen_id);
  ASSERT_EQ(log_type::fifo, lg2->got_entries[0].type);
  ASSERT_FALSE(lg2->got_entries[0].pruned);
  ASSERT_EQ(1, lg2->got_entries[1].gen_id);
  ASSERT_EQ(log_type::omap, lg2->got_entries[1].type);
  ASSERT_FALSE(lg2->got_entries[1].pruned);
  lg2->got_entries.clear();
  /* Client 1 adds generation 2 (fifo); both clients observe it. */
  ec = lg1->new_backing(&dp, log_type::fifo, null_yield);
  ASSERT_FALSE(ec);
  ASSERT_EQ(1, lg1->got_entries.size());
  ASSERT_EQ(2, lg1->got_entries[2].gen_id);
  ASSERT_EQ(log_type::fifo, lg1->got_entries[2].type);
  ASSERT_FALSE(lg1->got_entries[2].pruned);
  ASSERT_EQ(1, lg2->got_entries.size());
  ASSERT_EQ(2, lg2->got_entries[2].gen_id);
  ASSERT_EQ(log_type::fifo, lg2->got_entries[2].type);
  ASSERT_FALSE(lg2->got_entries[2].pruned);
  lg1->got_entries.clear();
  lg2->got_entries.clear();
  /* Client 2 empties through generation 1; both tails advance. */
  ec = lg2->empty_to(&dp, 1, null_yield);
  ASSERT_FALSE(ec);
  ASSERT_EQ(1, *lg1->tail);
  ASSERT_EQ(1, *lg2->tail);
  lg1->tail.reset();
  lg2->tail.reset();
}
| 9,825 | 25.846995 | 82 | cc |
null | ceph-main/src/test/rgw/test_multen.py | # Test of mult-tenancy
import json
import sys
from boto.s3.connection import S3Connection, OrdinaryCallingFormat
# XXX once we're done, break out the common code into a library module
# See https://github.com/ceph/ceph/pull/8646
import test_multi as t
class TestException(Exception):
    """Raised when a radosgw-admin command fails or returns unexpected output."""
    pass
#
# Create a traditional user, S3-only, global (empty) tenant
#
def test2(cluster):
    """Create an S3-only user in the global tenant and verify the user_id."""
    uid = "tester2"
    cmd = t.build_cmd('--uid', uid,
                      '--display-name', "'Test User 2'",
                      '--access-key', "tester2KEY",
                      '--secret', "test3pass",
                      "user create")
    out, ret = cluster.rgw_admin(cmd, check_retcode=False)
    if ret != 0:
        raise TestException("failed command: user create --uid %s" % uid)
    try:
        outj = json.loads(out.decode('utf-8'))
    except ValueError:
        raise TestException("invalid json after: user create --uid %s" % uid)
    if not isinstance(outj, dict):
        raise TestException("bad json after: user create --uid %s" % uid)
    # With no tenant, the returned user_id is the bare uid.
    if outj['user_id'] != uid:
        raise TestException(
            "command: user create --uid %s, returned user_id %s" %
            (uid, outj['user_id']))
#
# Create a tenantized user with --tenant foo
#
def test3(cluster):
    """Create a user under an explicit tenant and verify the qualified id."""
    tid = "testx3"
    uid = "tester3"
    cmd = t.build_cmd(
        '--tenant', tid,
        '--uid', uid,
        '--display-name', "Test_User_3",
        '--access-key', "tester3KEY",
        '--secret', "test3pass",
        "user create")
    out, ret = cluster.rgw_admin(cmd, check_retcode=False)
    if ret != 0:
        raise TestException("failed command: user create --uid %s" % uid)
    try:
        outj = json.loads(out.decode('utf-8'))
    except ValueError:
        raise TestException("invalid json after: user create --uid %s" % uid)
    if not isinstance(outj, dict):
        raise TestException("bad json after: user create --uid %s" % uid)
    # Tenantized users come back as "<tenant>$<uid>".
    tid_uid = "%s$%s" % (tid, uid)
    if outj['user_id'] != tid_uid:
        raise TestException(
            "command: user create --uid %s, returned user_id %s" %
            (tid_uid, outj['user_id']))
#
# Create a tenantized user with a subuser
#
# N.B. The aim of this test is not just to create a subuser, but to create
# the key with a separate command, which does not use --tenant, but extracts
# the tenant from the subuser. No idea why we allow this. There was some kind
# of old script that did this.
#
def test4(cluster):
    """Create tenantized user testx4$tester4 with swift subuser test4, then
    create the subuser's swift key via a fully-qualified --subuser (without
    --tenant/--uid), a historic usage that must keep working."""
    tid = "testx4"
    uid = "tester4"
    subid = "test4"
    display_name = "Test_User_4"
    cmd = t.build_cmd(
        '--tenant', tid,
        '--uid', uid,
        '--display-name', display_name,
        '--subuser', '%s:%s' % (uid, subid),
        '--key-type', 'swift',
        '--access', 'full',
        "user create")
    out, ret = cluster.rgw_admin(cmd, check_retcode=False)
    if ret != 0:
        raise TestException("failed command: user create --uid %s" % uid)
    try:
        outj = json.loads(out.decode('utf-8'))
    except ValueError:
        raise TestException("invalid json after: user create --uid %s" % uid)
    if not isinstance(outj, dict):
        raise TestException("bad json after: user create --uid %s" % uid)
    tid_uid = "%s$%s" % (tid, uid)
    if outj['user_id'] != tid_uid:
        raise TestException(
            "command: user create --uid %s, returned user_id %s" %
            (tid_uid, outj['user_id']))
    # Note that this tests a way to identify a fully-qualified subuser
    # without --tenant and --uid. This is a historic use that we support.
    swift_secret = "test3pass"
    cmd = t.build_cmd(
        '--subuser', "'%s$%s:%s'" % (tid, uid, subid),
        '--key-type', 'swift',
        '--secret', swift_secret,
        "key create")
    out, ret = cluster.rgw_admin(cmd, check_retcode=False)
    if ret != 0:
        raise TestException("failed command: key create --uid %s" % uid)
    try:
        outj = json.loads(out.decode('utf-8'))
    except ValueError:
        raise TestException("invalid json after: key create --uid %s" % uid)
    if not isinstance(outj, dict):
        raise TestException("bad json after: key create --uid %s" % uid)
    tid_uid = "%s$%s" % (tid, uid)
    if outj['user_id'] != tid_uid:
        raise TestException(
            "command: key create --uid %s, returned user_id %s" %
            (tid_uid, outj['user_id']))
    # These tests easily can throw KeyError, needs a try: XXX
    skj = outj['swift_keys'][0]
    if skj['secret_key'] != swift_secret:
        raise TestException(
            "command: key create --uid %s, returned swift key %s" %
            (tid_uid, skj['secret_key']))
#
# Access the cluster, create containers in two tenants, verify it all works.
#
def test5_add_s3_key(cluster, tid, uid):
    """Attach an S3 key (access key == uid, secret == uid + 'pass') and
    verify the admin output echoes the right user id and secret."""
    secret = "%spass" % uid
    tid_uid = "%s$%s" % (tid, uid) if tid else uid
    cmd = t.build_cmd(
        '--uid', "'%s'" % (tid_uid,),
        '--access-key', uid,
        '--secret', secret,
        "key create")
    out, ret = cluster.rgw_admin(cmd, check_retcode=False)
    if ret != 0:
        raise TestException("failed command: key create --uid %s" % uid)
    try:
        outj = json.loads(out.decode('utf-8'))
    except ValueError:
        raise TestException("invalid json after: key create --uid %s" % uid)
    if not isinstance(outj, dict):
        raise TestException("bad json after: key create --uid %s" % uid)
    if outj['user_id'] != tid_uid:
        raise TestException(
            "command: key create --uid %s, returned user_id %s" %
            (uid, outj['user_id']))
    skj = outj['keys'][0]
    if skj['secret_key'] != secret:
        raise TestException(
            "command: key create --uid %s, returned s3 key %s" %
            (uid, skj['secret_key']))
def test5_add_swift_key(cluster, tid, uid, subid):
    """Create a swift key for subuser ``uid:subid`` (tenant-qualified when
    ``tid`` is set) and verify the returned user id and swift secret."""
    secret = "%spass" % uid
    if tid:
        tid_uid = "%s$%s" % (tid, uid)
    else:
        tid_uid = uid
    cmd = t.build_cmd(
        '--subuser', "'%s:%s'" % (tid_uid, subid),
        '--key-type', 'swift',
        '--secret', secret,
        "key create")
    out, ret = cluster.rgw_admin(cmd, check_retcode=False)
    if ret != 0:
        raise TestException("failed command: key create --uid %s" % uid)
    try:
        outj = json.loads(out.decode('utf-8'))
    except ValueError:
        raise TestException("invalid json after: key create --uid %s" % uid)
    if not isinstance(outj, dict):
        raise TestException("bad json after: key create --uid %s" % uid)
    if outj['user_id'] != tid_uid:
        raise TestException(
            "command: key create --uid %s, returned user_id %s" %
            (uid, outj['user_id']))
    # Verify the swift key that was just created.  Previously this checked
    # outj['keys'][0] (the S3 key list), as flagged by the old
    # "XXX checking wrong thing here" note; test4 checks 'swift_keys'.
    skj = outj['swift_keys'][0]
    if skj['secret_key'] != secret:
        raise TestException(
            "command: key create --uid %s, returned swift key %s" %
            (uid, skj['secret_key']))
def test5_make_user(cluster, tid, uid, subid):
    """
    Create a user (tenantized when tid is set), optionally with a swift
    subuser, then attach an S3 key and -- if subid is given -- a swift key.

    :param tid: Tenant ID string or None for the legacy tenant
    :param uid: User ID string
    :param subid: Subuser ID, may be None for S3-only users
    """
    display_name = "'Test User %s'" % uid
    # Build the command incrementally so --tenant and --subuser are only
    # present when requested.
    cmd = ""
    if tid:
        cmd = t.build_cmd(cmd,
            '--tenant', tid)
    cmd = t.build_cmd(cmd,
        '--uid', uid,
        '--display-name', display_name)
    if subid:
        cmd = t.build_cmd(cmd,
            '--subuser', '%s:%s' % (uid, subid),
            '--key-type', 'swift')
    cmd = t.build_cmd(cmd,
        '--access', 'full',
        "user create")
    out, ret = cluster.rgw_admin(cmd, check_retcode=False)
    if ret != 0:
        raise TestException("failed command: user create --uid %s" % uid)
    try:
        outj = json.loads(out.decode('utf-8'))
    except ValueError:
        raise TestException("invalid json after: user create --uid %s" % uid)
    if not isinstance(outj, dict):
        raise TestException("bad json after: user create --uid %s" % uid)
    # Tenantized users are reported as "<tenant>$<uid>".
    if tid:
        tid_uid = "%s$%s" % (tid, uid)
    else:
        tid_uid = uid
    if outj['user_id'] != tid_uid:
        raise TestException(
            "command: user create --uid %s, returned user_id %s" %
            (tid_uid, outj['user_id']))
    #
    # For now, this uses hardcoded passwords based on uid.
    # They are all different for ease of debugging in case something crosses.
    #
    test5_add_s3_key(cluster, tid, uid)
    if subid:
        test5_add_swift_key(cluster, tid, uid, subid)
def test5_connect_s3(cluster, access_key, secret_key):
    # Helper: open a plain-HTTP boto connection to the local test gateway
    # using the given user's credentials (was copy-pasted three times).
    return S3Connection(
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
        is_secure=False,
        host="localhost",
        port=cluster.port,
        calling_format=OrdinaryCallingFormat())

def test5_poke_s3(cluster):
    """
    Write one object under the legacy tenant (tester5a) and one under
    tenant test5b (tester5b1), using the same bucket and object names,
    then read both back to verify the tenants' namespaces are isolated.
    Cross-tenant access uses the "tenant:bucket" colon syntax.
    Raises TestException on a body mismatch.
    """
    bucketname = "test5cont1"
    objname = "obj1"
    # Not sure if we like useless information printed, but the rest of the
    # test framework is insanely talkative when it executes commands.
    # So, to keep it in line and have a marker when things go wrong, this.
    print("PUT bucket %s object %s for tenant A (empty)" %
          (bucketname, objname))
    c = test5_connect_s3(cluster, "tester5a", "tester5apass")
    bucket = c.create_bucket(bucketname)
    key = bucket.new_key(objname)
    headers = { "Content-Type": "text/plain" }
    key.set_contents_from_string(b"Test5A\n", headers)
    key.set_acl('public-read')
    #
    # Now it's getting interesting. We're logging into a tenantized user.
    #
    print("PUT bucket %s object %s for tenant B" % (bucketname, objname))
    c = test5_connect_s3(cluster, "tester5b1", "tester5b1pass")
    bucket = c.create_bucket(bucketname)
    bucket.set_canned_acl('public-read')
    key = bucket.new_key(objname)
    headers = { "Content-Type": "text/plain" }
    key.set_contents_from_string(b"Test5B\n", headers)
    key.set_acl('public-read')
    #
    # Finally, let's fetch a couple of objects and verify that they
    # are what they should be and we didn't get them overwritten.
    # Note that we access one of objects across tenants using the colon.
    #
    print("GET bucket %s object %s for tenants A and B" %
          (bucketname, objname))
    c = test5_connect_s3(cluster, "tester5a", "tester5apass")
    bucket = c.get_bucket(bucketname)
    key = bucket.get_key(objname)
    body = key.get_contents_as_string()
    if body != b"Test5A\n":
        raise TestException("failed body check, bucket %s object %s" %
                            (bucketname, objname))
    bucket = c.get_bucket("test5b:"+bucketname)
    key = bucket.get_key(objname)
    body = key.get_contents_as_string()
    if body != b"Test5B\n":
        raise TestException(
            "failed body check, tenant %s bucket %s object %s" %
            ("test5b", bucketname, objname))
    print("Poke OK")
def test5(cluster):
    """Multi-tenancy test: create tenanted and legacy users, then verify
    bucket/object isolation across tenants via test5_poke_s3()."""
    # Plan:
    # 0. create users tester5a and test5b$tester5b1 test5b$tester5b2
    # 1. create buckets "test5cont" under test5a and test5b
    # 2. create objects in the buckets
    # 3. access objects (across users in container test5b)
    test5_make_user(cluster, None, "tester5a", "test5a")
    test5_make_user(cluster, "test5b", "tester5b1", "test5b1")
    test5_make_user(cluster, "test5b", "tester5b2", "test5b2")
    test5_poke_s3(cluster)
# XXX this parse_args boolean makes no sense. we should pass argv[] instead,
# possibly empty. (copied from test_multi, correct it there too)
def init(parse_args):
    """Start one local cluster + RGW, run tests 2-5, then tear down.

    :param parse_args: currently unused (see XXX comment above); kept so
                       the entry points mirror test_multi.init()
    :return: 0 on success, 1 if any test raised TestException
    """
    #argv = []
    #if parse_args:
    #    argv = sys.argv[1:]
    #args = parser.parse_args(argv)
    #rgw_multi = RGWMulti(int(args.num_zones))
    #rgw_multi.setup(not args.no_bootstrap)
    # __init__():
    # NOTE(review): clnum is unused here; it is a placeholder copied from
    # test_multi — only a single cluster is ever started by this script.
    port = 8001
    clnum = 1 # number of clusters
    clid = 1 # 1-based
    cluster = t.RGWCluster(clid, port)
    # setup():
    cluster.start()
    cluster.start_rgw()
    # The cluster is always reset at this point, so we don't need to list
    # users or delete pre-existing users.
    try:
        test2(cluster)
        test3(cluster)
        test4(cluster)
        test5(cluster)
    except TestException as e:
        # on failure: stop the gateway and cluster before reporting,
        # so a failed run does not leave daemons behind
        cluster.stop_rgw()
        cluster.stop()
        sys.stderr.write("FAIL\n")
        sys.stderr.write("%s\n" % str(e))
        return 1
    # teardown():
    cluster.stop_rgw()
    cluster.stop()
    return 0
def setup_module():
    # nose entry point: run the suite without parsing command-line args
    return init(False)
# script entry point: exit status mirrors init()'s return code
if __name__ == "__main__":
    sys.exit(init(True))
| 12,904 | 31.182045 | 77 | py |
null | ceph-main/src/test/rgw/test_multi.md | # Multi Site Test Framework
This framework allows you to write and run tests against a **local** multi-cluster environment. The framework is using the `mstart.sh` script in order to setup the environment according to a configuration file, and then uses the [nose](https://nose.readthedocs.io/en/latest/) test framework to actually run the tests.
Tests are written in python2.7, but can invoke shell scripts, binaries etc.
## Running Tests
Entry point for all tests is `/path/to/ceph/src/test/rgw/test_multi.py`. And the actual tests are located inside the `/path/to/ceph/src/test/rgw/rgw_multi` subdirectory.
So, to run all tests use:
```
$ cd /path/to/ceph/src/test/rgw/
$ nosetests test_multi.py
```
This will assume a configuration file called `/path/to/ceph/src/test/rgw/test_multi.conf` exists.
To use a different configuration file, set the `RGW_MULTI_TEST_CONF` environment variable to point to that file.
Since we use the same entry point file for all tests, running specific tests is possible using the following format:
```
$ nosetests test_multi.py:<specific_test_name>
```
To run multiple tests based on a wildcard string, use the following format:
```
$ nosetests test_multi.py -m "<wildcard string>"
```
Note that the test to run does not have to be inside the `test_multi.py` file.
Note that different options for running specific and multiple tests exists in the [nose documentation](https://nose.readthedocs.io/en/latest/usage.html#options), as well as other options to control the execution of the tests.
## Configuration
### Environment Variables
Following RGW environment variables are taken into consideration when running the tests:
- `RGW_FRONTEND`: used to change frontend to 'civetweb' or 'beast' (default)
- `RGW_VALGRIND`: used to run the radosgw under valgrind. e.g. RGW_VALGRIND=yes
Other environment variables used to configure elements other than RGW can also be used as they are used in vstart.sh. E.g. MON, OSD, MGR, MDS
The configuration file for the run has 3 sections:
### Default
This section holds the following parameters:
- `num_zonegroups`: number of zone groups (integer, default 1)
- `num_zones`: number of regular zones in each group (integer, default 3)
- `num_az_zones`: number of archive zones (integer, default 0, max value 1)
- `gateways_per_zone`: number of RADOS gateways per zone (integer, default 2)
- `no_bootstrap`: whether to assume that the cluster is already up and does not need to be set up again. If set to "false", it will try to re-run the cluster, so `mstop.sh` must be called beforehand. Should be set to "false" any time the configuration is changed. Otherwise, assuming the cluster is already up, it should be set to "true" to save on execution time (boolean, default false)
- `log_level`: console log level of the logs in the tests, note that any program invoked from the test my emit logs regardless of that setting (integer, default 20)
- 20 and up -> DEBUG
- 10 and up -> INFO
- 5 and up -> WARNING
- 1 and up -> ERROR
- CRITICAL is always logged
- `log_file`: log file name. If not set, only console logs exists (string, default None)
- `file_log_level`: file log level of the logs in the tests. Similar to `log_level`
- `tenant`: name of tenant (string, default None)
- `checkpoint_retries`: *TODO* (integer, default 60)
- `checkpoint_delay`: *TODO* (integer, default 5)
- `reconfigure_delay`: *TODO* (integer, default 5)
### Elasticsearch
*TODO*
### Cloud
*TODO*
## Writing Tests
New tests should be added into the `/path/to/ceph/src/test/rgw/rgw_multi` subdirectory.
- Base classes are in: `/path/to/ceph/src/test/rgw/rgw_multi/multisite.py`
- `/path/to/ceph/src/test/rgw/rgw_multi/tests.py` holds the majority of the tests, but also many utility and infrastructure functions that could be used in other tests files
| 3,831 | 66.22807 | 391 | md |
null | ceph-main/src/test/rgw/test_multi.py | import subprocess
import os
import random
import string
import argparse
import sys
import logging
try:
import configparser
except ImportError:
import ConfigParser as configparser
import nose.core
from rgw_multi import multisite
from rgw_multi.zone_rados import RadosZone as RadosZone
from rgw_multi.zone_es import ESZone as ESZone
from rgw_multi.zone_es import ESZoneConfig as ESZoneConfig
from rgw_multi.zone_cloud import CloudZone as CloudZone
from rgw_multi.zone_cloud import CloudZoneConfig as CloudZoneConfig
from rgw_multi.zone_az import AZone as AZone
from rgw_multi.zone_az import AZoneConfig as AZoneConfig
# make tests from rgw_multi.tests available to nose
from rgw_multi.tests import *
from rgw_multi.tests_es import *
from rgw_multi.tests_az import *
# mstart.sh and friends live two directories above this test file
# (ceph/src), unless MSTART_PATH points elsewhere
mstart_path = os.getenv('MSTART_PATH')
if mstart_path is None:
    mstart_path = os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + '/../..') + '/'
# directory of this file; used to locate helper scripts and the default conf
test_path = os.path.normpath(os.path.dirname(os.path.realpath(__file__))) + '/'
# configure logging for the tests module
log = logging.getLogger('rgw_multi.tests')
def bash(cmd, **kwargs):
    """Run *cmd* via subprocess, capture stdout, return (stdout, retcode).

    Pass check_retcode=False to skip the zero-exit-status assertion; all
    other keyword arguments are forwarded to subprocess.Popen.
    """
    log.debug('running cmd: %s', ' '.join(cmd))
    check_retcode = kwargs.pop('check_retcode', True)
    kwargs['stdout'] = subprocess.PIPE
    proc = subprocess.Popen(cmd, **kwargs)
    out = proc.communicate()[0].decode('utf-8')
    log.debug('command returned status=%d stdout=%s', proc.returncode, out)
    if check_retcode:
        assert proc.returncode == 0
    return (out, proc.returncode)
class Cluster(multisite.Cluster):
    """Cluster driven through the local mstart.sh/mstop.sh helper scripts."""
    def __init__(self, cluster_id):
        super(Cluster, self).__init__()
        self.cluster_id = cluster_id
        # a brand-new instance always wants a full (re)initialization
        self.needs_reset = True
    def admin(self, args = None, **kwargs):
        """Run a radosgw-admin command against this cluster; return (out, ret)."""
        cmd = [test_path + 'test-rgw-call.sh', 'call_rgw_admin', self.cluster_id]
        cmd.extend(args or [])
        cmd.append('--debug-rgw=' + str(kwargs.pop('debug_rgw', 0)))
        cmd.append('--debug-ms=' + str(kwargs.pop('debug_ms', 0)))
        if kwargs.pop('read_only', False):
            cmd.append('--rgw-cache-enabled=false')
        return bash(cmd, **kwargs)
    def start(self):
        """Boot the cluster; pass -n (new) and CEPH_NUM_MDS=0 on first start."""
        cmd = [mstart_path + 'mstart.sh', self.cluster_id]
        env = None
        if self.needs_reset:
            env = os.environ.copy()
            env['CEPH_NUM_MDS'] = '0'
            cmd.append('-n')
        # cmd += ['-o']
        # cmd += ['rgw_cache_enabled=false']
        bash(cmd, env=env)
        self.needs_reset = False
    def stop(self):
        """Shut the whole cluster down via mstop.sh."""
        bash([mstart_path + 'mstop.sh', self.cluster_id])
class Gateway(multisite.Gateway):
    """ gateway implementation based on mrgw/mstop scripts """
    def __init__(self, client_id = None, *args, **kwargs):
        super(Gateway, self).__init__(*args, **kwargs)
        # optional rgw instance name, forwarded to mrgw.sh via -i
        self.id = client_id
    def start(self, args = None):
        """ start the gateway """
        # launches radosgw for this zone on self.port (and self.ssl_port);
        # extra CLI arguments beyond the fixed debug flags go in 'args'
        assert(self.cluster)
        env = os.environ.copy()
        # to change frontend, set RGW_FRONTEND env variable
        # e.g. RGW_FRONTEND=civetweb
        # to run test under valgrind memcheck, set RGW_VALGRIND to 'yes'
        # e.g. RGW_VALGRIND=yes
        cmd = [mstart_path + 'mrgw.sh', self.cluster.cluster_id, str(self.port), str(self.ssl_port)]
        if self.id:
            cmd += ['-i', self.id]
        cmd += ['--debug-rgw=20', '--debug-ms=1']
        if args:
            cmd += args
        bash(cmd, env=env)
    def stop(self):
        """ stop the gateway """
        # mstop.sh matches the radosgw process by its port number
        assert(self.cluster)
        cmd = [mstart_path + 'mstop.sh', self.cluster.cluster_id, 'radosgw', str(self.port)]
        bash(cmd)
def gen_access_key():
    """Return a random 16-character S3-style access key (A-Z, 0-9)."""
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(16))
def gen_secret():
    """Return a random 32-character secret key (letters and digits)."""
    alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(32))
def gen_credentials():
    # bundle a fresh random access/secret key pair into a Credentials object
    return multisite.Credentials(gen_access_key(), gen_secret())
def cluster_name(cluster_num):
    """Map a 1-based cluster number to its mstart cluster id, e.g. 1 -> 'c1'."""
    return 'c%s' % cluster_num
def zonegroup_name(zonegroup_num):
    """Name zonegroups by 0-based index: 0 -> 'a', 1 -> 'b', ..."""
    letters = string.ascii_lowercase
    return letters[zonegroup_num]
def zone_name(zonegroup_num, zone_num):
    """Zone names are the zonegroup letter plus a 1-based zone number."""
    suffix = str(zone_num + 1)
    return zonegroup_name(zonegroup_num) + suffix
def gateway_port(zonegroup_num, gateway_num):
    """Ports start at 8000, with a 100-port band per zonegroup."""
    base = 8000 + zonegroup_num * 100
    return base + gateway_num
def gateway_name(zonegroup_num, zone_num, gateway_num):
    """Gateways are named '<zone>-<n>' with a 1-based gateway index."""
    ordinal = str(gateway_num + 1)
    return zone_name(zonegroup_num, zone_num) + '-' + ordinal
def zone_endpoints(zonegroup_num, zone_num, gateways_per_zone):
    """List the http://localhost:<port> endpoint of every gateway in a zone."""
    base = gateway_port(zonegroup_num, zone_num * gateways_per_zone)
    return ['http://localhost:' + str(base + i)
            for i in range(gateways_per_zone)]
def get_log_level(log_level):
    """Translate a ceph-style numeric verbosity into a logging level.

    Higher ceph values mean more output: 20+ -> DEBUG, 10+ -> INFO,
    5+ -> WARN, 1+ -> ERROR, and anything below 1 -> CRITICAL.
    """
    thresholds = ((20, logging.DEBUG),
                  (10, logging.INFO),
                  (5, logging.WARN),
                  (1, logging.ERROR))
    for minimum, level in thresholds:
        if log_level >= minimum:
            return level
    return logging.CRITICAL
def setup_logging(log_level_console, log_file, log_level_file):
    """Attach a console handler (and an optional file handler) to the
    module logger, each at its own verbosity."""
    if log_file:
        file_handler = logging.FileHandler(log_file)
        file_handler.setFormatter(
            logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
        file_handler.setLevel(get_log_level(log_level_file))
        log.addHandler(file_handler)
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(logging.Formatter('%(levelname)s %(message)s'))
    console_handler.setLevel(get_log_level(log_level_console))
    log.addHandler(console_handler)
    log.setLevel(get_log_level(log_level_console))
def init(parse_args):
    """Build (or attach to) the multisite environment described by the test
    config, then hand realm/user/config over to the test modules.

    :param parse_args: when True, command-line arguments may override the
                       config-file defaults (script mode); when False the
                       config file alone is used (nose mode).
    """
    cfg = configparser.RawConfigParser({
                        'num_zonegroups': 1,
                        'num_zones': 3,
                        'num_az_zones': 0,
                        'gateways_per_zone': 2,
                        'no_bootstrap': 'false',
                        'log_level': 20,
                        'log_file': None,
                        'file_log_level': 20,
                        'tenant': None,
                        'checkpoint_retries': 60,
                        'checkpoint_delay': 5,
                        'reconfigure_delay': 5,
                        'use_ssl': 'false',
                        })
    try:
        path = os.environ['RGW_MULTI_TEST_CONF']
    except KeyError:
        path = test_path + 'test_multi.conf'
    try:
        with open(path) as f:
            cfg.readfp(f)  # NOTE(review): readfp() is deprecated in py3; read_file() is the modern name
    except:  # NOTE(review): bare except deliberately swallows any read error; the defaults above are used
        print('WARNING: error reading test config. Path can be set through the RGW_MULTI_TEST_CONF env variable')
        pass
    parser = argparse.ArgumentParser(
            description='Run rgw multi-site tests',
            usage='test_multi [--num-zonegroups <num>] [--num-zones <num>] [--no-bootstrap]')
    section = 'DEFAULT'
    parser.add_argument('--num-zonegroups', type=int, default=cfg.getint(section, 'num_zonegroups'))
    parser.add_argument('--num-zones', type=int, default=cfg.getint(section, 'num_zones'))
    parser.add_argument('--gateways-per-zone', type=int, default=cfg.getint(section, 'gateways_per_zone'))
    parser.add_argument('--no-bootstrap', action='store_true', default=cfg.getboolean(section, 'no_bootstrap'))
    parser.add_argument('--log-level', type=int, default=cfg.getint(section, 'log_level'))
    parser.add_argument('--log-file', type=str, default=cfg.get(section, 'log_file'))
    parser.add_argument('--file-log-level', type=int, default=cfg.getint(section, 'file_log_level'))
    parser.add_argument('--tenant', type=str, default=cfg.get(section, 'tenant'))
    parser.add_argument('--checkpoint-retries', type=int, default=cfg.getint(section, 'checkpoint_retries'))
    parser.add_argument('--checkpoint-delay', type=int, default=cfg.getint(section, 'checkpoint_delay'))
    parser.add_argument('--reconfigure-delay', type=int, default=cfg.getint(section, 'reconfigure_delay'))
    parser.add_argument('--use-ssl', type=bool, default=cfg.getboolean(section, 'use_ssl'))  # NOTE(review): type=bool treats any non-empty string as True; the value is also unused — cfg wins below
    es_cfg = []
    cloud_cfg = []
    az_cfg = []
    # every config section beyond DEFAULT describes a special-tier zone
    for s in cfg.sections():
        if s.startswith('elasticsearch'):
            es_cfg.append(ESZoneConfig(cfg, s))
        elif s.startswith('cloud'):
            cloud_cfg.append(CloudZoneConfig(cfg, s))
        elif s.startswith('archive'):
            az_cfg.append(AZoneConfig(cfg, s))
    argv = []
    if parse_args:
        argv = sys.argv[1:]
    args = parser.parse_args(argv)
    bootstrap = not args.no_bootstrap
    setup_logging(args.log_level, args.log_file, args.file_log_level)
    # start first cluster
    c1 = Cluster(cluster_name(1))
    if bootstrap:
        c1.start()
    clusters = []
    clusters.append(c1)
    admin_creds = gen_credentials()
    admin_user = multisite.User('zone.user')
    user_creds = gen_credentials()
    user = multisite.User('tester', tenant=args.tenant)
    realm = multisite.Realm('r')
    if bootstrap:
        # create the realm on c1
        realm.create(c1)
    else:
        realm.get(c1)
    period = multisite.Period(realm=realm)
    realm.current_period = period
    num_es_zones = len(es_cfg)
    num_cloud_zones = len(cloud_cfg)
    num_az_zones = cfg.getint(section, 'num_az_zones')
    # regular zones come first; es/cloud/archive zones are appended after
    num_zones = args.num_zones + num_es_zones + num_cloud_zones + num_az_zones
    use_ssl = cfg.getboolean(section, 'use_ssl')
    if use_ssl and bootstrap:
        # generate a self-signed cert for the SSL frontends
        cmd = ['openssl', 'req',
                '-x509',
                '-newkey', 'rsa:4096',
                '-sha256',
                '-nodes',
                '-keyout', 'key.pem',
                '-out', 'cert.pem',
                '-subj', '/CN=localhost',
                '-days', '3650']
        bash(cmd)
        # append key to cert
        fkey = open('./key.pem', 'r')
        if fkey.mode == 'r':
            fcert = open('./cert.pem', 'a')
            fcert.write(fkey.read())
            fcert.close()
        fkey.close()
    for zg in range(0, args.num_zonegroups):
        zonegroup = multisite.ZoneGroup(zonegroup_name(zg), period)
        period.zonegroups.append(zonegroup)
        is_master_zg = zg == 0
        if is_master_zg:
            period.master_zonegroup = zonegroup
        for z in range(0, num_zones):
            is_master = z == 0
            # start a cluster, or use c1 for first zone
            cluster = None
            if is_master_zg and is_master:
                cluster = c1
            else:
                cluster = Cluster(cluster_name(len(clusters) + 1))
                clusters.append(cluster)
                if bootstrap:
                    cluster.start()
                    # pull realm configuration from the master's gateway
                    gateway = realm.meta_master_zone().gateways[0]
                    realm.pull(cluster, gateway, admin_creds)
            endpoints = zone_endpoints(zg, z, args.gateways_per_zone)
            if is_master:
                if bootstrap:
                    # create the zonegroup on its first zone's cluster
                    arg = []
                    if is_master_zg:
                        arg += ['--master']
                    if len(endpoints): # use master zone's endpoints
                        arg += ['--endpoints', ','.join(endpoints)]
                    zonegroup.create(cluster, arg)
                else:
                    zonegroup.get(cluster)
            # classify this zone index into regular / es / cloud / archive
            es_zone = (z >= args.num_zones and z < args.num_zones + num_es_zones)
            cloud_zone = (z >= args.num_zones + num_es_zones and z < args.num_zones + num_es_zones + num_cloud_zones)
            az_zone = (z >= args.num_zones + num_es_zones + num_cloud_zones)
            # create the zone in its zonegroup
            zone = multisite.Zone(zone_name(zg, z), zonegroup, cluster)  # NOTE(review): dead store — always replaced by one of the tier-specific zones below
            if es_zone:
                zone_index = z - args.num_zones
                zone = ESZone(zone_name(zg, z), es_cfg[zone_index].endpoint, zonegroup, cluster)
            elif cloud_zone:
                zone_index = z - args.num_zones - num_es_zones
                ccfg = cloud_cfg[zone_index]
                zone = CloudZone(zone_name(zg, z), ccfg.endpoint, ccfg.credentials, ccfg.source_bucket,
                                 ccfg.target_path, zonegroup, cluster)
            elif az_zone:
                zone_index = z - args.num_zones - num_es_zones - num_cloud_zones
                zone = AZone(zone_name(zg, z), zonegroup, cluster)
            else:
                zone = RadosZone(zone_name(zg, z), zonegroup, cluster)
            if bootstrap:
                arg = admin_creds.credential_args()
                if is_master:
                    arg += ['--master']
                if len(endpoints):
                    arg += ['--endpoints', ','.join(endpoints)]
                zone.create(cluster, arg)
            else:
                zone.get(cluster)
            zonegroup.zones.append(zone)
            if is_master:
                zonegroup.master_zone = zone
            zonegroup.zones_by_type.setdefault(zone.tier_type(), []).append(zone)
            if zone.is_read_only():
                zonegroup.ro_zones.append(zone)
            else:
                zonegroup.rw_zones.append(zone)
            # update/commit the period
            if bootstrap:
                period.update(zone, commit=True)
            ssl_port_offset = 1000
            # start the gateways
            for g in range(0, args.gateways_per_zone):
                port = gateway_port(zg, g + z * args.gateways_per_zone)
                client_id = gateway_name(zg, z, g)
                gateway = Gateway(client_id, 'localhost', port, cluster, zone,
                                  ssl_port = port+ssl_port_offset if use_ssl else 0)
                if bootstrap:
                    gateway.start()
                zone.gateways.append(gateway)
            if is_master_zg and is_master:
                if bootstrap:
                    # create admin user
                    arg = ['--display-name', '"Zone User"', '--system']
                    arg += admin_creds.credential_args()
                    admin_user.create(zone, arg)
                    # create test user
                    arg = ['--display-name', '"Test User"', '--caps', 'roles=*']
                    arg += user_creds.credential_args()
                    user.create(zone, arg)
                else:
                    # read users and update keys
                    admin_user.info(zone)
                    admin_creds = admin_user.credentials[0]
                    arg = []
                    user.info(zone, arg)
                    user_creds = user.credentials[0]
    if not bootstrap:
        period.get(c1)
    config = Config(checkpoint_retries=args.checkpoint_retries,
                    checkpoint_delay=args.checkpoint_delay,
                    reconfigure_delay=args.reconfigure_delay,
                    tenant=args.tenant)
    init_multi(realm, user, config)
def setup_module():
    # nose entry point: build the environment from the config file only
    init(False)
# script entry point: command-line arguments may override the config file
if __name__ == "__main__":
    init(True)
| 15,346 | 36.340633 | 117 | py |
null | ceph-main/src/test/rgw/test_rgw_amqp.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "rgw_amqp.h"
#include "common/ceph_context.h"
#include "amqp_mock.h"
#include <gtest/gtest.h>
#include <chrono>
#include <thread>
#include <atomic>
using namespace rgw;
// polling interval and upper-bound waits used by the drain loops below
const std::chrono::milliseconds wait_time(10);
const std::chrono::milliseconds long_wait_time = wait_time*50;
const std::chrono::seconds idle_time(30);
// RAII helper that releases the global CephContext when the test binary
// exits. Under seastar the context is plainly heap allocated (delete);
// otherwise it is reference counted and we drop our reference via put().
class CctCleaner {
  CephContext* cct;
public:
  CctCleaner(CephContext* _cct) : cct(_cct) {}
  ~CctCleaner() { 
#ifdef WITH_SEASTAR
    delete cct; 
#else
    cct->put(); 
#endif
  }
};
// shared context for all tests in this file; cleaned up by 'cleaner'
auto cct = new CephContext(CEPH_ENTITY_TYPE_CLIENT);
CctCleaner cleaner(cct);
// fixture: (re)initializes the amqp manager around every test
class TestAMQP : public ::testing::Test {
protected:
  amqp::connection_id_t conn_id;
  // snapshot of the global dequeued counter taken at the last drain
  unsigned current_dequeued = 0U;
  void SetUp() override {
    ASSERT_TRUE(amqp::init(cct));
  }
  void TearDown() override {
    amqp::shutdown();
  }
  // wait for at least one new (since last drain) message to be dequeued
  // and then wait for all pending answers to be received
  void wait_until_drained() {
    while (amqp::get_dequeued() == current_dequeued) {
      std::this_thread::sleep_for(wait_time);
    }
    while (amqp::get_inflight() > 0) {
      std::this_thread::sleep_for(wait_time);
    }
    current_dequeued = amqp::get_dequeued();
  }
};
// flags set by the confirm callbacks; checked by the tests after draining
std::atomic<bool> callback_invoked = false;
std::atomic<int> callbacks_invoked = 0;
// note: because these callbacks are shared among different "publish" calls
// they should be used on different connections
void my_callback_expect_ack(int rc) {
  EXPECT_EQ(0, rc);
  callback_invoked = true;
}
void my_callback_expect_nack(int rc) {
  EXPECT_LT(rc, 0);
  callback_invoked = true;
}
void my_callback_expect_multiple_acks(int rc) {
  EXPECT_EQ(0, rc);
  ++callbacks_invoked;
}
// heap-only, self-deleting callback object: create() allocates it and
// callback() destroys it after counting the ack — models callbacks whose
// state lives exactly as long as one confirmation
class dynamic_callback_wrapper {
    dynamic_callback_wrapper() = default;
public:
    static dynamic_callback_wrapper* create() {
        return new dynamic_callback_wrapper;
    }
    void callback(int rc) {
        EXPECT_EQ(0, rc);
        ++callbacks_invoked;
        delete this;
    }
};
void my_callback_expect_close_or_ack(int rc) {
  // deleting the connection should trigger the callback with -4098
  // but due to race conditions, some may get an ack
  // (presumably -4098 is the library's connection-closed status — confirm)
  EXPECT_TRUE(-4098 == rc || 0 == rc);
}
// happy path: plain amqp connect adds a connection and publish succeeds
TEST_F(TestAMQP, ConnectionOK)
{
  const auto connection_number = amqp::get_connection_count();
  auto rc = amqp::connect(conn_id, "amqp://localhost", "ex1", false, false, boost::none);
  EXPECT_TRUE(rc);
  EXPECT_EQ(amqp::get_connection_count(), connection_number + 1);
  rc = amqp::publish(conn_id, "topic", "message");
  EXPECT_EQ(rc, 0);
}
// amqps:// works when the mock accepts the SSL port (5671)
TEST_F(TestAMQP, SSLConnectionOK)
{
  const int port = 5671;
  const auto connection_number = amqp::get_connection_count();
  amqp_mock::set_valid_port(port);
  auto rc = amqp::connect(conn_id, "amqps://localhost", "ex1", false, false, boost::none);
  EXPECT_TRUE(rc);
  EXPECT_EQ(amqp::get_connection_count(), connection_number + 1);
  rc = amqp::publish(conn_id, "topic", "message");
  EXPECT_EQ(rc, 0);
  // restore the default plain-amqp port for the next test
  amqp_mock::set_valid_port(5672);
}
// plain and SSL connections to the same host coexist as two distinct
// connections, each with its canonical string form
TEST_F(TestAMQP, PlainAndSSLConnectionsOK)
{
  const int port = 5671;
  const auto connection_number = amqp::get_connection_count();
  amqp_mock::set_valid_port(port);
  amqp::connection_id_t conn_id1;
  auto rc = amqp::connect(conn_id1, "amqps://localhost", "ex1", false, false, boost::none);
  EXPECT_TRUE(rc);
  EXPECT_EQ(amqp::get_connection_count(), connection_number + 1);
  rc = amqp::publish(conn_id1, "topic", "message");
  EXPECT_EQ(rc, 0);
  EXPECT_EQ(amqp::to_string(conn_id1), "amqps://localhost:5671/?exchange=ex1");
  amqp_mock::set_valid_port(5672);
  amqp::connection_id_t conn_id2;
  rc = amqp::connect(conn_id2, "amqp://localhost", "ex1", false, false, boost::none);
  EXPECT_TRUE(rc);
  EXPECT_EQ(amqp::to_string(conn_id2), "amqp://localhost:5672/?exchange=ex1");
  EXPECT_EQ(amqp::get_connection_count(), connection_number + 2);
  rc = amqp::publish(conn_id2, "topic", "message");
  EXPECT_EQ(rc, 0);
}
// connecting twice with identical parameters reuses the existing
// connection instead of creating a new one
TEST_F(TestAMQP, ConnectionReuse)
{
  amqp::connection_id_t conn_id1;
  auto rc = amqp::connect(conn_id1, "amqp://localhost", "ex1", false, false, boost::none);
  EXPECT_TRUE(rc);
  const auto connection_number = amqp::get_connection_count();
  amqp::connection_id_t conn_id2;
  rc = amqp::connect(conn_id2, "amqp://localhost", "ex1", false, false, boost::none);
  EXPECT_TRUE(rc);
  EXPECT_EQ(amqp::get_connection_count(), connection_number);
  rc = amqp::publish(conn_id1, "topic", "message");
  EXPECT_EQ(rc, 0);
}
// an unresolvable host: connect() still succeeds (connections are lazy),
// but the publish confirmation arrives as a nack
TEST_F(TestAMQP, NameResolutionFail)
{
  callback_invoked = false;
  const auto connection_number = amqp::get_connection_count();
  amqp::connection_id_t conn_id;
  auto rc = amqp::connect(conn_id, "amqp://kaboom", "ex1", false, false, boost::none);
  EXPECT_TRUE(rc);
  EXPECT_EQ(amqp::get_connection_count(), connection_number + 1);
  rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack);
  EXPECT_EQ(rc, 0);
  wait_until_drained();
  EXPECT_TRUE(callback_invoked);
}
// a port the mock rejects: lazy connect succeeds, publish is nacked
TEST_F(TestAMQP, InvalidPort)
{
  callback_invoked = false;
  const auto connection_number = amqp::get_connection_count();
  amqp::connection_id_t conn_id;
  auto rc = amqp::connect(conn_id, "amqp://localhost:1234", "ex1", false, false, boost::none);
  EXPECT_TRUE(rc);
  EXPECT_EQ(amqp::get_connection_count(), connection_number + 1);
  rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack);
  EXPECT_EQ(rc, 0);
  wait_until_drained();
  EXPECT_TRUE(callback_invoked);
}
// a host the mock rejects: lazy connect succeeds, publish is nacked
TEST_F(TestAMQP, InvalidHost)
{
  callback_invoked = false;
  const auto connection_number = amqp::get_connection_count();
  amqp::connection_id_t conn_id;
  auto rc = amqp::connect(conn_id, "amqp://0.0.0.1", "ex1", false, false, boost::none);
  EXPECT_TRUE(rc);
  // exactly one new connection was registered (duplicated assertion removed)
  EXPECT_EQ(amqp::get_connection_count(), connection_number + 1);
  rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack);
  EXPECT_EQ(rc, 0);
  wait_until_drained();
  EXPECT_TRUE(callback_invoked);
}
// a vhost the mock rejects: lazy connect succeeds, publish is nacked
TEST_F(TestAMQP, InvalidVhost)
{
  callback_invoked = false;
  const auto connection_number = amqp::get_connection_count();
  amqp::connection_id_t conn_id;
  auto rc = amqp::connect(conn_id, "amqp://localhost/kaboom", "ex1", false, false, boost::none);
  EXPECT_TRUE(rc);
  EXPECT_EQ(amqp::get_connection_count(), connection_number + 1);
  rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack);
  EXPECT_EQ(rc, 0);
  wait_until_drained();
  EXPECT_TRUE(callback_invoked);
}
// wrong credentials are nacked; the mock's default guest/guest pair is
// acked (two different hosts so the connections are distinct)
TEST_F(TestAMQP, UserPassword)
{
  amqp_mock::set_valid_host("127.0.0.1");
  {
    callback_invoked = false;
    const auto connection_number = amqp::get_connection_count();
    amqp::connection_id_t conn_id;
    auto rc = amqp::connect(conn_id, "amqp://foo:bar@127.0.0.1", "ex1", false, false, boost::none);
    EXPECT_TRUE(rc);
    EXPECT_EQ(amqp::get_connection_count(), connection_number + 1);
    rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack);
    EXPECT_EQ(rc, 0);
    wait_until_drained();
    EXPECT_TRUE(callback_invoked);
  }
  // now try the same connection with default user/password
  amqp_mock::set_valid_host("127.0.0.2");
  {
    callback_invoked = false;
    const auto connection_number = amqp::get_connection_count();
    amqp::connection_id_t conn_id;
    auto rc = amqp::connect(conn_id, "amqp://guest:guest@127.0.0.2", "ex1", false, false, boost::none);
    EXPECT_TRUE(rc);
    EXPECT_EQ(amqp::get_connection_count(), connection_number + 1);
    rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_ack);
    EXPECT_EQ(rc, 0);
    wait_until_drained();
    EXPECT_TRUE(callback_invoked);
  }
  amqp_mock::set_valid_host("localhost");
}
// a non-amqp URL scheme fails connect() synchronously; publishing on the
// unusable id still returns 0 and the confirmation is a nack
TEST_F(TestAMQP, URLParseError)
{
  callback_invoked = false;
  const auto connection_number = amqp::get_connection_count();
  amqp::connection_id_t conn_id;
  auto rc = amqp::connect(conn_id, "http://localhost", "ex1", false, false, boost::none);
  EXPECT_FALSE(rc);
  EXPECT_EQ(amqp::get_connection_count(), connection_number);
  rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack);
  EXPECT_EQ(rc, 0);
  wait_until_drained();
  EXPECT_TRUE(callback_invoked);
}
// NOTE(review): the URL scheme here is "http", so connect() appears to
// fail URL parsing before any exchange comparison can happen — confirm
// this test actually exercises the intended exchange-mismatch ("ex2") path
TEST_F(TestAMQP, ExchangeMismatch)
{
  callback_invoked = false;
  const auto connection_number = amqp::get_connection_count();
  amqp::connection_id_t conn_id;
  auto rc = amqp::connect(conn_id, "http://localhost", "ex2", false, false, boost::none);
  EXPECT_FALSE(rc);
  EXPECT_EQ(amqp::get_connection_count(), connection_number);
  rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack);
  EXPECT_EQ(rc, 0);
  wait_until_drained();
  EXPECT_TRUE(callback_invoked);
}
// fill the connection table to its limit; one more connect() must fail
// and publishing on the failed id must be nacked
TEST_F(TestAMQP, MaxConnections)
{
  // fill up all connections
  std::vector<amqp::connection_id_t> connections;
  auto remaining_connections = amqp::get_max_connections() - amqp::get_connection_count();
  while (remaining_connections > 0) {
    // a unique host per connection, so each one is really new
    const auto host = "127.10.0." + std::to_string(remaining_connections);
    amqp_mock::set_valid_host(host);
    amqp::connection_id_t conn_id;
    auto rc = amqp::connect(conn_id, "amqp://" + host, "ex1", false, false, boost::none);
    EXPECT_TRUE(rc);
    rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_ack);
    EXPECT_EQ(rc, 0);
    --remaining_connections;
    connections.push_back(conn_id);
  }
  EXPECT_EQ(amqp::get_connection_count(), amqp::get_max_connections());
  wait_until_drained();
  // try to add another connection
  {
    const std::string host = "toomany";
    amqp_mock::set_valid_host(host);
    amqp::connection_id_t conn_id;
    auto rc = amqp::connect(conn_id, "amqp://" + host, "ex1", false, false, boost::none);
    EXPECT_FALSE(rc);
    rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack);
    EXPECT_EQ(rc, 0);
    wait_until_drained();
  }
  EXPECT_EQ(amqp::get_connection_count(), amqp::get_max_connections());
  amqp_mock::set_valid_host("localhost");
}
// a single publish on a valid connection gets its ack callback
TEST_F(TestAMQP, ReceiveAck)
{
  callback_invoked = false;
  const std::string host("localhost1");
  amqp_mock::set_valid_host(host);
  amqp::connection_id_t conn_id;
  auto rc = amqp::connect(conn_id, "amqp://" + host, "ex1", false, false, boost::none);
  EXPECT_TRUE(rc);
  rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_ack);
  EXPECT_EQ(rc, 0);
  wait_until_drained();
  EXPECT_TRUE(callback_invoked);
  amqp_mock::set_valid_host("localhost");
}
// flood one connection with publishes; each confirmation is either an ack
// or the connection-closed status (see my_callback_expect_close_or_ack)
TEST_F(TestAMQP, ImplicitConnectionClose)
{
  callback_invoked = false;
  const std::string host("localhost1");
  amqp_mock::set_valid_host(host);
  amqp::connection_id_t conn_id;
  auto rc = amqp::connect(conn_id, "amqp://" + host, "ex1", false, false, boost::none);
  EXPECT_TRUE(rc);
  const auto NUMBER_OF_CALLS = 2000;
  for (auto i = 0; i < NUMBER_OF_CALLS; ++i) {
    // inner 'rc' intentionally shadows the outer bool from connect()
    auto rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_close_or_ack);
    EXPECT_EQ(rc, 0);
  }
  wait_until_drained();
  amqp_mock::set_valid_host("localhost");
}
// every one of N publishes receives its own ack callback
TEST_F(TestAMQP, ReceiveMultipleAck)
{
  callbacks_invoked = 0;
  const std::string host("localhost1");
  amqp_mock::set_valid_host(host);
  amqp::connection_id_t conn_id;
  auto rc = amqp::connect(conn_id, "amqp://" + host, "ex1", false, false, boost::none);
  EXPECT_TRUE(rc);
  const auto NUMBER_OF_CALLS = 100;
  for (auto i=0; i < NUMBER_OF_CALLS; ++i) {
    auto rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_multiple_acks);
    EXPECT_EQ(rc, 0);
  }
  wait_until_drained();
  EXPECT_EQ(callbacks_invoked, NUMBER_OF_CALLS);
  callbacks_invoked = 0;
  amqp_mock::set_valid_host("localhost");
}
// the broker acks in "multiple" mode (one ack covers a range of tags);
// every per-message callback must still fire exactly once
TEST_F(TestAMQP, ReceiveAckForMultiple)
{
  callbacks_invoked = 0;
  const std::string host("localhost1");
  amqp_mock::set_valid_host(host);
  amqp::connection_id_t conn_id;
  auto rc = amqp::connect(conn_id, "amqp://" + host, "ex1", false, false, boost::none);
  EXPECT_TRUE(rc);
  // the mock batches acks, skipping 59 tags per multi-ack
  amqp_mock::set_multiple(59);
  const auto NUMBER_OF_CALLS = 100;
  for (auto i=0; i < NUMBER_OF_CALLS; ++i) {
    rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_multiple_acks);
    EXPECT_EQ(rc, 0);
  }
  wait_until_drained();
  EXPECT_EQ(callbacks_invoked, NUMBER_OF_CALLS);
  callbacks_invoked = 0;
  amqp_mock::set_valid_host("localhost");
}
// like ReceiveAckForMultiple, but each callback owns heap state that is
// freed inside the callback itself (dynamic_callback_wrapper)
TEST_F(TestAMQP, DynamicCallback)
{
  callbacks_invoked = 0;
  const std::string host("localhost1");
  amqp_mock::set_valid_host(host);
  amqp::connection_id_t conn_id;
  auto rc = amqp::connect(conn_id, "amqp://" + host, "ex1", false, false, boost::none);
  EXPECT_TRUE(rc);
  amqp_mock::set_multiple(59);
  const auto NUMBER_OF_CALLS = 100;
  for (auto i=0; i < NUMBER_OF_CALLS; ++i) {
    rc = publish_with_confirm(conn_id, "topic", "message",
        std::bind(&dynamic_callback_wrapper::callback, dynamic_callback_wrapper::create(), std::placeholders::_1));
    EXPECT_EQ(rc, 0);
  }
  wait_until_drained();
  EXPECT_EQ(callbacks_invoked, NUMBER_OF_CALLS);
  callbacks_invoked = 0;
  amqp_mock::set_valid_host("localhost");
}
// the mock replies with broker nacks; the nack callback must fire
TEST_F(TestAMQP, ReceiveNack)
{
  callback_invoked = false;
  amqp_mock::REPLY_ACK = false;
  const std::string host("localhost2");
  amqp_mock::set_valid_host(host);
  amqp::connection_id_t conn_id;
  auto rc = amqp::connect(conn_id, "amqp://" + host, "ex1", false, false, boost::none);
  EXPECT_TRUE(rc);
  rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack);
  EXPECT_EQ(rc, 0);
  wait_until_drained();
  EXPECT_TRUE(callback_invoked);
  // restore mock defaults for the next test
  amqp_mock::REPLY_ACK = true;
  callback_invoked = false;
  amqp_mock::set_valid_host("localhost");
}
// force the next socket write to fail; the publish should surface a nack
TEST_F(TestAMQP, FailWrite)
{
  callback_invoked = false;
  amqp_mock::FAIL_NEXT_WRITE = true;
  const std::string host("localhost2");
  amqp_mock::set_valid_host(host);
  amqp::connection_id_t conn_id;
  auto rc = amqp::connect(conn_id, "amqp://" + host, "ex1", false, false, boost::none);
  EXPECT_TRUE(rc);
  rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack);
  EXPECT_EQ(rc, 0);
  wait_until_drained();
  EXPECT_TRUE(callback_invoked);
  // restore shared globals for subsequent tests
  amqp_mock::FAIL_NEXT_WRITE = false;
  callback_invoked = false;
  amqp_mock::set_valid_host("localhost");
}
// connect to an unreachable host: publish gets nacked, then after the host is
// made valid and the retry period elapses, publish succeeds with an ack
TEST_F(TestAMQP, RetryInvalidHost)
{
  callback_invoked = false;
  const std::string host = "192.168.0.1";
  const auto connection_number = amqp::get_connection_count();
  amqp::connection_id_t conn_id;
  auto rc = amqp::connect(conn_id, "amqp://"+host, "ex1", false, false, boost::none);
  EXPECT_TRUE(rc);
  // the connection object is created even though the host is unreachable
  EXPECT_EQ(amqp::get_connection_count(), connection_number + 1);
  rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack);
  EXPECT_EQ(rc, 0);
  wait_until_drained();
  EXPECT_TRUE(callback_invoked);
  // now next retry should be ok
  callback_invoked = false;
  amqp_mock::set_valid_host(host);
  std::this_thread::sleep_for(long_wait_time);
  rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_ack);
  EXPECT_EQ(rc, 0);
  wait_until_drained();
  EXPECT_TRUE(callback_invoked);
  // restore shared globals for subsequent tests
  amqp_mock::set_valid_host("localhost");
}
// connect to a wrong port: publish gets nacked, then after the port is made
// valid and the retry period elapses, publish succeeds with an ack
TEST_F(TestAMQP, RetryInvalidPort)
{
  callback_invoked = false;
  const int port = 9999;
  const auto connection_number = amqp::get_connection_count();
  amqp::connection_id_t conn_id;
  auto rc = amqp::connect(conn_id, "amqp://localhost:" + std::to_string(port), "ex1", false, false, boost::none);
  EXPECT_TRUE(rc);
  // the connection object is created even though the port is wrong
  EXPECT_EQ(amqp::get_connection_count(), connection_number + 1);
  rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack);
  EXPECT_EQ(rc, 0);
  wait_until_drained();
  EXPECT_TRUE(callback_invoked);
  // now next retry should be ok
  callback_invoked = false;
  amqp_mock::set_valid_port(port);
  std::this_thread::sleep_for(long_wait_time);
  rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_ack);
  EXPECT_EQ(rc, 0);
  wait_until_drained();
  EXPECT_TRUE(callback_invoked);
  // restore the mock's default AMQP port (5672)
  amqp_mock::set_valid_port(5672);
}
// a failed write nacks the publish; after clearing the failure flag and
// waiting for the retry period, a subsequent publish is acked
TEST_F(TestAMQP, RetryFailWrite)
{
  callback_invoked = false;
  amqp_mock::FAIL_NEXT_WRITE = true;
  const std::string host("localhost2");
  amqp_mock::set_valid_host(host);
  amqp::connection_id_t conn_id;
  auto rc = amqp::connect(conn_id, "amqp://" + host, "ex1", false, false, boost::none);
  EXPECT_TRUE(rc);
  rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack);
  EXPECT_EQ(rc, 0);
  wait_until_drained();
  EXPECT_TRUE(callback_invoked);
  // now next retry should be ok
  amqp_mock::FAIL_NEXT_WRITE = false;
  callback_invoked = false;
  std::this_thread::sleep_for(long_wait_time);
  rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_ack);
  EXPECT_EQ(rc, 0);
  wait_until_drained();
  EXPECT_TRUE(callback_invoked);
  // restore shared globals for subsequent tests
  amqp_mock::set_valid_host("localhost");
}
// an unused connection should be reaped after the idle timeout, and a publish
// on the reaped connection should surface a nack
TEST_F(TestAMQP, IdleConnection)
{
  // this test is skipped since it takes 30seconds
  //GTEST_SKIP();
  // fix: reset the global flag up front; previously the final EXPECT_TRUE
  // could pass on state leaked from an earlier test
  callback_invoked = false;
  const auto connection_number = amqp::get_connection_count();
  amqp::connection_id_t conn_id;
  auto rc = amqp::connect(conn_id, "amqp://localhost", "ex1", false, false, boost::none);
  EXPECT_TRUE(rc);
  EXPECT_EQ(amqp::get_connection_count(), connection_number + 1);
  // wait long enough for the idle timeout to destroy the connection
  std::this_thread::sleep_for(idle_time);
  EXPECT_EQ(amqp::get_connection_count(), connection_number);
  rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack);
  EXPECT_EQ(rc, 0);
  wait_until_drained();
  EXPECT_TRUE(callback_invoked);
  // restore shared globals for subsequent tests
  callback_invoked = false;
}
| 17,570 | 32.15283 | 119 | cc |
null | ceph-main/src/test/rgw/test_rgw_arn.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "rgw_arn.h"
#include <gtest/gtest.h>
using namespace rgw;
const int BASIC_ENTRIES = 6;
const std::string basic_str[BASIC_ENTRIES] = {"arn:aws:s3:us-east-1:12345:resource",
"arn:aws:s3:us-east-1:12345:resourceType/resource",
"arn:aws:s3:us-east-1:12345:resourceType/resource/qualifier",
"arn:aws:s3:us-east-1:12345:resourceType/resource:qualifier",
"arn:aws:s3:us-east-1:12345:resourceType:resource",
"arn:aws:s3:us-east-1:12345:resourceType:resource/qualifier"};
const std::string expected_basic_resource[BASIC_ENTRIES] = {"resource",
"resourceType/resource",
"resourceType/resource/qualifier",
"resourceType/resource:qualifier",
"resourceType:resource",
"resourceType:resource/qualifier"};
// parse each well-formed ARN and verify all five components are extracted
TEST(TestARN, Basic)
{
  for (auto i = 0; i < BASIC_ENTRIES; ++i) {
    boost::optional<ARN> arn = ARN::parse(basic_str[i]);
    ASSERT_TRUE(arn);
    EXPECT_EQ(arn->partition, Partition::aws);
    EXPECT_EQ(arn->service, Service::s3);
    EXPECT_STREQ(arn->region.c_str(), "us-east-1");
    EXPECT_STREQ(arn->account.c_str(), "12345");
    // the resource part keeps its internal '/'s and ':'s verbatim
    EXPECT_STREQ(arn->resource.c_str(), expected_basic_resource[i].c_str());
  }
}
// round-trip check: an ARN parsed from a string must serialize back to it
TEST(TestARN, ToString)
{
  for (const auto& str : basic_str) {
    boost::optional<ARN> arn = ARN::parse(str);
    ASSERT_TRUE(arn);
    EXPECT_STREQ(to_string(*arn).c_str(), str.c_str());
  }
}
const std::string expected_basic_resource_type[BASIC_ENTRIES] =
{"", "resourceType", "resourceType", "resourceType", "resourceType", "resourceType"};
const std::string expected_basic_qualifier[BASIC_ENTRIES] =
{"", "", "qualifier", "qualifier", "", "qualifier"};
// split the resource part of each ARN into resource-type/resource/qualifier
TEST(TestARNResource, Basic)
{
  for (auto i = 0; i < BASIC_ENTRIES; ++i) {
    boost::optional<ARN> arn = ARN::parse(basic_str[i]);
    ASSERT_TRUE(arn);
    ASSERT_FALSE(arn->resource.empty());
    boost::optional<ARNResource> resource = ARNResource::parse(arn->resource);
    ASSERT_TRUE(resource);
    EXPECT_STREQ(resource->resource.c_str(), "resource");
    EXPECT_STREQ(resource->resource_type.c_str(), expected_basic_resource_type[i].c_str());
    EXPECT_STREQ(resource->qualifier.c_str(), expected_basic_qualifier[i].c_str());
  }
}
const int EMPTY_ENTRIES = 4;
const std::string empty_str[EMPTY_ENTRIES] = {"arn:aws:s3:::resource",
"arn:aws:s3::12345:resource",
"arn:aws:s3:us-east-1::resource",
"arn:aws:s3:us-east-1:12345:"};
// ARNs with empty region/account/resource fields must still parse; each
// field is either empty or the non-empty value present in the input
TEST(TestARN, Empty)
{
  for (auto i = 0; i < EMPTY_ENTRIES; ++i) {
    boost::optional<ARN> arn = ARN::parse(empty_str[i]);
    ASSERT_TRUE(arn);
    EXPECT_EQ(arn->partition, Partition::aws);
    EXPECT_EQ(arn->service, Service::s3);
    EXPECT_TRUE(arn->region.empty() || arn->region == "us-east-1");
    EXPECT_TRUE(arn->account.empty() || arn->account == "12345");
    EXPECT_TRUE(arn->resource.empty() || arn->resource == "resource");
  }
}
const int WILDCARD_ENTRIES = 3;
const std::string wildcard_str[WILDCARD_ENTRIES] = {"arn:aws:s3:*:*:resource",
"arn:aws:s3:*:12345:resource",
"arn:aws:s3:us-east-1:*:resource"};
// FIXME: currently the following: "arn:aws:s3:us-east-1:12345:*"
// does not fail, even if "wildcard" is not set to "true"
// wildcard ARNs are rejected unless parse() is called with wildcard=true
TEST(TestARN, Wildcard)
{
  for (auto i = 0; i < WILDCARD_ENTRIES; ++i) {
    // without the wildcard flag, parsing must fail
    EXPECT_FALSE(ARN::parse(wildcard_str[i]));
    boost::optional<ARN> arn = ARN::parse(wildcard_str[i], true);
    ASSERT_TRUE(arn);
    EXPECT_EQ(arn->partition, Partition::aws);
    EXPECT_EQ(arn->service, Service::s3);
    EXPECT_TRUE(arn->region == "*" || arn->region == "us-east-1");
    EXPECT_TRUE(arn->account == "*" || arn->account == "12345");
    EXPECT_TRUE(arn->resource == "*" || arn->resource == "resource");
  }
}
| 4,304 | 38.861111 | 96 | cc |
null | ceph-main/src/test/rgw/test_rgw_bencode.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "gtest/gtest.h"
#include "rgw_torrent.h"
using namespace std;
// strings encode as "<length>:<bytes>" and concatenate back to back
TEST(Bencode, String)
{
  bufferlist bl;
  bencode("foo", bl);
  bencode("bar", bl);
  bencode("baz", bl);
  const string out(bl.c_str(), bl.length());
  ASSERT_STREQ("3:foo3:bar3:baz", out.c_str());
}
// integers encode as "i<value>e", including negative values
TEST(Bencode, Integers)
{
  bufferlist bl;
  bencode(0, bl);
  bencode(-3, bl);
  bencode(7, bl);
  string s(bl.c_str(), bl.length());
  ASSERT_STREQ("i0ei-3ei7e", s.c_str());
}
// dictionaries are delimited by 'd'...'e' and hold key/value pairs
TEST(Bencode, Dict)
{
  bufferlist bl;
  bencode_dict(bl);
  bencode("foo", 5, bl);
  bencode("bar", "baz", bl);
  bencode_end(bl);
  string s(bl.c_str(), bl.length());
  ASSERT_STREQ("d3:fooi5e3:bar3:baze", s.c_str());
}
// lists are delimited by 'l'...'e'; the element encoding matches Dict's
TEST(Bencode, List)
{
  bufferlist bl;
  bencode_list(bl);
  bencode("foo", 5, bl);
  bencode("bar", "baz", bl);
  bencode_end(bl);
  string s(bl.c_str(), bl.length());
  ASSERT_STREQ("l3:fooi5e3:bar3:baze", s.c_str());
}
| 1,001 | 15.16129 | 70 | cc |
null | ceph-main/src/test/rgw/test_rgw_bucket_sync_cache.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 Red Hat, Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#include "rgw_bucket_sync_cache.h"
#include <gtest/gtest.h>
using namespace rgw::bucket_sync;
// helper function to construct rgw_bucket_shard
static rgw_bucket_shard make_key(const std::string& tenant,
                                 const std::string& bucket, int shard)
{
  // build the composite bucket key and wrap it with the shard number
  return rgw_bucket_shard{rgw_bucket_key{tenant, bucket}, shard};
}
// a pinned entry is returned again for the same key
TEST(BucketSyncCache, ReturnCachedPinned)
{
  auto cache = Cache::create(0);
  const auto key = make_key("", "1", 0);
  auto h1 = cache->get(key, std::nullopt); // pin
  h1->counter = 1;
  auto h2 = cache->get(key, std::nullopt);
  EXPECT_EQ(1, h2->counter);
}
// with target_size=0, an unpinned entry is recycled (counter back to 0)
TEST(BucketSyncCache, ReturnNewUnpinned)
{
  auto cache = Cache::create(0);
  const auto key = make_key("", "1", 0);
  cache->get(key, std::nullopt)->counter = 1; // pin+unpin
  EXPECT_EQ(0, cache->get(key, std::nullopt)->counter);
}
// same bucket name under different tenants maps to distinct entries
TEST(BucketSyncCache, DistinctTenant)
{
  auto cache = Cache::create(2);
  const auto key1 = make_key("a", "bucket", 0);
  const auto key2 = make_key("b", "bucket", 0);
  cache->get(key1, std::nullopt)->counter = 1;
  EXPECT_EQ(0, cache->get(key2, std::nullopt)->counter);
}
// different shard numbers of the same bucket map to distinct entries
TEST(BucketSyncCache, DistinctShards)
{
  auto cache = Cache::create(2);
  const auto key1 = make_key("", "bucket", 0);
  const auto key2 = make_key("", "bucket", 1);
  cache->get(key1, std::nullopt)->counter = 1;
  EXPECT_EQ(0, cache->get(key2, std::nullopt)->counter);
}
// an empty generation and a concrete generation map to distinct entries
TEST(BucketSyncCache, DistinctGen)
{
  auto cache = Cache::create(2);
  const auto key = make_key("", "bucket", 0);
  std::optional<uint64_t> gen1; // empty
  std::optional<uint64_t> gen2 = 5;
  cache->get(key, gen1)->counter = 1;
  EXPECT_EQ(0, cache->get(key, gen2)->counter);
}
// even with target_size=0, pinned entries must not be evicted/recycled
TEST(BucketSyncCache, DontEvictPinned)
{
  auto cache = Cache::create(0);
  const auto key1 = make_key("", "1", 0);
  const auto key2 = make_key("", "2", 0);
  auto h1 = cache->get(key1, std::nullopt);
  EXPECT_EQ(key1, h1->key.first);
  auto h2 = cache->get(key2, std::nullopt);
  EXPECT_EQ(key2, h2->key.first);
  EXPECT_EQ(key1, h1->key.first); // h1 unchanged
}
// an outstanding Handle keeps the cache itself alive after the last
// shared_ptr to the cache goes out of scope
TEST(BucketSyncCache, HandleLifetime)
{
  const auto key = make_key("", "1", 0);
  Handle h; // test that handles keep the cache referenced
  {
    auto cache = Cache::create(0);
    h = cache->get(key, std::nullopt);
  }
  EXPECT_EQ(key, h->key.first);
}
// unpinned entries stay cached up to target_size; overflowing recycles
// the least-recently-used entry
TEST(BucketSyncCache, TargetSize)
{
  auto cache = Cache::create(2);
  const auto key1 = make_key("", "1", 0);
  const auto key2 = make_key("", "2", 0);
  const auto key3 = make_key("", "3", 0);
  // fill cache up to target_size=2
  cache->get(key1, std::nullopt)->counter = 1;
  cache->get(key2, std::nullopt)->counter = 2;
  // test that each unpinned entry is still cached
  EXPECT_EQ(1, cache->get(key1, std::nullopt)->counter);
  EXPECT_EQ(2, cache->get(key2, std::nullopt)->counter);
  // overflow the cache and recycle key1
  cache->get(key3, std::nullopt)->counter = 3;
  // test that the oldest entry was recycled
  EXPECT_EQ(0, cache->get(key1, std::nullopt)->counter);
}
// move-assignment into a default-constructed Handle transfers the pin
TEST(BucketSyncCache, HandleMoveAssignEmpty)
{
  auto cache = Cache::create(0);
  const auto key1 = make_key("", "1", 0);
  const auto key2 = make_key("", "2", 0);
  Handle j1;
  {
    auto h1 = cache->get(key1, std::nullopt);
    j1 = std::move(h1); // assign over empty handle
    EXPECT_EQ(key1, j1->key.first);
  }
  auto h2 = cache->get(key2, std::nullopt);
  EXPECT_EQ(key1, j1->key.first); // j1 stays pinned
}
// move-assignment over a non-empty Handle releases its old entry (and its
// old cache reference) and adopts the new one
TEST(BucketSyncCache, HandleMoveAssignExisting)
{
  const auto key1 = make_key("", "1", 0);
  const auto key2 = make_key("", "2", 0);
  Handle h1;
  {
    auto cache1 = Cache::create(0);
    h1 = cache1->get(key1, std::nullopt);
  } // h1 has the last ref to cache1
  {
    auto cache2 = Cache::create(0);
    auto h2 = cache2->get(key2, std::nullopt);
    h1 = std::move(h2); // assign over existing handle
  }
  EXPECT_EQ(key2, h1->key.first);
}
// copy-assignment into a default-constructed Handle shares the same entry
TEST(BucketSyncCache, HandleCopyAssignEmpty)
{
  auto cache = Cache::create(0);
  const auto key1 = make_key("", "1", 0);
  const auto key2 = make_key("", "2", 0);
  Handle j1;
  {
    auto h1 = cache->get(key1, std::nullopt);
    j1 = h1; // assign over empty handle
    EXPECT_EQ(&*h1, &*j1);
  }
  auto h2 = cache->get(key2, std::nullopt);
  EXPECT_EQ(key1, j1->key.first); // j1 stays pinned
}
// copy-assignment over a non-empty Handle drops the old entry/cache ref
// and shares the newly assigned one
TEST(BucketSyncCache, HandleCopyAssignExisting)
{
  const auto key1 = make_key("", "1", 0);
  const auto key2 = make_key("", "2", 0);
  Handle h1;
  {
    auto cache1 = Cache::create(0);
    h1 = cache1->get(key1, std::nullopt);
  } // h1 has the last ref to cache1
  {
    auto cache2 = Cache::create(0);
    auto h2 = cache2->get(key2, std::nullopt);
    h1 = h2; // assign over existing handle
    EXPECT_EQ(&*h1, &*h2);
  }
  EXPECT_EQ(key2, h1->key.first);
}
| 5,163 | 26.178947 | 70 | cc |
null | ceph-main/src/test/rgw/test_rgw_common.cc | #include "test_rgw_common.h"
// register a placement target named 'name' in the zonegroup and create its
// index/data/extra pools ("<name>.index" etc.) in the zone params; when
// is_default is set, make it the zonegroup's default placement rule
void test_rgw_add_placement(RGWZoneGroup *zonegroup, RGWZoneParams *zone_params, const std::string& name, bool is_default)
{
  zonegroup->placement_targets[name] = { name };
  RGWZonePlacementInfo& pinfo = zone_params->placement_pools[name];
  pinfo.index_pool = rgw_pool(name + ".index").to_str();
  rgw_pool data_pool(name + ".data");
  pinfo.storage_classes.set_storage_class(RGW_STORAGE_CLASS_STANDARD, &data_pool, nullptr);
  pinfo.data_extra_pool = rgw_pool(name + ".extra").to_str();
  if (is_default) {
    zonegroup->default_placement = rgw_placement_rule(name, RGW_STORAGE_CLASS_STANDARD);
  }
}
// initialize a minimal test environment with a single default placement
void test_rgw_init_env(RGWZoneGroup *zonegroup, RGWZoneParams *zone_params)
{
  test_rgw_add_placement(zonegroup, zone_params, "default-placement", true);
}
// fill an rgw_bucket that carries explicit (per-bucket) data/index pools
void test_rgw_populate_explicit_placement_bucket(rgw_bucket *b, const char *t, const char *n, const char *dp, const char *ip, const char *m, const char *id)
{
  b->tenant = t;
  b->name = n;
  b->marker = m;
  b->bucket_id = id;
  b->explicit_placement.data_pool = rgw_pool(dp);
  b->explicit_placement.index_pool = rgw_pool(ip);
}
// fill the legacy old_rgw_bucket, which stores pool names as plain strings
void test_rgw_populate_old_bucket(old_rgw_bucket *b, const char *t, const char *n, const char *dp, const char *ip, const char *m, const char *id)
{
  b->tenant = t;
  b->name = n;
  b->marker = m;
  b->bucket_id = id;
  b->data_pool = dp;
  b->index_pool = ip;
}
// derive the RADOS oid for an object (the locator is computed too, but
// discarded -- only the oid is of interest here)
std::string test_rgw_get_obj_oid(const rgw_obj& obj)
{
  std::string oid;
  std::string locator;
  get_obj_bucket_and_oid_loc(obj, oid, locator);
  return oid;
}
// convenience wrappers that fill buckets with fixed test values
void test_rgw_init_explicit_placement_bucket(rgw_bucket *bucket, const char *name)
{
  test_rgw_populate_explicit_placement_bucket(bucket, "", name, ".data-pool", ".index-pool", "marker", "bucket-id");
}
void test_rgw_init_old_bucket(old_rgw_bucket *bucket, const char *name)
{
  test_rgw_populate_old_bucket(bucket, "", name, ".data-pool", ".index-pool", "marker", "bucket-id");
}
// fill an rgw_bucket that has no explicit placement pools
void test_rgw_populate_bucket(rgw_bucket *b, const char *t, const char *n, const char *m, const char *id)
{
  b->tenant = t;
  b->name = n;
  b->marker = m;
  b->bucket_id = id;
}
void test_rgw_init_bucket(rgw_bucket *bucket, const char *name)
{
  test_rgw_populate_bucket(bucket, "", name, "marker", "bucket-id");
}
// build an rgw_obj for 'bucket'/'name', optionally setting a version
// instance and a namespace on its key
rgw_obj test_rgw_create_obj(const rgw_bucket& bucket, const std::string& name, const std::string& instance, const std::string& ns)
{
  rgw_obj obj(bucket, name);
  if (!instance.empty()) {
    obj.key.set_instance(instance);
  }
  if (!ns.empty()) {
    obj.key.ns = ns;
  }
  // NOTE(review): presumably redundant if the rgw_obj(bucket, name)
  // constructor already stores the bucket -- confirm before removing
  obj.bucket = bucket;
  return obj;
}
| 2,572 | 26.967391 | 156 | cc |
null | ceph-main/src/test/rgw/test_rgw_common.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <iostream>
#include "common/ceph_json.h"
#include "common/Formatter.h"
#include "rgw_common.h"
#include "rgw_rados.h"
#include "rgw_zone.h"
#ifndef CEPH_TEST_RGW_COMMON_H
#define CEPH_TEST_RGW_COMMON_H
/* Legacy bucket representation kept for testing decode of old on-wire
 * formats. The encode/decode pair below is versioned (v8, compat v3); the
 * field order and the struct_v checks must not be changed. */
struct old_rgw_bucket {
  std::string tenant;
  std::string name;
  std::string data_pool;
  std::string data_extra_pool; /* if not set, then we should use data_pool instead */
  std::string index_pool;
  std::string marker;
  std::string bucket_id;
  std::string oid; /*
 * runtime in-memory only info. If not empty, points to the bucket instance object
 */
  old_rgw_bucket() { }
  // cppcheck-suppress noExplicitConstructor
  old_rgw_bucket(const std::string& s) : name(s) {
    data_pool = index_pool = s;
    marker = "";
  }
  explicit old_rgw_bucket(const char *n) : name(n) {
    data_pool = index_pool = n;
    marker = "";
  }
  // NOTE(review): the trailing 'h' parameter is accepted but never used
  old_rgw_bucket(const char *t, const char *n, const char *dp, const char *ip, const char *m, const char *id, const char *h) :
    tenant(t), name(n), data_pool(dp), index_pool(ip), marker(m), bucket_id(id) {}
  void encode(bufferlist& bl) const {
    ENCODE_START(8, 3, bl);
    encode(name, bl);
    encode(data_pool, bl);
    encode(marker, bl);
    encode(bucket_id, bl);
    encode(index_pool, bl);
    encode(data_extra_pool, bl);
    encode(tenant, bl);
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator& bl) {
    DECODE_START_LEGACY_COMPAT_LEN(8, 3, 3, bl);
    decode(name, bl);
    decode(data_pool, bl);
    if (struct_v >= 2) {
      decode(marker, bl);
      if (struct_v <= 3) {
        // v2/v3 stored a numeric id; render it as a decimal string
        uint64_t id;
        decode(id, bl);
        char buf[16];
        snprintf(buf, sizeof(buf), "%llu", (long long)id);
        bucket_id = buf;
      } else {
        decode(bucket_id, bl);
      }
    }
    if (struct_v >= 5) {
      decode(index_pool, bl);
    } else {
      // older versions used a single pool for data and index
      index_pool = data_pool;
    }
    if (struct_v >= 7) {
      decode(data_extra_pool, bl);
    }
    if (struct_v >= 8) {
      decode(tenant, bl);
    }
    DECODE_FINISH(bl);
  }
  // format a key for the bucket/instance. pass delim=0 to skip a field
  std::string get_key(char tenant_delim = '/',
                      char id_delim = ':') const;
  const std::string& get_data_extra_pool() {
    if (data_extra_pool.empty()) {
      return data_pool;
    }
    return data_extra_pool;
  }
  void dump(Formatter *f) const;
  void decode_json(JSONObj *obj);
  static void generate_test_instances(std::list<old_rgw_bucket*>& o);
  // ordering is by bucket name only
  bool operator<(const old_rgw_bucket& b) const {
    return name.compare(b.name) < 0;
  }
};
WRITE_CLASS_ENCODER(old_rgw_bucket)
/* Legacy object representation. The user-facing name (orig_obj) is mangled
 * into 'object' together with the namespace and version instance using '_'
 * escaping; the versioned encode/decode below depends on that exact mangled
 * form, so the string manipulation here must not be restructured. */
class old_rgw_obj {
  std::string orig_obj;   // user-facing object name
  std::string loc;        // object locator (only set for names starting with '_')
  std::string object;     // mangled name: may embed ns and instance
  std::string instance;   // version instance ("null" means the null version)
public:
  const std::string& get_object() const { return object; }
  const std::string& get_orig_obj() const { return orig_obj; }
  const std::string& get_loc() const { return loc; }
  const std::string& get_instance() const { return instance; }
  old_rgw_bucket bucket;
  std::string ns;
  bool in_extra_data; /* in-memory only member, does not serialize */
  // Represents the hash index source for this object once it is set (non-empty)
  std::string index_hash_source;
  old_rgw_obj() : in_extra_data(false) {}
  old_rgw_obj(old_rgw_bucket& b, const std::string& o) : in_extra_data(false) {
    init(b, o);
  }
  old_rgw_obj(old_rgw_bucket& b, const rgw_obj_key& k) : in_extra_data(false) {
    from_index_key(b, k);
  }
  void init(old_rgw_bucket& b, const std::string& o) {
    bucket = b;
    set_obj(o);
    reset_loc();
  }
  void init_ns(old_rgw_bucket& b, const std::string& o, const std::string& n) {
    bucket = b;
    set_ns(n);
    set_obj(o);
    reset_loc();
  }
  int set_ns(const char *n) {
    if (!n)
      return -EINVAL;
    std::string ns_str(n);
    return set_ns(ns_str);
  }
  // namespaces must not start with '_' (reserved for the escape prefix)
  int set_ns(const std::string& n) {
    if (n[0] == '_')
      return -EINVAL;
    ns = n;
    set_obj(orig_obj);
    return 0;
  }
  // instances must not start with '_' either
  int set_instance(const std::string& i) {
    if (i[0] == '_')
      return -EINVAL;
    instance = i;
    set_obj(orig_obj);
    return 0;
  }
  int clear_instance() {
    return set_instance(std::string());
  }
  void set_loc(const std::string& k) {
    loc = k;
  }
  void reset_loc() {
    loc.clear();
    /*
     * For backward compatibility. Older versions used to have object locator on all objects,
     * however, the orig_obj was the effective object locator. This had the same effect as not
     * having object locator at all for most objects but the ones that started with underscore as
     * these were escaped.
     */
    if (orig_obj[0] == '_' && ns.empty()) {
      loc = orig_obj;
    }
  }
  bool have_null_instance() {
    return instance == "null";
  }
  bool have_instance() {
    return !instance.empty();
  }
  // the "null" instance is implicit and never embedded in the mangled name
  bool need_to_encode_instance() {
    return have_instance() && !have_null_instance();
  }
  // rebuild the mangled 'object' from ns/instance and the raw name:
  //   plain name        -> name ("_name" escaped to "__name")
  //   with ns/instance  -> "_<ns>[:<instance>]_<name>"
  void set_obj(const std::string& o) {
    object.reserve(128);
    orig_obj = o;
    if (ns.empty() && !need_to_encode_instance()) {
      if (o.empty()) {
        return;
      }
      if (o.size() < 1 || o[0] != '_') {
        object = o;
        return;
      }
      object = "_";
      object.append(o);
    } else {
      object = "_";
      object.append(ns);
      if (need_to_encode_instance()) {
        object.append(std::string(":") + instance);
      }
      object.append("_");
      object.append(o);
    }
    reset_loc();
  }
  /*
   * get the object's key name as being referred to by the bucket index.
   */
  std::string get_index_key_name() const {
    if (ns.empty()) {
      if (orig_obj.size() < 1 || orig_obj[0] != '_') {
        return orig_obj;
      }
      return std::string("_") + orig_obj;
    };
    // NOTE(review): variable-length array is a compiler extension, not
    // standard C++
    char buf[ns.size() + 16];
    snprintf(buf, sizeof(buf), "_%s_", ns.c_str());
    return std::string(buf) + orig_obj;
  };
  // initialize from a bucket-index key, undoing the '_' mangling
  void from_index_key(old_rgw_bucket& b, const rgw_obj_key& key) {
    if (key.name[0] != '_') {
      init(b, key.name);
      set_instance(key.instance);
      return;
    }
    if (key.name[1] == '_') {
      init(b, key.name.substr(1));
      set_instance(key.instance);
      return;
    }
    // NOTE(review): std::string::find returns npos (size_t); the negative
    // check works only via the implicit conversion to ssize_t
    ssize_t pos = key.name.find('_', 1);
    if (pos < 0) {
      /* shouldn't happen, just use key */
      init(b, key.name);
      set_instance(key.instance);
      return;
    }
    init_ns(b, key.name.substr(pos + 1), key.name.substr(1, pos -1));
    set_instance(key.instance);
  }
  void get_index_key(rgw_obj_key *key) const {
    key->name = get_index_key_name();
    key->instance = instance;
  }
  // split "<ns>:<instance>" that was embedded in the mangled name
  static void parse_ns_field(std::string& ns, std::string& instance) {
    int pos = ns.find(':');
    if (pos >= 0) {
      instance = ns.substr(pos + 1);
      ns = ns.substr(0, pos);
    } else {
      instance.clear();
    }
  }
  // hash input for bucket-index sharding; falls back to the raw name
  std::string& get_hash_object() {
    return index_hash_source.empty() ? orig_obj : index_hash_source;
  }
  /**
   * Translate a namespace-mangled object name to the user-facing name
   * existing in the given namespace.
   *
   * If the object is part of the given namespace, it returns true
   * and cuts down the name to the unmangled version. If it is not
   * part of the given namespace, it returns false.
   */
  static bool translate_raw_obj_to_obj_in_ns(std::string& obj, std::string& instance, std::string& ns) {
    if (obj[0] != '_') {
      if (ns.empty()) {
        return true;
      }
      return false;
    }
    std::string obj_ns;
    bool ret = parse_raw_oid(obj, &obj, &instance, &obj_ns);
    if (!ret) {
      return ret;
    }
    return (ns == obj_ns);
  }
  // decompose a raw oid into name/instance/namespace; returns false on
  // malformed input
  static bool parse_raw_oid(const std::string& oid, std::string *obj_name, std::string *obj_instance, std::string *obj_ns) {
    obj_instance->clear();
    obj_ns->clear();
    if (oid[0] != '_') {
      *obj_name = oid;
      return true;
    }
    if (oid.size() >= 2 && oid[1] == '_') {
      *obj_name = oid.substr(1);
      return true;
    }
    if (oid[0] != '_' || oid.size() < 3) // for namespace, min size would be 3: _x_
      return false;
    int pos = oid.find('_', 1);
    if (pos <= 1) // if it starts with __, it's not in our namespace
      return false;
    *obj_ns = oid.substr(1, pos - 1);
    parse_ns_field(*obj_ns, *obj_instance);
    *obj_name = oid.substr(pos + 1);
    return true;
  }
  /**
   * Given a mangled object name and an empty namespace string, this
   * function extracts the namespace into the string and sets the object
   * name to be the unmangled version.
   *
   * It returns true after successfully doing so, or
   * false if it fails.
   */
  static bool strip_namespace_from_object(std::string& obj, std::string& ns, std::string& instance) {
    ns.clear();
    instance.clear();
    if (obj[0] != '_') {
      return true;
    }
    size_t pos = obj.find('_', 1);
    if (pos == std::string::npos) {
      return false;
    }
    if (obj[1] == '_') {
      obj = obj.substr(1);
      return true;
    }
    size_t period_pos = obj.find('.');
    if (period_pos < pos) {
      return false;
    }
    ns = obj.substr(1, pos-1);
    obj = obj.substr(pos+1, std::string::npos);
    parse_ns_field(ns, instance);
    return true;
  }
  void set_in_extra_data(bool val) {
    in_extra_data = val;
  }
  bool is_in_extra_data() const {
    return in_extra_data;
  }
  // versioned encoding (v5, compat v3); orig_obj is only written when it
  // cannot be recovered from 'object'
  void encode(bufferlist& bl) const {
    ENCODE_START(5, 3, bl);
    encode(bucket.name, bl);
    encode(loc, bl);
    encode(ns, bl);
    encode(object, bl);
    encode(bucket, bl);
    encode(instance, bl);
    if (!ns.empty() || !instance.empty()) {
      encode(orig_obj, bl);
    }
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator& bl) {
    DECODE_START_LEGACY_COMPAT_LEN(5, 3, 3, bl);
    decode(bucket.name, bl);
    decode(loc, bl);
    decode(ns, bl);
    decode(object, bl);
    if (struct_v >= 2)
      decode(bucket, bl);
    if (struct_v >= 4)
      decode(instance, bl);
    if (ns.empty() && instance.empty()) {
      // un-escape the plain-name form
      if (object[0] != '_') {
        orig_obj = object;
      } else {
        orig_obj = object.substr(1);
      }
    } else {
      if (struct_v >= 5) {
        decode(orig_obj, bl);
      } else {
        // pre-v5: recover orig_obj from the mangled name
        ssize_t pos = object.find('_', 1);
        if (pos < 0) {
          throw buffer::malformed_input();
        }
        orig_obj = object.substr(pos);
      }
    }
    DECODE_FINISH(bl);
  }
  bool operator==(const old_rgw_obj& o) const {
    return (object.compare(o.object) == 0) &&
           (bucket.name.compare(o.bucket.name) == 0) &&
           (ns.compare(o.ns) == 0) &&
           (instance.compare(o.instance) == 0);
  }
  // lexicographic ordering over (bucket name, bucket id, object, ns, instance)
  bool operator<(const old_rgw_obj& o) const {
    int r = bucket.name.compare(o.bucket.name);
    if (r == 0) {
      r = bucket.bucket_id.compare(o.bucket.bucket_id);
      if (r == 0) {
        r = object.compare(o.object);
        if (r == 0) {
          r = ns.compare(o.ns);
          if (r == 0) {
            r = instance.compare(o.instance);
          }
        }
      }
    }
    return (r < 0);
  }
};
WRITE_CLASS_ENCODER(old_rgw_obj)
// produce "<marker>_<orig_oid>"; when the bucket has no marker (or the oid
// is empty) the oid is passed through unchanged
static inline void prepend_old_bucket_marker(const old_rgw_bucket& bucket, const std::string& orig_oid, std::string& oid)
{
  if (bucket.marker.empty() || orig_oid.empty()) {
    oid = orig_oid;
    return;
  }
  oid = bucket.marker;
  oid.append("_");
  oid.append(orig_oid);
}
void test_rgw_init_env(RGWZoneGroup *zonegroup, RGWZoneParams *zone_params);
// self-initializing zonegroup/zone-params pair with a default placement;
// provides helpers to resolve placements and raw objects for tests
struct test_rgw_env {
  RGWZoneGroup zonegroup;
  RGWZoneParams zone_params;
  rgw_data_placement_target default_placement;
  test_rgw_env() {
    test_rgw_init_env(&zonegroup, &zone_params);
    default_placement.data_pool = rgw_pool(zone_params.placement_pools[zonegroup.default_placement.name].get_standard_data_pool());
    default_placement.data_extra_pool =  rgw_pool(zone_params.placement_pools[zonegroup.default_placement.name].data_extra_pool);
  }
  // look up the pools configured for a placement id
  rgw_data_placement_target get_placement(const std::string& placement_id) {
    const RGWZonePlacementInfo& pi = zone_params.placement_pools[placement_id];
    rgw_data_placement_target pt;
    pt.index_pool = pi.index_pool;
    pt.data_pool = pi.get_standard_data_pool();
    pt.data_extra_pool = pi.data_extra_pool;
    return pt;
  }
  // resolve an object to its raw (pool, oid) form through this env
  rgw_raw_obj get_raw(const rgw_obj& obj) {
    rgw_obj_select s(obj);
    return s.get_raw_obj(zonegroup, zone_params);
  }
  rgw_raw_obj get_raw(const rgw_obj_select& os) {
    return os.get_raw_obj(zonegroup, zone_params);
  }
};
void test_rgw_add_placement(RGWZoneGroup *zonegroup, RGWZoneParams *zone_params, const std::string& name, bool is_default);
void test_rgw_populate_explicit_placement_bucket(rgw_bucket *b, const char *t, const char *n, const char *dp, const char *ip, const char *m, const char *id);
void test_rgw_populate_old_bucket(old_rgw_bucket *b, const char *t, const char *n, const char *dp, const char *ip, const char *m, const char *id);
std::string test_rgw_get_obj_oid(const rgw_obj& obj);
void test_rgw_init_explicit_placement_bucket(rgw_bucket *bucket, const char *name);
void test_rgw_init_old_bucket(old_rgw_bucket *bucket, const char *name);
void test_rgw_populate_bucket(rgw_bucket *b, const char *t, const char *n, const char *m, const char *id);
void test_rgw_init_bucket(rgw_bucket *bucket, const char *name);
rgw_obj test_rgw_create_obj(const rgw_bucket& bucket, const std::string& name, const std::string& instance, const std::string& ns);
#endif
| 13,858 | 26.335306 | 157 | h |
null | ceph-main/src/test/rgw/test_rgw_compression.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "gtest/gtest.h"
#include "rgw_compression.h"
// GetObj sink that concatenates every chunk it receives into one bufferlist,
// so tests can inspect the full decompressed output
class ut_get_sink : public RGWGetObj_Filter {
  bufferlist sink;
public:
  ut_get_sink() {}
  virtual ~ut_get_sink() {}
  // append bl_len bytes from bl into 'sink'
  // NOTE(review): bl_ofs is ignored; assumes the relevant data starts at the
  // beginning of bl -- confirm against RGWGetObj_Filter's contract
  int handle_data(bufferlist& bl, off_t bl_ofs, off_t bl_len) override
  {
    auto& bl_buffers = bl.buffers();
    auto i = bl_buffers.begin();
    while (bl_len > 0)
    {
      ceph_assert(i != bl_buffers.end());
      off_t len = std::min<off_t>(bl_len, i->length());
      sink.append(*i, 0, len);
      bl_len -= len;
      i++;
    }
    return 0;
  }
  // everything received so far
  bufferlist& get_sink()
  {
    return sink;
  }
};
class ut_get_sink_size : public RGWGetObj_Filter {
size_t max_size = 0;
public:
ut_get_sink_size() {}
virtual ~ut_get_sink_size() {}
int handle_data(bufferlist& bl, off_t bl_ofs, off_t bl_len) override
{
if (bl_len > (off_t)max_size)
max_size = bl_len;
return 0;
}
size_t get_size()
{
return max_size;
}
};
class ut_put_sink: public rgw::sal::DataProcessor
{
bufferlist sink;
public:
int process(bufferlist&& bl, uint64_t ofs) override
{
sink.claim_append(bl);
return 0;
}
bufferlist& get_sink()
{
return sink;
}
};
// no-op GetObj callback; used where only fixup_range() is exercised
struct MockGetDataCB : public RGWGetObj_Filter {
  int handle_data(bufferlist& bl, off_t bl_ofs, off_t bl_len) override {
    return 0;
  }
} cb;
using range_t = std::pair<off_t, off_t>;
// call filter->fixup_range() and return the range as a pair. this makes it easy
// to fit on a single line for ASSERT_EQ()
range_t fixup_range(RGWGetObj_Decompress *filter, off_t ofs, off_t end)
{
  filter->fixup_range(ofs, end);
  return {ofs, end};
}
// verify that logical byte ranges are widened to compressed-block
// boundaries (blocks of original len=8 compress to len=6 here)
TEST(Decompress, FixupRangePartial)
{
  RGWCompressionInfo cs_info;
  // array of blocks with original len=8, compressed to len=6
  auto& blocks = cs_info.blocks;
  blocks.emplace_back(compression_block{0, 0, 6});
  blocks.emplace_back(compression_block{8, 6, 6});
  blocks.emplace_back(compression_block{16, 12, 6});
  blocks.emplace_back(compression_block{24, 18, 6});
  const bool partial = true;
  RGWGetObj_Decompress decompress(g_ceph_context, &cs_info, partial, &cb);
  // test translation from logical ranges to compressed ranges
  ASSERT_EQ(range_t(0, 5), fixup_range(&decompress, 0, 1));
  ASSERT_EQ(range_t(0, 5), fixup_range(&decompress, 1, 7));
  ASSERT_EQ(range_t(0, 11), fixup_range(&decompress, 7, 8));
  ASSERT_EQ(range_t(0, 11), fixup_range(&decompress, 0, 9));
  ASSERT_EQ(range_t(0, 11), fixup_range(&decompress, 7, 9));
  ASSERT_EQ(range_t(6, 11), fixup_range(&decompress, 8, 9));
  ASSERT_EQ(range_t(6, 17), fixup_range(&decompress, 8, 16));
  ASSERT_EQ(range_t(6, 17), fixup_range(&decompress, 8, 17));
  ASSERT_EQ(range_t(12, 23), fixup_range(&decompress, 16, 24));
  ASSERT_EQ(range_t(12, 23), fixup_range(&decompress, 16, 999));
  ASSERT_EQ(range_t(18, 23), fixup_range(&decompress, 998, 999));
}
// Compress buffers of growing sizes (100B up to ~10MB, +25% per round),
// decompress them, and verify the decompressor never hands its client a
// chunk larger than rgw_max_chunk_size regardless of object size.
// Fix: use std::move instead of unqualified move (which only compiled via
// argument-dependent lookup).
TEST(Compress, LimitedChunkSize)
{
  CompressorRef plugin;
  plugin = Compressor::create(g_ceph_context, Compressor::COMP_ALG_ZLIB);
  ASSERT_NE(plugin.get(), nullptr);
  for (size_t s = 100 ; s < 10000000 ; s = s*5/4)
  {
    bufferptr bp(s);
    bufferlist bl;
    bl.append(bp);
    ut_put_sink c_sink;
    RGWPutObj_Compress compressor(g_ceph_context, plugin, &c_sink);
    compressor.process(std::move(bl), 0);
    compressor.process({}, s); // flush
    // rebuild the compression metadata exactly as a reader would see it
    RGWCompressionInfo cs_info;
    cs_info.compression_type = plugin->get_type_name();
    cs_info.orig_size = s;
    cs_info.compressor_message = compressor.get_compressor_message();
    cs_info.blocks = std::move(compressor.get_compression_blocks());
    ut_get_sink_size d_sink;
    RGWGetObj_Decompress decompress(g_ceph_context, &cs_info, false, &d_sink);
    off_t f_begin = 0;
    off_t f_end = s - 1;
    decompress.fixup_range(f_begin, f_end);
    decompress.handle_data(c_sink.get_sink(), 0, c_sink.get_sink().length());
    bufferlist empty;
    decompress.handle_data(empty, 0, 0); // flush the decompressor
    // the sink only tracked the largest chunk it was handed
    ASSERT_LE(d_sink.get_size(), (size_t)g_ceph_context->_conf->rgw_max_chunk_size);
  }
}
// Stream 1000 x 1MB zero-filled buffers (10^9 bytes total) through the
// compressor, then decompress the result in one pass and verify the full
// original length is recovered.
// Fix: use std::move instead of unqualified move (which only compiled via
// argument-dependent lookup).
TEST(Compress, BillionZeros)
{
  CompressorRef plugin;
  ut_put_sink c_sink;
  plugin = Compressor::create(g_ceph_context, Compressor::COMP_ALG_ZLIB);
  ASSERT_NE(plugin.get(), nullptr);
  RGWPutObj_Compress compressor(g_ceph_context, plugin, &c_sink);
  constexpr size_t size = 1000000;
  bufferptr bp(size);
  bufferlist bl;
  bl.append(bp);
  for (int i=0; i<1000;i++)
    compressor.process(bufferlist{bl}, size*i); // pass a copy; bl is reused
  compressor.process({}, size*1000); // flush
  RGWCompressionInfo cs_info;
  cs_info.compression_type = plugin->get_type_name();
  cs_info.orig_size = size*1000;
  cs_info.compressor_message = compressor.get_compressor_message();
  cs_info.blocks = std::move(compressor.get_compression_blocks());
  ut_get_sink d_sink;
  RGWGetObj_Decompress decompress(g_ceph_context, &cs_info, false, &d_sink);
  off_t f_begin = 0;
  off_t f_end = size*1000 - 1;
  decompress.fixup_range(f_begin, f_end);
  decompress.handle_data(c_sink.get_sink(), 0, c_sink.get_sink().length());
  bufferlist empty;
  decompress.handle_data(empty, 0, 0); // flush the decompressor
  ASSERT_EQ(d_sink.get_sink().length() , size*1000);
}
| 5,192 | 26.770053 | 84 | cc |
null | ceph-main/src/test/rgw/test_rgw_crypto.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Mirantis <akupczyk@mirantis.com>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <iostream>
#include <vector>

#include <gtest/gtest.h>

#include "global/global_init.h"
#include "common/ceph_argparse.h"
#include "rgw_common.h"
#include "rgw_rados.h"
#include "rgw_crypt.h"
#include "include/ceph_assert.h"
#define dout_subsys ceph_subsys_rgw
using namespace std;
std::unique_ptr<BlockCrypt> AES_256_CBC_create(const DoutPrefixProvider *dpp, CephContext* cct, const uint8_t* key, size_t len);
// GetObj filter that collects every decrypted byte into a string so the
// whole stream can be compared against the expected plaintext.
class ut_get_sink : public RGWGetObj_Filter {
  std::string collected;
public:
  ut_get_sink() {}
  virtual ~ut_get_sink() {}
  // append the [bl_ofs, bl_ofs + bl_len) slice of bl to the output
  int handle_data(bufferlist& bl, off_t bl_ofs, off_t bl_len) override
  {
    collected.append(bl.c_str() + bl_ofs, bl_len);
    return 0;
  }
  // everything received so far
  std::string get_sink()
  {
    return collected;
  }
};
class ut_put_sink: public rgw::sal::DataProcessor
{
std::stringstream sink;
public:
int process(bufferlist&& bl, uint64_t ofs) override
{
sink << std::string_view(bl.c_str(),bl.length());
return 0;
}
std::string get_sink()
{
return sink.str();
}
};
// BlockCrypt stub that performs no transformation: "encrypting" and
// "decrypting" simply copy the input.  It exists so the fixup_range()
// block-alignment logic of the filters can be tested with a configurable
// block size, independent of any real cipher.
class BlockCryptNone: public BlockCrypt {
  size_t block_size = 256;
public:
  BlockCryptNone(){};
  BlockCryptNone(size_t sz) : block_size(sz) {}
  virtual ~BlockCryptNone(){};
  size_t get_block_size() override
  {
    return block_size;
  }
  // identity transform; note that in_ofs, size and stream_offset are all
  // ignored -- the entire input buffer is copied to the output
  bool encrypt(bufferlist& input,
                       off_t in_ofs,
                       size_t size,
                       bufferlist& output,
                       off_t stream_offset,
                       optional_yield y) override
  {
    output.clear();
    output.append(input.c_str(), input.length());
    return true;
  }
  // identical to encrypt(): a straight copy of the input
  bool decrypt(bufferlist& input,
                       off_t in_ofs,
                       size_t size,
                       bufferlist& output,
                       off_t stream_offset,
                       optional_yield y) override
  {
    output.clear();
    output.append(input.c_str(), input.length());
    return true;
  }
};
// Round-trip check: for several keys, encrypt pseudo-random sub-ranges of a
// 1MB buffer at block-aligned begin offsets and various block-aligned
// stream offsets, then decrypt and verify the plaintext is restored.
TEST(TestRGWCrypto, verify_AES_256_CBC_identity)
{
  const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys);
  //create some input for encryption
  const off_t test_range = 1024*1024;
  buffer::ptr buf(test_range);
  char* p = buf.c_str();
  for(size_t i = 0; i < buf.length(); i++)
    p[i] = i + i*i + (i >> 2);
  bufferlist input;
  input.append(buf);
  for (unsigned int step : {1, 2, 3, 5, 7, 11, 13, 17})
  {
    //make some random key
    uint8_t key[32];
    for(size_t i=0;i<sizeof(key);i++)
      key[i]=i*step;
    auto aes(AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32));
    ASSERT_NE(aes.get(), nullptr);
    size_t block_size = aes->get_block_size();
    ASSERT_NE(block_size, 0u);
    for (size_t r = 97; r < 123 ; r++)
    {
      // pseudo-random begin (aligned down to a block boundary), end
      // (aligned for 2 of every 3 rounds) and stream offset (aligned)
      off_t begin = (r*r*r*r*r % test_range);
      begin = begin - begin % block_size;
      off_t end = begin + r*r*r*r*r*r*r % (test_range - begin);
      if (r % 3)
        end = end - end % block_size;
      off_t offset = r*r*r*r*r*r*r*r % (1000*1000*1000);
      offset = offset - offset % block_size;
      ASSERT_EQ(begin % block_size, 0u);
      ASSERT_LE(end, test_range);
      ASSERT_EQ(offset % block_size, 0u);
      bufferlist encrypted;
      ASSERT_TRUE(aes->encrypt(input, begin, end - begin, encrypted, offset, null_yield));
      bufferlist decrypted;
      ASSERT_TRUE(aes->decrypt(encrypted, 0, end - begin, decrypted, offset, null_yield));
      ASSERT_EQ(decrypted.length(), end - begin);
      ASSERT_EQ(std::string_view(input.c_str() + begin, end - begin),
                std::string_view(decrypted.c_str(), end - begin) );
    }
  }
}
// Round-trip check for every small length 1..6094 (step 3) starting at
// offset 0, covering lengths that are not multiples of the cipher block
// size, at pseudo-random block-aligned stream offsets.
TEST(TestRGWCrypto, verify_AES_256_CBC_identity_2)
{
  const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys);
  //create some input for encryption
  const off_t test_range = 1024*1024;
  buffer::ptr buf(test_range);
  char* p = buf.c_str();
  for(size_t i = 0; i < buf.length(); i++)
    p[i] = i + i*i + (i >> 2);
  bufferlist input;
  input.append(buf);
  for (unsigned int step : {1, 2, 3, 5, 7, 11, 13, 17})
  {
    //make some random key
    uint8_t key[32];
    for(size_t i=0;i<sizeof(key);i++)
      key[i]=i*step;
    auto aes(AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32));
    ASSERT_NE(aes.get(), nullptr);
    size_t block_size = aes->get_block_size();
    ASSERT_NE(block_size, 0u);
    for (off_t end = 1; end < 6096 ; end+=3)
    {
      off_t begin = 0;
      off_t offset = end*end*end*end*end % (1000*1000*1000);
      offset = offset - offset % block_size;
      ASSERT_EQ(begin % block_size, 0u);
      ASSERT_LE(end, test_range);
      ASSERT_EQ(offset % block_size, 0u);
      bufferlist encrypted;
      ASSERT_TRUE(aes->encrypt(input, begin, end, encrypted, offset, null_yield));
      bufferlist decrypted;
      ASSERT_TRUE(aes->decrypt(encrypted, 0, end, decrypted, offset, null_yield));
      ASSERT_EQ(decrypted.length(), end);
      ASSERT_EQ(std::string_view(input.c_str(), end),
                std::string_view(decrypted.c_str(), end) );
    }
  }
}
// Chunking-independence check: encrypting the same range in two different
// pseudo-random block-aligned chunk partitions must produce byte-identical
// ciphertext (i.e. CBC state carries correctly across encrypt() calls).
TEST(TestRGWCrypto, verify_AES_256_CBC_identity_3)
{
  const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys);
  //create some input for encryption
  const off_t test_range = 1024*1024;
  buffer::ptr buf(test_range);
  char* p = buf.c_str();
  for(size_t i = 0; i < buf.length(); i++)
    p[i] = i + i*i + (i >> 2);
  bufferlist input;
  input.append(buf);
  for (unsigned int step : {1, 2, 3, 5, 7, 11, 13, 17})
  {
    //make some random key
    uint8_t key[32];
    for(size_t i=0;i<sizeof(key);i++)
      key[i]=i*step;
    auto aes(AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32));
    ASSERT_NE(aes.get(), nullptr);
    size_t block_size = aes->get_block_size();
    ASSERT_NE(block_size, 0u);
    size_t rr = 111;
    for (size_t r = 97; r < 123 ; r++)
    {
      off_t begin = 0;
      off_t end = begin + r*r*r*r*r*r*r % (test_range - begin);
      //sometimes make aligned
      if (r % 3)
        end = end - end % block_size;
      off_t offset = r*r*r*r*r*r*r*r % (1000*1000*1000);
      offset = offset - offset % block_size;
      ASSERT_EQ(begin % block_size, 0u);
      ASSERT_LE(end, test_range);
      ASSERT_EQ(offset % block_size, 0u);
      bufferlist encrypted1;
      bufferlist encrypted2;
      off_t pos = begin;
      off_t chunk;
      // first pass: encrypt in one pseudo-random chunking
      while (pos < end) {
        chunk = block_size + (rr/3)*(rr+17)*(rr+71)*(rr+123)*(rr+131) % 50000;
        chunk = chunk - chunk % block_size;
        if (pos + chunk > end)
          chunk = end - pos;
        bufferlist tmp;
        ASSERT_TRUE(aes->encrypt(input, pos, chunk, tmp, offset + pos, null_yield));
        encrypted1.append(tmp);
        pos += chunk;
        rr++;
      }
      // second pass: same range, different chunk boundaries
      pos = begin;
      while (pos < end) {
        chunk = block_size + (rr/3)*(rr+97)*(rr+151)*(rr+213)*(rr+251) % 50000;
        chunk = chunk - chunk % block_size;
        if (pos + chunk > end)
          chunk = end - pos;
        bufferlist tmp;
        ASSERT_TRUE(aes->encrypt(input, pos, chunk, tmp, offset + pos, null_yield));
        encrypted2.append(tmp);
        pos += chunk;
        rr++;
      }
      ASSERT_EQ(encrypted1.length(), end);
      ASSERT_EQ(encrypted2.length(), end);
      ASSERT_EQ(std::string_view(encrypted1.c_str(), end),
                std::string_view(encrypted2.c_str(), end) );
    }
  }
}
// Round-trip check for payloads shorter than one AES block (0..15 bytes).
TEST(TestRGWCrypto, verify_AES_256_CBC_size_0_15)
{
  const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys);
  //create some input for encryption
  const off_t test_range = 1024*1024;
  buffer::ptr buf(test_range);
  char* p = buf.c_str();
  for(size_t i = 0; i < buf.length(); i++)
    p[i] = i + i*i + (i >> 2);
  bufferlist input;
  input.append(buf);
  for (unsigned int step : {1, 2, 3, 5, 7, 11, 13, 17})
  {
    //make some random key
    uint8_t key[32];
    for(size_t i=0;i<sizeof(key);i++)
      key[i]=i*step;
    auto aes(AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32));
    ASSERT_NE(aes.get(), nullptr);
    size_t block_size = aes->get_block_size();
    ASSERT_NE(block_size, 0u);
    for (size_t r = 97; r < 123 ; r++)
    {
      off_t begin = 0;
      off_t end = begin + r*r*r*r*r*r*r % (16);
      off_t offset = r*r*r*r*r*r*r*r % (1000*1000*1000);
      offset = offset - offset % block_size;
      ASSERT_EQ(begin % block_size, 0u);
      ASSERT_LE(end, test_range);
      ASSERT_EQ(offset % block_size, 0u);
      bufferlist encrypted;
      bufferlist decrypted;
      ASSERT_TRUE(aes->encrypt(input, 0, end, encrypted, offset, null_yield));
      // NOTE(review): the second call is encrypt() on purpose: for payloads
      // shorter than one AES block the cipher appears to behave as a
      // symmetric XOR stream, so applying encrypt twice restores the
      // plaintext -- confirm against the AES_256_CBC implementation.
      ASSERT_TRUE(aes->encrypt(encrypted, 0, end, decrypted, offset, null_yield));
      ASSERT_EQ(encrypted.length(), end);
      ASSERT_EQ(decrypted.length(), end);
      ASSERT_EQ(std::string_view(input.c_str(), end),
                std::string_view(decrypted.c_str(), end) );
    }
  }
}
// Chunking-independence check focused on the non-block-aligned tail: the
// total length is forced to end 0..15 bytes past a block boundary, and two
// different chunkings of the same range must yield identical ciphertext.
TEST(TestRGWCrypto, verify_AES_256_CBC_identity_last_block)
{
  const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys);
  //create some input for encryption
  const off_t test_range = 1024*1024;
  buffer::ptr buf(test_range);
  char* p = buf.c_str();
  for(size_t i = 0; i < buf.length(); i++)
    p[i] = i + i*i + (i >> 2);
  bufferlist input;
  input.append(buf);
  for (unsigned int step : {1, 2, 3, 5, 7, 11, 13, 17})
  {
    //make some random key
    uint8_t key[32];
    for(size_t i=0;i<sizeof(key);i++)
      key[i]=i*step;
    auto aes(AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32));
    ASSERT_NE(aes.get(), nullptr);
    size_t block_size = aes->get_block_size();
    ASSERT_NE(block_size, 0u);
    size_t rr = 111;
    for (size_t r = 97; r < 123 ; r++)
    {
      off_t begin = 0;
      // align end down, then add a 0..15 byte unaligned tail
      off_t end = r*r*r*r*r*r*r % (test_range - 16);
      end = end - end % block_size;
      end = end + (r+3)*(r+5)*(r+7) % 16;
      off_t offset = r*r*r*r*r*r*r*r % (1000*1000*1000);
      offset = offset - offset % block_size;
      ASSERT_EQ(begin % block_size, 0u);
      ASSERT_LE(end, test_range);
      ASSERT_EQ(offset % block_size, 0u);
      bufferlist encrypted1;
      bufferlist encrypted2;
      off_t pos = begin;
      off_t chunk;
      // first pass: one pseudo-random block-aligned chunking
      while (pos < end) {
        chunk = block_size + (rr/3)*(rr+17)*(rr+71)*(rr+123)*(rr+131) % 50000;
        chunk = chunk - chunk % block_size;
        if (pos + chunk > end)
          chunk = end - pos;
        bufferlist tmp;
        ASSERT_TRUE(aes->encrypt(input, pos, chunk, tmp, offset + pos, null_yield));
        encrypted1.append(tmp);
        pos += chunk;
        rr++;
      }
      // second pass: different chunk boundaries over the same range
      pos = begin;
      while (pos < end) {
        chunk = block_size + (rr/3)*(rr+97)*(rr+151)*(rr+213)*(rr+251) % 50000;
        chunk = chunk - chunk % block_size;
        if (pos + chunk > end)
          chunk = end - pos;
        bufferlist tmp;
        ASSERT_TRUE(aes->encrypt(input, pos, chunk, tmp, offset + pos, null_yield));
        encrypted2.append(tmp);
        pos += chunk;
        rr++;
      }
      ASSERT_EQ(encrypted1.length(), end);
      ASSERT_EQ(encrypted2.length(), end);
      ASSERT_EQ(std::string_view(encrypted1.c_str(), end),
                std::string_view(encrypted2.c_str(), end) );
    }
  }
}
// Encrypt a full 1MB buffer once, then for many pseudo-random sub-ranges
// run the RGWGetObj_BlockDecrypt filter (fixup_range + handle_data + flush)
// and verify it reproduces exactly the requested plaintext slice.
TEST(TestRGWCrypto, verify_RGWGetObj_BlockDecrypt_ranges)
{
  const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys);
  //create some input for encryption
  const off_t test_range = 1024*1024;
  bufferptr buf(test_range);
  char* p = buf.c_str();
  for(size_t i = 0; i < buf.length(); i++)
    p[i] = i + i*i + (i >> 2);
  bufferlist input;
  input.append(buf);
  uint8_t key[32];
  for(size_t i=0;i<sizeof(key);i++)
    key[i] = i;
  auto cbc = AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32);
  ASSERT_NE(cbc.get(), nullptr);
  bufferlist encrypted;
  ASSERT_TRUE(cbc->encrypt(input, 0, test_range, encrypted, 0, null_yield));
  for (off_t r = 93; r < 150; r++ )
  {
    ut_get_sink get_sink;
    // fresh cipher per iteration (the previous one was moved into a filter)
    auto cbc = AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32);
    ASSERT_NE(cbc.get(), nullptr);
    RGWGetObj_BlockDecrypt decrypt(&no_dpp, g_ceph_context, &get_sink, std::move(cbc), null_yield);
    //random ranges
    off_t begin = (r/3)*r*(r+13)*(r+23)*(r+53)*(r+71) % test_range;
    off_t end = begin + (r/5)*(r+7)*(r+13)*(r+101)*(r*103) % (test_range - begin) - 1;
    off_t f_begin = begin;
    off_t f_end = end;
    decrypt.fixup_range(f_begin, f_end);
    decrypt.handle_data(encrypted, f_begin, f_end - f_begin + 1);
    decrypt.flush();
    const std::string& decrypted = get_sink.get_sink();
    size_t expected_len = end - begin + 1;
    ASSERT_EQ(decrypted.length(), expected_len);
    ASSERT_EQ(decrypted, std::string_view(input.c_str()+begin, expected_len));
  }
}
// Same as verify_RGWGetObj_BlockDecrypt_ranges, but the ciphertext is fed
// to the filter in pseudo-random chunk sizes to exercise its internal
// buffering across handle_data() calls.
TEST(TestRGWCrypto, verify_RGWGetObj_BlockDecrypt_chunks)
{
  const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys);
  //create some input for encryption
  const off_t test_range = 1024*1024;
  bufferptr buf(test_range);
  char* p = buf.c_str();
  for(size_t i = 0; i < buf.length(); i++)
    p[i] = i + i*i + (i >> 2);
  bufferlist input;
  input.append(buf);
  uint8_t key[32];
  for(size_t i=0;i<sizeof(key);i++)
    key[i] = i;
  auto cbc = AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32);
  ASSERT_NE(cbc.get(), nullptr);
  bufferlist encrypted;
  ASSERT_TRUE(cbc->encrypt(input, 0, test_range, encrypted, 0, null_yield));
  for (off_t r = 93; r < 150; r++ )
  {
    ut_get_sink get_sink;
    auto cbc = AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32);
    ASSERT_NE(cbc.get(), nullptr);
    RGWGetObj_BlockDecrypt decrypt(&no_dpp, g_ceph_context, &get_sink, std::move(cbc), null_yield);
    //random
    off_t begin = (r/3)*r*(r+13)*(r+23)*(r+53)*(r+71) % test_range;
    off_t end = begin + (r/5)*(r+7)*(r+13)*(r+101)*(r*103) % (test_range - begin) - 1;
    off_t f_begin = begin;
    off_t f_end = end;
    decrypt.fixup_range(f_begin, f_end);
    off_t pos = f_begin;
    // deliver the fixed-up range in pseudo-random chunk sizes
    do
    {
      off_t size = 2 << ((pos * 17 + pos / 113 + r) % 16);
      size = (pos + 1117) * (pos + 2229) % size + 1;
      if (pos + size > f_end + 1)
        size = f_end + 1 - pos;
      decrypt.handle_data(encrypted, pos, size);
      pos = pos + size;
    } while (pos < f_end + 1);
    decrypt.flush();
    const std::string& decrypted = get_sink.get_sink();
    size_t expected_len = end - begin + 1;
    ASSERT_EQ(decrypted.length(), expected_len);
    ASSERT_EQ(decrypted, std::string_view(input.c_str()+begin, expected_len));
  }
}
using range_t = std::pair<off_t, off_t>;
// call filter->fixup_range() and return the range as a pair. this makes it easy
// to fit on a single line for ASSERT_EQ()
range_t fixup_range(RGWGetObj_BlockDecrypt *decrypt, off_t ofs, off_t end)
{
  // fixup_range() adjusts its arguments in place, so work on copies and
  // package the result as a pair for one-line ASSERT_EQ comparisons
  off_t lo = ofs;
  off_t hi = end;
  decrypt->fixup_range(lo, hi);
  return range_t{lo, hi};
}
// With the no-op cipher (block size 256), fixup_range() must align the
// begin of a requested range down and the end up to 256-byte boundaries.
TEST(TestRGWCrypto, check_RGWGetObj_BlockDecrypt_fixup)
{
  const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys);
  ut_get_sink get_sink;
  auto nonecrypt = std::unique_ptr<BlockCrypt>(new BlockCryptNone);
  RGWGetObj_BlockDecrypt decrypt(&no_dpp, g_ceph_context, &get_sink,
                                 std::move(nonecrypt), null_yield);
  ASSERT_EQ(fixup_range(&decrypt,0,0), range_t(0,255));
  ASSERT_EQ(fixup_range(&decrypt,1,256), range_t(0,511));
  ASSERT_EQ(fixup_range(&decrypt,0,255), range_t(0,255));
  ASSERT_EQ(fixup_range(&decrypt,255,256), range_t(0,511));
  ASSERT_EQ(fixup_range(&decrypt,511,1023), range_t(256,1023));
  ASSERT_EQ(fixup_range(&decrypt,513,1024), range_t(512,1024+255));
}
using parts_len_t = std::vector<size_t>;
// test-only subclass exposing a setter for the protected multipart part
// lengths, so the fixup tests below can install arbitrary part layouts
class TestRGWGetObj_BlockDecrypt : public RGWGetObj_BlockDecrypt {
  using RGWGetObj_BlockDecrypt::RGWGetObj_BlockDecrypt;
public:
  void set_parts_len(parts_len_t&& other) {
    parts_len = std::move(other);
  }
};
// Build the list of multipart part lengths for an object of obj_size bytes
// uploaded in parts of mp_part_len bytes: every part is mp_part_len long
// except a possibly shorter final part.  An obj_size of 0 yields an empty
// list.  mp_part_len == 0 (which previously looped forever) is treated as
// a single part spanning the whole object.
std::vector<size_t> create_mp_parts(size_t obj_size, size_t mp_part_len){
  std::vector<size_t> parts_len;
  if (mp_part_len == 0) {
    mp_part_len = obj_size; // guard against an infinite loop on degenerate input
  }
  size_t ofs = 0;
  while (ofs < obj_size){
    const size_t part_size = std::min(mp_part_len, obj_size - ofs);
    ofs += part_size;
    parts_len.push_back(part_size);
  }
  return parts_len;
}
// canonical multipart geometry used by the fixup tests below:
// a 30MiB object uploaded in 5MiB parts
const size_t part_size = 5*1024*1024;
const size_t obj_size = 30*1024*1024;
// fixup_range() with a 4096-byte block cipher over an evenly-divided
// multipart layout: ranges align to 4096 boundaries but never cross into a
// part they do not need.
TEST(TestRGWCrypto, check_RGWGetObj_BlockDecrypt_fixup_simple)
{
  const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys);
  ut_get_sink get_sink;
  auto nonecrypt = std::make_unique<BlockCryptNone>(4096);
  TestRGWGetObj_BlockDecrypt decrypt(&no_dpp, g_ceph_context, &get_sink,
                                     std::move(nonecrypt), null_yield);
  decrypt.set_parts_len(create_mp_parts(obj_size, part_size));
  ASSERT_EQ(fixup_range(&decrypt,0,0), range_t(0,4095));
  ASSERT_EQ(fixup_range(&decrypt,1,4096), range_t(0,8191));
  ASSERT_EQ(fixup_range(&decrypt,0,4095), range_t(0,4095));
  ASSERT_EQ(fixup_range(&decrypt,4095,4096), range_t(0,8191));
  // ranges are end-end inclusive, we request bytes just spanning short of first
  // part to exceeding the first part, part_size - 1 is aligned to a 4095 boundary
  ASSERT_EQ(fixup_range(&decrypt, 0, part_size - 2), range_t(0, part_size -1));
  ASSERT_EQ(fixup_range(&decrypt, 0, part_size - 1), range_t(0, part_size -1));
  ASSERT_EQ(fixup_range(&decrypt, 0, part_size), range_t(0, part_size + 4095));
  ASSERT_EQ(fixup_range(&decrypt, 0, part_size + 1), range_t(0, part_size + 4095));
  // request bytes spanning 2 parts
  ASSERT_EQ(fixup_range(&decrypt, part_size -2, part_size + 2),
            range_t(part_size - 4096, part_size + 4095));
  // request last byte
  ASSERT_EQ(fixup_range(&decrypt, obj_size - 1, obj_size -1),
            range_t(obj_size - 4096, obj_size -1));
}
// Same as fixup_simple but the object is one byte longer than a multiple of
// the part size, leaving a trailing 1-byte part.
TEST(TestRGWCrypto, check_RGWGetObj_BlockDecrypt_fixup_non_aligned_obj_size)
{
  const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys);
  ut_get_sink get_sink;
  auto nonecrypt = std::make_unique<BlockCryptNone>(4096);
  TestRGWGetObj_BlockDecrypt decrypt(&no_dpp, g_ceph_context, &get_sink,
                                     std::move(nonecrypt), null_yield);
  auto na_obj_size = obj_size + 1;
  decrypt.set_parts_len(create_mp_parts(na_obj_size, part_size));
  // these should be unaffected here
  ASSERT_EQ(fixup_range(&decrypt, 0, part_size - 2), range_t(0, part_size -1));
  ASSERT_EQ(fixup_range(&decrypt, 0, part_size - 1), range_t(0, part_size -1));
  ASSERT_EQ(fixup_range(&decrypt, 0, part_size), range_t(0, part_size + 4095));
  ASSERT_EQ(fixup_range(&decrypt, 0, part_size + 1), range_t(0, part_size + 4095));
  // request last 2 bytes; spanning 2 parts
  ASSERT_EQ(fixup_range(&decrypt, na_obj_size -2 , na_obj_size -1),
            range_t(na_obj_size - 1 - 4096, na_obj_size - 1));
  // request last byte, spans last 1B part only
  ASSERT_EQ(fixup_range(&decrypt, na_obj_size -1, na_obj_size - 1),
            range_t(na_obj_size - 1, na_obj_size -1));
}
// Parts one byte longer than 5MiB: range alignment must be computed
// relative to each part's own start, not absolute object offsets.
TEST(TestRGWCrypto, check_RGWGetObj_BlockDecrypt_fixup_non_aligned_part_size)
{
  const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys);
  ut_get_sink get_sink;
  auto nonecrypt = std::make_unique<BlockCryptNone>(4096);
  TestRGWGetObj_BlockDecrypt decrypt(&no_dpp, g_ceph_context, &get_sink,
                                     std::move(nonecrypt), null_yield);
  auto na_part_size = part_size + 1;
  decrypt.set_parts_len(create_mp_parts(obj_size, na_part_size));
  // na_part_size -2, ie. part_size -1 is aligned to 4095 boundary
  ASSERT_EQ(fixup_range(&decrypt, 0, na_part_size - 2), range_t(0, na_part_size -2));
  // even though na_part_size -1 should not align to a 4095 boundary, the range
  // should not span the next part
  ASSERT_EQ(fixup_range(&decrypt, 0, na_part_size - 1), range_t(0, na_part_size -1));
  ASSERT_EQ(fixup_range(&decrypt, 0, na_part_size), range_t(0, na_part_size + 4095));
  ASSERT_EQ(fixup_range(&decrypt, 0, na_part_size + 1), range_t(0, na_part_size + 4095));
  // request spanning 2 parts
  ASSERT_EQ(fixup_range(&decrypt, na_part_size - 2, na_part_size + 2),
            range_t(na_part_size - 1 - 4096, na_part_size + 4095));
  // request last byte, this will be interesting, since this a multipart upload
  // with 5MB+1 size, the last part is actually 5 bytes short of 5 MB, which
  // should be considered for the ranges alignment; an easier way to look at
  // this will be that the last offset aligned to a 5MiB part will be 5MiB -
  // 4095, this is a part that is 5MiB - 5 B
  ASSERT_EQ(fixup_range(&decrypt, obj_size - 1, obj_size -1),
            range_t(obj_size +5 -4096, obj_size -1));
}
// Both the part size (5MiB+1) and the object size (6 such parts plus a
// trailing 1-byte part) are unaligned.
TEST(TestRGWCrypto, check_RGWGetObj_BlockDecrypt_fixup_non_aligned)
{
  const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys);
  ut_get_sink get_sink;
  auto nonecrypt = std::make_unique<BlockCryptNone>(4096);
  TestRGWGetObj_BlockDecrypt decrypt(&no_dpp, g_ceph_context, &get_sink,
                                     std::move(nonecrypt), null_yield);
  auto na_part_size = part_size + 1;
  auto na_obj_size = obj_size + 7; // (6*(5MiB + 1) + 1) for the last 1B overflow
  decrypt.set_parts_len(create_mp_parts(na_obj_size, na_part_size));
  // na_part_size -2, ie. part_size -1 is aligned to 4095 boundary
  ASSERT_EQ(fixup_range(&decrypt, 0, na_part_size - 2), range_t(0, na_part_size -2));
  // even though na_part_size -1 should not align to a 4095 boundary, the range
  // should not span the next part
  ASSERT_EQ(fixup_range(&decrypt, 0, na_part_size - 1), range_t(0, na_part_size -1));
  ASSERT_EQ(fixup_range(&decrypt, 0, na_part_size), range_t(0, na_part_size + 4095));
  ASSERT_EQ(fixup_range(&decrypt, 0, na_part_size + 1), range_t(0, na_part_size + 4095));
  // request last byte, spans last 1B part only
  ASSERT_EQ(fixup_range(&decrypt, na_obj_size -1, na_obj_size - 1),
            range_t(na_obj_size - 1, na_obj_size -1));
  ASSERT_EQ(fixup_range(&decrypt, na_obj_size -2, na_obj_size -1),
            range_t(na_obj_size - 2, na_obj_size -1));
}
// Out-of-bounds requests must clamp to the object's last byte instead of
// producing invalid offsets.
TEST(TestRGWCrypto, check_RGWGetObj_BlockDecrypt_fixup_invalid_ranges)
{
  const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys);
  ut_get_sink get_sink;
  auto nonecrypt = std::make_unique<BlockCryptNone>(4096);
  TestRGWGetObj_BlockDecrypt decrypt(&no_dpp, g_ceph_context, &get_sink,
                                     std::move(nonecrypt), null_yield);
  decrypt.set_parts_len(create_mp_parts(obj_size, part_size));
  // the ranges below would be mostly unreachable in current code as rgw
  // would've returned a 411 before reaching, but we're just doing this to make
  // sure we don't have invalid access
  ASSERT_EQ(fixup_range(&decrypt, obj_size - 1, obj_size + 100),
            range_t(obj_size - 4096, obj_size - 1));
  ASSERT_EQ(fixup_range(&decrypt, obj_size, obj_size + 1),
            range_t(obj_size - 1, obj_size - 1));
  ASSERT_EQ(fixup_range(&decrypt, obj_size+1, obj_size + 100),
            range_t(obj_size - 1, obj_size - 1));
}
// Feed plaintext to RGWPutObj_BlockEncrypt in pseudo-random chunk sizes,
// flush, then decrypt the accumulated ciphertext in one CBC call and verify
// the original plaintext comes back.
TEST(TestRGWCrypto, verify_RGWPutObj_BlockEncrypt_chunks)
{
  const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys);
  //create some input for encryption
  const off_t test_range = 1024*1024;
  bufferptr buf(test_range);
  char* p = buf.c_str();
  for(size_t i = 0; i < buf.length(); i++)
    p[i] = i + i*i + (i >> 2);
  bufferlist input;
  input.append(buf);
  uint8_t key[32];
  for(size_t i=0;i<sizeof(key);i++)
    key[i] = i;
  for (off_t r = 93; r < 150; r++ )
  {
    ut_put_sink put_sink;
    auto cbc = AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32);
    ASSERT_NE(cbc.get(), nullptr);
    RGWPutObj_BlockEncrypt encrypt(&no_dpp, g_ceph_context, &put_sink,
                                   std::move(cbc), null_yield);
    off_t test_size = (r/5)*(r+7)*(r+13)*(r+101)*(r*103) % (test_range - 1) + 1;
    off_t pos = 0;
    // deliver the plaintext in pseudo-random chunk sizes
    do
    {
      off_t size = 2 << ((pos * 17 + pos / 113 + r) % 16);
      size = (pos + 1117) * (pos + 2229) % size + 1;
      if (pos + size > test_size)
        size = test_size - pos;
      bufferlist bl;
      bl.append(input.c_str()+pos, size);
      encrypt.process(std::move(bl), pos);
      pos = pos + size;
    } while (pos < test_size);
    // an empty buffer at the final offset flushes the filter
    encrypt.process({}, pos);
    ASSERT_EQ(put_sink.get_sink().length(), static_cast<size_t>(test_size));
    // fresh cipher (the first one was moved into the filter)
    cbc = AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32);
    ASSERT_NE(cbc.get(), nullptr);
    bufferlist encrypted;
    bufferlist decrypted;
    encrypted.append(put_sink.get_sink());
    ASSERT_TRUE(cbc->decrypt(encrypted, 0, test_size, decrypted, 0, null_yield));
    ASSERT_EQ(decrypted.length(), test_size);
    ASSERT_EQ(std::string_view(decrypted.c_str(), test_size),
              std::string_view(input.c_str(), test_size));
  }
}
// Round-trip Fibonacci-sized payloads (1, 2, 3, 5, ... up past 20000 bytes)
// through RGWPutObj_BlockEncrypt and RGWGetObj_BlockDecrypt and verify the
// plaintext is recovered intact.
// Fixes: the input buffer was a raw new[] that leaked on every iteration
// (now an owned std::vector), and the ciphertext was appended to the
// moved-from plaintext bufferlist (now a fresh bufferlist).
TEST(TestRGWCrypto, verify_Encrypt_Decrypt)
{
  const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys);
  uint8_t key[32];
  for(size_t i=0;i<sizeof(key);i++)
    key[i]=i;
  size_t fi_a = 0;
  size_t fi_b = 1;
  size_t test_size;
  do
  {
    //fibonacci
    size_t tmp = fi_b;
    fi_b = fi_a + fi_b;
    fi_a = tmp;
    test_size = fi_b;
    // owned buffer filled with a size-dependent byte pattern
    std::vector<uint8_t> test_in(test_size,
                                 static_cast<uint8_t>(test_size & 0xff));
    ut_put_sink put_sink;
    RGWPutObj_BlockEncrypt encrypt(&no_dpp, g_ceph_context, &put_sink,
                                   AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32),
                                   null_yield);
    bufferlist bl;
    bl.append(reinterpret_cast<const char*>(test_in.data()), test_size);
    encrypt.process(std::move(bl), 0);
    encrypt.process({}, test_size); // flush
    ASSERT_EQ(put_sink.get_sink().length(), test_size);
    // collect the ciphertext in a fresh bufferlist instead of reusing the
    // moved-from plaintext one
    bufferlist encrypted;
    encrypted.append(put_sink.get_sink().data(), put_sink.get_sink().length());
    ASSERT_EQ(encrypted.length(), test_size);
    ut_get_sink get_sink;
    RGWGetObj_BlockDecrypt decrypt(&no_dpp, g_ceph_context, &get_sink,
                                   AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32),
                                   null_yield);
    off_t bl_ofs = 0;
    off_t bl_end = test_size - 1;
    decrypt.fixup_range(bl_ofs, bl_end);
    decrypt.handle_data(encrypted, 0, encrypted.length());
    decrypt.flush();
    ASSERT_EQ(get_sink.get_sink().length(), test_size);
    ASSERT_EQ(get_sink.get_sink(),
              std::string_view(reinterpret_cast<char*>(test_in.data()), test_size));
  }
  while (test_size < 20000);
}
// test harness entry point: initialize a CephContext (the crypto filters
// read g_ceph_context), then hand control to Google Test
int main(int argc, char **argv) {
  auto args = argv_to_vec(argc, argv);
  auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
                         CODE_ENVIRONMENT_UTILITY,
                         CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
  common_init_finish(g_ceph_context);
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
| 26,493 | 31.036276 | 128 | cc |
null | ceph-main/src/test/rgw/test_rgw_dmclock_scheduler.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
//#define BOOST_ASIO_ENABLE_HANDLER_TRACKING
#include "rgw_dmclock_sync_scheduler.h"
#include "rgw_dmclock_async_scheduler.h"
#include <optional>
#include <spawn/spawn.hpp>
#include <gtest/gtest.h>
#include "acconfig.h"
#include "global/global_context.h"
namespace rgw::dmclock {
using boost::system::error_code;
// return a lambda that can be used as a callback to capture its arguments
auto capture(std::optional<error_code>& opt_ec,
             std::optional<PhaseType>& opt_phase)
{
  // capture the two out-params explicitly; the caller keeps them alive
  // until the request completes
  return [&opt_ec, &opt_phase] (error_code ec, PhaseType phase) {
    opt_ec = ec;
    opt_phase = phase;
  };
}
// Exercise the synchronous dmclock scheduler: once the server is marked
// ready, add_request() processes each request immediately, so afterwards
// the queue-length counters are back to 0 and exactly one request per
// client has been accounted to its expected phase (admin via reservation,
// auth via priority).
TEST(Queue, SyncRequest)
{
  ClientCounters counters(g_ceph_context);
  auto client_info_f = [] (client_id client) -> ClientInfo* {
    static ClientInfo clients[] = {
      {1, 1, 1}, //admin: satisfy by reservation
      {0, 1, 1}, //auth: satisfy by priority
    };
    return &clients[static_cast<size_t>(client)];
  };
  std::atomic <bool> ready = false;
  auto server_ready_f = [&ready]() -> bool { return ready.load();};
  SyncScheduler queue(g_ceph_context, std::ref(counters),
                      client_info_f, server_ready_f,
                      std::ref(SyncScheduler::handle_request_cb)
                     );
  auto now = get_time();
  ready = true;
  queue.add_request(client_id::admin, {}, now, 1);
  queue.add_request(client_id::auth, {}, now, 1);
  // We can't see the queue at length 1 as the queue len is decremented as the
  //request is processed
  EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_qlen));
  EXPECT_EQ(1u, counters(client_id::admin)->get(queue_counters::l_res));
  EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_prio));
  EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_limit));
  EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_cancel));
  EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_qlen));
  EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_res));
  EXPECT_EQ(1u, counters(client_id::auth)->get(queue_counters::l_prio));
  EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_limit));
  EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_cancel));
}
// With AtLimit::Reject and a limit of 1 per client, the first request of
// each client completes (admin via reservation, auth via priority) while
// the second is rejected with EAGAIN and counted against the limit counter.
TEST(Queue, RateLimit)
{
  boost::asio::io_context context;
  ClientCounters counters(g_ceph_context);
  AsyncScheduler queue(g_ceph_context, context, std::ref(counters), nullptr,
                  [] (client_id client) -> ClientInfo* {
                    static ClientInfo clients[] = {
                      {1, 1, 1}, // admin
                      {0, 1, 1}, // auth
                    };
                    return &clients[static_cast<size_t>(client)];
                  }, AtLimit::Reject);
  std::optional<error_code> ec1, ec2, ec3, ec4;
  std::optional<PhaseType> p1, p2, p3, p4;
  auto now = get_time();
  queue.async_request(client_id::admin, {}, now, 1, capture(ec1, p1));
  queue.async_request(client_id::admin, {}, now, 1, capture(ec2, p2));
  queue.async_request(client_id::auth, {}, now, 1, capture(ec3, p3));
  queue.async_request(client_id::auth, {}, now, 1, capture(ec4, p4));
  // nothing completes until the io_context runs the scheduler
  EXPECT_FALSE(ec1);
  EXPECT_FALSE(ec2);
  EXPECT_FALSE(ec3);
  EXPECT_FALSE(ec4);
  EXPECT_EQ(1u, counters(client_id::admin)->get(queue_counters::l_qlen));
  EXPECT_EQ(1u, counters(client_id::auth)->get(queue_counters::l_qlen));
  context.run_for(std::chrono::milliseconds(1));
  EXPECT_TRUE(context.stopped());
  ASSERT_TRUE(ec1);
  EXPECT_EQ(boost::system::errc::success, *ec1);
  ASSERT_TRUE(p1);
  EXPECT_EQ(PhaseType::reservation, *p1);
  // second admin request exceeds the limit of 1 -> rejected
  ASSERT_TRUE(ec2);
  EXPECT_EQ(boost::system::errc::resource_unavailable_try_again, *ec2);
  ASSERT_TRUE(ec3);
  EXPECT_EQ(boost::system::errc::success, *ec3);
  ASSERT_TRUE(p3);
  EXPECT_EQ(PhaseType::priority, *p3);
  // second auth request exceeds the limit of 1 -> rejected
  ASSERT_TRUE(ec4);
  EXPECT_EQ(boost::system::errc::resource_unavailable_try_again, *ec4);
  EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_qlen));
  EXPECT_EQ(1u, counters(client_id::admin)->get(queue_counters::l_res));
  EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_prio));
  EXPECT_EQ(1u, counters(client_id::admin)->get(queue_counters::l_limit));
  EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_cancel));
  EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_qlen));
  EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_res));
  EXPECT_EQ(1u, counters(client_id::auth)->get(queue_counters::l_prio));
  EXPECT_EQ(1u, counters(client_id::auth)->get(queue_counters::l_limit));
  EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_cancel));
}
// One request per client through the async scheduler: both complete
// successfully once the io_context runs, admin in the reservation phase and
// auth in the priority phase, with the counters reflecting exactly that.
TEST(Queue, AsyncRequest)
{
  boost::asio::io_context context;
  ClientCounters counters(g_ceph_context);
  AsyncScheduler queue(g_ceph_context, context, std::ref(counters), nullptr,
                  [] (client_id client) -> ClientInfo* {
                    static ClientInfo clients[] = {
                      {1, 1, 1}, // admin: satisfy by reservation
                      {0, 1, 1}, // auth: satisfy by priority
                    };
                    return &clients[static_cast<size_t>(client)];
                  }, AtLimit::Reject
    );
  std::optional<error_code> ec1, ec2;
  std::optional<PhaseType> p1, p2;
  auto now = get_time();
  queue.async_request(client_id::admin, {}, now, 1, capture(ec1, p1));
  queue.async_request(client_id::auth, {}, now, 1, capture(ec2, p2));
  // nothing completes until the io_context runs the scheduler
  EXPECT_FALSE(ec1);
  EXPECT_FALSE(ec2);
  EXPECT_EQ(1u, counters(client_id::admin)->get(queue_counters::l_qlen));
  EXPECT_EQ(1u, counters(client_id::auth)->get(queue_counters::l_qlen));
  context.run_for(std::chrono::milliseconds(1));
  EXPECT_TRUE(context.stopped());
  ASSERT_TRUE(ec1);
  EXPECT_EQ(boost::system::errc::success, *ec1);
  ASSERT_TRUE(p1);
  EXPECT_EQ(PhaseType::reservation, *p1);
  ASSERT_TRUE(ec2);
  EXPECT_EQ(boost::system::errc::success, *ec2);
  ASSERT_TRUE(p2);
  EXPECT_EQ(PhaseType::priority, *p2);
  EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_qlen));
  EXPECT_EQ(1u, counters(client_id::admin)->get(queue_counters::l_res));
  EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_prio));
  EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_limit));
  EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_cancel));
  EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_qlen));
  EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_res));
  EXPECT_EQ(1u, counters(client_id::auth)->get(queue_counters::l_prio));
  EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_limit));
  EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_cancel));
}
// cancel() with no argument aborts every queued request: both callbacks
// fire with operation_aborted and both clients' cancel counters increment.
TEST(Queue, Cancel)
{
  boost::asio::io_context context;
  ClientCounters counters(g_ceph_context);
  AsyncScheduler queue(g_ceph_context, context, std::ref(counters), nullptr,
      [] (client_id client) -> ClientInfo* {
        static ClientInfo info{0, 1, 1};
        return &info;
      });

  std::optional<error_code> ec1, ec2;
  std::optional<PhaseType> p1, p2;
  auto now = get_time();
  queue.async_request(client_id::admin, {}, now, 1, capture(ec1, p1));
  queue.async_request(client_id::auth, {}, now, 1, capture(ec2, p2));
  EXPECT_FALSE(ec1);
  EXPECT_FALSE(ec2);
  EXPECT_EQ(1u, counters(client_id::admin)->get(queue_counters::l_qlen));
  EXPECT_EQ(1u, counters(client_id::auth)->get(queue_counters::l_qlen));

  queue.cancel();

  // cancellation completions are posted to the io_context, not run inline
  EXPECT_FALSE(ec1);
  EXPECT_FALSE(ec2);

  context.run_for(std::chrono::milliseconds(1));
  EXPECT_TRUE(context.stopped());

  ASSERT_TRUE(ec1);
  EXPECT_EQ(boost::asio::error::operation_aborted, *ec1);
  ASSERT_TRUE(ec2);
  EXPECT_EQ(boost::asio::error::operation_aborted, *ec2);

  // only the cancel counters move; nothing was satisfied
  EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_qlen));
  EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_res));
  EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_prio));
  EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_limit));
  EXPECT_EQ(1u, counters(client_id::admin)->get(queue_counters::l_cancel));
  EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_qlen));
  EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_res));
  EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_prio));
  EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_limit));
  EXPECT_EQ(1u, counters(client_id::auth)->get(queue_counters::l_cancel));
}
// cancel(client) aborts only that client's queued request; the other
// client's request still completes normally by priority.
TEST(Queue, CancelClient)
{
  boost::asio::io_context context;
  ClientCounters counters(g_ceph_context);
  AsyncScheduler queue(g_ceph_context, context, std::ref(counters), nullptr,
      [] (client_id client) -> ClientInfo* {
        static ClientInfo info{0, 1, 1};
        return &info;
      });

  std::optional<error_code> ec1, ec2;
  std::optional<PhaseType> p1, p2;
  auto now = get_time();
  queue.async_request(client_id::admin, {}, now, 1, capture(ec1, p1));
  queue.async_request(client_id::auth, {}, now, 1, capture(ec2, p2));
  EXPECT_FALSE(ec1);
  EXPECT_FALSE(ec2);
  EXPECT_EQ(1u, counters(client_id::admin)->get(queue_counters::l_qlen));
  EXPECT_EQ(1u, counters(client_id::auth)->get(queue_counters::l_qlen));

  // cancel admin's request only
  queue.cancel(client_id::admin);

  EXPECT_FALSE(ec1);
  EXPECT_FALSE(ec2);

  context.run_for(std::chrono::milliseconds(1));
  EXPECT_TRUE(context.stopped());

  // admin aborted, auth satisfied by priority
  ASSERT_TRUE(ec1);
  EXPECT_EQ(boost::asio::error::operation_aborted, *ec1);

  ASSERT_TRUE(ec2);
  EXPECT_EQ(boost::system::errc::success, *ec2);
  ASSERT_TRUE(p2);
  EXPECT_EQ(PhaseType::priority, *p2);

  EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_qlen));
  EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_res));
  EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_prio));
  EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_limit));
  EXPECT_EQ(1u, counters(client_id::admin)->get(queue_counters::l_cancel));
  EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_qlen));
  EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_res));
  EXPECT_EQ(1u, counters(client_id::auth)->get(queue_counters::l_prio));
  EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_limit));
  EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_cancel));
}
// Destroying the scheduler cancels all pending requests: after the queue
// goes out of scope, both callbacks fire with operation_aborted.
TEST(Queue, CancelOnDestructor)
{
  boost::asio::io_context context;
  std::optional<error_code> ec1, ec2;
  std::optional<PhaseType> p1, p2;
  ClientCounters counters(g_ceph_context);
  {
    // scheduler lives only inside this scope
    AsyncScheduler queue(g_ceph_context, context, std::ref(counters), nullptr,
        [] (client_id client) -> ClientInfo* {
          static ClientInfo info{0, 1, 1};
          return &info;
        });

    auto now = get_time();
    queue.async_request(client_id::admin, {}, now, 1, capture(ec1, p1));
    queue.async_request(client_id::auth, {}, now, 1, capture(ec2, p2));

    EXPECT_EQ(1u, counters(client_id::admin)->get(queue_counters::l_qlen));
    EXPECT_EQ(1u, counters(client_id::auth)->get(queue_counters::l_qlen));
  }
  // destructor posted the cancellations but they haven't run yet
  EXPECT_FALSE(ec1);
  EXPECT_FALSE(ec2);

  context.run_for(std::chrono::milliseconds(1));
  EXPECT_TRUE(context.stopped());

  ASSERT_TRUE(ec1);
  EXPECT_EQ(boost::asio::error::operation_aborted, *ec1);
  ASSERT_TRUE(ec2);
  EXPECT_EQ(boost::asio::error::operation_aborted, *ec2);

  EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_qlen));
  EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_res));
  EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_prio));
  EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_limit));
  EXPECT_EQ(1u, counters(client_id::admin)->get(queue_counters::l_cancel));
  EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_qlen));
  EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_res));
  EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_prio));
  EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_limit));
  EXPECT_EQ(1u, counters(client_id::auth)->get(queue_counters::l_cancel));
}
// return a lambda from capture() that's bound to run on the given executor.
// Used by CrossExecutorRequest to verify that completions are dispatched on
// the handler's associated executor rather than the scheduler's.
template <typename Executor>
auto capture(const Executor& ex, std::optional<error_code>& opt_ec,
             std::optional<PhaseType>& opt_res)
{
  return boost::asio::bind_executor(ex, capture(opt_ec, opt_res));
}
// Handlers bound to a different executor than the scheduler's must keep
// work outstanding on both executors while queued, and must only be invoked
// once the callback executor runs.
TEST(Queue, CrossExecutorRequest)
{
  boost::asio::io_context queue_context;
  ClientCounters counters(g_ceph_context);
  AsyncScheduler queue(g_ceph_context, queue_context, std::ref(counters), nullptr,
      [] (client_id client) -> ClientInfo* {
        static ClientInfo info{0, 1, 1};
        return &info;
      });

  // create a separate execution context to use for all callbacks to test that
  // pending requests maintain executor work guards on both executors
  boost::asio::io_context callback_context;
  auto ex2 = callback_context.get_executor();

  std::optional<error_code> ec1, ec2;
  std::optional<PhaseType> p1, p2;
  auto now = get_time();
  queue.async_request(client_id::admin, {}, now, 1, capture(ex2, ec1, p1));
  queue.async_request(client_id::auth, {}, now, 1, capture(ex2, ec2, p2));

  EXPECT_EQ(1u, counters(client_id::admin)->get(queue_counters::l_qlen));
  EXPECT_EQ(1u, counters(client_id::auth)->get(queue_counters::l_qlen));

  callback_context.run_for(std::chrono::milliseconds(1));
  // maintains work on callback executor while in queue
  EXPECT_FALSE(callback_context.stopped());

  EXPECT_FALSE(ec1);
  EXPECT_FALSE(ec2);

  // running the queue's context schedules the requests but completes them
  // on the callback executor
  queue_context.run_for(std::chrono::milliseconds(1));
  EXPECT_TRUE(queue_context.stopped());

  EXPECT_FALSE(ec1); // no callbacks until callback executor runs
  EXPECT_FALSE(ec2);

  callback_context.run_for(std::chrono::milliseconds(1));
  EXPECT_TRUE(callback_context.stopped());

  ASSERT_TRUE(ec1);
  EXPECT_EQ(boost::system::errc::success, *ec1);
  ASSERT_TRUE(p1);
  EXPECT_EQ(PhaseType::priority, *p1);

  ASSERT_TRUE(ec2);
  EXPECT_EQ(boost::system::errc::success, *ec2);
  ASSERT_TRUE(p2);
  EXPECT_EQ(PhaseType::priority, *p2);
}
// Drive async_request() from a stackful coroutine via yield_context: each
// await returns the phase directly and the error_code via yield[ec].
TEST(Queue, SpawnAsyncRequest)
{
  boost::asio::io_context context;

  spawn::spawn(context, [&] (yield_context yield) {
    ClientCounters counters(g_ceph_context);
    AsyncScheduler queue(g_ceph_context, context, std::ref(counters), nullptr,
        [] (client_id client) -> ClientInfo* {
          static ClientInfo clients[] = {
            {1, 1, 1}, // admin: satisfy by reservation
            {0, 1, 1}, // auth: satisfy by priority
          };
          return &clients[static_cast<size_t>(client)];
        });

    error_code ec1, ec2;
    auto p1 = queue.async_request(client_id::admin, {}, get_time(), 1, yield[ec1]);
    EXPECT_EQ(boost::system::errc::success, ec1);
    EXPECT_EQ(PhaseType::reservation, p1);

    auto p2 = queue.async_request(client_id::auth, {}, get_time(), 1, yield[ec2]);
    EXPECT_EQ(boost::system::errc::success, ec2);
    EXPECT_EQ(PhaseType::priority, p2);
  });

  context.run_for(std::chrono::milliseconds(1));
  EXPECT_TRUE(context.stopped());
}
} // namespace rgw::dmclock
| 15,482 | 35.090909 | 98 | cc |
null | ceph-main/src/test/rgw/test_rgw_gc_log.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "rgw_gc_log.h"
#include "test/librados/test_cxx.h"
#include "gtest/gtest.h"
// creates a rados client and temporary pool
// Global gtest environment: creates a rados client and a temporary pool for
// the whole test program, and destroys both on teardown.
struct RadosEnv : public ::testing::Environment {
  // name of the temporary pool; unset until SetUp() succeeds
  static std::optional<std::string> pool_name;
 public:
  static std::optional<librados::Rados> rados;

  void SetUp() override {
    rados.emplace();
    // create pool
    std::string name = get_temp_pool_name();
    ASSERT_EQ("", create_one_pool_pp(name, *rados));
    pool_name = name;
  }
  void TearDown() override {
    // only destroy the pool if SetUp() actually created it
    if (pool_name) {
      ASSERT_EQ(0, destroy_one_pool_pp(*pool_name, *rados));
    }
    rados.reset();
  }

  // open an IoCtx on the temporary pool for use by test fixtures
  static int ioctx_create(librados::IoCtx& ioctx) {
    return rados->ioctx_create(pool_name->c_str(), ioctx);
  }
};
std::optional<std::string> RadosEnv::pool_name;
std::optional<librados::Rados> RadosEnv::rados;
auto *const rados_env = ::testing::AddGlobalTestEnvironment(new RadosEnv);
// Test fixture sharing one IoCtx on the temporary pool across all
// rgw_gc_log tests; each test writes to its own object (see get_test_oid).
class rgw_gc_log : public ::testing::Test {
 protected:
  static librados::IoCtx ioctx;

  static void SetUpTestSuite() {
    ASSERT_EQ(0, RadosEnv::ioctx_create(ioctx));
  }
  static void TearDownTestSuite() {
    ioctx.close();
  }

  // use the test's name as the oid so different tests don't conflict
  std::string get_test_oid() const {
    return ::testing::UnitTest::GetInstance()->current_test_info()->name();
  }
};
librados::IoCtx rgw_gc_log::ioctx;
// gc_log_init2() succeeds on a fresh object but its version check rejects a
// second init on the same object with -ECANCELED.
TEST_F(rgw_gc_log, init_existing_queue)
{
  const std::string oid = get_test_oid();
  {
    // successfully inits new object
    librados::ObjectWriteOperation op;
    gc_log_init2(op, 1, 1);
    ASSERT_EQ(0, ioctx.operate(oid, &op));
  }
  {
    // version check fails on second init
    librados::ObjectWriteOperation op;
    gc_log_init2(op, 1, 1);
    ASSERT_EQ(-ECANCELED, ioctx.operate(oid, &op));
  }
}
// gc_log_init2() must still succeed on an object that already holds legacy
// omap-format gc entries written by gc_log_enqueue1().
TEST_F(rgw_gc_log, init_existing_omap)
{
  const std::string oid = get_test_oid();
  {
    // write a legacy omap entry first
    librados::ObjectWriteOperation op;
    cls_rgw_gc_obj_info info;
    gc_log_enqueue1(op, 5, info);
    ASSERT_EQ(0, ioctx.operate(oid, &op));
  }
  {
    // init succeeds with existing omap entries
    librados::ObjectWriteOperation op;
    gc_log_init2(op, 1, 1);
    ASSERT_EQ(0, ioctx.operate(oid, &op));
  }
}
// Once an object is initialized to the new queue format, the legacy omap
// enqueue path is rejected by its version check.
TEST_F(rgw_gc_log, enqueue1_after_init)
{
  const std::string oid = get_test_oid();
  {
    librados::ObjectWriteOperation op;
    gc_log_init2(op, 1, 1);
    ASSERT_EQ(0, ioctx.operate(oid, &op));
  }
  {
    // version check fails on omap enqueue
    librados::ObjectWriteOperation op;
    cls_rgw_gc_obj_info info;
    gc_log_enqueue1(op, 5, info);
    ASSERT_EQ(-ECANCELED, ioctx.operate(oid, &op));
  }
}
// The new-format enqueue path requires the object to be initialized first;
// without gc_log_init2() it fails its version check.
TEST_F(rgw_gc_log, enqueue2_before_init)
{
  const std::string oid = get_test_oid();
  {
    // version check fails on cls_rgw_gc enqueue
    librados::ObjectWriteOperation op;
    gc_log_enqueue2(op, 5, {});
    ASSERT_EQ(-ECANCELED, ioctx.operate(oid, &op));
  }
}
// After new-format init, the legacy omap defer path is rejected by its
// version check, mirroring enqueue1_after_init.
TEST_F(rgw_gc_log, defer1_after_init)
{
  const std::string oid = get_test_oid();
  {
    librados::ObjectWriteOperation op;
    gc_log_init2(op, 1, 1);
    ASSERT_EQ(0, ioctx.operate(oid, &op));
  }
  {
    // version check fails on omap defer
    librados::ObjectWriteOperation op;
    gc_log_defer1(op, 5, {});
    ASSERT_EQ(-ECANCELED, ioctx.operate(oid, &op));
  }
}
// The new-format defer path requires prior initialization; without
// gc_log_init2() it fails its version check.
TEST_F(rgw_gc_log, defer2_before_init)
{
  const std::string oid = get_test_oid();
  {
    // version check fails on cls_rgw_gc defer
    librados::ObjectWriteOperation op;
    gc_log_defer2(op, 5, {});
    ASSERT_EQ(-ECANCELED, ioctx.operate(oid, &op));
  }
}
| 3,599 | 23.827586 | 75 | cc |
null | ceph-main/src/test/rgw/test_rgw_iam_policy.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <string>
#include <boost/intrusive_ptr.hpp>
#include <boost/optional.hpp>
#include <gtest/gtest.h>
#include "include/stringify.h"
#include "common/code_environment.h"
#include "common/ceph_context.h"
#include "global/global_init.h"
#include "rgw_auth.h"
#include "rgw_auth_registry.h"
#include "rgw_iam_policy.h"
#include "rgw_op.h"
#include "rgw_process_env.h"
#include "rgw_sal_rados.h"
using std::string;
using std::vector;
using boost::container::flat_set;
using boost::intrusive_ptr;
using boost::make_optional;
using boost::none;
using rgw::auth::Identity;
using rgw::auth::Principal;
using rgw::ARN;
using rgw::IAM::Effect;
using rgw::IAM::Environment;
using rgw::Partition;
using rgw::IAM::Policy;
using rgw::IAM::s3All;
using rgw::IAM::s3Count;
using rgw::IAM::s3GetAccelerateConfiguration;
using rgw::IAM::s3GetBucketAcl;
using rgw::IAM::s3GetBucketCORS;
using rgw::IAM::s3GetBucketLocation;
using rgw::IAM::s3GetBucketLogging;
using rgw::IAM::s3GetBucketNotification;
using rgw::IAM::s3GetBucketPolicy;
using rgw::IAM::s3GetBucketPolicyStatus;
using rgw::IAM::s3GetBucketPublicAccessBlock;
using rgw::IAM::s3GetBucketEncryption;
using rgw::IAM::s3GetBucketRequestPayment;
using rgw::IAM::s3GetBucketTagging;
using rgw::IAM::s3GetBucketVersioning;
using rgw::IAM::s3GetBucketWebsite;
using rgw::IAM::s3GetLifecycleConfiguration;
using rgw::IAM::s3GetObject;
using rgw::IAM::s3GetObjectAcl;
using rgw::IAM::s3GetObjectVersionAcl;
using rgw::IAM::s3GetObjectTorrent;
using rgw::IAM::s3GetObjectTagging;
using rgw::IAM::s3GetObjectVersion;
using rgw::IAM::s3GetObjectVersionTagging;
using rgw::IAM::s3GetObjectVersionTorrent;
using rgw::IAM::s3GetPublicAccessBlock;
using rgw::IAM::s3GetReplicationConfiguration;
using rgw::IAM::s3ListAllMyBuckets;
using rgw::IAM::s3ListBucket;
using rgw::IAM::s3ListBucketMultipartUploads;
using rgw::IAM::s3ListBucketVersions;
using rgw::IAM::s3ListMultipartUploadParts;
using rgw::IAM::None;
using rgw::IAM::s3PutBucketAcl;
using rgw::IAM::s3PutBucketPolicy;
using rgw::IAM::s3GetBucketObjectLockConfiguration;
using rgw::IAM::s3GetObjectRetention;
using rgw::IAM::s3GetObjectLegalHold;
using rgw::Service;
using rgw::IAM::TokenID;
using rgw::IAM::Version;
using rgw::IAM::Action_t;
using rgw::IAM::NotAction_t;
using rgw::IAM::iamCreateRole;
using rgw::IAM::iamDeleteRole;
using rgw::IAM::iamAll;
using rgw::IAM::stsAll;
using rgw::IAM::allCount;
/// Minimal rgw::auth::Identity for policy-evaluation tests. Only to_str(),
/// is_identity() and get_identity_type() are expected to be called; every
/// other override aborts if it is ever reached.
class FakeIdentity : public Identity {
  const Principal id;
public:
  explicit FakeIdentity(Principal&& id) : id(std::move(id)) {}

  uint32_t get_perms_from_aclspec(const DoutPrefixProvider* dpp, const aclspec_t& aclspec) const override {
    ceph_abort();
    return 0;
  }

  bool is_admin_of(const rgw_user& uid) const override {
    ceph_abort();
    return false;
  }

  bool is_owner_of(const rgw_user& uid) const override {
    ceph_abort();
    return false;
  }

  virtual uint32_t get_perm_mask() const override {
    ceph_abort();
    return 0;
  }

  string get_acct_name() const override {
    abort();
    // unreachable; was `return 0`, which would construct std::string from a
    // null const char* (undefined behavior) if ever executed
    return "";
  }

  string get_subuser() const override {
    abort();
    return "";  // unreachable; see get_acct_name()
  }

  void to_str(std::ostream& out) const override {
    out << id;
  }

  // match if this principal is in `ids`, or if either side is a wildcard
  bool is_identity(const flat_set<Principal>& ids) const override {
    if (id.is_wildcard() && (!ids.empty())) {
      return true;
    }
    return ids.find(id) != ids.end() || ids.find(Principal::wildcard()) != ids.end();
  }

  uint32_t get_identity_type() const override {
    return TYPE_RGW;
  }
};
// Fixture for IAM policy parse/eval tests: provides a CephContext and the
// example policy documents (example1..example7, JSON strings initialized
// elsewhere in this file).
class PolicyTest : public ::testing::Test {
protected:
  intrusive_ptr<CephContext> cct;
  // tenant used for parsing; resource ARNs are resolved against it
  static const string arbitrary_tenant;
  static string example1;
  static string example2;
  static string example3;
  static string example4;
  static string example5;
  static string example6;
  static string example7;
public:
  PolicyTest() {
    cct = new CephContext(CEPH_ENTITY_TYPE_CLIENT);
  }
};
// Parse example1 (single Allow of s3:ListBucket on one bucket) and verify
// every field of the resulting Policy.
TEST_F(PolicyTest, Parse1) {
  boost::optional<Policy> p;

  ASSERT_NO_THROW(p = Policy(cct.get(), arbitrary_tenant,
			     bufferlist::static_from_string(example1),
			     true));
  ASSERT_TRUE(p);

  EXPECT_EQ(p->text, example1);
  EXPECT_EQ(p->version, Version::v2012_10_17);
  EXPECT_FALSE(p->id);
  // assert the statement exists before indexing into it (the original
  // accessed statements[0] before checking for emptiness)
  ASSERT_FALSE(p->statements.empty());
  ASSERT_EQ(p->statements.size(), 1U);
  EXPECT_FALSE(p->statements[0].sid);
  EXPECT_TRUE(p->statements[0].princ.empty());
  EXPECT_TRUE(p->statements[0].noprinc.empty());
  EXPECT_EQ(p->statements[0].effect, Effect::Allow);
  Action_t act;
  act[s3ListBucket] = 1;
  EXPECT_EQ(p->statements[0].action, act);
  EXPECT_EQ(p->statements[0].notaction, None);
  ASSERT_FALSE(p->statements[0].resource.empty());
  ASSERT_EQ(p->statements[0].resource.size(), 1U);
  EXPECT_EQ(p->statements[0].resource.begin()->partition, Partition::aws);
  EXPECT_EQ(p->statements[0].resource.begin()->service, Service::s3);
  EXPECT_TRUE(p->statements[0].resource.begin()->region.empty());
  EXPECT_EQ(p->statements[0].resource.begin()->account, arbitrary_tenant);
  EXPECT_EQ(p->statements[0].resource.begin()->resource, "example_bucket");
  EXPECT_TRUE(p->statements[0].notresource.empty());
  EXPECT_TRUE(p->statements[0].conditions.empty());
}
// Evaluate example1: s3:ListBucket on example_bucket is allowed; any other
// action on that bucket, or the same action on another bucket, matches no
// statement and passes through.
TEST_F(PolicyTest, Eval1) {
  const auto policy = Policy(cct.get(), arbitrary_tenant,
			     bufferlist::static_from_string(example1), true);
  Environment env;
  // helper to build an s3 bucket ARN under the test tenant
  const auto bucket_arn = [](const char* name) {
    return ARN(Partition::aws, Service::s3, "", arbitrary_tenant, name);
  };

  // the Allow statement matches this action on this resource
  EXPECT_EQ(Effect::Allow,
	    policy.eval(env, none, s3ListBucket, bucket_arn("example_bucket")));
  // a different action on the same resource is not covered
  EXPECT_EQ(Effect::Pass,
	    policy.eval(env, none, s3PutBucketAcl, bucket_arn("example_bucket")));
  // the listed action on a different resource is not covered
  EXPECT_EQ(Effect::Pass,
	    policy.eval(env, none, s3ListBucket, bucket_arn("erroneous_bucket")));
}
// Parse example2 ("S3-Account-Permissions": allow s3:* to one tenant
// principal on mybucket and mybucket/*) and verify every field.
TEST_F(PolicyTest, Parse2) {
  boost::optional<Policy> p;

  ASSERT_NO_THROW(p = Policy(cct.get(), arbitrary_tenant,
			     bufferlist::static_from_string(example2),
			     true));
  ASSERT_TRUE(p);

  EXPECT_EQ(p->text, example2);
  EXPECT_EQ(p->version, Version::v2012_10_17);
  EXPECT_EQ(*p->id, "S3-Account-Permissions");
  ASSERT_FALSE(p->statements.empty());
  EXPECT_EQ(p->statements.size(), 1U);
  EXPECT_EQ(*p->statements[0].sid, "1");
  EXPECT_FALSE(p->statements[0].princ.empty());
  EXPECT_EQ(p->statements[0].princ.size(), 1U);
  EXPECT_EQ(*p->statements[0].princ.begin(),
	    Principal::tenant("ACCOUNT-ID-WITHOUT-HYPHENS"));
  EXPECT_TRUE(p->statements[0].noprinc.empty());
  EXPECT_EQ(p->statements[0].effect, Effect::Allow);
  // "s3:*" expands to every s3 action bit plus the s3All bit itself
  Action_t act;
  for (auto i = 0ULL; i < s3Count; i++)
    act[i] = 1;
  act[s3All] = 1;
  EXPECT_EQ(p->statements[0].action, act);
  EXPECT_EQ(p->statements[0].notaction, None);
  ASSERT_FALSE(p->statements[0].resource.empty());
  ASSERT_EQ(p->statements[0].resource.size(), 2U);
  // first resource: the bucket itself
  EXPECT_EQ(p->statements[0].resource.begin()->partition, Partition::aws);
  EXPECT_EQ(p->statements[0].resource.begin()->service, Service::s3);
  EXPECT_TRUE(p->statements[0].resource.begin()->region.empty());
  EXPECT_EQ(p->statements[0].resource.begin()->account, arbitrary_tenant);
  EXPECT_EQ(p->statements[0].resource.begin()->resource, "mybucket");
  // second resource: all objects in the bucket
  EXPECT_EQ((p->statements[0].resource.begin() + 1)->partition,
	    Partition::aws);
  EXPECT_EQ((p->statements[0].resource.begin() + 1)->service,
	    Service::s3);
  EXPECT_TRUE((p->statements[0].resource.begin() + 1)->region.empty());
  EXPECT_EQ((p->statements[0].resource.begin() + 1)->account,
	    arbitrary_tenant);
  EXPECT_EQ((p->statements[0].resource.begin() + 1)->resource, "mybucket/*");
  EXPECT_TRUE(p->statements[0].notresource.empty());
  EXPECT_TRUE(p->statements[0].conditions.empty());
}
// Evaluate example2 for every s3 action: the named tenant principal is
// allowed on mybucket and its objects; a different principal, or a
// different bucket, matches nothing.
TEST_F(PolicyTest, Eval2) {
  auto p = Policy(cct.get(), arbitrary_tenant,
		  bufferlist::static_from_string(example2), true);
  Environment e;

  auto trueacct = FakeIdentity(
    Principal::tenant("ACCOUNT-ID-WITHOUT-HYPHENS"));

  auto notacct = FakeIdentity(
    Principal::tenant("some-other-account"));
  for (auto i = 0ULL; i < s3Count; ++i) {
    // matching principal: allowed on the bucket and its objects
    ARN arn1(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "mybucket");
    EXPECT_EQ(p.eval(e, trueacct, i, arn1),
	      Effect::Allow);
    ARN arn2(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "mybucket/myobject");
    EXPECT_EQ(p.eval(e, trueacct, i, arn2),
	      Effect::Allow);
    // non-matching principal: no statement applies
    ARN arn3(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "mybucket");
    EXPECT_EQ(p.eval(e, notacct, i, arn3),
	      Effect::Pass);
    ARN arn4(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "mybucket/myobject");
    EXPECT_EQ(p.eval(e, notacct, i, arn4),
	      Effect::Pass);
    // matching principal but unlisted resource: no statement applies
    ARN arn5(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "notyourbucket");
    EXPECT_EQ(p.eval(e, trueacct, i, arn5),
	      Effect::Pass);
    ARN arn6(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "notyourbucket/notyourobject");
    EXPECT_EQ(p.eval(e, trueacct, i, arn6),
	      Effect::Pass);
  }
}
// Parse example3 (three statements: PutBucketPolicy on *, ListAllMyBuckets
// on *, and a read-only action set on confidential-data gated by an MFA
// Bool condition) and verify every field of all three statements.
TEST_F(PolicyTest, Parse3) {
  boost::optional<Policy> p;

  ASSERT_NO_THROW(p = Policy(cct.get(), arbitrary_tenant,
			     bufferlist::static_from_string(example3), true));
  ASSERT_TRUE(p);

  EXPECT_EQ(p->text, example3);
  EXPECT_EQ(p->version, Version::v2012_10_17);
  EXPECT_FALSE(p->id);
  ASSERT_FALSE(p->statements.empty());
  // must be ASSERT: statements[1] and [2] are indexed below, so an EXPECT
  // here would let the test run on into out-of-bounds access on failure
  ASSERT_EQ(p->statements.size(), 3U);

  EXPECT_EQ(*p->statements[0].sid, "FirstStatement");
  EXPECT_TRUE(p->statements[0].princ.empty());
  EXPECT_TRUE(p->statements[0].noprinc.empty());
  EXPECT_EQ(p->statements[0].effect, Effect::Allow);
  Action_t act;
  act[s3PutBucketPolicy] = 1;
  EXPECT_EQ(p->statements[0].action, act);
  EXPECT_EQ(p->statements[0].notaction, None);
  ASSERT_FALSE(p->statements[0].resource.empty());
  ASSERT_EQ(p->statements[0].resource.size(), 1U);
  EXPECT_EQ(p->statements[0].resource.begin()->partition, Partition::wildcard);
  EXPECT_EQ(p->statements[0].resource.begin()->service, Service::wildcard);
  EXPECT_EQ(p->statements[0].resource.begin()->region, "*");
  EXPECT_EQ(p->statements[0].resource.begin()->account, arbitrary_tenant);
  EXPECT_EQ(p->statements[0].resource.begin()->resource, "*");
  EXPECT_TRUE(p->statements[0].notresource.empty());
  EXPECT_TRUE(p->statements[0].conditions.empty());

  EXPECT_EQ(*p->statements[1].sid, "SecondStatement");
  EXPECT_TRUE(p->statements[1].princ.empty());
  EXPECT_TRUE(p->statements[1].noprinc.empty());
  EXPECT_EQ(p->statements[1].effect, Effect::Allow);
  Action_t act1;
  act1[s3ListAllMyBuckets] = 1;
  EXPECT_EQ(p->statements[1].action, act1);
  EXPECT_EQ(p->statements[1].notaction, None);
  ASSERT_FALSE(p->statements[1].resource.empty());
  ASSERT_EQ(p->statements[1].resource.size(), 1U);
  EXPECT_EQ(p->statements[1].resource.begin()->partition, Partition::wildcard);
  EXPECT_EQ(p->statements[1].resource.begin()->service, Service::wildcard);
  EXPECT_EQ(p->statements[1].resource.begin()->region, "*");
  EXPECT_EQ(p->statements[1].resource.begin()->account, arbitrary_tenant);
  EXPECT_EQ(p->statements[1].resource.begin()->resource, "*");
  EXPECT_TRUE(p->statements[1].notresource.empty());
  EXPECT_TRUE(p->statements[1].conditions.empty());

  EXPECT_EQ(*p->statements[2].sid, "ThirdStatement");
  EXPECT_TRUE(p->statements[2].princ.empty());
  EXPECT_TRUE(p->statements[2].noprinc.empty());
  EXPECT_EQ(p->statements[2].effect, Effect::Allow);
  // the third statement lists every read-only s3 action
  Action_t act2;
  act2[s3ListMultipartUploadParts] = 1;
  act2[s3ListBucket] = 1;
  act2[s3ListBucketVersions] = 1;
  act2[s3ListAllMyBuckets] = 1;
  act2[s3ListBucketMultipartUploads] = 1;
  act2[s3GetObject] = 1;
  act2[s3GetObjectVersion] = 1;
  act2[s3GetObjectAcl] = 1;
  act2[s3GetObjectVersionAcl] = 1;
  act2[s3GetObjectTorrent] = 1;
  act2[s3GetObjectVersionTorrent] = 1;
  act2[s3GetAccelerateConfiguration] = 1;
  act2[s3GetBucketAcl] = 1;
  act2[s3GetBucketCORS] = 1;
  act2[s3GetBucketVersioning] = 1;
  act2[s3GetBucketRequestPayment] = 1;
  act2[s3GetBucketLocation] = 1;
  act2[s3GetBucketPolicy] = 1;
  act2[s3GetBucketNotification] = 1;
  act2[s3GetBucketLogging] = 1;
  act2[s3GetBucketTagging] = 1;
  act2[s3GetBucketWebsite] = 1;
  act2[s3GetLifecycleConfiguration] = 1;
  act2[s3GetReplicationConfiguration] = 1;
  act2[s3GetObjectTagging] = 1;
  act2[s3GetObjectVersionTagging] = 1;
  act2[s3GetBucketObjectLockConfiguration] = 1;
  act2[s3GetObjectRetention] = 1;
  act2[s3GetObjectLegalHold] = 1;
  act2[s3GetBucketPolicyStatus] = 1;
  act2[s3GetBucketPublicAccessBlock] = 1;
  act2[s3GetPublicAccessBlock] = 1;
  act2[s3GetBucketEncryption] = 1;

  EXPECT_EQ(p->statements[2].action, act2);
  EXPECT_EQ(p->statements[2].notaction, None);
  ASSERT_FALSE(p->statements[2].resource.empty());
  ASSERT_EQ(p->statements[2].resource.size(), 2U);
  EXPECT_EQ(p->statements[2].resource.begin()->partition, Partition::aws);
  EXPECT_EQ(p->statements[2].resource.begin()->service, Service::s3);
  EXPECT_TRUE(p->statements[2].resource.begin()->region.empty());
  EXPECT_EQ(p->statements[2].resource.begin()->account, arbitrary_tenant);
  EXPECT_EQ(p->statements[2].resource.begin()->resource, "confidential-data");
  EXPECT_EQ((p->statements[2].resource.begin() + 1)->partition,
	    Partition::aws);
  EXPECT_EQ((p->statements[2].resource.begin() + 1)->service, Service::s3);
  EXPECT_TRUE((p->statements[2].resource.begin() + 1)->region.empty());
  EXPECT_EQ((p->statements[2].resource.begin() + 1)->account,
	    arbitrary_tenant);
  EXPECT_EQ((p->statements[2].resource.begin() + 1)->resource,
	    "confidential-data/*");
  EXPECT_TRUE(p->statements[2].notresource.empty());
  // single Bool condition on aws:MultiFactorAuthPresent == "true"
  ASSERT_FALSE(p->statements[2].conditions.empty());
  ASSERT_EQ(p->statements[2].conditions.size(), 1U);
  EXPECT_EQ(p->statements[2].conditions[0].op, TokenID::Bool);
  EXPECT_EQ(p->statements[2].conditions[0].key, "aws:MultiFactorAuthPresent");
  EXPECT_FALSE(p->statements[2].conditions[0].ifexists);
  ASSERT_FALSE(p->statements[2].conditions[0].vals.empty());
  EXPECT_EQ(p->statements[2].conditions[0].vals.size(), 1U);
  EXPECT_EQ(p->statements[2].conditions[0].vals[0], "true");
}
// Evaluate example3 under three environments (no MFA key, MFA=true,
// MFA=false): the read-only set on confidential-data is allowed only when
// MFA is present and true; anything else passes through.
TEST_F(PolicyTest, Eval3) {
  auto p = Policy(cct.get(), arbitrary_tenant,
		  bufferlist::static_from_string(example3), true);
  Environment em;  // no MFA key at all
  Environment tr = { { "aws:MultiFactorAuthPresent", "true" } };
  Environment fa = { { "aws:MultiFactorAuthPresent", "false" } };

  // the read-only actions granted by the third statement
  Action_t s3allow;
  s3allow[s3ListMultipartUploadParts] = 1;
  s3allow[s3ListBucket] = 1;
  s3allow[s3ListBucketVersions] = 1;
  s3allow[s3ListAllMyBuckets] = 1;
  s3allow[s3ListBucketMultipartUploads] = 1;
  s3allow[s3GetObject] = 1;
  s3allow[s3GetObjectVersion] = 1;
  s3allow[s3GetObjectAcl] = 1;
  s3allow[s3GetObjectVersionAcl] = 1;
  s3allow[s3GetObjectTorrent] = 1;
  s3allow[s3GetObjectVersionTorrent] = 1;
  s3allow[s3GetAccelerateConfiguration] = 1;
  s3allow[s3GetBucketAcl] = 1;
  s3allow[s3GetBucketCORS] = 1;
  s3allow[s3GetBucketVersioning] = 1;
  s3allow[s3GetBucketRequestPayment] = 1;
  s3allow[s3GetBucketLocation] = 1;
  s3allow[s3GetBucketPolicy] = 1;
  s3allow[s3GetBucketNotification] = 1;
  s3allow[s3GetBucketLogging] = 1;
  s3allow[s3GetBucketTagging] = 1;
  s3allow[s3GetBucketWebsite] = 1;
  s3allow[s3GetLifecycleConfiguration] = 1;
  s3allow[s3GetReplicationConfiguration] = 1;
  s3allow[s3GetObjectTagging] = 1;
  s3allow[s3GetObjectVersionTagging] = 1;
  s3allow[s3GetBucketObjectLockConfiguration] = 1;
  s3allow[s3GetObjectRetention] = 1;
  s3allow[s3GetObjectLegalHold] = 1;
  s3allow[s3GetBucketPolicyStatus] = 1;
  s3allow[s3GetBucketPublicAccessBlock] = 1;
  s3allow[s3GetPublicAccessBlock] = 1;
  s3allow[s3GetBucketEncryption] = 1;

  // PutBucketPolicy is allowed unconditionally by the first statement
  ARN arn1(Partition::aws, Service::s3,
		     "", arbitrary_tenant, "mybucket");
  EXPECT_EQ(p.eval(em, none, s3PutBucketPolicy, arn1),
	    Effect::Allow);

  ARN arn2(Partition::aws, Service::s3,
		     "", arbitrary_tenant, "mybucket");
  EXPECT_EQ(p.eval(em, none, s3PutBucketPolicy, arn2),
	    Effect::Allow);

  // skip actions covered unconditionally by the first two statements
  for (auto op = 0ULL; op < s3Count; ++op) {
    if ((op == s3ListAllMyBuckets) || (op == s3PutBucketPolicy)) {
      continue;
    }
    // bucket itself: allowed only with MFA=true and only for the read set
    ARN arn3(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "confidential-data");
    EXPECT_EQ(p.eval(em, none, op, arn3),
	      Effect::Pass);
    ARN arn4(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "confidential-data");
    EXPECT_EQ(p.eval(tr, none, op, arn4),
	      s3allow[op] ? Effect::Allow : Effect::Pass);
    ARN arn5(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "confidential-data");
    EXPECT_EQ(p.eval(fa, none, op, arn5),
	      Effect::Pass);
    // objects under the bucket: same gating
    ARN arn6(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "confidential-data/moo");
    EXPECT_EQ(p.eval(em, none, op, arn6),
	      Effect::Pass);
    ARN arn7(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "confidential-data/moo");
    EXPECT_EQ(p.eval(tr, none, op, arn7),
	      s3allow[op] ? Effect::Allow : Effect::Pass);
    ARN arn8(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "confidential-data/moo");
    EXPECT_EQ(p.eval(fa, none, op, arn8),
	      Effect::Pass);

    // a bucket not named in any statement is never allowed
    ARN arn9(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "really-confidential-data");
    EXPECT_EQ(p.eval(em, none, op, arn9),
	      Effect::Pass);
    ARN arn10(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "really-confidential-data");
    EXPECT_EQ(p.eval(tr, none, op, arn10),
	      Effect::Pass);
    ARN arn11(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "really-confidential-data");
    EXPECT_EQ(p.eval(fa, none, op, arn11),
	      Effect::Pass);
    ARN arn12(Partition::aws, Service::s3,
		      "", arbitrary_tenant,
		      "really-confidential-data/moo");
    EXPECT_EQ(p.eval(em, none, op, arn12), Effect::Pass);
    ARN arn13(Partition::aws, Service::s3,
		      "", arbitrary_tenant,
		      "really-confidential-data/moo");
    EXPECT_EQ(p.eval(tr, none, op, arn13), Effect::Pass);
    ARN arn14(Partition::aws, Service::s3,
		      "", arbitrary_tenant,
		      "really-confidential-data/moo");
    EXPECT_EQ(p.eval(fa, none, op, arn14), Effect::Pass);
  }
}
// Parse example4 (single Allow of iam:CreateRole on *) and verify every
// field of the resulting Policy.
TEST_F(PolicyTest, Parse4) {
  boost::optional<Policy> p;

  ASSERT_NO_THROW(p = Policy(cct.get(), arbitrary_tenant,
			     bufferlist::static_from_string(example4), true));
  ASSERT_TRUE(p);

  EXPECT_EQ(p->text, example4);
  EXPECT_EQ(p->version, Version::v2012_10_17);
  EXPECT_FALSE(p->id);
  // assert the statement exists before indexing into it (the original
  // accessed statements[0] before checking for emptiness)
  ASSERT_FALSE(p->statements.empty());
  ASSERT_EQ(p->statements.size(), 1U);
  EXPECT_FALSE(p->statements[0].sid);
  EXPECT_TRUE(p->statements[0].princ.empty());
  EXPECT_TRUE(p->statements[0].noprinc.empty());
  EXPECT_EQ(p->statements[0].effect, Effect::Allow);
  Action_t act;
  act[iamCreateRole] = 1;
  EXPECT_EQ(p->statements[0].action, act);
  EXPECT_EQ(p->statements[0].notaction, None);
  ASSERT_FALSE(p->statements[0].resource.empty());
  ASSERT_EQ(p->statements[0].resource.size(), 1U);
  EXPECT_EQ(p->statements[0].resource.begin()->partition, Partition::wildcard);
  EXPECT_EQ(p->statements[0].resource.begin()->service, Service::wildcard);
  EXPECT_EQ(p->statements[0].resource.begin()->region, "*");
  EXPECT_EQ(p->statements[0].resource.begin()->account, arbitrary_tenant);
  EXPECT_EQ(p->statements[0].resource.begin()->resource, "*");
  EXPECT_TRUE(p->statements[0].notresource.empty());
  EXPECT_TRUE(p->statements[0].conditions.empty());
}
// Evaluate example4: iam:CreateRole is allowed on any resource, while an
// action the policy does not list (iam:DeleteRole) passes through.
TEST_F(PolicyTest, Eval4) {
  const auto policy = Policy(cct.get(), arbitrary_tenant,
			     bufferlist::static_from_string(example4), true);
  Environment env;
  const ARN role_arn(Partition::aws, Service::iam,
		     "", arbitrary_tenant, "role/example_role");

  // listed action: allowed
  EXPECT_EQ(Effect::Allow,
	    policy.eval(env, none, iamCreateRole, role_arn));
  // unlisted action: no statement matches
  EXPECT_EQ(Effect::Pass,
	    policy.eval(env, none, iamDeleteRole, role_arn));
}
// Parse example5 (allow "iam:*" on one role ARN) and verify every field;
// the iam wildcard expands to the action range between s3All and iamAll.
TEST_F(PolicyTest, Parse5) {
  boost::optional<Policy> p;

  ASSERT_NO_THROW(p = Policy(cct.get(), arbitrary_tenant,
			     bufferlist::static_from_string(example5), true));
  ASSERT_TRUE(p);

  EXPECT_EQ(p->text, example5);
  EXPECT_EQ(p->version, Version::v2012_10_17);
  EXPECT_FALSE(p->id);
  // assert the statement exists before indexing into it (the original
  // accessed statements[0] before checking for emptiness)
  ASSERT_FALSE(p->statements.empty());
  ASSERT_EQ(p->statements.size(), 1U);
  EXPECT_FALSE(p->statements[0].sid);
  EXPECT_TRUE(p->statements[0].princ.empty());
  EXPECT_TRUE(p->statements[0].noprinc.empty());
  EXPECT_EQ(p->statements[0].effect, Effect::Allow);
  // "iam:*" sets every action bit in the iam range
  Action_t act;
  for (auto i = s3All+1; i <= iamAll; i++)
    act[i] = 1;
  EXPECT_EQ(p->statements[0].action, act);
  EXPECT_EQ(p->statements[0].notaction, None);
  ASSERT_FALSE(p->statements[0].resource.empty());
  ASSERT_EQ(p->statements[0].resource.size(), 1U);
  EXPECT_EQ(p->statements[0].resource.begin()->partition, Partition::aws);
  EXPECT_EQ(p->statements[0].resource.begin()->service, Service::iam);
  EXPECT_EQ(p->statements[0].resource.begin()->region, "");
  EXPECT_EQ(p->statements[0].resource.begin()->account, arbitrary_tenant);
  EXPECT_EQ(p->statements[0].resource.begin()->resource, "role/example_role");
  EXPECT_TRUE(p->statements[0].notresource.empty());
  EXPECT_TRUE(p->statements[0].conditions.empty());
}
// Evaluate example5: iam actions are allowed on the named role under the
// test tenant; s3 actions are not covered, and neither is the same role
// ARN under a different (empty) tenant.
TEST_F(PolicyTest, Eval5) {
  const auto policy = Policy(cct.get(), arbitrary_tenant,
			     bufferlist::static_from_string(example5), true);
  Environment env;
  // helper to build the role ARN under a given tenant
  const auto role_arn = [](const string& tenant) {
    return ARN(Partition::aws, Service::iam, "", tenant, "role/example_role");
  };

  // iam action on the tenant's role: allowed
  EXPECT_EQ(Effect::Allow,
	    policy.eval(env, none, iamCreateRole, role_arn(arbitrary_tenant)));
  // s3 action is outside the iam:* range
  EXPECT_EQ(Effect::Pass,
	    policy.eval(env, none, s3ListBucket, role_arn(arbitrary_tenant)));
  // same role name under another account does not match
  EXPECT_EQ(Effect::Pass,
	    policy.eval(env, none, iamCreateRole, role_arn("")));
}
// example6 is a single-statement policy allowing "*" (every action across
// every service) on one IAM user ARN; verify the parsed representation.
TEST_F(PolicyTest, Parse6) {
  boost::optional<Policy> p;

  ASSERT_NO_THROW(p = Policy(cct.get(), arbitrary_tenant,
			     bufferlist::static_from_string(example6), true));
  ASSERT_TRUE(p);

  EXPECT_EQ(p->text, example6);
  EXPECT_EQ(p->version, Version::v2012_10_17);
  EXPECT_FALSE(p->id);
  // Assert the statement vector is populated *before* indexing into it;
  // the original read statements[0] first, which is undefined behavior
  // if parsing produced no statements.
  ASSERT_FALSE(p->statements.empty());
  ASSERT_EQ(p->statements.size(), 1U);
  EXPECT_FALSE(p->statements[0].sid);
  EXPECT_TRUE(p->statements[0].princ.empty());
  EXPECT_TRUE(p->statements[0].noprinc.empty());
  EXPECT_EQ(p->statements[0].effect, Effect::Allow);
  // "*" expands to every known action bit, [0, stsAll].
  Action_t act;
  for (auto i = 0U; i <= stsAll; i++)
    act[i] = 1;

  EXPECT_EQ(p->statements[0].action, act);
  EXPECT_EQ(p->statements[0].notaction, None);
  ASSERT_FALSE(p->statements[0].resource.empty());
  ASSERT_EQ(p->statements[0].resource.size(), 1U);
  EXPECT_EQ(p->statements[0].resource.begin()->partition, Partition::aws);
  EXPECT_EQ(p->statements[0].resource.begin()->service, Service::iam);
  EXPECT_EQ(p->statements[0].resource.begin()->region, "");
  EXPECT_EQ(p->statements[0].resource.begin()->account, arbitrary_tenant);
  EXPECT_EQ(p->statements[0].resource.begin()->resource, "user/A");
  EXPECT_TRUE(p->statements[0].notresource.empty());
  EXPECT_TRUE(p->statements[0].conditions.empty());
}
// example6 allows "*" on user/A, so actions from different services
// (IAM and S3 alike) all evaluate to Allow on that ARN.
TEST_F(PolicyTest, Eval6) {
  auto p = Policy(cct.get(), arbitrary_tenant,
		  bufferlist::static_from_string(example6), true);
  Environment e;

  ARN arn1(Partition::aws, Service::iam,
		       "", arbitrary_tenant, "user/A");
  EXPECT_EQ(p.eval(e, none, iamCreateRole, arn1),
	    Effect::Allow);

  ARN arn2(Partition::aws, Service::iam,
		       "", arbitrary_tenant, "user/A");
  EXPECT_EQ(p.eval(e, none, s3ListBucket, arn2),
	    Effect::Allow);
}
// example7 names a subuser principal ("user/A:subA"); verify the parsed
// statement, in particular the Principal fields.
TEST_F(PolicyTest, Parse7) {
  boost::optional<Policy> p;

  ASSERT_NO_THROW(p = Policy(cct.get(), arbitrary_tenant,
			     bufferlist::static_from_string(example7), true));
  ASSERT_TRUE(p);

  EXPECT_EQ(p->text, example7);
  EXPECT_EQ(p->version, Version::v2012_10_17);
  ASSERT_FALSE(p->statements.empty());
  EXPECT_EQ(p->statements.size(), 1U);
  // princ.begin() is dereferenced below, so non-emptiness must be an
  // ASSERT (the original used EXPECT, which would keep running into UB).
  ASSERT_FALSE(p->statements[0].princ.empty());
  ASSERT_EQ(p->statements[0].princ.size(), 1U);
  EXPECT_TRUE(p->statements[0].noprinc.empty());
  EXPECT_EQ(p->statements[0].effect, Effect::Allow);
  Action_t act;
  act[s3ListBucket] = 1;
  EXPECT_EQ(p->statements[0].action, act);
  EXPECT_EQ(p->statements[0].notaction, None);
  ASSERT_FALSE(p->statements[0].resource.empty());
  ASSERT_EQ(p->statements[0].resource.size(), 1U);
  EXPECT_EQ(p->statements[0].resource.begin()->partition, Partition::aws);
  EXPECT_EQ(p->statements[0].resource.begin()->service, Service::s3);
  EXPECT_TRUE(p->statements[0].resource.begin()->region.empty());
  EXPECT_EQ(p->statements[0].resource.begin()->account, arbitrary_tenant);
  EXPECT_EQ(p->statements[0].resource.begin()->resource, "mybucket/*");
  // Subuser id keeps the "user:subuser" form; tenant stays empty.
  EXPECT_TRUE(p->statements[0].princ.begin()->is_user());
  EXPECT_FALSE(p->statements[0].princ.begin()->is_wildcard());
  EXPECT_EQ(p->statements[0].princ.begin()->get_tenant(), "");
  EXPECT_EQ(p->statements[0].princ.begin()->get_id(), "A:subA");
  EXPECT_TRUE(p->statements[0].notresource.empty());
  EXPECT_TRUE(p->statements[0].conditions.empty());
}
// example7 allows s3:ListBucket only for the subuser principal A:subA:
// the subuser gets Allow, the parent user and a sibling subuser Pass.
TEST_F(PolicyTest, Eval7) {
  auto p = Policy(cct.get(), arbitrary_tenant,
		  bufferlist::static_from_string(example7), true);
  Environment e;

  // Plain "" temporaries suffice; the original wrapped the literals in
  // std::move(""), which is a no-op on a string literal
  // (clang-tidy: performance-move-const-arg).
  auto subacct = FakeIdentity(
    Principal::user("", "A:subA"));
  auto parentacct = FakeIdentity(
    Principal::user("", "A"));
  auto sub2acct = FakeIdentity(
    Principal::user("", "A:sub2A"));

  ARN arn1(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "mybucket/*");
  EXPECT_EQ(p.eval(e, subacct, s3ListBucket, arn1),
	    Effect::Allow);

  ARN arn2(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "mybucket/*");
  EXPECT_EQ(p.eval(e, parentacct, s3ListBucket, arn2),
	    Effect::Pass);

  ARN arn3(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "mybucket/*");
  EXPECT_EQ(p.eval(e, sub2acct, s3ListBucket, arn3),
	    Effect::Pass);
}
// Static fixture data for PolicyTest. The raw-string bodies below are
// compared verbatim against Policy::text in the Parse* tests, so their
// whitespace must not be altered.
const string PolicyTest::arbitrary_tenant = "arbitrary_tenant";

// Single statement, no principal: allow s3:ListBucket on one bucket ARN.
string PolicyTest::example1 = R"(
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket"
}
}
)";

// Policy with Id/Sid, an account-root principal, and two resources.
string PolicyTest::example2 = R"(
{
"Version": "2012-10-17",
"Id": "S3-Account-Permissions",
"Statement": [{
"Sid": "1",
"Effect": "Allow",
"Principal": {"AWS": ["arn:aws:iam::ACCOUNT-ID-WITHOUT-HYPHENS:root"]},
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::mybucket",
"arn:aws:s3:::mybucket/*"
]
}]
}
)";

// Three statements exercising Sid, wildcard actions, and an MFA condition.
string PolicyTest::example3 = R"(
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "FirstStatement",
"Effect": "Allow",
"Action": ["s3:PutBucketPolicy"],
"Resource": "*"
},
{
"Sid": "SecondStatement",
"Effect": "Allow",
"Action": "s3:ListAllMyBuckets",
"Resource": "*"
},
{
"Sid": "ThirdStatement",
"Effect": "Allow",
"Action": [
"s3:List*",
"s3:Get*"
],
"Resource": [
"arn:aws:s3:::confidential-data",
"arn:aws:s3:::confidential-data/*"
],
"Condition": {"Bool": {"aws:MultiFactorAuthPresent": "true"}}
}
]
}
)";

// Single IAM action on the wildcard resource.
string PolicyTest::example4 = R"(
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "iam:CreateRole",
"Resource": "*"
}
}
)";

// Service-wide wildcard action ("iam:*") on a single role ARN.
string PolicyTest::example5 = R"(
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "iam:*",
"Resource": "arn:aws:iam:::role/example_role"
}
}
)";

// Global wildcard action ("*") on a single user ARN.
string PolicyTest::example6 = R"(
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "*",
"Resource": "arn:aws:iam:::user/A"
}
}
)";

// Subuser principal ("user/A:subA") scoped to one bucket prefix.
string PolicyTest::example7 = R"(
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Principal": {"AWS": ["arn:aws:iam:::user/A:subA"]},
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::mybucket/*"
}
}
)";
// Fixture for IpAddress/NotIpAddress condition tests. The MaskedIP
// constants are built from explicit bit strings so the tests do not
// depend on the parser under test (Condition::as_network) for their
// expected values.
class IPPolicyTest : public ::testing::Test {
protected:
  intrusive_ptr<CephContext> cct;
  static const string arbitrary_tenant;
  static string ip_address_allow_example;
  static string ip_address_deny_example;
  static string ip_address_full_example;
  // 192.168.1.0/24
  const rgw::IAM::MaskedIP allowedIPv4Range = { false, rgw::IAM::Address("11000000101010000000000100000000"), 24 };
  // 192.168.1.1/32
  const rgw::IAM::MaskedIP blocklistedIPv4 = { false, rgw::IAM::Address("11000000101010000000000100000001"), 32 };
  // 2001:db8:85a3:0:0:8a2e:370:7334/128
  const rgw::IAM::MaskedIP allowedIPv6 = { true, rgw::IAM::Address("00100000000000010000110110111000100001011010001100000000000000000000000000000000100010100010111000000011011100000111001100110100"), 128 };
  // ::1
  const rgw::IAM::MaskedIP blocklistedIPv6 = { true, rgw::IAM::Address(1), 128 };
  // 2001:db8:85a3:0:0:8a2e:370:7330/124
  const rgw::IAM::MaskedIP allowedIPv6Range = { true, rgw::IAM::Address("00100000000000010000110110111000100001011010001100000000000000000000000000000000100010100010111000000011011100000111001100110000"), 124 };
public:
  IPPolicyTest() {
    cct = new CephContext(CEPH_ENTITY_TYPE_CLIENT);
  }
};
// Tenant name used when constructing every Policy in the IP tests.
const string IPPolicyTest::arbitrary_tenant = "arbitrary_tenant";
// Check MaskedIP stringification and equality.
// NOTE(review): the last two EXPECTs compare networks of *different*
// prefix lengths as equal — MaskedIP::operator== appears to compare only
// the common masked prefix rather than exact network identity; confirm
// against the operator's definition in rgw_iam_policy.h.
TEST_F(IPPolicyTest, MaskedIPOperations) {
  EXPECT_EQ(stringify(allowedIPv4Range), "192.168.1.0/24");
  EXPECT_EQ(stringify(blocklistedIPv4), "192.168.1.1/32");
  EXPECT_EQ(stringify(allowedIPv6), "2001:db8:85a3:0:0:8a2e:370:7334/128");
  EXPECT_EQ(stringify(allowedIPv6Range), "2001:db8:85a3:0:0:8a2e:370:7330/124");
  EXPECT_EQ(stringify(blocklistedIPv6), "0:0:0:0:0:0:0:1/128");
  EXPECT_EQ(allowedIPv4Range, blocklistedIPv4);
  EXPECT_EQ(allowedIPv6Range, allowedIPv6);
}
// as_network must parse a CIDR IPv4 range into the expected MaskedIP.
TEST_F(IPPolicyTest, asNetworkIPv4Range) {
  auto actualIPv4Range = rgw::IAM::Condition::as_network("192.168.1.0/24");
  ASSERT_TRUE(actualIPv4Range.is_initialized());
  EXPECT_EQ(*actualIPv4Range, allowedIPv4Range);
}
// A bare IPv4 address (no prefix) must parse as a /32 host network.
TEST_F(IPPolicyTest, asNetworkIPv4) {
  auto actualIPv4 = rgw::IAM::Condition::as_network("192.168.1.1");
  ASSERT_TRUE(actualIPv4.is_initialized());
  EXPECT_EQ(*actualIPv4, blocklistedIPv4);
}
// as_network must parse a CIDR IPv6 range into the expected MaskedIP.
TEST_F(IPPolicyTest, asNetworkIPv6Range) {
  auto actualIPv6Range = rgw::IAM::Condition::as_network("2001:db8:85a3:0:0:8a2e:370:7330/124");
  ASSERT_TRUE(actualIPv6Range.is_initialized());
  EXPECT_EQ(*actualIPv6Range, allowedIPv6Range);
}
// A bare IPv6 address (no prefix) must parse as a /128 host network.
TEST_F(IPPolicyTest, asNetworkIPv6) {
  auto actualIPv6 = rgw::IAM::Condition::as_network("2001:db8:85a3:0:0:8a2e:370:7334");
  ASSERT_TRUE(actualIPv6.is_initialized());
  EXPECT_EQ(*actualIPv6, allowedIPv6);
}
// Malformed inputs (empty string, out-of-range prefix lengths, stray
// punctuation, oversized octet) must all fail to parse.
TEST_F(IPPolicyTest, asNetworkInvalid) {
  EXPECT_FALSE(rgw::IAM::Condition::as_network(""));
  EXPECT_FALSE(rgw::IAM::Condition::as_network("192.168.1.1/33"));
  EXPECT_FALSE(rgw::IAM::Condition::as_network("2001:db8:85a3:0:0:8a2e:370:7334/129"));
  EXPECT_FALSE(rgw::IAM::Condition::as_network("192.168.1.1:"));
  EXPECT_FALSE(rgw::IAM::Condition::as_network("1.2.3.10000"));
}
// rgw_build_iam_environment must populate aws:SourceIp from the request
// environment, honouring the rgw_remote_addr_param config override and
// taking only the first entry of a comma-separated X-Forwarded-For list.
TEST_F(IPPolicyTest, IPEnvironment) {
  RGWProcessEnv penv;
  // Unfortunately RGWCivetWeb is too tightly tied to civetweb to test RGWCivetWeb::init_env.
  RGWEnv rgw_env;
  rgw::sal::RadosStore store;
  std::unique_ptr<rgw::sal::User> user = store.get_user(rgw_user());
  rgw_env.set("REMOTE_ADDR", "192.168.1.1");
  rgw_env.set("HTTP_HOST", "1.2.3.4");
  req_state rgw_req_state(cct.get(), penv, &rgw_env, 0);
  rgw_req_state.set_user(user);
  rgw_build_iam_environment(&store, &rgw_req_state);
  // Default source: REMOTE_ADDR.
  auto ip = rgw_req_state.env.find("aws:SourceIp");
  ASSERT_NE(ip, rgw_req_state.env.end());
  EXPECT_EQ(ip->second, "192.168.1.1");

  // Point the config at a variable that is not yet set: no SourceIp.
  ASSERT_EQ(cct.get()->_conf.set_val("rgw_remote_addr_param", "SOME_VAR"), 0);
  EXPECT_EQ(cct.get()->_conf->rgw_remote_addr_param, "SOME_VAR");
  rgw_req_state.env.clear();
  rgw_build_iam_environment(&store, &rgw_req_state);
  ip = rgw_req_state.env.find("aws:SourceIp");
  EXPECT_EQ(ip, rgw_req_state.env.end());

  // Once the configured variable exists, it is used instead of REMOTE_ADDR.
  rgw_env.set("SOME_VAR", "192.168.1.2");
  rgw_req_state.env.clear();
  rgw_build_iam_environment(&store, &rgw_req_state);
  ip = rgw_req_state.env.find("aws:SourceIp");
  ASSERT_NE(ip, rgw_req_state.env.end());
  EXPECT_EQ(ip->second, "192.168.1.2");

  // Standard X-Forwarded-For handling.
  ASSERT_EQ(cct.get()->_conf.set_val("rgw_remote_addr_param", "HTTP_X_FORWARDED_FOR"), 0);
  rgw_env.set("HTTP_X_FORWARDED_FOR", "192.168.1.3");
  rgw_req_state.env.clear();
  rgw_build_iam_environment(&store, &rgw_req_state);
  ip = rgw_req_state.env.find("aws:SourceIp");
  ASSERT_NE(ip, rgw_req_state.env.end());
  EXPECT_EQ(ip->second, "192.168.1.3");

  // A proxy chain keeps only the first (client) address.
  rgw_env.set("HTTP_X_FORWARDED_FOR", "192.168.1.4, 4.3.2.1, 2001:db8:85a3:8d3:1319:8a2e:370:7348");
  rgw_req_state.env.clear();
  rgw_build_iam_environment(&store, &rgw_req_state);
  ip = rgw_req_state.env.find("aws:SourceIp");
  ASSERT_NE(ip, rgw_req_state.env.end());
  EXPECT_EQ(ip->second, "192.168.1.4");
}
// Parse ip_address_full_example and verify the statement, both IpAddress
// and NotIpAddress conditions, and that the condition values round-trip
// through Condition::as_network.
TEST_F(IPPolicyTest, ParseIPAddress) {
  boost::optional<Policy> p;

  ASSERT_NO_THROW(
    p = Policy(cct.get(), arbitrary_tenant,
	       bufferlist::static_from_string(ip_address_full_example), true));
  ASSERT_TRUE(p);

  EXPECT_EQ(p->text, ip_address_full_example);
  EXPECT_EQ(p->version, Version::v2012_10_17);
  // Assert optional members are engaged before dereferencing them; the
  // original dereferenced p->id and sid unconditionally, which is UB if
  // the parser dropped them.
  ASSERT_TRUE(p->id);
  EXPECT_EQ(*p->id, "S3IPPolicyTest");
  ASSERT_FALSE(p->statements.empty());
  ASSERT_EQ(p->statements.size(), 1U);
  ASSERT_TRUE(p->statements[0].sid);
  EXPECT_EQ(*p->statements[0].sid, "IPAllow");
  // princ.begin() is dereferenced, so non-emptiness must be an ASSERT.
  ASSERT_FALSE(p->statements[0].princ.empty());
  ASSERT_EQ(p->statements[0].princ.size(), 1U);
  EXPECT_EQ(*p->statements[0].princ.begin(),
	    Principal::wildcard());
  EXPECT_TRUE(p->statements[0].noprinc.empty());
  EXPECT_EQ(p->statements[0].effect, Effect::Allow);
  Action_t act;
  act[s3ListBucket] = 1;
  EXPECT_EQ(p->statements[0].action, act);
  EXPECT_EQ(p->statements[0].notaction, None);
  ASSERT_FALSE(p->statements[0].resource.empty());
  ASSERT_EQ(p->statements[0].resource.size(), 2U);
  EXPECT_EQ(p->statements[0].resource.begin()->partition, Partition::aws);
  EXPECT_EQ(p->statements[0].resource.begin()->service, Service::s3);
  EXPECT_TRUE(p->statements[0].resource.begin()->region.empty());
  EXPECT_EQ(p->statements[0].resource.begin()->account, arbitrary_tenant);
  EXPECT_EQ(p->statements[0].resource.begin()->resource, "example_bucket");
  EXPECT_EQ((p->statements[0].resource.begin() + 1)->resource, "example_bucket/*");
  EXPECT_TRUE(p->statements[0].notresource.empty());
  ASSERT_FALSE(p->statements[0].conditions.empty());
  ASSERT_EQ(p->statements[0].conditions.size(), 2U);
  // First condition: IpAddress with an IPv4 range and an IPv6 loopback.
  EXPECT_EQ(p->statements[0].conditions[0].op, TokenID::IpAddress);
  EXPECT_EQ(p->statements[0].conditions[0].key, "aws:SourceIp");
  ASSERT_FALSE(p->statements[0].conditions[0].vals.empty());
  // vals[1] is read below, so the size check must be an ASSERT.
  ASSERT_EQ(p->statements[0].conditions[0].vals.size(), 2U);
  EXPECT_EQ(p->statements[0].conditions[0].vals[0], "192.168.1.0/24");
  EXPECT_EQ(p->statements[0].conditions[0].vals[1], "::1");

  boost::optional<rgw::IAM::MaskedIP> convertedIPv4 = rgw::IAM::Condition::as_network(p->statements[0].conditions[0].vals[0]);
  EXPECT_TRUE(convertedIPv4.is_initialized());
  if (convertedIPv4.is_initialized()) {
    EXPECT_EQ(*convertedIPv4, allowedIPv4Range);
  }

  // Second condition: NotIpAddress with an IPv4 host and an IPv6 host.
  EXPECT_EQ(p->statements[0].conditions[1].op, TokenID::NotIpAddress);
  EXPECT_EQ(p->statements[0].conditions[1].key, "aws:SourceIp");
  ASSERT_FALSE(p->statements[0].conditions[1].vals.empty());
  ASSERT_EQ(p->statements[0].conditions[1].vals.size(), 2U);
  EXPECT_EQ(p->statements[0].conditions[1].vals[0], "192.168.1.1/32");
  EXPECT_EQ(p->statements[0].conditions[1].vals[1], "2001:0db8:85a3:0000:0000:8a2e:0370:7334");

  boost::optional<rgw::IAM::MaskedIP> convertedIPv6 = rgw::IAM::Condition::as_network(p->statements[0].conditions[1].vals[1]);
  EXPECT_TRUE(convertedIPv6.is_initialized());
  if (convertedIPv6.is_initialized()) {
    EXPECT_EQ(*convertedIPv6, allowedIPv6);
  }
}
// Evaluate the three IP policies against environments containing allowed
// and blocklisted IPv4/IPv6 source addresses. Expected outcomes:
//  - no aws:SourceIp in the environment  -> condition unmet -> Pass
//  - allow policy + allowed IP           -> Allow
//  - deny policy (NotIpAddress) + non-listed IP -> Deny; listed IP -> Pass
//  - full policy (IpAddress AND NotIpAddress)   -> Allow only when the IP
//    is inside the allow range and not blocklisted.
TEST_F(IPPolicyTest, EvalIPAddress) {
  auto allowp =
    Policy(cct.get(), arbitrary_tenant,
	   bufferlist::static_from_string(ip_address_allow_example), true);
  auto denyp =
    Policy(cct.get(), arbitrary_tenant,
	   bufferlist::static_from_string(ip_address_deny_example), true);
  auto fullp =
    Policy(cct.get(), arbitrary_tenant,
	   bufferlist::static_from_string(ip_address_full_example), true);
  Environment e;
  Environment allowedIP, blocklistedIP, allowedIPv6, blocklistedIPv6;
  allowedIP.emplace("aws:SourceIp","192.168.1.2");
  allowedIPv6.emplace("aws:SourceIp", "::1");
  blocklistedIP.emplace("aws:SourceIp", "192.168.1.1");
  blocklistedIPv6.emplace("aws:SourceIp", "2001:0db8:85a3:0000:0000:8a2e:0370:7334");

  auto trueacct = FakeIdentity(
    Principal::tenant("ACCOUNT-ID-WITHOUT-HYPHENS"));
  // Without an IP address in the environment then evaluation will always pass
  ARN arn1(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "example_bucket");
  EXPECT_EQ(allowp.eval(e, trueacct, s3ListBucket, arn1),
	    Effect::Pass);

  ARN arn2(Partition::aws, Service::s3,
	   "", arbitrary_tenant, "example_bucket/myobject");
  EXPECT_EQ(fullp.eval(e, trueacct, s3ListBucket, arn2),
	    Effect::Pass);

  ARN arn3(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "example_bucket");
  EXPECT_EQ(allowp.eval(allowedIP, trueacct, s3ListBucket, arn3),
	    Effect::Allow);

  ARN arn4(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "example_bucket");
  EXPECT_EQ(allowp.eval(blocklistedIPv6, trueacct, s3ListBucket, arn4),
	    Effect::Pass);

  // Deny policy: NotIpAddress matches every IP *not* in its list.
  ARN arn5(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "example_bucket");
  EXPECT_EQ(denyp.eval(allowedIP, trueacct, s3ListBucket, arn5),
	    Effect::Deny);

  ARN arn6(Partition::aws, Service::s3,
	   "", arbitrary_tenant, "example_bucket/myobject");
  EXPECT_EQ(denyp.eval(allowedIP, trueacct, s3ListBucket, arn6),
	    Effect::Deny);

  ARN arn7(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "example_bucket");
  EXPECT_EQ(denyp.eval(blocklistedIP, trueacct, s3ListBucket, arn7),
	    Effect::Pass);

  ARN arn8(Partition::aws, Service::s3,
	   "", arbitrary_tenant, "example_bucket/myobject");
  EXPECT_EQ(denyp.eval(blocklistedIP, trueacct, s3ListBucket, arn8),
	    Effect::Pass);

  ARN arn9(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "example_bucket");
  EXPECT_EQ(denyp.eval(blocklistedIPv6, trueacct, s3ListBucket, arn9),
	    Effect::Pass);

  ARN arn10(Partition::aws, Service::s3,
	    "", arbitrary_tenant, "example_bucket/myobject");
  EXPECT_EQ(denyp.eval(blocklistedIPv6, trueacct, s3ListBucket, arn10),
	    Effect::Pass);

  ARN arn11(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "example_bucket");
  EXPECT_EQ(denyp.eval(allowedIPv6, trueacct, s3ListBucket, arn11),
	    Effect::Deny);

  ARN arn12(Partition::aws, Service::s3,
	    "", arbitrary_tenant, "example_bucket/myobject");
  EXPECT_EQ(denyp.eval(allowedIPv6, trueacct, s3ListBucket, arn12),
	    Effect::Deny);

  // Full policy: allowed IPs that are not blocklisted get Allow.
  ARN arn13(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "example_bucket");
  EXPECT_EQ(fullp.eval(allowedIP, trueacct, s3ListBucket, arn13),
	    Effect::Allow);

  ARN arn14(Partition::aws, Service::s3,
	    "", arbitrary_tenant, "example_bucket/myobject");
  EXPECT_EQ(fullp.eval(allowedIP, trueacct, s3ListBucket, arn14),
	    Effect::Allow);

  ARN arn15(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "example_bucket");
  EXPECT_EQ(fullp.eval(blocklistedIP, trueacct, s3ListBucket, arn15),
	    Effect::Pass);

  ARN arn16(Partition::aws, Service::s3,
	    "", arbitrary_tenant, "example_bucket/myobject");
  EXPECT_EQ(fullp.eval(blocklistedIP, trueacct, s3ListBucket, arn16),
	    Effect::Pass);

  ARN arn17(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "example_bucket");
  EXPECT_EQ(fullp.eval(allowedIPv6, trueacct, s3ListBucket, arn17),
	    Effect::Allow);

  ARN arn18(Partition::aws, Service::s3,
	    "", arbitrary_tenant, "example_bucket/myobject");
  EXPECT_EQ(fullp.eval(allowedIPv6, trueacct, s3ListBucket, arn18),
	    Effect::Allow);

  ARN arn19(Partition::aws, Service::s3,
		       "", arbitrary_tenant, "example_bucket");
  EXPECT_EQ(fullp.eval(blocklistedIPv6, trueacct, s3ListBucket, arn19),
	    Effect::Pass);

  ARN arn20(Partition::aws, Service::s3,
	    "", arbitrary_tenant, "example_bucket/myobject");
  EXPECT_EQ(fullp.eval(blocklistedIPv6, trueacct, s3ListBucket, arn20),
	    Effect::Pass);
}
// Allow s3:ListBucket only from the 192.168.1.0/24 source range.
// (Raw-string bodies are compared verbatim via Policy::text — do not
// reformat them.)
string IPPolicyTest::ip_address_allow_example = R"(
{
"Version": "2012-10-17",
"Id": "S3SimpleIPPolicyTest",
"Statement": [{
"Sid": "1",
"Effect": "Allow",
"Principal": {"AWS": ["arn:aws:iam::ACCOUNT-ID-WITHOUT-HYPHENS:root"]},
"Action": "s3:ListBucket",
"Resource": [
"arn:aws:s3:::example_bucket"
],
"Condition": {
"IpAddress": {"aws:SourceIp": "192.168.1.0/24"}
}
}]
}
)";

// Deny s3:ListBucket from every source *except* the two listed hosts.
string IPPolicyTest::ip_address_deny_example = R"(
{
"Version": "2012-10-17",
"Id": "S3IPPolicyTest",
"Statement": {
"Effect": "Deny",
"Sid": "IPDeny",
"Action": "s3:ListBucket",
"Principal": {"AWS": ["arn:aws:iam::ACCOUNT-ID-WITHOUT-HYPHENS:root"]},
"Resource": [
"arn:aws:s3:::example_bucket",
"arn:aws:s3:::example_bucket/*"
],
"Condition": {
"NotIpAddress": {"aws:SourceIp": ["192.168.1.1/32", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"]}
}
}
}
)";

// Combined IpAddress + NotIpAddress conditions on a wildcard principal.
string IPPolicyTest::ip_address_full_example = R"(
{
"Version": "2012-10-17",
"Id": "S3IPPolicyTest",
"Statement": {
"Effect": "Allow",
"Sid": "IPAllow",
"Action": "s3:ListBucket",
"Principal": "*",
"Resource": [
"arn:aws:s3:::example_bucket",
"arn:aws:s3:::example_bucket/*"
],
"Condition": {
"IpAddress": {"aws:SourceIp": ["192.168.1.0/24", "::1"]},
"NotIpAddress": {"aws:SourceIp": ["192.168.1.1/32", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"]}
}
}
}
)";
// Literal (wildcard-free) patterns: exact match only, with the
// MATCH_CASE_INSENSITIVE flag folding case.
TEST(MatchWildcards, Simple)
{
  EXPECT_TRUE(match_wildcards("", ""));
  EXPECT_TRUE(match_wildcards("", "", MATCH_CASE_INSENSITIVE));
  EXPECT_FALSE(match_wildcards("", "abc"));
  EXPECT_FALSE(match_wildcards("", "abc", MATCH_CASE_INSENSITIVE));
  EXPECT_FALSE(match_wildcards("abc", ""));
  EXPECT_FALSE(match_wildcards("abc", "", MATCH_CASE_INSENSITIVE));
  EXPECT_TRUE(match_wildcards("abc", "abc"));
  EXPECT_TRUE(match_wildcards("abc", "abc", MATCH_CASE_INSENSITIVE));
  EXPECT_FALSE(match_wildcards("abc", "abC"));
  EXPECT_TRUE(match_wildcards("abc", "abC", MATCH_CASE_INSENSITIVE));
  EXPECT_FALSE(match_wildcards("abC", "abc"));
  EXPECT_TRUE(match_wildcards("abC", "abc", MATCH_CASE_INSENSITIVE));
  EXPECT_FALSE(match_wildcards("abc", "abcd"));
  EXPECT_FALSE(match_wildcards("abc", "abcd", MATCH_CASE_INSENSITIVE));
  EXPECT_FALSE(match_wildcards("abcd", "abc"));
  EXPECT_FALSE(match_wildcards("abcd", "abc", MATCH_CASE_INSENSITIVE));
}
// '?' matches exactly one character (never zero); it is only special in
// the pattern, not in the input string.
TEST(MatchWildcards, QuestionMark)
{
  EXPECT_FALSE(match_wildcards("?", ""));
  EXPECT_FALSE(match_wildcards("?", "", MATCH_CASE_INSENSITIVE));
  EXPECT_TRUE(match_wildcards("?", "a"));
  EXPECT_TRUE(match_wildcards("?", "a", MATCH_CASE_INSENSITIVE));
  EXPECT_TRUE(match_wildcards("?bc", "abc"));
  EXPECT_TRUE(match_wildcards("?bc", "abc", MATCH_CASE_INSENSITIVE));
  EXPECT_TRUE(match_wildcards("a?c", "abc"));
  EXPECT_TRUE(match_wildcards("a?c", "abc", MATCH_CASE_INSENSITIVE));
  EXPECT_FALSE(match_wildcards("abc", "a?c"));
  EXPECT_FALSE(match_wildcards("abc", "a?c", MATCH_CASE_INSENSITIVE));
  EXPECT_FALSE(match_wildcards("a?c", "abC"));
  EXPECT_TRUE(match_wildcards("a?c", "abC", MATCH_CASE_INSENSITIVE));
  EXPECT_TRUE(match_wildcards("ab?", "abc"));
  EXPECT_TRUE(match_wildcards("ab?", "abc", MATCH_CASE_INSENSITIVE));
  EXPECT_TRUE(match_wildcards("a?c?e", "abcde"));
  EXPECT_TRUE(match_wildcards("a?c?e", "abcde", MATCH_CASE_INSENSITIVE));
  EXPECT_TRUE(match_wildcards("???", "abc"));
  EXPECT_TRUE(match_wildcards("???", "abc", MATCH_CASE_INSENSITIVE));
  EXPECT_FALSE(match_wildcards("???", "abcd"));
  EXPECT_FALSE(match_wildcards("???", "abcd", MATCH_CASE_INSENSITIVE));
}
// '*' matches zero or more characters, but (per the final cases) the
// implementation's expansion is non-greedy.
TEST(MatchWildcards, Asterisk)
{
  EXPECT_TRUE(match_wildcards("*", ""));
  EXPECT_TRUE(match_wildcards("*", "", MATCH_CASE_INSENSITIVE));
  EXPECT_FALSE(match_wildcards("", "*"));
  EXPECT_FALSE(match_wildcards("", "*", MATCH_CASE_INSENSITIVE));
  EXPECT_FALSE(match_wildcards("*a", ""));
  EXPECT_FALSE(match_wildcards("*a", "", MATCH_CASE_INSENSITIVE));
  EXPECT_TRUE(match_wildcards("*a", "a"));
  EXPECT_TRUE(match_wildcards("*a", "a", MATCH_CASE_INSENSITIVE));
  EXPECT_TRUE(match_wildcards("a*", "a"));
  EXPECT_TRUE(match_wildcards("a*", "a", MATCH_CASE_INSENSITIVE));
  EXPECT_TRUE(match_wildcards("a*c", "ac"));
  EXPECT_TRUE(match_wildcards("a*c", "ac", MATCH_CASE_INSENSITIVE));
  EXPECT_TRUE(match_wildcards("a*c", "abbc"));
  EXPECT_TRUE(match_wildcards("a*c", "abbc", MATCH_CASE_INSENSITIVE));
  EXPECT_FALSE(match_wildcards("a*c", "abbC"));
  EXPECT_TRUE(match_wildcards("a*c", "abbC", MATCH_CASE_INSENSITIVE));
  EXPECT_TRUE(match_wildcards("a*c*e", "abBce"));
  EXPECT_TRUE(match_wildcards("a*c*e", "abBce", MATCH_CASE_INSENSITIVE));
  EXPECT_TRUE(match_wildcards("http://*.example.com",
			      "http://www.example.com"));
  EXPECT_TRUE(match_wildcards("http://*.example.com",
			      "http://www.example.com", MATCH_CASE_INSENSITIVE));
  EXPECT_FALSE(match_wildcards("http://*.example.com",
			       "http://www.Example.com"));
  EXPECT_TRUE(match_wildcards("http://*.example.com",
			      "http://www.Example.com", MATCH_CASE_INSENSITIVE));
  EXPECT_TRUE(match_wildcards("http://example.com/*",
			      "http://example.com/index.html"));
  EXPECT_TRUE(match_wildcards("http://example.com/*/*.jpg",
			      "http://example.com/fun/smiley.jpg"));
  // note: parsing of * is not greedy, so * does not match 'bc' here
  EXPECT_FALSE(match_wildcards("a*c", "abcc"));
  EXPECT_FALSE(match_wildcards("a*c", "abcc", MATCH_CASE_INSENSITIVE));
}
// Action matching: case-insensitive, '*' confined to a single
// colon-separated segment.
TEST(MatchPolicy, Action)
{
  constexpr auto flag = MATCH_POLICY_ACTION;
  EXPECT_TRUE(match_policy("a:b:c", "a:b:c", flag));
  EXPECT_TRUE(match_policy("a:b:c", "A:B:C", flag)); // case insensitive
  EXPECT_TRUE(match_policy("a:*:e", "a:bcd:e", flag));
  EXPECT_FALSE(match_policy("a:*", "a:b:c", flag)); // cannot span segments
}
// Resource matching: case-sensitive, '*' may cross segment boundaries.
TEST(MatchPolicy, Resource)
{
  constexpr auto flag = MATCH_POLICY_RESOURCE;
  EXPECT_TRUE(match_policy("a:b:c", "a:b:c", flag));
  EXPECT_FALSE(match_policy("a:b:c", "A:B:C", flag)); // case sensitive
  EXPECT_TRUE(match_policy("a:*:e", "a:bcd:e", flag));
  EXPECT_TRUE(match_policy("a:*", "a:b:c", flag)); // can span segments
}
// ARN matching: case-insensitive, '*' confined to one segment.
TEST(MatchPolicy, ARN)
{
  constexpr auto flag = MATCH_POLICY_ARN;
  EXPECT_TRUE(match_policy("a:b:c", "a:b:c", flag));
  EXPECT_TRUE(match_policy("a:b:c", "A:B:C", flag)); // case insensitive
  EXPECT_TRUE(match_policy("a:*:e", "a:bcd:e", flag));
  EXPECT_FALSE(match_policy("a:*", "a:b:c", flag)); // cannot span segments
}
// Generic string matching: case-sensitive, '*' may cross segments.
TEST(MatchPolicy, String)
{
  constexpr auto flag = MATCH_POLICY_STRING;
  EXPECT_TRUE(match_policy("a:b:c", "a:b:c", flag));
  EXPECT_FALSE(match_policy("a:b:c", "A:B:C", flag)); // case sensitive
  EXPECT_TRUE(match_policy("a:*:e", "a:bcd:e", flag));
  EXPECT_TRUE(match_policy("a:*", "a:b:c", flag)); // can span segments
}
// Build an Action_t bitset with every bit in the half-open range
// [start, end) set; all other bits remain clear.
Action_t set_range_bits(std::uint64_t start, std::uint64_t end)
{
  Action_t bits;
  for (auto pos = start; pos != end; ++pos)
    bits.set(pos);
  return bits;
}
using rgw::IAM::s3AllValue;
using rgw::IAM::stsAllValue;
using rgw::IAM::allValue;
using rgw::IAM::iamAllValue;
// The precomputed *AllValue masks must equal contiguous bit ranges:
// s3 actions occupy [0, s3All), IAM (s3All, iamAll], STS (iamAll, stsAll],
// and allValue covers every defined action.
TEST(set_cont_bits, iamconsts)
{
  EXPECT_EQ(s3AllValue, set_range_bits(0, s3All));
  EXPECT_EQ(iamAllValue, set_range_bits(s3All+1, iamAll));
  EXPECT_EQ(stsAllValue, set_range_bits(iamAll+1, stsAll));
  EXPECT_EQ(allValue , set_range_bits(0, allCount));
}
// ===== end of test_rgw_iam_policy.cc =====
// ceph-main/src/test/rgw/test_rgw_kms.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <gtest/gtest.h>
#include <gmock/gmock.h>
#include "common/ceph_context.h"
#include "rgw_common.h"
#define FORTEST_VIRTUAL virtual
#include "rgw_kms.cc"
using ::testing::_;
using ::testing::Action;
using ::testing::ActionInterface;
using ::testing::MakeAction;
using ::testing::StrEq;
// Transit engine with the Vault HTTP round-trip (send_request) mocked out,
// so tests can feed canned JSON responses.
class MockTransitSecretEngine : public TransitSecretEngine {

public:
  MockTransitSecretEngine(CephContext *cct, SSEContext & kctx, EngineParmMap parms) : TransitSecretEngine(cct, kctx, parms){}

  MOCK_METHOD(int, send_request, (const DoutPrefixProvider *dpp, const char *method, std::string_view infix, std::string_view key_id, const std::string& postdata, bufferlist &bl), (override));

};
// KV engine with send_request mocked out, mirroring MockTransitSecretEngine.
class MockKvSecretEngine : public KvSecretEngine {

public:
  MockKvSecretEngine(CephContext *cct, SSEContext & kctx, EngineParmMap parms) : KvSecretEngine(cct, kctx, parms){}

  MOCK_METHOD(int, send_request, (const DoutPrefixProvider *dpp, const char *method, std::string_view infix, std::string_view key_id, const std::string& postdata, bufferlist &bl), (override));

};
class TestSSEKMS : public ::testing::Test {
protected:
CephContext *cct;
MockTransitSecretEngine* old_engine;
MockKvSecretEngine* kv_engine;
MockTransitSecretEngine* transit_engine;
void SetUp() override {
EngineParmMap old_parms, kv_parms, new_parms;
cct = (new CephContext(CEPH_ENTITY_TYPE_ANY))->get();
KMSContext kctx { cct };
old_parms["compat"] = "2";
old_engine = new MockTransitSecretEngine(cct, kctx, std::move(old_parms));
kv_engine = new MockKvSecretEngine(cct, kctx, std::move(kv_parms));
new_parms["compat"] = "1";
transit_engine = new MockTransitSecretEngine(cct, kctx, std::move(new_parms));
}
void TearDown() {
delete old_engine;
delete kv_engine;
delete transit_engine;
}
};
// With token auth selected but no token file configured, both engines
// must reject the request with -EINVAL.
TEST_F(TestSSEKMS, vault_token_file_unset)
{
  cct->_conf.set_val("rgw_crypt_vault_auth", "token");
  EngineParmMap old_parms, kv_parms;
  KMSContext kctx { cct };
  TransitSecretEngine te(cct, kctx, std::move(old_parms));
  KvSecretEngine kv(cct, kctx, std::move(kv_parms));
  const NoDoutPrefix no_dpp(cct, 1);

  std::string_view key_id("my_key");
  std::string actual_key;

  ASSERT_EQ(te.get_key(&no_dpp, key_id, actual_key), -EINVAL);
  ASSERT_EQ(kv.get_key(&no_dpp, key_id, actual_key), -EINVAL);
}
// A configured but missing token file must surface as -ENOENT from both
// engines.
TEST_F(TestSSEKMS, non_existent_vault_token_file)
{
  cct->_conf.set_val("rgw_crypt_vault_auth", "token");
  cct->_conf.set_val("rgw_crypt_vault_token_file", "/nonexistent/file");
  EngineParmMap old_parms, kv_parms;
  KMSContext kctx { cct };
  TransitSecretEngine te(cct, kctx, std::move(old_parms));
  KvSecretEngine kv(cct, kctx, std::move(kv_parms));
  const NoDoutPrefix no_dpp(cct, 1);

  std::string_view key_id("my_key/1");
  std::string actual_key;

  ASSERT_EQ(te.get_key(&no_dpp, key_id, actual_key), -ENOENT);
  ASSERT_EQ(kv.get_key(&no_dpp, key_id, actual_key), -ENOENT);
}
// Signature of SecretEngine::send_request, used to type the gMock action
// below. (Modernized from `typedef` to a `using` alias; same type, same
// name, purely idiomatic.)
using SendRequestMethod = int(const DoutPrefixProvider *dpp, const char *,
                              std::string_view, std::string_view,
                              const std::string &, bufferlist &);
// gMock action that fills the bufferlist output argument (arg 5) of
// send_request() with a canned JSON payload, simulating a Vault response.
class SetPointedValueAction : public ActionInterface<SendRequestMethod> {
 public:
  std::string json;

  // explicit + member-init-list with move: the original default-constructed
  // `json` and then copy-assigned the by-value parameter in the body.
  explicit SetPointedValueAction(std::string json) : json(std::move(json)) {}

  int Perform(const ::std::tuple<const DoutPrefixProvider*, const char *, std::string_view, std::string_view, const std::string &, bufferlist &>& args) override {
    // Only the output bufferlist (arg 5) matters; the request arguments
    // are matched by EXPECT_CALL in the individual tests.
    bufferlist& bl = ::std::get<5>(args);
    bl.append(json);
    // note: in the bufferlist, the string is not
    // necessarily 0 terminated at this point. Logic in
    // rgw_kms.cc must handle this (by appending a 0.)
    return 0;
  }
};
// Convenience factory: wrap a canned JSON reply in a gMock Action usable
// with WillOnce()/WillRepeatedly(). MakeAction takes ownership of the
// raw pointer.
Action<SendRequestMethod> SetPointedValue(std::string json) {
  return MakeAction(new SetPointedValueAction(json));
}
// Compat-mode transit keys must end in "<name>/<numeric version>"; every
// malformed variant is rejected with -EINVAL, and a deep path whose last
// segment is numeric is accepted.
TEST_F(TestSSEKMS, test_transit_key_version_extraction){
  const NoDoutPrefix no_dpp(cct, 1);
  string json = R"({"data": {"keys": {"6": "8qgPWvdtf6zrriS5+nkOzDJ14IGVR6Bgkub5dJn6qeg="}}})";
  EXPECT_CALL(*old_engine, send_request(&no_dpp, StrEq("GET"), StrEq(""), StrEq("1/2/3/4/5/6"), StrEq(""), _)).WillOnce(SetPointedValue(json));

  std::string actual_key;
  std::string tests[11] {"/", "my_key/", "my_key", "", "my_key/a", "my_key/1a",
    "my_key/a1", "my_key/1a1", "my_key/1/a", "1", "my_key/1/"
  };
  int res;
  for (const auto &test: tests) {
    res = old_engine->get_key(&no_dpp, std::string_view(test), actual_key);
    ASSERT_EQ(res, -EINVAL);
  }

  res = old_engine->get_key(&no_dpp, std::string_view("1/2/3/4/5/6"), actual_key);
  ASSERT_EQ(res, 0);
  ASSERT_EQ(actual_key, from_base64("8qgPWvdtf6zrriS5+nkOzDJ14IGVR6Bgkub5dJn6qeg="));
}
// Compat-mode transit get_key: the mocked export response's key material
// must be base64-decoded into actual_key.
TEST_F(TestSSEKMS, test_transit_backend){
  std::string_view my_key("my_key/1");
  std::string actual_key;

  // Mocks the expected return Value from Vault Server using custom Argument Action
  string json = R"({"data": {"keys": {"1": "8qgPWvdtf6zrriS5+nkOzDJ14IGVR6Bgkub5dJn6qeg="}}})";
  const NoDoutPrefix no_dpp(cct, 1);
  EXPECT_CALL(*old_engine, send_request(&no_dpp, StrEq("GET"), StrEq(""), StrEq("my_key/1"), StrEq(""), _)).WillOnce(SetPointedValue(json));

  int res = old_engine->get_key(&no_dpp, my_key, actual_key);

  ASSERT_EQ(res, 0);
  ASSERT_EQ(actual_key, from_base64("8qgPWvdtf6zrriS5+nkOzDJ14IGVR6Bgkub5dJn6qeg="));
}
// make_actual_key via the transit datakey endpoint: plaintext becomes the
// actual key, and the returned ciphertext is stashed in the
// RGW_ATTR_CRYPT_DATAKEY attribute for later decryption.
TEST_F(TestSSEKMS, test_transit_makekey){
  std::string_view my_key("my_key");
  std::string actual_key;
  map<string, bufferlist> attrs;
  const NoDoutPrefix no_dpp(cct, 1);

  // Mocks the expected return Value from Vault Server using custom Argument Action
  string post_json = R"({"data": {"ciphertext": "vault:v2:HbdxLnUztGVo+RseCIaYVn/4wEUiJNT6GQfw57KXQmhXVe7i1/kgLWegEPg1I6lexhIuXAM6Q2YvY0aZ","key_version": 1,"plaintext": "3xfTra/dsIf3TMa3mAT2IxPpM7YWm/NvUb4gDfSDX4g="}})";
  EXPECT_CALL(*transit_engine, send_request(&no_dpp, StrEq("POST"), StrEq("/datakey/plaintext/"), StrEq("my_key"), _, _))
    .WillOnce(SetPointedValue(post_json));

  set_attr(attrs, RGW_ATTR_CRYPT_CONTEXT, R"({"aws:s3:arn": "fred"})");
  set_attr(attrs, RGW_ATTR_CRYPT_KEYID, my_key);

  int res = transit_engine->make_actual_key(&no_dpp, attrs, actual_key);
  std::string cipher_text { get_str_attribute(attrs,RGW_ATTR_CRYPT_DATAKEY) };

  ASSERT_EQ(res, 0);
  ASSERT_EQ(actual_key, from_base64("3xfTra/dsIf3TMa3mAT2IxPpM7YWm/NvUb4gDfSDX4g="));
  ASSERT_EQ(cipher_text, "vault:v2:HbdxLnUztGVo+RseCIaYVn/4wEUiJNT6GQfw57KXQmhXVe7i1/kgLWegEPg1I6lexhIuXAM6Q2YvY0aZ");
}
// reconstitute_actual_key: the stored ciphertext attribute is sent to the
// transit /decrypt endpoint and the returned plaintext becomes the key.
TEST_F(TestSSEKMS, test_transit_reconstitutekey){
  std::string_view my_key("my_key");
  std::string actual_key;
  map<string, bufferlist> attrs;
  const NoDoutPrefix no_dpp(cct, 1);

  // Mocks the expected return Value from Vault Server using custom Argument Action
  set_attr(attrs, RGW_ATTR_CRYPT_DATAKEY, "vault:v2:HbdxLnUztGVo+RseCIaYVn/4wEUiJNT6GQfw57KXQmhXVe7i1/kgLWegEPg1I6lexhIuXAM6Q2YvY0aZ");
  string post_json = R"({"data": {"key_version": 1,"plaintext": "3xfTra/dsIf3TMa3mAT2IxPpM7YWm/NvUb4gDfSDX4g="}})";
  EXPECT_CALL(*transit_engine, send_request(&no_dpp, StrEq("POST"), StrEq("/decrypt/"), StrEq("my_key"), _, _))
    .WillOnce(SetPointedValue(post_json));

  set_attr(attrs, RGW_ATTR_CRYPT_CONTEXT, R"({"aws:s3:arn": "fred"})");
  set_attr(attrs, RGW_ATTR_CRYPT_KEYID, my_key);

  int res = transit_engine->reconstitute_actual_key(&no_dpp, attrs, actual_key);

  ASSERT_EQ(res, 0);
  ASSERT_EQ(actual_key, from_base64("3xfTra/dsIf3TMa3mAT2IxPpM7YWm/NvUb4gDfSDX4g="));
}
// Verifies that the KV secrets-engine backend extracts the key from the
// nested "data.data.key" field of a Vault KV-v2 GET response.
TEST_F(TestSSEKMS, test_kv_backend){
  std::string_view my_key("my_key");
  std::string actual_key;
  const NoDoutPrefix no_dpp(cct, 1);
  // Mocks the expected return value from Vault Server using custom Argument Action
  string json = R"({"data": {"data": {"key": "8qgPWvdtf6zrriS5+nkOzDJ14IGVR6Bgkub5dJn6qeg="}}})";
  EXPECT_CALL(*kv_engine, send_request(&no_dpp, StrEq("GET"), StrEq(""), StrEq("my_key"), StrEq(""), _))
       .WillOnce(SetPointedValue(json));
  int res = kv_engine->get_key(&no_dpp, my_key, actual_key);
  ASSERT_EQ(res, 0);
  ASSERT_EQ(actual_key, from_base64("8qgPWvdtf6zrriS5+nkOzDJ14IGVR6Bgkub5dJn6qeg="));
}
// Table-driven check of concat_url(): joining a base URL and a path must
// produce exactly one '/' separator regardless of trailing/leading slashes.
TEST_F(TestSSEKMS, concat_url)
{
  // Each case holds: base URL, path to append, expected joined URL.
  struct Case { const char* base; const char* path; const char* joined; };
  const Case cases[] = {
    {"", "", ""},
    {"", "bar", "/bar"},
    {"", "/bar", "/bar"},
    {"foo", "", "foo"},
    {"foo", "bar", "foo/bar"},
    {"foo", "/bar", "foo/bar"},
    {"foo/", "", "foo/"},
    {"foo/", "bar", "foo/bar"},
    {"foo/", "/bar", "foo/bar"},
  };
  for (const auto& c : cases) {
    std::string url{c.base};
    concat_url(url, std::string{c.path});
    ASSERT_EQ(url, c.joined);
  }
}
// Table-driven check of string_ends_maybe_slash(): the haystack must end
// with the needle, optionally followed by one or more trailing slashes.
TEST_F(TestSSEKMS, string_ends_maybe_slash)
{
  struct Case { std::string hay, needle; bool expected; };
  const Case cases[] = {
    {"jack here", "fred", false},
    {"here is a fred", "fred", true},
    {"and a fred/", "fred", true},
    {"no fred here", "fred", false},
    {"double fred//", "fred", true},
  };
  for (const auto& c : cases) {
    const bool actual = string_ends_maybe_slash(c.hay, c.needle);
    ASSERT_EQ(actual, c.expected);
  }
}
// Verifies that a Vault error response (no "data" payload) makes the old
// transit engine fail with -EINVAL and leave the key empty.
TEST_F(TestSSEKMS, test_transit_backend_empty_response)
{
  std::string_view my_key("/key/nonexistent/1");
  std::string actual_key;
  const NoDoutPrefix no_dpp(cct, 1);
  // Mocks the expected return Value from Vault Server using custom Argument Action
  string json = R"({"errors": ["version does not exist or cannot be found"]})";
  EXPECT_CALL(*old_engine, send_request(&no_dpp, StrEq("GET"), StrEq(""), StrEq("/key/nonexistent/1"), StrEq(""), _)).WillOnce(SetPointedValue(json));
  int res = old_engine->get_key(&no_dpp, my_key, actual_key);
  ASSERT_EQ(res, -EINVAL);
  ASSERT_EQ(actual_key, from_base64(""));
}
| 10,248 | 33.742373 | 221 | cc |
null | ceph-main/src/test/rgw/test_rgw_lc.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "rgw_xml.h"
#include "rgw_lc.h"
#include "rgw_lc_s3.h"
#include <gtest/gtest.h>
//#include <spawn/spawn.hpp>
#include <string>
#include <vector>
#include <stdexcept>
// Lifecycle <Filter> with an <And> combining a Prefix and two Tag elements;
// exercises repeated-Tag decoding (raw string is part of test input — do not reformat).
static const char* xmldoc_1 =
R"(<Filter>
   <And>
      <Prefix>tax/</Prefix>
      <Tag>
         <Key>key1</Key>
         <Value>value1</Value>
      </Tag>
      <Tag>
         <Key>key2</Key>
         <Value>value2</Value>
      </Tag>
   </And>
</Filter>
)";
// Decodes xmldoc_1 and verifies both Tag entries are present and no flags are set.
TEST(TestLCFilterDecoder, XMLDoc1)
{
  RGWXMLDecoder::XMLParser parser;
  ASSERT_TRUE(parser.init());
  ASSERT_TRUE(parser.parse(xmldoc_1, strlen(xmldoc_1), 1));
  LCFilter_S3 filter;
  auto result = RGWXMLDecoder::decode_xml("Filter", filter, &parser, true);
  ASSERT_TRUE(result);
  /* check repeated Tag element; assert each lookup succeeded before
   * dereferencing the iterator (dereferencing end() is UB) */
  auto tag_map = filter.get_tags().get_tags();
  auto val1 = tag_map.find("key1");
  ASSERT_TRUE(val1 != tag_map.end());
  ASSERT_EQ(val1->second, "value1");
  auto val2 = tag_map.find("key2");
  ASSERT_TRUE(val2 != tag_map.end());
  ASSERT_EQ(val2->second, "value2");
  /* check our flags */
  ASSERT_EQ(filter.get_flags(), 0);
}
// Lifecycle <Filter> combining the ArchiveZone flag element with a Tag
// (raw string is part of test input — do not reformat).
static const char* xmldoc_2 =
R"(<Filter>
   <And>
      <ArchiveZone />
      <Tag>
         <Key>spongebob</Key>
         <Value>squarepants</Value>
      </Tag>
   </And>
</Filter>
)";
// Decodes xmldoc_2 and verifies the Tag plus the ArchiveZone flag.
TEST(TestLCFilterDecoder, XMLDoc2)
{
  RGWXMLDecoder::XMLParser parser;
  ASSERT_TRUE(parser.init());
  ASSERT_TRUE(parser.parse(xmldoc_2, strlen(xmldoc_2), 1));
  LCFilter_S3 filter;
  auto result = RGWXMLDecoder::decode_xml("Filter", filter, &parser, true);
  ASSERT_TRUE(result);
  /* check tags; assert the lookup succeeded before dereferencing
   * the iterator (dereferencing end() is UB) */
  auto tag_map = filter.get_tags().get_tags();
  auto val1 = tag_map.find("spongebob");
  ASSERT_TRUE(val1 != tag_map.end());
  ASSERT_EQ(val1->second, "squarepants");
  /* check our flags */
  ASSERT_EQ(filter.get_flags(), LCFilter::make_flag(LCFlagType::ArchiveZone));
}
// invalid And element placement: the second <Tag> sits outside the <And>
// block, so the decoder must ignore it (raw string is part of test input).
static const char* xmldoc_3 =
R"(<Filter>
   <And>
      <Tag>
         <Key>miles</Key>
         <Value>davis</Value>
      </Tag>
   </And>
      <Tag>
         <Key>spongebob</Key>
         <Value>squarepants</Value>
      </Tag>
</Filter>
)";
// Verifies the misplaced Tag from xmldoc_3 is NOT decoded into the tag map
// and no flag bits are set.
TEST(TestLCFilterInvalidAnd, XMLDoc3)
{
  RGWXMLDecoder::XMLParser parser;
  ASSERT_TRUE(parser.init());
  ASSERT_TRUE(parser.parse(xmldoc_3, strlen(xmldoc_3), 1));
  LCFilter_S3 filter;
  auto result = RGWXMLDecoder::decode_xml("Filter", filter, &parser, true);
  ASSERT_TRUE(result);
  /* check repeated Tag element */
  auto tag_map = filter.get_tags().get_tags();
  auto val1 = tag_map.find("spongebob");
  ASSERT_TRUE(val1 == tag_map.end());
  /* because the invalid 2nd tag element was not recognized,
   * we cannot access it:
  ASSERT_EQ(val1->second, "squarepants");
  */
  /* check our flags */
  ASSERT_EQ(filter.get_flags(), uint32_t(LCFlagType::none));
}
| 2,795 | 24.418182 | 78 | cc |
null | ceph-main/src/test/rgw/test_rgw_lua.cc | #include <gtest/gtest.h>
#include "common/ceph_context.h"
#include "rgw_common.h"
#include "rgw_auth_registry.h"
#include "rgw_process_env.h"
#include "rgw_sal_rados.h"
#include "rgw_lua_request.h"
#include "rgw_lua_background.h"
#include "rgw_lua_data_filter.h"
using namespace std;
using namespace rgw;
using boost::container::flat_set;
using rgw::auth::Identity;
using rgw::auth::Principal;
// RAII helper that releases the borrowed CephContext on scope exit;
// seastar builds own the pointer outright, classic builds use refcounting.
class CctCleaner {
  CephContext* cct;
public:
  CctCleaner(CephContext* _cct) : cct(_cct) {}
  ~CctCleaner() {
#ifdef WITH_SEASTAR
    delete cct;
#else
    cct->put();
#endif
  }
};
// Minimal no-op auth identity stub: every permission/ownership query returns
// the "deny/empty" value; used where a req_state needs a non-null identity.
class FakeIdentity : public Identity {
public:
  FakeIdentity() = default;
  uint32_t get_perms_from_aclspec(const DoutPrefixProvider* dpp, const aclspec_t& aclspec) const override {
    return 0;
  };
  bool is_admin_of(const rgw_user& uid) const override {
    return false;
  }
  bool is_owner_of(const rgw_user& uid) const override {
    return false;
  }
  virtual uint32_t get_perm_mask() const override {
    return 0;
  }
  uint32_t get_identity_type() const override {
    return TYPE_RGW;
  }
  string get_acct_name() const override {
    return "";
  }
  string get_subuser() const override {
    return "";
  }
  void to_str(std::ostream& out) const override {
    return;
  }
  bool is_identity(const flat_set<Principal>& ids) const override {
    return false;
  }
};
// Stub sal::User whose operations all succeed without touching a store;
// lets request-execution tests run without a real RADOS backend.
class TestUser : public sal::StoreUser {
public:
  virtual std::unique_ptr<User> clone() override {
    return std::unique_ptr<User>(new TestUser(*this));
  }
  virtual int list_buckets(const DoutPrefixProvider *dpp, const string&, const string&, uint64_t, bool, sal::BucketList&, optional_yield y) override {
    return 0;
  }
  virtual int create_bucket(const DoutPrefixProvider* dpp, const rgw_bucket& b, const std::string& zonegroup_id, rgw_placement_rule& placement_rule, std::string& swift_ver_location, const RGWQuotaInfo* pquota_info, const RGWAccessControlPolicy& policy, sal::Attrs& attrs, RGWBucketInfo& info, obj_version& ep_objv, bool exclusive, bool obj_lock_enabled, bool* existed, req_info& req_info, std::unique_ptr<sal::Bucket>* bucket, optional_yield y) override {
    return 0;
  }
  virtual int read_attrs(const DoutPrefixProvider *dpp, optional_yield y) override {
    return 0;
  }
  virtual int read_stats(const DoutPrefixProvider *dpp, optional_yield y, RGWStorageStats* stats, ceph::real_time *last_stats_sync, ceph::real_time *last_stats_update) override {
    return 0;
  }
  virtual int read_stats_async(const DoutPrefixProvider *dpp, RGWGetUserStats_CB *cb) override {
    return 0;
  }
  virtual int complete_flush_stats(const DoutPrefixProvider *dpp, optional_yield y) override {
    return 0;
  }
  virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, bool *is_truncated, RGWUsageIter& usage_iter, map<rgw_user_bucket, rgw_usage_log_entry>& usage) override {
    return 0;
  }
  virtual int trim_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, optional_yield y) override {
    return 0;
  }
  virtual int load_user(const DoutPrefixProvider *dpp, optional_yield y) override {
    return 0;
  }
  virtual int store_user(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, RGWUserInfo* old_info) override {
    return 0;
  }
  virtual int remove_user(const DoutPrefixProvider* dpp, optional_yield y) override {
    return 0;
  }
  virtual int merge_and_store_attrs(const DoutPrefixProvider *dpp, rgw::sal::Attrs& attrs, optional_yield y) override {
    return 0;
  }
  virtual int verify_mfa(const std::string& mfa_str, bool* verified, const DoutPrefixProvider* dpp, optional_yield y) override {
    return 0;
  }
  virtual ~TestUser() = default;
};
// Stub client/accounter: reports zero bytes transferred and hands out an
// empty RGWEnv; used as req_state::cio so ops-log code has a client object.
class TestAccounter : public io::Accounter, public io::BasicClient {
  RGWEnv env;
protected:
  virtual int init_env(CephContext *cct) override {
    return 0;
  }
public:
  ~TestAccounter() = default;
  virtual void set_account(bool enabled) override {
  }
  virtual uint64_t get_bytes_sent() const override {
    return 0;
  }
  virtual uint64_t get_bytes_received() const override {
    return 0;
  }
  virtual RGWEnv& get_env() noexcept override {
    return env;
  }
  virtual size_t complete_request() override {
    return 0;
  }
};
// Shared CephContext for all tests; CctCleaner releases it at process exit.
auto g_cct = new CephContext(CEPH_ENTITY_TYPE_CLIENT);
CctCleaner cleaner(g_cct);
tracing::Tracer tracer;
// Declares a fresh process env, RGWEnv and req_state named pe/e/s in the enclosing scope.
#define DEFINE_REQ_STATE RGWProcessEnv pe; RGWEnv e; req_state s(g_cct, pe, &e, 0);
// Starts a tracer and attaches a live trace to the req_state `s`.
#define INIT_TRACE tracer.init("test"); \
                   s.trace = tracer.start_trace("test", true);
// An empty Lua script must execute successfully (no-op).
TEST(TestRGWLua, EmptyScript)
{
  const std::string script;
  DEFINE_REQ_STATE;
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_EQ(rc, 0);
}
// A Lua syntax error (missing 'end') must make execute() return -1.
TEST(TestRGWLua, SyntaxError)
{
  const std::string script = R"(
    if 3 < 5 then
      RGWDebugLog("missing 'end'")
  )";
  DEFINE_REQ_STATE;
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_EQ(rc, -1);
}
// RGWDebugLog with a string argument must succeed.
TEST(TestRGWLua, Hello)
{
  const std::string script = R"(
    RGWDebugLog("hello from lua")
  )";
  DEFINE_REQ_STATE;
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_EQ(rc, 0);
}
// RGWDebugLog with a numeric argument must succeed (implicit tostring).
TEST(TestRGWLua, RGWDebugLogNumber)
{
  const std::string script = R"(
    RGWDebugLog(1234567890)
  )";
  DEFINE_REQ_STATE;
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_EQ(rc, 0);
}
// RGWDebugLog with nil must fail (-1): nil is not a loggable value.
TEST(TestRGWLua, RGWDebugNil)
{
  const std::string script = R"(
    RGWDebugLog(nil)
  )";
  DEFINE_REQ_STATE;
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_EQ(rc, -1);
}
// Request.DecodedURI must expose req_state::decoded_uri to the script.
TEST(TestRGWLua, URI)
{
  const std::string script = R"(
    RGWDebugLog(Request.DecodedURI)
    assert(Request.DecodedURI == "http://hello.world/")
  )";
  DEFINE_REQ_STATE;
  s.decoded_uri = "http://hello.world/";
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_EQ(rc, 0);
}
// Request.Response fields must mirror req_state::err (message, code, status).
TEST(TestRGWLua, Response)
{
  const std::string script = R"(
    assert(Request.Response.Message == "This is a bad request")
    assert(Request.Response.HTTPStatus == "Bad Request")
    assert(Request.Response.RGWCode == 4000)
    assert(Request.Response.HTTPStatusCode == 400)
  )";
  DEFINE_REQ_STATE;
  s.err.http_ret = 400;
  s.err.ret = 4000;
  s.err.err_code = "Bad Request";
  s.err.message = "This is a bad request";
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_EQ(rc, 0);
}
// Scripts may overwrite Request.Response.Message (writable field).
TEST(TestRGWLua, SetResponse)
{
  const std::string script = R"(
    assert(Request.Response.Message == "this is a bad request")
    Request.Response.Message = "this is a good request"
    assert(Request.Response.Message == "this is a good request")
  )";
  DEFINE_REQ_STATE;
  s.err.message = "this is a bad request";
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_EQ(rc, 0);
}
// Request.RGWId is read-only: assigning to it must fail execution.
TEST(TestRGWLua, RGWIdNotWriteable)
{
  const std::string script = R"(
    assert(Request.RGWId == "foo")
    Request.RGWId = "bar"
  )";
  DEFINE_REQ_STATE;
  s.host_id = "foo";
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_NE(rc, 0);
}
// Accessing an unknown top-level Request field must fail (-1).
TEST(TestRGWLua, InvalidField)
{
  const std::string script = R"(
    RGWDebugLog(Request.Kaboom)
  )";
  DEFINE_REQ_STATE;
  s.host_id = "foo";
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_EQ(rc, -1);
}
// Accessing an unknown nested field (Request.Error.Kaboom) must fail (-1).
TEST(TestRGWLua, InvalidSubField)
{
  const std::string script = R"(
    RGWDebugLog(Request.Error.Kaboom)
  )";
  DEFINE_REQ_STATE;
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_EQ(rc, -1);
}
// Request.Bucket must expose identity, stats, quota and owner sub-fields
// of a RadosBucket populated from an rgw_bucket.
TEST(TestRGWLua, Bucket)
{
  const std::string script = R"(
    assert(Request.Bucket)
    RGWDebugLog("Bucket Id: " .. Request.Bucket.Id)
    assert(Request.Bucket.Marker == "mymarker")
    assert(Request.Bucket.Name == "myname")
    assert(Request.Bucket.Tenant == "mytenant")
    assert(Request.Bucket.Count == 0)
    assert(Request.Bucket.Size == 0)
    assert(Request.Bucket.ZoneGroupId)
    assert(Request.Bucket.CreationTime)
    assert(Request.Bucket.MTime)
    assert(Request.Bucket.Quota.MaxSize == -1)
    assert(Request.Bucket.Quota.MaxObjects == -1)
    assert(tostring(Request.Bucket.Quota.Enabled))
    assert(tostring(Request.Bucket.Quota.Rounded))
    assert(Request.Bucket.User.Id)
    assert(Request.Bucket.User.Tenant)
  )";
  DEFINE_REQ_STATE;
  rgw_bucket b;
  b.tenant = "mytenant";
  b.name = "myname";
  b.marker = "mymarker";
  b.bucket_id = "myid";
  s.bucket.reset(new sal::RadosBucket(nullptr, b));
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_EQ(rc, 0);
}
// Before the bucket object exists (init phase), Request.Bucket.Name is
// writable and the change lands in init_state.url_bucket.
TEST(TestRGWLua, WriteBucket)
{
  const std::string script = R"(
    assert(Request.Bucket)
    assert(Request.Bucket.Name == "myname")
    Request.Bucket.Name = "othername"
  )";
  DEFINE_REQ_STATE;
  s.init_state.url_bucket = "myname";
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_EQ(rc, 0);
  ASSERT_EQ(s.init_state.url_bucket, "othername");
}
// Once a concrete bucket object is set on the req_state, Bucket.Name
// becomes read-only and assignment must fail.
TEST(TestRGWLua, WriteBucketFail)
{
  const std::string script = R"(
    assert(Request.Bucket)
    assert(Request.Bucket.Name == "myname")
    Request.Bucket.Name = "othername"
  )";
  DEFINE_REQ_STATE;
  rgw_bucket b;
  b.name = "myname";
  s.bucket.reset(new sal::RadosBucket(nullptr, b));
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_NE(rc, 0);
}
// Request.GenericAttributes: indexed lookup, nil for missing keys,
// length operator and pairs() iteration over req_state::generic_attrs.
TEST(TestRGWLua, GenericAttributes)
{
  const std::string script = R"(
    assert(Request.GenericAttributes["hello"] == "world")
    assert(Request.GenericAttributes["foo"] == "bar")
    assert(Request.GenericAttributes["kaboom"] == nil)
    assert(#Request.GenericAttributes == 4)
    for k, v in pairs(Request.GenericAttributes) do
      assert(k)
      assert(v)
    end
  )";
  DEFINE_REQ_STATE;
  s.generic_attrs["hello"] = "world";
  s.generic_attrs["foo"] = "bar";
  s.generic_attrs["goodbye"] = "cruel world";
  s.generic_attrs["ka"] = "boom";
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_EQ(rc, 0);
}
// Request.Environment mirrors req_state::env, including an empty-string key.
TEST(TestRGWLua, Environment)
{
  const std::string script = R"(
  assert(Request.Environment[""] == "bar")
  assert(Request.Environment["goodbye"] == "cruel world")
  assert(Request.Environment["ka"] == "boom")
  assert(#Request.Environment == 3, #Request.Environment)
  for k, v in pairs(Request.Environment) do
    assert(k)
    assert(v)
  end
  )";
  DEFINE_REQ_STATE;
  s.env.emplace("", "bar");
  s.env.emplace("goodbye", "cruel world");
  s.env.emplace("ka", "boom");
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_EQ(rc, 0);
}
// Request.Tags exposes the request tag set (length, lookup, iteration).
TEST(TestRGWLua, Tags)
{
  const std::string script = R"(
    assert(#Request.Tags == 4)
    assert(Request.Tags["foo"] == "bar")
    for k, v in pairs(Request.Tags) do
      assert(k)
      assert(v)
    end
  )";
  DEFINE_REQ_STATE;
  s.tagset.add_tag("hello", "world");
  s.tagset.add_tag("foo", "bar");
  s.tagset.add_tag("goodbye", "cruel world");
  s.tagset.add_tag("ka", "boom");
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_EQ(rc, 0);
}
// Request.Tags is read-only: assignment to a tag must fail.
TEST(TestRGWLua, TagsNotWriteable)
{
  const std::string script = R"(
    Request.Tags["hello"] = "goodbye"
  )";
  DEFINE_REQ_STATE;
  s.tagset.add_tag("hello", "world");
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_NE(rc, 0);
}
// Request.HTTP.Metadata supports read, iteration, and in-script mutation
// (adding and updating entries is reflected in length and lookups).
TEST(TestRGWLua, Metadata)
{
  const std::string script = R"(
    assert(#Request.HTTP.Metadata == 3)
    for k, v in pairs(Request.HTTP.Metadata) do
      assert(k)
      assert(v)
    end
    assert(Request.HTTP.Metadata["hello"] == "world")
    assert(Request.HTTP.Metadata["kaboom"] == nil)
    Request.HTTP.Metadata["hello"] = "goodbye"
    Request.HTTP.Metadata["kaboom"] = "boom"
    assert(#Request.HTTP.Metadata == 4)
    assert(Request.HTTP.Metadata["hello"] == "goodbye")
    assert(Request.HTTP.Metadata["kaboom"] == "boom")
  )";
  DEFINE_REQ_STATE;
  s.info.x_meta_map["hello"] = "world";
  s.info.x_meta_map["foo"] = "bar";
  s.info.x_meta_map["ka"] = "boom";
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_EQ(rc, 0);
}
// Metadata writes: update, insert, delete (assign nil), and deleting a
// missing key are all legal and observable from the script.
TEST(TestRGWLua, WriteMetadata)
{
  const std::string script = R"(
    -- change existing entry
    Request.HTTP.Metadata["hello"] = "earth"
    -- add new entry
    Request.HTTP.Metadata["goodbye"] = "mars"
    -- delete existing entry
    Request.HTTP.Metadata["foo"] = nil
    -- delete missing entry
    Request.HTTP.Metadata["venus"] = nil
    assert(Request.HTTP.Metadata["hello"] == "earth")
    assert(Request.HTTP.Metadata["goodbye"] == "mars")
    assert(Request.HTTP.Metadata["foo"] == nil)
    assert(Request.HTTP.Metadata["venus"] == nil)
  )";
  DEFINE_REQ_STATE;
  s.info.x_meta_map["hello"] = "world";
  s.info.x_meta_map["foo"] = "bar";
  s.info.x_meta_map["ka"] = "boom";
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_EQ(rc, 0);
}
// Deleting entries while iterating Metadata is allowed: deleting the
// current key skips its successor; the deletion persists into x_meta_map.
TEST(TestRGWLua, MetadataIterateWrite)
{
  const std::string script = R"(
    counter = 0
    for k,v in pairs(Request.HTTP.Metadata) do
      counter = counter + 1
      print(k,v)
      if tostring(k) == "c" then
        Request.HTTP.Metadata["c"] = nil
        print("'c' is deleted and 'd' is skipped")
      end
    end
    assert(counter == 6)
    counter = 0
    for k,v in pairs(Request.HTTP.Metadata) do
      counter = counter + 1
      print(k,v)
      if tostring(k) == "d" then
        Request.HTTP.Metadata["e"] = nil
        print("'e' is deleted")
      end
    end
    assert(counter == 5)
  )";
  DEFINE_REQ_STATE;
  s.info.x_meta_map["a"] = "1";
  s.info.x_meta_map["b"] = "2";
  s.info.x_meta_map["c"] = "3";
  s.info.x_meta_map["d"] = "4";
  s.info.x_meta_map["e"] = "5";
  s.info.x_meta_map["f"] = "6";
  s.info.x_meta_map["g"] = "7";
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_EQ(rc, 0);
  ASSERT_EQ(s.info.x_meta_map.count("c"), 0);
}
// Iterator constraints on Metadata: nested iteration fails, breaking out of
// a loop then re-iterating fails, but two complete sequential loops succeed.
TEST(TestRGWLua, MetadataIterator)
{
  DEFINE_REQ_STATE;
  s.info.x_meta_map["a"] = "1";
  s.info.x_meta_map["b"] = "2";
  s.info.x_meta_map["c"] = "3";
  s.info.x_meta_map["d"] = "4";
  s.info.x_meta_map["e"] = "5";
  s.info.x_meta_map["f"] = "6";
  s.info.x_meta_map["g"] = "7";
 
  std::string script = R"(
    print("nested loop")
    counter = 0
    for k1,v1 in pairs(Request.HTTP.Metadata) do
      print(tostring(k1)..","..v1.." outer loop "..tostring(counter))
      for k2,v2 in pairs(Request.HTTP.Metadata) do
        print(k2,v2)
      end
      counter = counter + 1
    end
  )";
  // nested iteration over the same map must fail
  auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_NE(rc, 0);
  
  script = R"(
    print("break loop")
    counter = 0
    for k,v in pairs(Request.HTTP.Metadata) do
      counter = counter + 1
      print(k,v)
      if counter == 3 then
        break
      end
      counter = counter + 1
    end
    print("full loop")
    for k,v in pairs(Request.HTTP.Metadata) do
      print(k,v)
    end
  )";
  // breaking mid-iteration leaves a stale iterator; a second loop must fail
  rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_NE(rc, 0);
  
  script = R"(
    print("2 loops")
    counter = 0
    for k,v in pairs(Request.HTTP.Metadata) do
      print(k,v)
      counter = counter + 1
    end
    assert(counter == #Request.HTTP.Metadata)
    counter = 0
    for k,v in pairs(Request.HTTP.Metadata) do
      print(k,v)
      counter = counter + 1
    end
    assert(counter == #Request.HTTP.Metadata)
  )";
  // two complete, non-overlapping loops are fine
  rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_EQ(rc, 0);
}
// Request.UserAcl: owner fields plus a multimap of grants (7 entries,
// including two grants under the same "kill$bill" key).
TEST(TestRGWLua, Acl)
{
  const std::string script = R"(
    function print_grant(k, g)
      print("Grant Key: " .. tostring(k))
      print("Grant Type: " .. g.Type)
      print("Grant Group Type: " .. g.GroupType)
      print("Grant Referer: " .. g.Referer)
      if (g.User) then
        print("Grant User.Tenant: " .. g.User.Tenant)
        print("Grant User.Id: " .. g.User.Id)
      end
    end

    assert(Request.UserAcl.Owner.DisplayName == "jack black", Request.UserAcl.Owner.DisplayName)
    assert(Request.UserAcl.Owner.User.Id == "black", Request.UserAcl.Owner.User.Id)
    assert(Request.UserAcl.Owner.User.Tenant == "jack", Request.UserAcl.Owner.User.Tenant)
    assert(#Request.UserAcl.Grants == 7)
    print_grant("", Request.UserAcl.Grants[""])
    for k, v in pairs(Request.UserAcl.Grants) do
      if tostring(k) == "john$doe" then
        assert(v.Permission == 4)
      elseif tostring(k) == "jane$doe" then
        assert(v.Permission == 1)
      elseif tostring(k) == "kill$bill" then
        assert(v.Permission == 6 or v.Permission == 7)
      elseif tostring(k) ~= "" then
        assert(false)
      end
    end
  )";
  DEFINE_REQ_STATE;
  ACLOwner owner;
  owner.set_id(rgw_user("jack", "black"));
  owner.set_name("jack black");
  s.user_acl.reset(new RGWAccessControlPolicy(g_cct));
  s.user_acl->set_owner(owner);
  ACLGrant grant1, grant2, grant3, grant4, grant5, grant6_1, grant6_2;
  grant1.set_canon(rgw_user("jane", "doe"), "her grant", 1);
  grant2.set_group(ACL_GROUP_ALL_USERS ,2);
  grant3.set_referer("http://localhost/ref2", 3);
  grant4.set_canon(rgw_user("john", "doe"), "his grant", 4);
  grant5.set_group(ACL_GROUP_AUTHENTICATED_USERS, 5);
  grant6_1.set_canon(rgw_user("kill", "bill"), "his grant", 6);
  grant6_2.set_canon(rgw_user("kill", "bill"), "her grant", 7);
  s.user_acl->get_acl().add_grant(&grant1);
  s.user_acl->get_acl().add_grant(&grant2);
  s.user_acl->get_acl().add_grant(&grant3);
  s.user_acl->get_acl().add_grant(&grant4);
  s.user_acl->get_acl().add_grant(&grant5);
  s.user_acl->get_acl().add_grant(&grant6_1);
  s.user_acl->get_acl().add_grant(&grant6_2);
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_EQ(rc, 0);
}
// Request.User exposes the req_state user's Id and Tenant.
TEST(TestRGWLua, User)
{
  const std::string script = R"(
    assert(Request.User)
    assert(Request.User.Id == "myid")
    assert(Request.User.Tenant == "mytenant")
  )";
  DEFINE_REQ_STATE;
  rgw_user u;
  u.tenant = "mytenant";
  u.id = "myid";
  s.user.reset(new sal::RadosUser(nullptr, u));
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_EQ(rc, 0);
}
// Lua functions can be defined and called within a request script; the
// helpers print owner info for the object owner and the three ACLs.
// Fixes typos in the debug print strings ("Dispaly" -> "Display",
// "Tenanet" -> "Tenant"); output-only, the test still just checks rc == 0.
TEST(TestRGWLua, UseFunction)
{
	const std::string script = R"(
		function print_owner(owner)
  		print("Owner Display Name: " .. owner.DisplayName)
  		print("Owner Id: " .. owner.User.Id)
  		print("Owner Tenant: " .. owner.User.Tenant)
		end
		
		function print_acl(acl_type)
			index = acl_type .. "ACL"
			acl = Request[index]
			if acl then
				print(acl_type .. "ACL Owner")
				print_owner(acl.Owner)
			else
				print("no " .. acl_type .. " ACL in request: " .. Request.Id)
			end 
		end
		
		print_acl("User")
		print_acl("Bucket")
		print_acl("Object")
	)";

  DEFINE_REQ_STATE;
  s.owner.set_name("user two");
  s.owner.set_id(rgw_user("tenant2", "user2"));
  s.user_acl.reset(new RGWAccessControlPolicy());
  s.user_acl->get_owner().set_name("user three");
  s.user_acl->get_owner().set_id(rgw_user("tenant3", "user3"));
  s.bucket_acl.reset(new RGWAccessControlPolicy());
  s.bucket_acl->get_owner().set_name("user four");
  s.bucket_acl->get_owner().set_id(rgw_user("tenant4", "user4"));
  s.object_acl.reset(new RGWAccessControlPolicy());
  s.object_acl->get_owner().set_name("user five");
  s.object_acl->get_owner().set_id(rgw_user("tenant5", "user5"));

  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_EQ(rc, 0);
}
// The Lua string library (string.gmatch) is available to request scripts.
TEST(TestRGWLua, WithLib)
{
  const std::string script = R"(
    expected_result = {"my", "bucket", "name", "is", "fish"}
    i = 1
    for p in string.gmatch(Request.Bucket.Name, "%a+") do
      assert(p == expected_result[i])
      i = i + 1
    end
  )";
  DEFINE_REQ_STATE;
  rgw_bucket b;
  b.name = "my-bucket-name-is-fish";
  s.bucket.reset(new sal::RadosBucket(nullptr, b));
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_EQ(rc, 0);
}
// Dangerous os functions are stripped from the sandbox: os.clock works,
// os.exit must make the script fail.
TEST(TestRGWLua, NotAllowedInLib)
{
  const std::string script = R"(
    os.clock() -- this should be ok
    os.exit()  -- this should fail (os.exit() is removed)
  )";
  DEFINE_REQ_STATE;
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_NE(rc, 0);
}
// Declares a local `store`: a RadosStore with a fresh (unconnected) RGWRados.
#define MAKE_STORE auto store = std::unique_ptr<sal::RadosStore>(new sal::RadosStore); \
                        store->setRados(new RGWRados);
// Request.Log() must feed the ops log sink for failed requests only:
// with http_ret 200 nothing is logged; with 400 the mock sink records it.
// Fixes an encoding-corrupted line: `s.cio = ∾` was a mangled `s.cio = &ac;`.
TEST(TestRGWLua, OpsLog)
{
  const std::string script = R"(
		if Request.Response.HTTPStatusCode == 200 then
			assert(Request.Response.Message == "Life is great")
		else 
      assert(Request.Bucket)
    	assert(Request.Log() == 0)
		end
  )";

  MAKE_STORE;

  // minimal sink that only records whether log() was invoked
  struct MockOpsLogSink : OpsLogSink {
    bool logged = false;
    int log(req_state*, rgw_log_entry&) override { logged = true; return 0; }
  };
  MockOpsLogSink olog;

  DEFINE_REQ_STATE;
  s.err.http_ret = 200;
  s.err.ret = 0;
  s.err.err_code = "200OK";
  s.err.message = "Life is great";
  rgw_bucket b;
  b.tenant = "tenant";
  b.name = "name";
  b.marker = "marker";
  b.bucket_id = "id"; 
  s.bucket.reset(new sal::RadosBucket(nullptr, b));
  s.bucket_name = "name";
  s.enable_ops_log = true;
  s.enable_usage_log = false;
  s.user.reset(new TestUser());
  TestAccounter ac;
  s.cio = &ac;  // the accounter doubles as the client I/O object
  s.cct->_conf->rgw_ops_log_rados = false;

  s.auth.identity = std::unique_ptr<rgw::auth::Identity>(
                        new FakeIdentity());

  auto rc = lua::request::execute(store.get(), nullptr, &olog, &s, nullptr, script);
  EXPECT_EQ(rc, 0);
  EXPECT_FALSE(olog.logged); // don't log http_ret=200
 
  s.err.http_ret = 400;
  rc = lua::request::execute(store.get(), nullptr, &olog, &s, nullptr, script);
  EXPECT_EQ(rc, 0);
  EXPECT_TRUE(olog.logged);
}
// Background runner for tests: skips fetching the script from the store
// (optionally sleeping `read_time` seconds to simulate a slow read) and
// takes the script directly via the constructor. Runs every second.
class TestBackground : public rgw::lua::Background {
  const unsigned read_time;

protected:
  int read_script() override {
    // don't read the object from the store
    std::this_thread::sleep_for(std::chrono::seconds(read_time));
    return 0;
  }

public:
  TestBackground(sal::RadosStore* store, const std::string& script, unsigned read_time = 0) : 
    rgw::lua::Background(store, g_cct, "", /* luarocks path */ 1 /* run every second */),
    read_time(read_time) {
      // the script is passed in the constructor
      rgw_script = script;
    }

  ~TestBackground() override {
    shutdown();
  }
};
// Construction/destruction of the background runner must be safe both
// without and with start() having been called.
TEST(TestRGWLuaBackground, Start)
{
  MAKE_STORE;
  {
    // ctr and dtor without running
    TestBackground lua_background(store.get(), "");
  }
  {
    // ctr and dtor with running
    TestBackground lua_background(store.get(), "");
    lua_background.start();
  }
}
// Grace period long enough for the 1-second background loop to run.
constexpr auto wait_time = std::chrono::seconds(3);

// Fetches RGW[index] from the background table, asserting it holds type T;
// prints a diagnostic before rethrowing on a type mismatch.
template<typename T>
const T& get_table_value(const TestBackground& b, const std::string& index) {
  try {
    return std::get<T>(b.get_table_value(index));
  } catch (std::bad_variant_access const& ex) {
    std::cout << "expected RGW[" << index << "] to be: " << typeid(T).name() << std::endl;
    throw(ex);
  }
}
// A running background script can populate the shared RGW table.
TEST(TestRGWLuaBackground, Script)
{
  const std::string script = R"(
    local key = "hello"
    local value = "world"
    RGW[key] = value
  )";

  MAKE_STORE;
  TestBackground lua_background(store.get(), script);
  lua_background.start();
  std::this_thread::sleep_for(wait_time);
  EXPECT_EQ(get_table_value<std::string>(lua_background, "hello"), "world");
}
// Request scripts and the background script share the RGW table; pausing
// the background lets the request's write be observed deterministically.
TEST(TestRGWLuaBackground, RequestScript)
{
  const std::string background_script = R"(
    local key = "hello"
    local value = "from background"
    RGW[key] = value
  )";

  MAKE_STORE;
  TestBackground lua_background(store.get(), background_script);
  lua_background.start();
  std::this_thread::sleep_for(wait_time);

  const std::string request_script = R"(
    local key = "hello"
    assert(RGW[key] == "from background")
    local value = "from request"
    RGW[key] = value
  )";

  DEFINE_REQ_STATE;
  pe.lua.background = &lua_background;

  // to make sure test is consistent we have to pause the background
  lua_background.pause();
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script);
  ASSERT_EQ(rc, 0);
  EXPECT_EQ(get_table_value<std::string>(lua_background, "hello"), "from request");
  // now we resume and let the background set the value
  lua_background.resume(store.get());
  std::this_thread::sleep_for(wait_time);
  EXPECT_EQ(get_table_value<std::string>(lua_background, "hello"), "from background");
}
// After pause(), the background script stops executing: the accumulated
// value's length must stop growing.
TEST(TestRGWLuaBackground, Pause)
{
  const std::string script = R"(
    local key = "hello"
    local value = "1"
    if RGW[key] then
      RGW[key] = value..RGW[key]
    else
      RGW[key] = value
    end
  )";

  MAKE_STORE;
  TestBackground lua_background(store.get(), script);
  lua_background.start();
  std::this_thread::sleep_for(wait_time);
  const auto value_len = get_table_value<std::string>(lua_background, "hello").size();
  EXPECT_GT(value_len, 0);
  lua_background.pause();
  std::this_thread::sleep_for(wait_time);
  // no change in len
  EXPECT_EQ(value_len, get_table_value<std::string>(lua_background, "hello").size());
}
// Pausing while read_script() is sleeping (read_time=2s) may let at most
// one extra execution slip through after the pause.
TEST(TestRGWLuaBackground, PauseWhileReading)
{
  const std::string script = R"(
    local key = "hello"
    local value = "world"
    RGW[key] = value
    if RGW[key] then
      RGW[key] = value..RGW[key]
    else
      RGW[key] = value
    end
  )";

  MAKE_STORE;
  constexpr auto long_wait_time = std::chrono::seconds(6);
  TestBackground lua_background(store.get(), script, 2);
  lua_background.start();
  std::this_thread::sleep_for(long_wait_time);
  const auto value_len = get_table_value<std::string>(lua_background, "hello").size();
  EXPECT_GT(value_len, 0);
  lua_background.pause();
  std::this_thread::sleep_for(long_wait_time);
  // one execution might occur after pause
  EXPECT_TRUE(value_len + 1 >= get_table_value<std::string>(lua_background, "hello").size());
}
// A background started in the paused state must not execute until resume().
TEST(TestRGWLuaBackground, ReadWhilePaused)
{
  const std::string script = R"(
    local key = "hello"
    local value = "world"
    RGW[key] = value
  )";

  MAKE_STORE;
  TestBackground lua_background(store.get(), script);
  lua_background.pause();
  lua_background.start();
  std::this_thread::sleep_for(wait_time);
  EXPECT_EQ(get_table_value<std::string>(lua_background, "hello"), "");
  lua_background.resume(store.get());
  std::this_thread::sleep_for(wait_time);
  EXPECT_EQ(get_table_value<std::string>(lua_background, "hello"), "world");
}
// pause() stops execution (value length frozen); resume() restarts it
// (value length grows again).
TEST(TestRGWLuaBackground, PauseResume)
{
  const std::string script = R"(
    local key = "hello"
    local value = "1"
    if RGW[key] then
      RGW[key] = value..RGW[key]
    else
      RGW[key] = value
    end
  )";

  MAKE_STORE;
  TestBackground lua_background(store.get(), script);
  lua_background.start();
  std::this_thread::sleep_for(wait_time);
  const auto value_len = get_table_value<std::string>(lua_background, "hello").size();
  EXPECT_GT(value_len, 0);
  lua_background.pause();
  std::this_thread::sleep_for(wait_time);
  // no change in len
  EXPECT_EQ(value_len, get_table_value<std::string>(lua_background, "hello").size());
  lua_background.resume(store.get());
  std::this_thread::sleep_for(wait_time);
  // should be a change in len
  EXPECT_GT(get_table_value<std::string>(lua_background, "hello").size(), value_len);
}
// Redundant start()/shutdown() calls are idempotent; the runner can be
// restarted after shutdown and continues updating the table.
TEST(TestRGWLuaBackground, MultipleStarts)
{
  const std::string script = R"(
    local key = "hello"
    local value = "1"
    if RGW[key] then
      RGW[key] = value..RGW[key]
    else
      RGW[key] = value
    end
  )";

  MAKE_STORE;
  TestBackground lua_background(store.get(), script);
  lua_background.start();
  std::this_thread::sleep_for(wait_time);
  const auto value_len = get_table_value<std::string>(lua_background, "hello").size();
  EXPECT_GT(value_len, 0);
  lua_background.start();
  lua_background.shutdown();
  lua_background.shutdown();
  std::this_thread::sleep_for(wait_time);
  lua_background.start();
  std::this_thread::sleep_for(wait_time);
  // should be a change in len
  EXPECT_GT(get_table_value<std::string>(lua_background, "hello").size(), value_len);
}
// The RGW table stores all four supported value types:
// string, integer, double, boolean.
TEST(TestRGWLuaBackground, TableValues)
{
  MAKE_STORE;
  TestBackground lua_background(store.get(), "");

  const std::string request_script = R"(
    RGW["key1"] = "string value"
    RGW["key2"] = 42
    RGW["key3"] = 42.2
    RGW["key4"] = true
  )";

  DEFINE_REQ_STATE;
  pe.lua.background = &lua_background;

  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script);
  ASSERT_EQ(rc, 0);
  EXPECT_EQ(get_table_value<std::string>(lua_background, "key1"), "string value");
  EXPECT_EQ(get_table_value<long long int>(lua_background, "key2"), 42);
  EXPECT_EQ(get_table_value<double>(lua_background, "key3"), 42.2);
  EXPECT_TRUE(get_table_value<bool>(lua_background, "key4"));
}
// Values written by one request script persist and are readable (and
// copyable to new keys) by a subsequent request script.
TEST(TestRGWLuaBackground, TablePersist)
{
  MAKE_STORE;
  TestBackground lua_background(store.get(), "");

  std::string request_script = R"(
    RGW["key1"] = "string value"
    RGW["key2"] = 42
  )";

  DEFINE_REQ_STATE;
  pe.lua.background = &lua_background;

  auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script);
  ASSERT_EQ(rc, 0);
  EXPECT_EQ(get_table_value<std::string>(lua_background, "key1"), "string value");
  EXPECT_EQ(get_table_value<long long int>(lua_background, "key2"), 42);
  
  request_script = R"(
    RGW["key3"] = RGW["key1"]
    RGW["key4"] = RGW["key2"]
  )";
  
  rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script);
  ASSERT_EQ(rc, 0);
  EXPECT_EQ(get_table_value<std::string>(lua_background, "key1"), "string value");
  EXPECT_EQ(get_table_value<long long int>(lua_background, "key2"), 42);
  EXPECT_EQ(get_table_value<std::string>(lua_background, "key3"), "string value");
  EXPECT_EQ(get_table_value<long long int>(lua_background, "key4"), 42);
}
// verify that fields of the request (error code, message, tags) can be copied
// into the background RGW table from a request script
TEST(TestRGWLuaBackground, TableValuesFromRequest)
{
  MAKE_STORE;
  TestBackground lua_background(store.get(), "");
  lua_background.start();
  const std::string request_script = R"(
    RGW["key1"] = Request.Response.RGWCode
    RGW["key2"] = Request.Response.Message
    RGW["key3"] = Request.Response.RGWCode*0.1
    RGW["key4"] = Request.Tags["key1"] == Request.Tags["key2"]
  )";
  DEFINE_REQ_STATE;
  pe.lua.background = &lua_background;
  // both tags get the same value so the equality check in the script is true
  s.tagset.add_tag("key1", "val1");
  s.tagset.add_tag("key2", "val1");
  s.err.ret = -99;
  s.err.message = "hi";
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script);
  ASSERT_EQ(rc, 0);
  EXPECT_EQ(get_table_value<long long int>(lua_background, "key1"), -99);
  EXPECT_EQ(get_table_value<std::string>(lua_background, "key2"), "hi");
  EXPECT_EQ(get_table_value<double>(lua_background, "key3"), -9.9);
  EXPECT_EQ(get_table_value<bool>(lua_background, "key4"), true);
}
// verify that storing an unsupported value type (a table) in RGW fails the
// script, while the values written before the failure are kept
TEST(TestRGWLuaBackground, TableInvalidValue)
{
  MAKE_STORE;
  TestBackground lua_background(store.get(), "");
  lua_background.start();
  const std::string request_script = R"(
    RGW["key1"] = "val1"
    RGW["key2"] = 42
    RGW["key3"] = 42.2
    RGW["key4"] = true
    RGW["key5"] = Request.Tags
  )";
  DEFINE_REQ_STATE;
  pe.lua.background = &lua_background;
  s.tagset.add_tag("key1", "val1");
  s.tagset.add_tag("key2", "val2");
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script);
  // the last assignment (a table value) is expected to fail the script
  ASSERT_NE(rc, 0);
  EXPECT_EQ(get_table_value<std::string>(lua_background, "key1"), "val1");
  EXPECT_EQ(get_table_value<long long int>(lua_background, "key2"), 42);
  EXPECT_EQ(get_table_value<double>(lua_background, "key3"), 42.2);
  EXPECT_EQ(get_table_value<bool>(lua_background, "key4"), true);
}
// verify that assigning nil to an RGW table key erases it, that erasing a
// nonexistent key is a no-op, and that #RGW reflects the entry count
TEST(TestRGWLuaBackground, TableErase)
{
  MAKE_STORE;
  TestBackground lua_background(store.get(), "");
  std::string request_script = R"(
    RGW["size"] = 0
    RGW["key1"] = "string value"
    RGW["key2"] = 42
    RGW["key3"] = "another string value"
    RGW["size"] = #RGW
  )";
  DEFINE_REQ_STATE;
  pe.lua.background = &lua_background;
  auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script);
  ASSERT_EQ(rc, 0);
  EXPECT_EQ(get_table_value<std::string>(lua_background, "key1"), "string value");
  EXPECT_EQ(get_table_value<long long int>(lua_background, "key2"), 42);
  EXPECT_EQ(get_table_value<std::string>(lua_background, "key3"), "another string value");
  // 3 keys plus the "size" entry itself
  EXPECT_EQ(get_table_value<long long int>(lua_background, "size"), 4);
  request_script = R"(
    -- erase key1
    RGW["key1"] = nil
    -- following should be a no op
    RGW["key4"] = nil
    RGW["size"] = #RGW
  )";
  rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script);
  ASSERT_EQ(rc, 0);
  // erased key reads back as the empty string
  EXPECT_EQ(get_table_value<std::string>(lua_background, "key1"), "");
  EXPECT_EQ(get_table_value<long long int>(lua_background, "key2"), 42);
  EXPECT_EQ(get_table_value<std::string>(lua_background, "key3"), "another string value");
  EXPECT_EQ(get_table_value<long long int>(lua_background, "size"), 3);
}
// verify that pairs() iteration over the RGW table visits every entry
TEST(TestRGWLuaBackground, TableIterate)
{
  MAKE_STORE;
  TestBackground lua_background(store.get(), "");
  const std::string request_script = R"(
    RGW["key1"] = "string value"
    RGW["key2"] = 42
    RGW["key3"] = 42.2
    RGW["key4"] = true
    RGW["size"] = 0
    for k, v in pairs(RGW) do
      RGW["size"] = RGW["size"] + 1
    end
  )";
  DEFINE_REQ_STATE;
  pe.lua.background = &lua_background;
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script);
  ASSERT_EQ(rc, 0);
  EXPECT_EQ(get_table_value<std::string>(lua_background, "key1"), "string value");
  EXPECT_EQ(get_table_value<long long int>(lua_background, "key2"), 42);
  EXPECT_EQ(get_table_value<double>(lua_background, "key3"), 42.2);
  EXPECT_TRUE(get_table_value<bool>(lua_background, "key4"));
  // 4 keys plus the "size" entry itself
  EXPECT_EQ(get_table_value<long long int>(lua_background, "size"), 5);
}
// verify that erasing the current entry while iterating the RGW table with
// pairs() is safe and that the erased entry is actually removed
TEST(TestRGWLuaBackground, TableIterateWrite)
{
  MAKE_STORE;
  TestBackground lua_background(store.get(), "");
  const std::string request_script = R"(
    RGW["a"] = 1
    RGW["b"] = 2
    RGW["c"] = 3
    RGW["d"] = 4
    RGW["e"] = 5
    counter = 0
    for k, v in pairs(RGW) do
      counter = counter + 1
      print(k, v)
      if tostring(k) == "c" then
        RGW["c"] = nil
      end
    end
    assert(counter == 4)
  )";
  DEFINE_REQ_STATE;
  pe.lua.background = &lua_background;
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script);
  ASSERT_EQ(rc, 0);
  // "c" was erased during iteration
  EXPECT_EQ(lua_background.get_table_value("c"), TestBackground::empty_table_value);
}
// verify RGW.increment() with its default step of 1 on integer and double
// values (assertions happen inside the Lua script itself)
TEST(TestRGWLuaBackground, TableIncrement)
{
  MAKE_STORE;
  TestBackground lua_background(store.get(), "");
  const std::string request_script = R"(
    RGW["key1"] = 42
    RGW["key2"] = 42.2
    RGW.increment("key1")
    assert(RGW["key1"] == 43)
    RGW.increment("key2")
    assert(RGW["key2"] == 43.2)
  )";
  DEFINE_REQ_STATE;
  pe.lua.background = &lua_background;
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script);
  ASSERT_EQ(rc, 0);
}
// verify RGW.increment() with an explicit step, including incrementing an
// integer by a fractional step (which turns the value into a double)
TEST(TestRGWLuaBackground, TableIncrementBy)
{
  MAKE_STORE;
  TestBackground lua_background(store.get(), "");
  const std::string request_script = R"(
    RGW["key1"] = 42
    RGW["key2"] = 42.2
    RGW.increment("key1", 10)
    assert(RGW["key1"] == 52)
    RGW.increment("key2", 10)
    assert(RGW["key2"] == 52.2)
    RGW.increment("key1", 0.2)
    assert(RGW["key1"] == 52.2)
  )";
  DEFINE_REQ_STATE;
  pe.lua.background = &lua_background;
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script);
  ASSERT_EQ(rc, 0);
}
// verify RGW.decrement() with its default step of 1 on integer and double
// values (assertions happen inside the Lua script itself)
TEST(TestRGWLuaBackground, TableDecrement)
{
  MAKE_STORE;
  TestBackground lua_background(store.get(), "");
  const std::string request_script = R"(
    RGW["key1"] = 42
    RGW["key2"] = 42.2
    RGW.decrement("key1")
    assert(RGW["key1"] == 41)
    RGW.decrement("key2")
    assert(RGW["key2"] == 41.2)
  )";
  DEFINE_REQ_STATE;
  pe.lua.background = &lua_background;
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script);
  ASSERT_EQ(rc, 0);
}
// verify RGW.decrement() with an explicit step, including decrementing an
// integer by a fractional step (which turns the value into a double)
TEST(TestRGWLuaBackground, TableDecrementBy)
{
  MAKE_STORE;
  TestBackground lua_background(store.get(), "");
  const std::string request_script = R"(
    RGW["key1"] = 42
    RGW["key2"] = 42.2
    RGW.decrement("key1", 10)
    assert(RGW["key1"] == 32)
    RGW.decrement("key2", 10)
    assert(RGW["key2"] == 32.2)
    RGW.decrement("key1", 0.8)
    assert(RGW["key1"] == 31.2)
  )";
  DEFINE_REQ_STATE;
  pe.lua.background = &lua_background;
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script);
  ASSERT_EQ(rc, 0);
}
// verify that RGW.increment() fails the script when the stored value or the
// step argument is not numeric (string or boolean)
TEST(TestRGWLuaBackground, TableIncrementValueError)
{
  MAKE_STORE;
  TestBackground lua_background(store.get(), "");
  std::string request_script = R"(
    -- cannot increment string values
    RGW["key1"] = "hello"
    RGW.increment("key1")
  )";
  DEFINE_REQ_STATE;
  pe.lua.background = &lua_background;
  auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script);
  ASSERT_NE(rc, 0);
  request_script = R"(
    -- cannot increment bool values
    RGW["key1"] = true
    RGW.increment("key1")
  )";
  rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script);
  ASSERT_NE(rc, 0);
  request_script = R"(
    -- cannot increment by string values
    RGW["key1"] = 99
    RGW.increment("key1", "kaboom")
  )";
  rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script);
  ASSERT_NE(rc, 0);
}
// verify that misusing RGW.increment itself (missing key argument, or being
// assigned to as if it were a table field) fails the script
TEST(TestRGWLuaBackground, TableIncrementError)
{
  MAKE_STORE;
  TestBackground lua_background(store.get(), "");
  std::string request_script = R"(
    -- missing argument
    RGW["key1"] = 11
    RGW.increment()
  )";
  DEFINE_REQ_STATE;
  pe.lua.background = &lua_background;
  auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script);
  ASSERT_NE(rc, 0);
  request_script = R"(
    -- used as settable field
    RGW.increment = 11
  )";
  rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script);
  ASSERT_NE(rc, 0);
}
// verify that trace attributes of the supported types (string, integer,
// double) can be set from a request script
TEST(TestRGWLua, TracingSetAttribute)
{
  const std::string script = R"(
    Request.Trace.SetAttribute("str-attr", "value")
    Request.Trace.SetAttribute("int-attr", 42)
    Request.Trace.SetAttribute("double-attr", 42.5)
  )";
  DEFINE_REQ_STATE;
  INIT_TRACE;
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_EQ(rc, 0);
}
// verify that setting a nil trace attribute is rejected when tracing support
// (jaeger) is compiled in; without it the call is a no-op and succeeds
TEST(TestRGWLua, TracingSetBadAttribute)
{
  const std::string script = R"(
    Request.Trace.SetAttribute("attr", nil)
  )";
  DEFINE_REQ_STATE;
  INIT_TRACE;
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
#ifdef HAVE_JAEGER
  ASSERT_NE(rc, 0);
#else
  ASSERT_EQ(rc, 0);
#endif
}
// verify that a trace event with a mixed-type attribute table can be added
// from a request script
TEST(TestRGWLua, TracingAddEvent)
{
  const std::string script = R"(
    event_attrs = {}
    event_attrs["x"] = "value-x"
    event_attrs[42] = 42
    event_attrs[42.5] = 42.5
    event_attrs["y"] = "value-y"
    Request.Trace.AddEvent("my_event", event_attrs)
  )";
  DEFINE_REQ_STATE;
  INIT_TRACE;
  const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
  ASSERT_EQ(rc, 0);
}
// verify the data filter: the Lua script sees the object payload via Data,
// its length via #Data, the offset via Offset, and can use the background RGW
// table while filtering
TEST(TestRGWLua, Data)
{
  const std::string script = R"(
    local expected = "The quick brown fox jumps over the lazy dog"
    local actual = ""
    RGW["key1"] = 0
    for i, c in pairs(Data) do
      actual = actual .. c
      RGW.increment("key1")
    end
    assert(expected == actual)
    assert(#Data == #expected);
    assert(RGW["key1"] == #Data)
    assert(Request.RGWId == "foo")
    assert(Offset == 12345678)
  )";
  MAKE_STORE;
  TestBackground lua_background(store.get(), "");
  DEFINE_REQ_STATE;
  // exposed to the script as Request.RGWId
  s.host_id = "foo";
  pe.lua.background = &lua_background;
  lua::RGWObjFilter filter(&s, script);
  bufferlist bl;
  bl.append("The quick brown fox jumps over the lazy dog");
  off_t offset = 12345678;
  const auto rc = filter.execute(bl, offset, "put_obj");
  ASSERT_EQ(rc, 0);
}
// verify that the Data table exposed to the filter script is read-only:
// writing to it must fail the script
TEST(TestRGWLua, WriteDataFail)
{
  const std::string script = R"(
    Data[1] = "h"
    Data[2] = "e"
    Data[3] = "l"
    Data[4] = "l"
    Data[5] = "o"
  )";
  DEFINE_REQ_STATE;
  lua::RGWObjFilter filter(&s, script);
  bufferlist bl;
  bl.append("The quick brown fox jumps over the lazy dog");
  const auto rc = filter.execute(bl, 0, "put_obj");
  ASSERT_NE(rc, 0);
}
| 41,017 | 26.200265 | 455 | cc |
null | ceph-main/src/test/rgw/test_rgw_manifest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <iostream>
#include <string>

#include "global/global_init.h"
#include "common/ceph_argparse.h"
#include "rgw_common.h"
#include "rgw_rados.h"
#include "test_rgw_common.h"
#include <gtest/gtest.h>
using namespace std;
auto cct = new CephContext(CEPH_ENTITY_TYPE_CLIENT);
const DoutPrefix dp(cct, 1, "test rgw manifest: ");
// one extent of a legacy (pre-refactor) object manifest: maps a byte range of
// the logical object onto a range inside a rados object; kept here only to
// produce old-format encodings for the backward-compatibility test below
struct OldObjManifestPart {
  old_rgw_obj loc;   /* the object where the data is located */
  uint64_t loc_ofs;  /* the offset at that object where the data is located */
  uint64_t size;     /* the part size */
  OldObjManifestPart() : loc_ofs(0), size(0) {}
  // encoding must stay byte-compatible with the historical on-wire format
  void encode(bufferlist& bl) const {
    ENCODE_START(2, 2, bl);
    encode(loc, bl);
    encode(loc_ofs, bl);
    encode(size, bl);
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator& bl) {
    DECODE_START_LEGACY_COMPAT_LEN_32(2, 2, 2, bl);
    decode(loc, bl);
    decode(loc_ofs, bl);
    decode(size, bl);
    DECODE_FINISH(bl);
  }
  void dump(Formatter *f) const;
  static void generate_test_instances(list<OldObjManifestPart*>& o);
};
WRITE_CLASS_ENCODER(OldObjManifestPart)
// legacy object manifest: an ordered map from logical-object offset to the
// OldObjManifestPart holding that range; used to generate old-format
// encodings that the current RGWObjManifest must still be able to decode
class OldObjManifest {
protected:
  map<uint64_t, OldObjManifestPart> objs;
  uint64_t obj_size;
public:
  OldObjManifest() : obj_size(0) {}
  // copy operations are member-wise; the compiler-generated defaults are
  // correct (Rule of Zero), so the previous hand-written versions were removed
  const map<uint64_t, OldObjManifestPart>& get_objs() const {
    return objs;
  }
  // record a part starting at logical offset `ofs`; extends the total object
  // size if the part reaches past the current end
  void append(uint64_t ofs, const OldObjManifestPart& part) {
    objs[ofs] = part;
    obj_size = std::max(obj_size, ofs + part.size);
  }
  // encoding must stay byte-compatible with the historical on-wire format
  void encode(bufferlist& bl) const {
    ENCODE_START(2, 2, bl);
    encode(obj_size, bl);
    encode(objs, bl);
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator& bl) {
    DECODE_START_LEGACY_COMPAT_LEN_32(6, 2, 2, bl);
    decode(obj_size, bl);
    decode(objs, bl);
    DECODE_FINISH(bl);
  }
  bool empty() const {
    return objs.empty();
  }
};
WRITE_CLASS_ENCODER(OldObjManifest)
// record the head object as the first entry of the expected-objects list
void append_head(list<rgw_obj> *objs, rgw_obj& head)
{
  objs->emplace_back(head);
}
// Append the expected shadow (tail-stripe) objects for `manifest` to `objs`.
// Stripe oids are the manifest prefix followed by a 1-based stripe index, and
// live in the "shadow" namespace of the manifest's bucket.
void append_stripes(list<rgw_obj> *objs, RGWObjManifest& manifest, uint64_t obj_size, uint64_t stripe_size)
{
  string prefix = manifest.get_prefix();
  rgw_bucket bucket = manifest.get_obj().bucket;
  int i = 0;
  // tail stripes start where the head's max size ends
  for (uint64_t ofs = manifest.get_max_head_size(); ofs < obj_size; ofs += stripe_size) {
    // std::to_string replaces the previous fixed-size snprintf buffer
    string oid = prefix + std::to_string(++i);
    cout << "oid=" << oid << std::endl;
    rgw_obj obj;
    obj.init_ns(bucket, oid, "shadow");
    objs->push_back(obj);
  }
}
// Drive the manifest generator over an object of `obj_size` bytes with the
// given head/stripe sizes, and verify at each step that the generator's
// current rados object matches the independently computed list in *test_objs.
// On return, *manifest is fully populated and *test_objs holds the expected
// head + stripe objects.
static void gen_obj(test_rgw_env& env, uint64_t obj_size, uint64_t head_max_size, uint64_t stripe_size,
                    RGWObjManifest *manifest, const rgw_placement_rule& placement_rule, rgw_bucket *bucket, rgw_obj *head, RGWObjManifest::generator *gen,
                    list<rgw_obj> *test_objs)
{
  manifest->set_trivial_rule(head_max_size, stripe_size);
  test_rgw_init_bucket(bucket, "buck");
  *head = rgw_obj(*bucket, "oid");
  gen->create_begin(g_ceph_context, manifest, placement_rule, nullptr, *bucket, *head);
  // build the expected object list: head first, then the tail stripes
  append_head(test_objs, *head);
  cout << "test_objs.size()=" << test_objs->size() << std::endl;
  append_stripes(test_objs, *manifest, obj_size, stripe_size);
  cout << "test_objs.size()=" << test_objs->size() << std::endl;
  // nothing has been written yet, so the manifest must still be empty
  ASSERT_EQ((int)manifest->get_obj_size(), 0);
  ASSERT_EQ((int)manifest->get_head_size(), 0);
  ASSERT_EQ(manifest->has_tail(), false);
  uint64_t ofs = 0;
  list<rgw_obj>::iterator iter = test_objs->begin();
  // advance the generator one stripe at a time, checking each location
  while (ofs < obj_size) {
    rgw_raw_obj obj = gen->get_cur_obj(env.zonegroup, env.zone_params);
    cout << "obj=" << obj << std::endl;
    rgw_raw_obj test_raw = rgw_obj_select(*iter).get_raw_obj(env.zonegroup, env.zone_params);
    ASSERT_TRUE(obj == test_raw);
    ofs = std::min(ofs + gen->cur_stripe_max_size(), obj_size);
    gen->create_next(ofs);
    cout << "obj=" << obj << " *iter=" << *iter << std::endl;
    cout << "test_objs.size()=" << test_objs->size() << std::endl;
    ++iter;
  }
  // after the loop the generator may still point at a final tail stripe
  if (manifest->has_tail()) {
    rgw_raw_obj obj = gen->get_cur_obj(env.zonegroup, env.zone_params);
    rgw_raw_obj test_raw = rgw_obj_select(*iter).get_raw_obj(env.zonegroup, env.zone_params);
    ASSERT_TRUE(obj == test_raw);
    ++iter;
  }
  ASSERT_TRUE(iter == test_objs->end());
  ASSERT_EQ(manifest->get_obj_size(), obj_size);
  ASSERT_EQ(manifest->get_head_size(), std::min(obj_size, head_max_size));
  ASSERT_EQ(manifest->has_tail(), (obj_size > head_max_size));
}
// Build a legacy-format manifest for an object of `obj_size` bytes: a head
// part of `head_max_size` followed by shadow-namespace stripes of
// `stripe_size`, appending each expected rados object to *test_objs.
static void gen_old_obj(test_rgw_env& env, uint64_t obj_size, uint64_t head_max_size, uint64_t stripe_size,
                       OldObjManifest *manifest, old_rgw_bucket *bucket, old_rgw_obj *head,
                       list<old_rgw_obj> *test_objs)
{
  test_rgw_init_old_bucket(bucket, "buck");
  *head = old_rgw_obj(*bucket, "obj");
  // head part covers offset 0 up to head_max_size
  OldObjManifestPart part;
  part.loc = *head;
  part.size = head_max_size;
  part.loc_ofs = 0;
  manifest->append(0, part);
  test_objs->push_back(part.loc);
  // legacy stripes use a random prefix with a ".<index>" suffix
  string prefix;
  append_rand_alpha(g_ceph_context, prefix, prefix, 16);
  int i = 0;
  for (uint64_t ofs = head_max_size; ofs < obj_size; ofs += stripe_size, i++) {
    char buf[32];
    snprintf(buf, sizeof(buf), "%s.%d", prefix.c_str(), i);
    old_rgw_obj loc(*bucket, buf);
    loc.set_ns("shadow");
    OldObjManifestPart part;
    part.loc = loc;
    // last stripe may be shorter than stripe_size
    part.size = min(stripe_size, obj_size - ofs);
    part.loc_ofs = 0;
    manifest->append(ofs, part);
    test_objs->push_back(loc);
  }
}
// An object smaller than the max head size must produce a head-only manifest:
// iteration yields exactly the head, and obj_find() always lands on it.
// (Fix: removed the unused local `rgw_raw_obj raw_head`.)
TEST(TestRGWManifest, head_only_obj) {
  test_rgw_env env;
  RGWObjManifest manifest;
  rgw_bucket bucket;
  rgw_obj head;
  RGWObjManifest::generator gen;
  // 256KB object with a 512KB max head size -> everything fits in the head
  int obj_size = 256 * 1024;
  list<rgw_obj> objs;
  gen_obj(env, obj_size, 512 * 1024, 4 * 1024 * 1024, &manifest, env.zonegroup.default_placement, &bucket, &head, &gen, &objs);
  cout << " manifest.get_obj_size()=" << manifest.get_obj_size() << std::endl;
  cout << " manifest.get_head_size()=" << manifest.get_head_size() << std::endl;
  // manifest iteration must match the expected object list exactly
  list<rgw_obj>::iterator liter;
  RGWObjManifest::obj_iterator iter;
  for (iter = manifest.obj_begin(&dp), liter = objs.begin();
       iter != manifest.obj_end(&dp) && liter != objs.end();
       ++iter, ++liter) {
    ASSERT_TRUE(env.get_raw(*liter) == env.get_raw(iter.get_location()));
  }
  ASSERT_TRUE(iter == manifest.obj_end(&dp));
  ASSERT_TRUE(liter == objs.end());
  // any offset inside the object resolves to the head stripe
  iter = manifest.obj_find(&dp, 100 * 1024);
  ASSERT_TRUE(env.get_raw(iter.get_location()) == env.get_raw(head));
  ASSERT_EQ((int)iter.get_stripe_size(), obj_size);
}
// verify a manifest with a head plus multiple tail stripes: iteration matches
// the expected object list, and obj_find() resolves offsets in both the head
// and the last (short) stripe
TEST(TestRGWManifest, obj_with_head_and_tail) {
  test_rgw_env env;
  RGWObjManifest manifest;
  rgw_bucket bucket;
  rgw_obj head;
  RGWObjManifest::generator gen;
  list<rgw_obj> objs;
  // deliberately not a multiple of the stripe size, so the last stripe is short
  int obj_size = 21 * 1024 * 1024 + 1000;
  int stripe_size = 4 * 1024 * 1024;
  int head_size = 512 * 1024;
  gen_obj(env, obj_size, head_size, stripe_size, &manifest, env.zonegroup.default_placement, &bucket, &head, &gen, &objs);
  list<rgw_obj>::iterator liter;
  rgw_obj_select last_obj;
  RGWObjManifest::obj_iterator iter;
  for (iter = manifest.obj_begin(&dp), liter = objs.begin();
       iter != manifest.obj_end(&dp) && liter != objs.end();
       ++iter, ++liter) {
    cout << "*liter=" << *liter << " iter.get_location()=" << env.get_raw(iter.get_location()) << std::endl;
    ASSERT_TRUE(env.get_raw(*liter) == env.get_raw(iter.get_location()));
    last_obj = iter.get_location();
  }
  ASSERT_TRUE(iter == manifest.obj_end(&dp));
  ASSERT_TRUE(liter == objs.end());
  // an offset inside the head resolves to the head stripe
  iter = manifest.obj_find(&dp, 100 * 1024);
  ASSERT_TRUE(env.get_raw(iter.get_location()) == env.get_raw(head));
  ASSERT_EQ((int)iter.get_stripe_size(), head_size);
  // an offset in the final stripe resolves to the last object seen above
  uint64_t ofs = 20 * 1024 * 1024 + head_size;
  iter = manifest.obj_find(&dp, ofs + 100);
  ASSERT_TRUE(env.get_raw(iter.get_location()) == env.get_raw(last_obj));
  ASSERT_EQ(iter.get_stripe_ofs(), ofs);
  ASSERT_EQ(iter.get_stripe_size(), obj_size - ofs);
}
// verify that per-part manifests generated with multipart rules can be
// appended into one combined manifest, and that obj_find() agrees with plain
// iteration at every offset
TEST(TestRGWManifest, multipart) {
  test_rgw_env env;
  int num_parts = 16;
  vector <RGWObjManifest> pm(num_parts);
  rgw_bucket bucket;
  uint64_t part_size = 10 * 1024 * 1024;
  uint64_t stripe_size = 4 * 1024 * 1024;
  string upload_id = "abc123";
  // generate one manifest per multipart part
  for (int i = 0; i < num_parts; ++i) {
    RGWObjManifest& manifest = pm[i];
    RGWObjManifest::generator gen;
    manifest.set_prefix(upload_id);
    // parts are numbered from 1
    manifest.set_multipart_part_rule(stripe_size, i + 1);
    uint64_t ofs;
    rgw_obj head;
    for (ofs = 0; ofs < part_size; ofs += stripe_size) {
      if (ofs == 0) {
        rgw_placement_rule rule(env.zonegroup.default_placement.name, RGW_STORAGE_CLASS_STANDARD);
        int r = gen.create_begin(g_ceph_context, &manifest, rule, nullptr, bucket, head);
        ASSERT_EQ(r, 0);
        continue;
      }
      gen.create_next(ofs);
    }
    // close out the trailing partial stripe, if any
    if (ofs > part_size) {
      gen.create_next(part_size);
    }
  }
  // stitch all part manifests into a single combined manifest
  RGWObjManifest m;
  for (int i = 0; i < num_parts; i++) {
    m.append(&dp, pm[i], env.zonegroup, env.zone_params);
  }
  RGWObjManifest::obj_iterator iter;
  for (iter = m.obj_begin(&dp); iter != m.obj_end(&dp); ++iter) {
    RGWObjManifest::obj_iterator fiter = m.obj_find(&dp, iter.get_ofs());
    ASSERT_TRUE(env.get_raw(fiter.get_location()) == env.get_raw(iter.get_location()));
  }
  ASSERT_EQ(m.get_obj_size(), num_parts * part_size);
}
// backward-compatibility test: an encoded legacy OldObjManifest must decode
// into the current RGWObjManifest, and iterating the decoded manifest must
// yield the legacy object locations (with the old bucket-marker oid prefix)
TEST(TestRGWManifest, old_obj_manifest) {
  test_rgw_env env;
  OldObjManifest old_manifest;
  old_rgw_bucket old_bucket;
  old_rgw_obj old_head;
  int obj_size = 40 * 1024 * 1024;
  uint64_t stripe_size = 4 * 1024 * 1024;
  uint64_t head_size = 512 * 1024;
  list<old_rgw_obj> old_objs;
  gen_old_obj(env, obj_size, head_size, stripe_size, &old_manifest, &old_bucket, &old_head, &old_objs);
  // head + ceil((40M - 512K) / 4M) stripes = 11 objects
  ASSERT_EQ(old_objs.size(), 11u);
  // encode in the legacy format, decode as the current manifest type
  bufferlist bl;
  encode(old_manifest , bl);
  RGWObjManifest manifest;
  try {
    auto iter = bl.cbegin();
    decode(manifest, iter);
  } catch (buffer::error& err) {
    ASSERT_TRUE(false);
  }
  rgw_raw_obj last_obj;
  RGWObjManifest::obj_iterator iter;
  auto liter = old_objs.begin();
  for (iter = manifest.obj_begin(&dp);
       iter != manifest.obj_end(&dp) && liter != old_objs.end();
       ++iter, ++liter) {
    // rebuild the raw oid the old format would have produced
    rgw_pool old_pool(liter->bucket.data_pool);
    string old_oid;
    prepend_old_bucket_marker(old_bucket, liter->get_object(), old_oid);
    rgw_raw_obj raw_old(old_pool, old_oid);
    cout << "*liter=" << raw_old << " iter.get_location()=" << env.get_raw(iter.get_location()) << std::endl;
    ASSERT_EQ(raw_old, env.get_raw(iter.get_location()));
    last_obj = env.get_raw(iter.get_location());
  }
  ASSERT_TRUE(liter == old_objs.end());
  ASSERT_TRUE(iter == manifest.obj_end(&dp));
}
int main(int argc, char **argv) {
auto args = argv_to_vec(argc, argv);
auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
common_init_finish(g_ceph_context);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
| 11,568 | 28.067839 | 154 | cc |
null | ceph-main/src/test/rgw/test_rgw_obj.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <iostream>
#include "common/ceph_json.h"
#include "common/Formatter.h"
#include "rgw_common.h"
#include "rgw_rados.h"
#include "services/svc_tier_rados.h"
#include "test_rgw_common.h"
#include <gtest/gtest.h>
using namespace std;
// Round-trip check for the oid encoding of `obj`: each of the three parsing
// entry points must recover the original name, namespace and instance.
void check_parsed_correctly(rgw_obj& obj, const string& name, const string& ns, const string& instance)
{
  /* parse_raw_oid() */
  rgw_obj_key parsed_key;
  ASSERT_EQ(true, rgw_obj_key::parse_raw_oid(obj.get_oid(), &parsed_key));
  cout << "parsed: " << parsed_key << std::endl;
  ASSERT_EQ(name, parsed_key.name);
  ASSERT_EQ(ns, parsed_key.ns);
  ASSERT_EQ(instance, parsed_key.instance);
  /* translate_raw_obj_to_obj_in_ns() */
  rgw_obj_key tkey = parsed_key;
  // a mismatched namespace must be rejected
  string tns = ns + "foo";
  ASSERT_EQ(0, rgw_obj_key::oid_to_key_in_ns(obj.get_oid(), &tkey, tns));
  // the correct namespace must succeed and reproduce the key
  tkey = rgw_obj_key();
  tns = ns;
  ASSERT_EQ(true, rgw_obj_key::oid_to_key_in_ns(obj.get_oid(), &tkey, tns));
  cout << "parsed: " << tkey << std::endl;
  ASSERT_EQ(obj.key, tkey);
  /* strip_namespace_from_object() */
  string strip_name = obj.get_oid();
  string strip_ns, strip_instance;
  ASSERT_EQ(true, rgw_obj_key::strip_namespace_from_name(strip_name, strip_ns, strip_instance));
  cout << "stripped: " << strip_name << " ns=" << strip_ns << " i=" << strip_instance << std::endl;
  ASSERT_EQ(name, strip_name);
  ASSERT_EQ(ns, strip_ns);
  ASSERT_EQ(instance, strip_instance);
}
// Build rgw_obj instances with the given name/ns/instance three ways (direct,
// via encode/decode round-trip, and via late ns/instance assignment), verify
// each parses back correctly, and verify all three compare equal.
// Fix: the JSONFormatter was heap-allocated and only freed at the end of the
// function; any failing ASSERT_* returns early and leaked it. A stack object
// (RAII) is always cleaned up.
void test_obj(const string& name, const string& ns, const string& instance)
{
  rgw_bucket b;
  test_rgw_init_bucket(&b, "test");
  JSONFormatter formatter(true);
  formatter.open_object_section("test");
  rgw_obj o(b, name);
  rgw_obj obj1(o);
  if (!instance.empty()) {
    obj1.key.instance = instance;
  }
  if (!ns.empty()) {
    obj1.key.ns = ns;
  }
  check_parsed_correctly(obj1, name, ns, instance);
  encode_json("obj1", obj1, &formatter);
  // round-trip through the wire encoding
  bufferlist bl;
  encode(obj1, bl);
  rgw_obj obj2;
  decode(obj2, bl);
  check_parsed_correctly(obj2, name, ns, instance);
  encode_json("obj2", obj2, &formatter);
  // encode/decode before setting ns/instance, then set them afterwards
  rgw_obj obj3(o);
  bufferlist bl3;
  encode(obj3, bl3);
  decode(obj3, bl3);
  encode_json("obj3", obj3, &formatter);
  if (!instance.empty()) {
    obj3.key.instance = instance;
  }
  if (!ns.empty()) {
    obj3.key.ns = ns;
  }
  check_parsed_correctly(obj3, name, ns, instance);
  encode_json("obj3-2", obj3, &formatter);
  formatter.close_section();
  formatter.flush(cout);
  ASSERT_EQ(obj1, obj2);
  ASSERT_EQ(obj1, obj3);
  /* rgw_obj_key conversion */
  rgw_obj_index_key k;
  obj1.key.get_index_key(&k);
  rgw_obj new_obj(b, k);
  ASSERT_EQ(obj1, new_obj);
}
// exercise names with a leading underscore across every ns/instance combo
// (same call order as listing each combination explicitly)
TEST(TestRGWObj, underscore) {
  for (const char* inst : {"", "v1"}) {
    for (const char* ns : {"", "ns"}) {
      test_obj("_obj", ns, inst);
    }
  }
}
// exercise plain names (no leading underscore) across every ns/instance combo
// (same call order as listing each combination explicitly)
TEST(TestRGWObj, no_underscore) {
  for (const char* inst : {"", "v1"}) {
    for (const char* ns : {"", "ns"}) {
      test_obj("obj", ns, inst);
    }
  }
}
// serialize `entity` to stdout as a named JSON section, for visual debugging
template <class T>
void dump(JSONFormatter& f, const string& name, const T& entity)
{
  const char* section = name.c_str();
  f.open_object_section(section);
  ::encode_json(section, entity, &f);
  f.close_section();
  f.flush(cout);
}
// Convert an rgw_obj (with the given name/instance/ns) to its raw rados
// object, verify the pool (placement-derived or bucket-explicit) and oid, and
// verify the raw object converts back to the original rgw_obj.
static void test_obj_to_raw(test_rgw_env& env, const rgw_bucket& b,
                            const string& name, const string& instance, const string& ns,
                            const string& placement_id)
{
  JSONFormatter f(true);
  dump(f, "bucket", b);
  rgw_obj obj = test_rgw_create_obj(b, name, instance, ns);
  dump(f, "obj", obj);
  rgw_obj_select s(obj);
  rgw_raw_obj raw_obj = s.get_raw_obj(env.zonegroup, env.zone_params);
  dump(f, "raw_obj", raw_obj);
  if (!placement_id.empty()) {
    // pool comes from the named placement rule
    ASSERT_EQ(raw_obj.pool, env.get_placement(placement_id).data_pool);
  } else {
    // no placement id: pool comes from the bucket's explicit placement
    ASSERT_EQ(raw_obj.pool, b.explicit_placement.data_pool);
  }
  ASSERT_EQ(raw_obj.oid, test_rgw_get_obj_oid(obj));
  // converting the raw object back must reproduce the original
  rgw_obj new_obj;
  RGWSI_Tier_RADOS::raw_obj_to_obj(b, raw_obj, &new_obj);
  dump(f, "new_obj", new_obj);
  ASSERT_EQ(obj, new_obj);
}
// run the obj<->raw round-trip over name/instance/ns combinations, for both a
// placement-rule bucket and an explicit-placement bucket
TEST(TestRGWObj, obj_to_raw) {
  test_rgw_env env;
  rgw_bucket b;
  test_rgw_init_bucket(&b, "test");
  rgw_bucket eb;
  test_rgw_init_explicit_placement_bucket(&eb, "ebtest");
  for (auto name : { "myobj", "_myobj", "_myobj_"}) {
    for (auto inst : { "", "inst"}) {
      for (auto ns : { "", "ns"}) {
        test_obj_to_raw(env, b, name, inst, ns, env.zonegroup.default_placement.name);
        // empty placement id exercises the explicit-placement path
        test_obj_to_raw(env, eb, name, inst, ns, string());
      }
    }
  }
}
// backward-compatibility: an encoded legacy old_rgw_obj must decode both as
// the current rgw_obj and as rgw_raw_obj, and re-encoding those must
// round-trip (the raw obj is re-encoded separately because the legacy
// encoding cannot represent a versioned object)
TEST(TestRGWObj, old_to_raw) {
  JSONFormatter f(true);
  test_rgw_env env;
  old_rgw_bucket eb;
  test_rgw_init_old_bucket(&eb, "ebtest");
  for (auto name : { "myobj", "_myobj", "_myobj_"}) {
    for (string inst : { "", "inst"}) {
      for (string ns : { "", "ns"}) {
        old_rgw_obj old(eb, name);
        if (!inst.empty()) {
          old.set_instance(inst);
        }
        if (!ns.empty()) {
          old.set_ns(ns);
        }
        // encode once in the legacy format, decode as both current types
        bufferlist bl;
        encode(old, bl);
        rgw_obj new_obj;
        rgw_raw_obj raw_obj;
        try {
          auto iter = bl.cbegin();
          decode(new_obj, iter);
          iter = bl.begin();
          decode(raw_obj, iter);
        } catch (buffer::error& err) {
          ASSERT_TRUE(false);
        }
        bl.clear();
        rgw_obj new_obj2;
        rgw_raw_obj raw_obj2;
        encode(new_obj, bl);
        dump(f, "raw_obj", raw_obj);
        dump(f, "new_obj", new_obj);
        cout << "raw=" << raw_obj << std::endl;
        try {
          auto iter = bl.cbegin();
          decode(new_obj2, iter);
          /*
            can't decode raw obj here, because we didn't encode an old versioned
            object
           */
          bl.clear();
          encode(raw_obj, bl);
          iter = bl.begin();
          decode(raw_obj2, iter);
        } catch (buffer::error& err) {
          ASSERT_TRUE(false);
        }
        dump(f, "raw_obj2", raw_obj2);
        dump(f, "new_obj2", new_obj2);
        cout << "raw2=" << raw_obj2 << std::endl;
        // both round-trips must be lossless
        ASSERT_EQ(new_obj, new_obj2);
        ASSERT_EQ(raw_obj, raw_obj2);
      }
    }
  }
}
| 6,569 | 23.065934 | 103 | cc |
null | ceph-main/src/test/rgw/test_rgw_period_history.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "rgw_period_history.h"
#include "rgw_rados.h"
#include "rgw_zone.h"
#include "global/global_init.h"
#include "common/ceph_argparse.h"
#include <boost/lexical_cast.hpp>
#include <gtest/gtest.h>
using namespace std;
namespace {
// construct a period with the given fields
// build an RGWPeriod with the given id, realm epoch, and predecessor id
RGWPeriod make_period(const std::string& id, epoch_t realm_epoch,
                      const std::string& predecessor)
{
  RGWPeriod p(id);
  p.set_predecessor(predecessor);
  p.set_realm_epoch(realm_epoch);
  return p;
}
// the fixed "current" period (id "5", epoch 5) every test history starts from
const auto current_period = make_period("5", 5, "4");
// mock puller that throws an exception if it's called
struct ErrorPuller : public RGWPeriodHistory::Puller {
  int pull(const DoutPrefixProvider *dpp, const std::string& id, RGWPeriod& period, optional_yield) override {
    // tests using this puller must never trigger a pull; throwing makes any
    // accidental pull fail loudly
    throw std::runtime_error("unexpected call to pull");
  }
};
ErrorPuller puller; // default puller
// mock puller that records the period ids requested and returns an error
// period ids requested from a puller, in request order
using Ids = std::vector<std::string>;
class RecordingPuller : public RGWPeriodHistory::Puller {
  const int error; // returned from every pull() call
 public:
  explicit RecordingPuller(int error) : error(error) {}
  Ids ids; // accumulates every requested period id
  int pull(const DoutPrefixProvider *dpp, const std::string& id, RGWPeriod& period, optional_yield) override {
    ids.push_back(id);
    return error;
  }
};
// mock puller that returns a fake period by parsing the period id
struct NumericPuller : public RGWPeriodHistory::Puller {
  int pull(const DoutPrefixProvider *dpp, const std::string& id, RGWPeriod& period, optional_yield) override {
    // relies on numeric period ids to divine the realm_epoch
    auto realm_epoch = boost::lexical_cast<epoch_t>(id);
    // predecessor id is simply the previous epoch as a string
    auto predecessor = boost::lexical_cast<std::string>(realm_epoch-1);
    period = make_period(id, realm_epoch, predecessor);
    return 0;
  }
};
} // anonymous namespace
// for ASSERT_EQ()
// equality for ASSERT_EQ(): periods match when both id and realm epoch match
bool operator==(const RGWPeriod& lhs, const RGWPeriod& rhs)
{
  if (lhs.get_id() != rhs.get_id()) {
    return false;
  }
  return lhs.get_realm_epoch() == rhs.get_realm_epoch();
}
// verify that inserting the immediate predecessor of the current period
// attaches it to the history and links it forward to the current period
TEST(PeriodHistory, InsertBefore)
{
  RGWPeriodHistory history(g_ceph_context, &puller, current_period);
  // inserting right before current_period 5 will attach to history
  auto c = history.insert(make_period("4", 4, "3"));
  ASSERT_TRUE(c);
  ASSERT_FALSE(c.has_prev());
  ASSERT_TRUE(c.has_next());
  // cursor can traverse forward to current_period
  c.next();
  ASSERT_EQ(5u, c.get_epoch());
  ASSERT_EQ(current_period, c.get_period());
}
// verify that inserting the immediate successor of the current period
// attaches it to the history and links it back to the current period
TEST(PeriodHistory, InsertAfter)
{
  RGWPeriodHistory history(g_ceph_context, &puller, current_period);
  // inserting right after current_period 5 will attach to history
  auto c = history.insert(make_period("6", 6, "5"));
  ASSERT_TRUE(c);
  ASSERT_TRUE(c.has_prev());
  ASSERT_FALSE(c.has_next());
  // cursor can traverse back to current_period
  c.prev();
  ASSERT_EQ(5u, c.get_epoch());
  ASSERT_EQ(current_period, c.get_period());
}
// verify that a period far in the past does not attach to the history and
// reports no error (the returned cursor is simply empty)
TEST(PeriodHistory, InsertWayBefore)
{
  RGWPeriodHistory history(g_ceph_context, &puller, current_period);
  // inserting way before current_period 5 will not attach to history
  auto c = history.insert(make_period("1", 1, ""));
  ASSERT_FALSE(c);
  ASSERT_EQ(0, c.get_error());
}
// verify that a period far in the future does not attach to the history and
// reports no error (the returned cursor is simply empty)
TEST(PeriodHistory, InsertWayAfter)
{
  RGWPeriodHistory history(g_ceph_context, &puller, current_period);
  // inserting way after current_period 5 will not attach to history
  auto c = history.insert(make_period("9", 9, "8"));
  ASSERT_FALSE(c);
  ASSERT_EQ(0, c.get_error());
}
// verify that attach() of a period older than current walks backwards from
// the current period, pulling one predecessor per attempt, and succeeds once
// the chain down to the attached period is complete
TEST(PeriodHistory, PullPredecessorsBeforeCurrent)
{
  // puller fails every pull but records which ids were requested
  RecordingPuller puller{-EFAULT};
  RGWPeriodHistory history(g_ceph_context, &puller, current_period);
  const DoutPrefix dp(g_ceph_context, 1, "test rgw period history: ");
  // create a disjoint history at 1 and verify that periods are requested
  // backwards from current_period
  auto c1 = history.attach(&dp, make_period("1", 1, ""), null_yield);
  ASSERT_FALSE(c1);
  ASSERT_EQ(-EFAULT, c1.get_error());
  ASSERT_EQ(Ids{"4"}, puller.ids);
  auto c4 = history.insert(make_period("4", 4, "3"));
  ASSERT_TRUE(c4);
  c1 = history.attach(&dp, make_period("1", 1, ""), null_yield);
  ASSERT_FALSE(c1);
  ASSERT_EQ(-EFAULT, c1.get_error());
  ASSERT_EQ(Ids({"4", "3"}), puller.ids);
  auto c3 = history.insert(make_period("3", 3, "2"));
  ASSERT_TRUE(c3);
  c1 = history.attach(&dp, make_period("1", 1, ""), null_yield);
  ASSERT_FALSE(c1);
  ASSERT_EQ(-EFAULT, c1.get_error());
  ASSERT_EQ(Ids({"4", "3", "2"}), puller.ids);
  auto c2 = history.insert(make_period("2", 2, "1"));
  ASSERT_TRUE(c2);
  // chain 1<-2<-3<-4<-5 is now complete, so attach succeeds with no new pulls
  c1 = history.attach(&dp, make_period("1", 1, ""), null_yield);
  ASSERT_TRUE(c1);
  ASSERT_EQ(Ids({"4", "3", "2"}), puller.ids);
}
// attaching a period after current_period pulls missing periods backwards
// (newest first) until one connects to current_period
TEST(PeriodHistory, PullPredecessorsAfterCurrent)
{
  RecordingPuller puller{-EFAULT}; // every pull fails with -EFAULT but records the requested id
  RGWPeriodHistory history(g_ceph_context, &puller, current_period);
  const DoutPrefix dp(g_ceph_context, 1, "test rgw period history: ");
  // create a disjoint history at 9 and verify that periods are requested
  // backwards down to current_period
  auto c9 = history.attach(&dp, make_period("9", 9, "8"), null_yield);
  ASSERT_FALSE(c9); // pull of "8" failed, so attach fails
  ASSERT_EQ(-EFAULT, c9.get_error());
  ASSERT_EQ(Ids{"8"}, puller.ids);
  auto c8 = history.attach(&dp, make_period("8", 8, "7"), null_yield);
  ASSERT_FALSE(c8);
  ASSERT_EQ(-EFAULT, c8.get_error());
  ASSERT_EQ(Ids({"8", "7"}), puller.ids);
  auto c7 = history.attach(&dp, make_period("7", 7, "6"), null_yield);
  ASSERT_FALSE(c7);
  ASSERT_EQ(-EFAULT, c7.get_error());
  ASSERT_EQ(Ids({"8", "7", "6"}), puller.ids);
  // period 6's predecessor is current_period 5, so no pull is needed
  auto c6 = history.attach(&dp, make_period("6", 6, "5"), null_yield);
  ASSERT_TRUE(c6);
  ASSERT_EQ(Ids({"8", "7", "6"}), puller.ids); // unchanged: nothing new was requested
}
TEST(PeriodHistory, MergeBeforeCurrent)
{
  RGWPeriodHistory history(g_ceph_context, &puller, current_period);
  auto current = history.get_current();
  ASSERT_FALSE(current.has_prev());
  // period 3 is disjoint from current_period 5, so it can't attach yet
  auto c3 = history.insert(make_period("3", 3, "2"));
  ASSERT_FALSE(c3);
  // period 4 bridges the gap, merging 3 into the main history
  auto c4 = history.insert(make_period("4", 4, "3"));
  ASSERT_TRUE(c4);
  ASSERT_TRUE(c4.has_prev());
  ASSERT_TRUE(c4.has_next());
  // the pre-merge cursor must still be valid and now see a predecessor
  ASSERT_EQ(current_period, current.get_period());
  ASSERT_TRUE(current.has_prev());
}
TEST(PeriodHistory, MergeAfterCurrent)
{
  RGWPeriodHistory history(g_ceph_context, &puller, current_period);
  auto current = history.get_current();
  ASSERT_FALSE(current.has_next());
  // period 7 is disjoint from current_period 5, so it can't attach yet
  auto c7 = history.insert(make_period("7", 7, "6"));
  ASSERT_FALSE(c7);
  // period 6 bridges the gap, merging 7 into the main history
  auto c6 = history.insert(make_period("6", 6, "5"));
  ASSERT_TRUE(c6);
  ASSERT_TRUE(c6.has_prev());
  ASSERT_TRUE(c6.has_next());
  // the pre-merge cursor must still be valid and now see a successor
  ASSERT_EQ(current_period, current.get_period());
  ASSERT_TRUE(current.has_next());
}
TEST(PeriodHistory, MergeWithoutCurrent)
{
  RGWPeriodHistory history(g_ceph_context, &puller, current_period);
  // build two histories (7 and 9) that are disjoint from each other and
  // from current_period 5
  auto c7 = history.insert(make_period("7", 7, "6"));
  ASSERT_FALSE(c7);
  auto c9 = history.insert(make_period("9", 9, "8"));
  ASSERT_FALSE(c9);
  // 8 merges 7 and 9, but the result is still detached from current_period
  auto c8 = history.insert(make_period("8", 8, "7"));
  ASSERT_FALSE(c8); // not connected to current_period yet
  // 6 finally connects current_period 5 with the 7-9 chain
  auto cursor = history.insert(make_period("6", 6, "5"));
  ASSERT_TRUE(cursor);
  // walk forward and verify the merged chain covers epochs 7 through 9
  for (uint32_t epoch = 7; epoch <= 9; ++epoch) {
    ASSERT_TRUE(cursor.has_next());
    cursor.next();
    ASSERT_EQ(epoch, cursor.get_epoch());
  }
  ASSERT_FALSE(cursor.has_next());
}
TEST(PeriodHistory, AttachBefore)
{
  NumericPuller puller;
  RGWPeriodHistory history(g_ceph_context, &puller, current_period);
  const DoutPrefix dp(g_ceph_context, 1, "test rgw period history: ");
  // attaching epoch 1 makes the puller fetch every missing predecessor
  auto c1 = history.attach(&dp, make_period("1", 1, ""), null_yield);
  ASSERT_TRUE(c1);
  // walk backwards from current_period and verify epochs 4 down to 1
  // were pulled and merged
  auto cursor = history.get_current();
  ASSERT_TRUE(cursor);
  for (uint32_t epoch = 4; epoch >= 1; --epoch) {
    ASSERT_TRUE(cursor.has_prev());
    cursor.prev();
    ASSERT_EQ(epoch, cursor.get_epoch());
  }
  ASSERT_FALSE(cursor.has_prev());
}
TEST(PeriodHistory, AttachAfter)
{
  NumericPuller puller;
  RGWPeriodHistory history(g_ceph_context, &puller, current_period);
  const DoutPrefix dp(g_ceph_context, 1, "test rgw period history: ");
  // attaching epoch 9 makes the puller fetch every period back to current
  auto c9 = history.attach(&dp, make_period("9", 9, "8"), null_yield);
  ASSERT_TRUE(c9);
  // walk forward from current_period and verify epochs 6 through 9
  // were pulled and merged
  auto cursor = history.get_current();
  ASSERT_TRUE(cursor);
  for (uint32_t epoch = 6; epoch <= 9; ++epoch) {
    ASSERT_TRUE(cursor.has_next());
    cursor.next();
    ASSERT_EQ(epoch, cursor.get_epoch());
  }
  ASSERT_FALSE(cursor.has_next());
}
// standard ceph test harness entry point: bring up a minimal CephContext
// (client role, no default ceph.conf), then hand control to gtest
int main(int argc, char** argv)
{
  auto args = argv_to_vec(argc, argv);
  auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
                         CODE_ENVIRONMENT_UTILITY,
                         CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
  common_init_finish(g_ceph_context);
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
| 10,006 | 28.694362 | 110 | cc |
null | ceph-main/src/test/rgw/test_rgw_putobj.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "rgw_putobj.h"
#include <gtest/gtest.h>
// wrap a NUL-terminated c-string in a bufferlist without copying the bytes
// (create_static references the caller's storage)
inline bufferlist string_buf(const char* str) {
  const auto len = strlen(str);
  bufferlist result;
  result.append(buffer::create_static(len, (char*)str));
  return result;
}
// a single write captured by MockProcessor: the payload and its offset
struct Op {
  std::string data;
  uint64_t offset;
};
// Ops are equal when both the payload and the offset match
inline bool operator==(const Op& lhs, const Op& rhs) {
  if (lhs.offset != rhs.offset) {
    return false;
  }
  return lhs.data == rhs.data;
}
// gtest failure-message formatting for Op
inline std::ostream& operator<<(std::ostream& out, const Op& op) {
  out << "{off=" << op.offset;
  out << " data='" << op.data << "'}";
  return out;
}
// DataProcessor that records every process() call instead of writing data
struct MockProcessor : rgw::sal::DataProcessor {
  std::vector<Op> ops; // all writes, in call order

  int process(bufferlist&& data, uint64_t offset) override {
    ops.push_back(Op{data.to_str(), offset});
    return 0;
  }
};
TEST(PutObj_Chunk, FlushHalf)
{
  MockProcessor writer;
  rgw::putobj::ChunkProcessor chunk(&writer, 4);
  // half a chunk is buffered; nothing reaches the writer yet
  ASSERT_EQ(0, chunk.process(string_buf("22"), 0));
  ASSERT_TRUE(writer.ops.empty());
  // an empty buffer flushes the partial chunk plus the final empty write
  ASSERT_EQ(0, chunk.process({}, 2));
  ASSERT_EQ(2u, writer.ops.size());
  EXPECT_EQ(Op({"22", 0}), writer.ops[0]);
  EXPECT_EQ(Op({"", 2}), writer.ops[1]);
}
TEST(PutObj_Chunk, One)
{
  MockProcessor writer;
  rgw::putobj::ChunkProcessor chunk(&writer, 4);
  // exactly one full chunk is written through immediately
  ASSERT_EQ(0, chunk.process(string_buf("4444"), 0));
  ASSERT_EQ(1u, writer.ops.size());
  EXPECT_EQ(Op({"4444", 0}), writer.ops[0]);
  // flushing adds only the final empty write
  ASSERT_EQ(0, chunk.process({}, 4));
  ASSERT_EQ(2u, writer.ops.size());
  EXPECT_EQ(Op({"", 4}), writer.ops[1]);
}
TEST(PutObj_Chunk, OneAndFlushHalf)
{
  MockProcessor writer;
  rgw::putobj::ChunkProcessor chunk(&writer, 4);
  // two bytes stay buffered below the 4-byte chunk size
  ASSERT_EQ(0, chunk.process(string_buf("22"), 0));
  ASSERT_TRUE(writer.ops.empty());
  // four more bytes complete one chunk; the remainder stays buffered
  ASSERT_EQ(0, chunk.process(string_buf("4444"), 2));
  ASSERT_EQ(1u, writer.ops.size());
  EXPECT_EQ(Op({"2244", 0}), writer.ops[0]);
  // flush writes the buffered tail and the final empty write
  ASSERT_EQ(0, chunk.process({}, 6));
  ASSERT_EQ(3u, writer.ops.size());
  EXPECT_EQ(Op({"44", 4}), writer.ops[1]);
  EXPECT_EQ(Op({"", 6}), writer.ops[2]);
}
TEST(PutObj_Chunk, Two)
{
  MockProcessor writer;
  rgw::putobj::ChunkProcessor chunk(&writer, 4);
  // eight bytes make exactly two chunks, both written through
  ASSERT_EQ(0, chunk.process(string_buf("88888888"), 0));
  ASSERT_EQ(2u, writer.ops.size());
  EXPECT_EQ(Op({"8888", 0}), writer.ops[0]);
  EXPECT_EQ(Op({"8888", 4}), writer.ops[1]);
  // flush adds only the final empty write
  ASSERT_EQ(0, chunk.process({}, 8));
  ASSERT_EQ(3u, writer.ops.size());
  EXPECT_EQ(Op({"", 8}), writer.ops[2]);
}
TEST(PutObj_Chunk, TwoAndFlushHalf)
{
  MockProcessor writer;
  rgw::putobj::ChunkProcessor chunk(&writer, 4);
  // two bytes stay buffered below the 4-byte chunk size
  ASSERT_EQ(0, chunk.process(string_buf("22"), 0));
  ASSERT_TRUE(writer.ops.empty());
  // eight more bytes complete two chunks; two bytes remain buffered
  ASSERT_EQ(0, chunk.process(string_buf("88888888"), 2));
  ASSERT_EQ(2u, writer.ops.size());
  EXPECT_EQ(Op({"2288", 0}), writer.ops[0]);
  EXPECT_EQ(Op({"8888", 4}), writer.ops[1]);
  // flush writes the buffered tail and the final empty write
  ASSERT_EQ(0, chunk.process({}, 10));
  ASSERT_EQ(4u, writer.ops.size());
  EXPECT_EQ(Op({"88", 8}), writer.ops[2]);
  EXPECT_EQ(Op({"", 10}), writer.ops[3]);
}
using StripeMap = std::map<uint64_t, uint64_t>; // offset -> stripe_size

// StripeGenerator backed by a fixed offset->size map; offsets that are
// not present in the map yield -ENOENT
class StripeMapGen : public rgw::putobj::StripeGenerator {
  const StripeMap& stripes;
 public:
  StripeMapGen(const StripeMap& stripes) : stripes(stripes) {}

  int next(uint64_t offset, uint64_t *stripe_size) override {
    auto pos = stripes.find(offset);
    if (pos != stripes.end()) {
      *stripe_size = pos->second;
      return 0;
    }
    return -ENOENT;
  }
};
// drive StripeProcessor across three stripes of different sizes and verify
// writes are split at each boundary, flushed, and re-offset to 0 in the
// next stripe
TEST(PutObj_Stripe, DifferentStripeSize)
{
  MockProcessor mock;
  StripeMap stripes{
    { 0, 4},
    { 4, 6},
    {10, 2}
  };
  StripeMapGen gen(stripes);
  rgw::putobj::StripeProcessor processor(&mock, &gen, stripes.begin()->second);
  // first write fits entirely inside stripe [0,4)
  ASSERT_EQ(0, processor.process(string_buf("22"), 0));
  ASSERT_EQ(1u, mock.ops.size());
  EXPECT_EQ(Op({"22", 0}), mock.ops[0]);
  // crosses into stripe [4,10): tail written, stripe flushed, rest at offset 0
  ASSERT_EQ(0, processor.process(string_buf("4444"), 2));
  ASSERT_EQ(4u, mock.ops.size());
  EXPECT_EQ(Op({"44", 2}), mock.ops[1]);
  EXPECT_EQ(Op({"", 4}), mock.ops[2]); // flush
  EXPECT_EQ(Op({"44", 0}), mock.ops[3]);
  // crosses into stripe [10,12)
  ASSERT_EQ(0, processor.process(string_buf("666666"), 6));
  ASSERT_EQ(7u, mock.ops.size());
  EXPECT_EQ(Op({"6666", 2}), mock.ops[4]);
  EXPECT_EQ(Op({"", 6}), mock.ops[5]); // flush
  EXPECT_EQ(Op({"66", 0}), mock.ops[6]);
  // a final empty write flushes the last stripe
  ASSERT_EQ(0, processor.process({}, 12));
  ASSERT_EQ(8u, mock.ops.size());
  EXPECT_EQ(Op({"", 2}), mock.ops[7]); // flush
  // gen returns an error past this
  ASSERT_EQ(-ENOENT, processor.process(string_buf("1"), 12));
}
// start writing at a non-zero offset inside the first stripe and verify
// the processor still splits and flushes at the stripe boundary
TEST(PutObj_Stripe, SkipFirstChunk)
{
  MockProcessor mock;
  StripeMap stripes{
    {0, 4},
    {4, 4},
  };
  StripeMapGen gen(stripes);
  rgw::putobj::StripeProcessor processor(&mock, &gen, stripes.begin()->second);
  // write 6 bytes at offset 2: 2 bytes complete stripe [0,4), which is
  // flushed, then 4 bytes land in stripe [4,8) at offset 0
  ASSERT_EQ(0, processor.process(string_buf("666666"), 2));
  ASSERT_EQ(3u, mock.ops.size());
  EXPECT_EQ(Op({"66", 2}), mock.ops[0]);
  EXPECT_EQ(Op({"", 4}), mock.ops[1]); // flush
  EXPECT_EQ(Op({"6666", 0}), mock.ops[2]);
  // final empty write flushes the second stripe
  ASSERT_EQ(0, processor.process({}, 8));
  ASSERT_EQ(4u, mock.ops.size());
  EXPECT_EQ(Op({"", 4}), mock.ops[3]); // flush
}
| 5,311 | 25.964467 | 79 | cc |
null | ceph-main/src/test/rgw/test_rgw_ratelimit.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include <gtest/gtest.h>
#include "rgw_ratelimit.h"
using namespace std::chrono_literals;
TEST(RGWRateLimit, op_limit_not_enabled)
{
  // info.enabled is left false, so no request should ever be limited
  std::atomic_bool replacing{false}; // value-init: a default-constructed atomic is uninitialized
  std::condition_variable cv;
  RateLimiter ratelimit(replacing, cv);
  RGWRateLimitInfo info;
  auto time = ceph::coarse_real_clock::now();
  std::string key = "uuser123";
  bool success = ratelimit.should_rate_limit("PUT", key, time, &info);
  EXPECT_EQ(false, success);
}
TEST(RGWRateLimit, reject_op_over_limit)
{
  // with max_read_ops = 1, the second GET finds no tokens and is rejected
  std::atomic_bool replacing{false}; // value-init: a default-constructed atomic is uninitialized
  std::condition_variable cv;
  RateLimiter ratelimit(replacing, cv);
  RGWRateLimitInfo info;
  info.enabled = true;
  info.max_read_ops = 1;
  auto time = ceph::coarse_real_clock::now();
  std::string key = "uuser123";
  // first request consumes the only token
  bool success = ratelimit.should_rate_limit("GET", key, time, &info);
  time = ceph::coarse_real_clock::now();
  // second request must be rate limited
  success = ratelimit.should_rate_limit("GET", key, time, &info);
  EXPECT_EQ(true, success);
}
TEST(RGWRateLimit, accept_op_after_giveback)
{
  // a token returned via giveback_tokens() can be consumed again
  std::atomic_bool replacing{false}; // value-init: a default-constructed atomic is uninitialized
  std::condition_variable cv;
  RateLimiter ratelimit(replacing, cv);
  RGWRateLimitInfo info;
  info.enabled = true;
  info.max_read_ops = 1;
  auto time = ceph::coarse_real_clock::now();
  std::string key = "uuser123";
  bool success = ratelimit.should_rate_limit("GET", key, time, &info);
  ratelimit.giveback_tokens("GET", key); // return the token taken above
  time = ceph::coarse_real_clock::now();
  success = ratelimit.should_rate_limit("GET", key, time, &info);
  EXPECT_EQ(false, success);
}
TEST(RGWRateLimit, accept_op_after_refill)
{
  // tokens refill over time: after more than a minute a new op is accepted
  std::atomic_bool replacing{false}; // value-init: a default-constructed atomic is uninitialized
  std::condition_variable cv;
  RateLimiter ratelimit(replacing, cv);
  RGWRateLimitInfo info;
  info.enabled = true;
  info.max_read_ops = 1;
  auto time = ceph::coarse_real_clock::now();
  std::string key = "uuser123";
  bool success = ratelimit.should_rate_limit("GET", key, time, &info);
  time += 61s; // jump past the refill window
  success = ratelimit.should_rate_limit("GET", key, time, &info);
  EXPECT_EQ(false, success);
}
TEST(RGWRateLimit, reject_bw_over_limit)
{
  // a newer request is rejected once the bandwidth tokens are exhausted
  std::atomic_bool replacing{false}; // value-init: a default-constructed atomic is uninitialized
  std::condition_variable cv;
  RateLimiter ratelimit(replacing, cv);
  RGWRateLimitInfo info;
  info.enabled = true;
  info.max_read_bytes = 1;
  auto time = ceph::coarse_real_clock::now();
  std::string key = "uuser123";
  bool success = ratelimit.should_rate_limit("GET", key, time, &info);
  ratelimit.decrease_bytes("GET",key, 2, &info); // consume more bytes than the limit
  time = ceph::coarse_real_clock::now();
  success = ratelimit.should_rate_limit("GET", key, time, &info);
  EXPECT_EQ(true, success);
}
TEST(RGWRateLimit, accept_bw)
{
  // with bandwidth tokens remaining, the request is still served
  std::atomic_bool replacing{false}; // value-init: a default-constructed atomic is uninitialized
  std::condition_variable cv;
  RateLimiter ratelimit(replacing, cv);
  RGWRateLimitInfo info;
  info.enabled = true;
  info.max_read_bytes = 2;
  auto time = ceph::coarse_real_clock::now();
  std::string key = "uuser123";
  bool success = ratelimit.should_rate_limit("GET", key, time, &info);
  ratelimit.decrease_bytes("GET",key, 1, &info); // only half the budget used
  time = ceph::coarse_real_clock::now();
  success = ratelimit.should_rate_limit("GET", key, time, &info);
  EXPECT_EQ(false, success);
}
TEST(RGWRateLimit, check_bw_debt_at_max_120secs)
{
  // bandwidth debt is capped at 120 seconds' worth, so after 121s the
  // request is accepted even though far more bytes were consumed
  std::atomic_bool replacing{false}; // value-init: a default-constructed atomic is uninitialized
  std::condition_variable cv;
  RateLimiter ratelimit(replacing, cv);
  RGWRateLimitInfo info;
  info.enabled = true;
  info.max_read_bytes = 2;
  auto time = ceph::coarse_real_clock::now();
  std::string key = "uuser123";
  bool success = ratelimit.should_rate_limit("GET", key, time, &info);
  ratelimit.decrease_bytes("GET",key, 100, &info); // rack up a large debt
  time += 121s;
  success = ratelimit.should_rate_limit("GET", key, time, &info);
  EXPECT_EQ(false, success);
}
TEST(RGWRateLimit, check_that_bw_limit_not_affect_ops)
{
  // a generous bytes budget must not mask the 1-op read limit
  std::atomic_bool replacing{false}; // value-init: a default-constructed atomic is uninitialized
  std::condition_variable cv;
  RateLimiter ratelimit(replacing, cv);
  RGWRateLimitInfo info;
  info.enabled = true;
  info.max_read_ops = 1;
  info.max_read_bytes = 100000000;
  auto time = ceph::coarse_real_clock::now();
  std::string key = "uuser123";
  bool success = ratelimit.should_rate_limit("GET", key, time, &info);
  ratelimit.decrease_bytes("GET",key, 10000, &info);
  time = ceph::coarse_real_clock::now();
  // second op exceeds max_read_ops regardless of remaining bytes
  success = ratelimit.should_rate_limit("GET", key, time, &info);
  EXPECT_EQ(true, success);
}
TEST(RGWRateLimit, read_limit_does_not_affect_writes)
{
  // read limits must not throttle PUT (write) traffic
  std::atomic_bool replacing{false}; // value-init: a default-constructed atomic is uninitialized
  std::condition_variable cv;
  RateLimiter ratelimit(replacing, cv);
  RGWRateLimitInfo info;
  info.enabled = true;
  info.max_read_ops = 1;
  info.max_read_bytes = 100000000;
  auto time = ceph::coarse_real_clock::now();
  std::string key = "uuser123";
  bool success = ratelimit.should_rate_limit("PUT", key, time, &info);
  ratelimit.decrease_bytes("PUT",key, 10000, &info);
  time = ceph::coarse_real_clock::now();
  success = ratelimit.should_rate_limit("PUT", key, time, &info);
  EXPECT_EQ(false, success);
}
TEST(RGWRateLimit, write_limit_does_not_affect_reads)
{
  // write limits must not throttle GET (read) traffic
  std::atomic_bool replacing{false}; // value-init: a default-constructed atomic is uninitialized
  std::condition_variable cv;
  RateLimiter ratelimit(replacing, cv);
  RGWRateLimitInfo info;
  info.enabled = true;
  info.max_write_ops = 1;
  info.max_write_bytes = 100000000;
  auto time = ceph::coarse_real_clock::now();
  std::string key = "uuser123";
  bool success = ratelimit.should_rate_limit("GET", key, time, &info);
  ratelimit.decrease_bytes("GET",key, 10000, &info);
  time = ceph::coarse_real_clock::now();
  success = ratelimit.should_rate_limit("GET", key, time, &info);
  EXPECT_EQ(false, success);
}
TEST(RGWRateLimit, allow_unlimited_access)
{
  // all-zero limits in RGWRateLimitInfo mean unlimited access even when
  // rate limiting is enabled
  std::atomic_bool replacing{false}; // value-init: a default-constructed atomic is uninitialized
  std::condition_variable cv;
  RateLimiter ratelimit(replacing, cv);
  RGWRateLimitInfo info;
  info.enabled = true;
  auto time = ceph::coarse_real_clock::now();
  std::string key = "uuser123";
  bool success = ratelimit.should_rate_limit("GET", key, time, &info);
  EXPECT_EQ(false, success);
}
// GC must not swap in a replacement RateLimiter while the entry map is
// still far below its replacement threshold
TEST(RGWRateLimitGC, NO_GC_AHEAD_OF_TIME)
{
  // Test if GC is not starting the replace before getting to map_size * 0.9
  // Please make sure to change those values when you change the map_size in the code
  std::shared_ptr<ActiveRateLimiter> ratelimit(new ActiveRateLimiter(g_ceph_context));
  ratelimit->start(); // starts the background GC
  auto active = ratelimit->get_active();
  RGWRateLimitInfo info;
  auto time = ceph::coarse_real_clock::now();
  std::string key = "uuser123";
  active->should_rate_limit("GET", key, time, &info); // one entry: far below threshold
  auto activegc = ratelimit->get_active();
  EXPECT_EQ(activegc, active); // same instance => no replacement happened
}
// after inserting enough distinct keys to cross the replacement threshold,
// GC must have swapped in a new active RateLimiter instance
TEST(RGWRateLimiterGC, GC_IS_WORKING)
{
  // Test if GC is replacing the active RateLimiter
  // Please make sure to change those values when you change the map_size in the code
  std::shared_ptr<ActiveRateLimiter> ratelimit(new ActiveRateLimiter(g_ceph_context));
  ratelimit->start();
  auto active = ratelimit->get_active();
  RGWRateLimitInfo info;
  info.enabled = true;
  auto time = ceph::coarse_real_clock::now();
  std::string key = "-1";
  // 2M distinct keys push the entry map past the GC threshold
  for(int i = 0; i < 2000000; i++)
  {
    active->should_rate_limit("GET", key, time, &info);
    key = std::to_string(i);
  }
  auto activegc = ratelimit->get_active();
  EXPECT_NE(activegc, active); // different instance => GC replaced it
}
TEST(RGWRateLimitEntry, op_limit_not_enabled)
{
  // a default RGWRateLimitInfo has enabled == false, so nothing is limited
  RateLimiterEntry bucket;
  RGWRateLimitInfo info;
  auto now = ceph::coarse_real_clock::now().time_since_epoch();
  const bool limited = bucket.should_rate_limit(false, &info, now);
  EXPECT_EQ(false, limited);
}
TEST(RGWRateLimitEntry, reject_op_over_limit)
{
  // one read op allowed per window, so the second read must be rejected
  RateLimiterEntry bucket;
  RGWRateLimitInfo info;
  info.enabled = true;
  info.max_read_ops = 1;
  auto now = ceph::coarse_real_clock::now().time_since_epoch();
  // first read consumes the only token
  bool limited = bucket.should_rate_limit(true, &info, now);
  now = ceph::coarse_real_clock::now().time_since_epoch();
  // second read finds no tokens left
  limited = bucket.should_rate_limit(true, &info, now);
  EXPECT_EQ(true, limited);
}
TEST(RGWRateLimitEntry, accept_op_after_giveback)
{
  // a token returned via giveback_tokens() can be consumed again
  RateLimiterEntry bucket;
  RGWRateLimitInfo info;
  info.enabled = true;
  info.max_read_ops = 1;
  auto now = ceph::coarse_real_clock::now().time_since_epoch();
  bool limited = bucket.should_rate_limit(true, &info, now);
  bucket.giveback_tokens(true); // return the read token taken above
  now = ceph::coarse_real_clock::now().time_since_epoch();
  limited = bucket.should_rate_limit(true, &info, now);
  EXPECT_EQ(false, limited);
}
TEST(RGWRateLimitEntry, accept_op_after_refill)
{
  // tokens refill over time: after more than a minute a new op is accepted
  RateLimiterEntry bucket;
  RGWRateLimitInfo info;
  info.enabled = true;
  info.max_read_ops = 1;
  auto now = ceph::coarse_real_clock::now().time_since_epoch();
  bool limited = bucket.should_rate_limit(true, &info, now);
  now += 61s; // jump past the refill window
  limited = bucket.should_rate_limit(true, &info, now);
  EXPECT_EQ(false, limited);
}
TEST(RGWRateLimitEntry, reject_bw_over_limit)
{
  // a newer request is rejected once the bandwidth tokens are exhausted
  RateLimiterEntry bucket;
  RGWRateLimitInfo info;
  info.enabled = true;
  info.max_read_bytes = 1;
  auto now = ceph::coarse_real_clock::now().time_since_epoch();
  bool limited = bucket.should_rate_limit(true, &info, now);
  bucket.decrease_bytes(true, 2, &info); // consume more bytes than the limit
  now = ceph::coarse_real_clock::now().time_since_epoch();
  limited = bucket.should_rate_limit(true, &info, now);
  EXPECT_EQ(true, limited);
}
TEST(RGWRateLimitEntry, accept_bw)
{
  // with bandwidth tokens remaining, the request is still served
  RateLimiterEntry bucket;
  RGWRateLimitInfo info;
  info.enabled = true;
  info.max_read_bytes = 2;
  auto now = ceph::coarse_real_clock::now().time_since_epoch();
  bool limited = bucket.should_rate_limit(true, &info, now);
  bucket.decrease_bytes(true, 1, &info); // only half the budget used
  now = ceph::coarse_real_clock::now().time_since_epoch();
  limited = bucket.should_rate_limit(true, &info, now);
  EXPECT_EQ(false, limited);
}
TEST(RGWRateLimitEntry, check_bw_debt_at_max_120secs)
{
  // bandwidth debt is capped at 120 seconds' worth: after 121s the
  // request is accepted despite the large overdraft
  RateLimiterEntry bucket;
  RGWRateLimitInfo info;
  info.enabled = true;
  info.max_read_bytes = 2;
  auto now = ceph::coarse_real_clock::now().time_since_epoch();
  bool limited = bucket.should_rate_limit(true, &info, now);
  bucket.decrease_bytes(true, 100, &info); // rack up a large debt
  now += 121s;
  limited = bucket.should_rate_limit(true, &info, now);
  EXPECT_EQ(false, limited);
}
TEST(RGWRateLimitEntry, check_that_bw_limit_not_affect_ops)
{
  // a generous bytes budget must not mask the 1-op read limit
  RateLimiterEntry bucket;
  RGWRateLimitInfo info;
  info.enabled = true;
  info.max_read_ops = 1;
  info.max_read_bytes = 100000000;
  auto now = ceph::coarse_real_clock::now().time_since_epoch();
  bool limited = bucket.should_rate_limit(true, &info, now);
  bucket.decrease_bytes(true, 10000, &info);
  now = ceph::coarse_real_clock::now().time_since_epoch();
  // second op exceeds max_read_ops regardless of remaining bytes
  limited = bucket.should_rate_limit(true, &info, now);
  EXPECT_EQ(true, limited);
}
TEST(RGWRateLimitEntry, read_limit_does_not_affect_writes)
{
  // read limits must not throttle writes (is_read == false)
  RateLimiterEntry bucket;
  RGWRateLimitInfo info;
  info.enabled = true;
  info.max_read_ops = 1;
  info.max_read_bytes = 100000000;
  auto now = ceph::coarse_real_clock::now().time_since_epoch();
  bool limited = bucket.should_rate_limit(false, &info, now);
  bucket.decrease_bytes(false, 10000, &info);
  now = ceph::coarse_real_clock::now().time_since_epoch();
  limited = bucket.should_rate_limit(false, &info, now);
  EXPECT_EQ(false, limited);
}
TEST(RGWRateLimitEntry, write_limit_does_not_affect_reads)
{
  // write limits must not throttle reads (is_read == true)
  RateLimiterEntry entry;
  RGWRateLimitInfo info;
  info.enabled = true;
  info.max_write_ops = 1;
  info.max_write_bytes = 100000000;
  auto time = ceph::coarse_real_clock::now().time_since_epoch();
  // note: the unused 'key' local from the RGWRateLimit variant of this
  // test was removed; RateLimiterEntry is per-key and takes no key
  bool success = entry.should_rate_limit(true, &info, time);
  entry.decrease_bytes(true, 10000, &info);
  time = ceph::coarse_real_clock::now().time_since_epoch();
  success = entry.should_rate_limit(true, &info, time);
  EXPECT_EQ(false, success);
}
TEST(RGWRateLimitEntry, allow_unlimited_access)
{
  // all-zero limits (the defaults) mean unlimited access even when enabled
  RateLimiterEntry bucket;
  RGWRateLimitInfo info;
  info.enabled = true;
  auto now = ceph::coarse_real_clock::now().time_since_epoch();
  const bool limited = bucket.should_rate_limit(true, &info, now);
  EXPECT_EQ(false, limited);
}
| 13,171 | 33.938992 | 86 | cc |
null | ceph-main/src/test/rgw/test_rgw_reshard.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "rgw_reshard.h"
#include <gtest/gtest.h>
// exercise the prime-shard helpers used by dynamic resharding: rounding a
// shard count up/down to the nearest prime in the static list, and clamping
// the preferred shard count to the configured maximum
TEST(TestRGWReshard, dynamic_reshard_shard_count)
{
  // assuming we have prime numbers up to 1999
  ASSERT_EQ(1999u, RGWBucketReshard::get_max_prime_shards()) <<
    "initial list has primes up to 1999";
  ASSERT_EQ(1u, RGWBucketReshard::get_prime_shards_greater_or_equal(1)) <<
    "we allow for 1 shard even though it's not prime";
  ASSERT_EQ(809u, RGWBucketReshard::get_prime_shards_greater_or_equal(808)) <<
    "809 is prime";
  ASSERT_EQ(809u, RGWBucketReshard::get_prime_shards_greater_or_equal(809)) <<
    "809 is prime";
  ASSERT_EQ(811u, RGWBucketReshard::get_prime_shards_greater_or_equal(810)) <<
    "811 is prime";
  ASSERT_EQ(811u, RGWBucketReshard::get_prime_shards_greater_or_equal(811)) <<
    "811 is prime";
  ASSERT_EQ(821u, RGWBucketReshard::get_prime_shards_greater_or_equal(812)) <<
    "821 is prime";
  ASSERT_EQ(1u, RGWBucketReshard::get_prime_shards_less_or_equal(1)) <<
    "we allow for 1 shard even though it's not prime";
  ASSERT_EQ(797u, RGWBucketReshard::get_prime_shards_less_or_equal(808)) <<
    "809 is prime";
  ASSERT_EQ(809u, RGWBucketReshard::get_prime_shards_less_or_equal(809)) <<
    "809 is prime";
  ASSERT_EQ(809u, RGWBucketReshard::get_prime_shards_less_or_equal(810)) <<
    "811 is prime";
  ASSERT_EQ(811u, RGWBucketReshard::get_prime_shards_less_or_equal(811)) <<
    "811 is prime";
  ASSERT_EQ(811u, RGWBucketReshard::get_prime_shards_less_or_equal(812)) <<
    "821 is prime";
  // tests when max dynamic shards is equal to end of prime list
  ASSERT_EQ(1999u, RGWBucketReshard::get_preferred_shards(1998, 1999));
  ASSERT_EQ(1999u, RGWBucketReshard::get_preferred_shards(1999, 1999));
  ASSERT_EQ(1999u, RGWBucketReshard::get_preferred_shards(2000, 1999));
  ASSERT_EQ(1999u, RGWBucketReshard::get_preferred_shards(2001, 1999));
  // tests when max dynamic shards is above end of prime list
  // (beyond the list, the suggested count itself is used, capped at max)
  ASSERT_EQ(1999u, RGWBucketReshard::get_preferred_shards(1998, 3000));
  ASSERT_EQ(1999u, RGWBucketReshard::get_preferred_shards(1999, 3000));
  ASSERT_EQ(2000u, RGWBucketReshard::get_preferred_shards(2000, 3000));
  ASSERT_EQ(2001u, RGWBucketReshard::get_preferred_shards(2001, 3000));
  // tests when max dynamic shards is below end of prime list
  // (result is the largest prime not exceeding the maximum, 499)
  ASSERT_EQ(499u, RGWBucketReshard::get_preferred_shards(1998, 500));
  ASSERT_EQ(499u, RGWBucketReshard::get_preferred_shards(1999, 500));
  ASSERT_EQ(499u, RGWBucketReshard::get_preferred_shards(2000, 500));
  ASSERT_EQ(499u, RGWBucketReshard::get_preferred_shards(2001, 500));
}
| 2,939 | 41.608696 | 78 | cc |
null | ceph-main/src/test/rgw/test_rgw_reshard_wait.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "rgw_reshard.h"
#include <spawn/spawn.hpp>
#include <gtest/gtest.h>
using namespace std::chrono_literals;
using Clock = RGWReshardWait::Clock;
TEST(ReshardWait, wait_block)
{
  constexpr ceph::timespan wait_duration = 10ms;
  RGWReshardWait waiter(wait_duration);
  // a synchronous wait() (no yield context) blocks the calling thread for
  // the full duration and returns success
  const auto started = Clock::now();
  EXPECT_EQ(0, waiter.wait(null_yield));
  const ceph::timespan took = Clock::now() - started;
  EXPECT_LE(wait_duration, took); // blocked at least wait_duration
  waiter.stop();
}
// stop() must wake a blocked waiter with -ECANCELED without letting it
// wait out its full duration
TEST(ReshardWait, stop_block)
{
  constexpr ceph::timespan short_duration = 10ms;
  constexpr ceph::timespan long_duration = 10s;
  RGWReshardWait long_waiter(long_duration);
  RGWReshardWait short_waiter(short_duration);
  const auto start = Clock::now();
  // block a second thread on the long waiter
  std::thread thread([&long_waiter] {
    EXPECT_EQ(-ECANCELED, long_waiter.wait(null_yield));
  });
  // meanwhile wait out the short duration on this thread
  EXPECT_EQ(0, short_waiter.wait(null_yield));
  long_waiter.stop(); // cancel long waiter
  thread.join();
  const ceph::timespan elapsed = Clock::now() - start;
  EXPECT_LE(short_duration, elapsed); // waited at least short_duration (10ms)
  EXPECT_GT(long_duration, elapsed); // waited less than 10s
  short_waiter.stop();
}
// wait() with a yield context suspends the coroutine instead of blocking;
// the io_context timer resumes it after the configured duration
TEST(ReshardWait, wait_yield)
{
  constexpr ceph::timespan wait_duration = 50ms;
  RGWReshardWait waiter(wait_duration);
  boost::asio::io_context context;
  spawn::spawn(context, [&] (yield_context yield) {
    EXPECT_EQ(0, waiter.wait(optional_yield{context, yield}));
  });
  const auto start = Clock::now();
  EXPECT_EQ(1u, context.poll()); // spawn
  EXPECT_FALSE(context.stopped()); // coroutine is suspended inside wait()
  EXPECT_EQ(1u, context.run_one()); // timeout
  EXPECT_TRUE(context.stopped());
  const ceph::timespan elapsed = Clock::now() - start;
  EXPECT_LE(wait_duration, elapsed); // waited at least wait_duration (50ms)
  waiter.stop();
}
// stop() must resume a suspended coroutine waiter with -ECANCELED without
// letting it wait out its full duration
TEST(ReshardWait, stop_yield)
{
  constexpr ceph::timespan short_duration = 50ms;
  constexpr ceph::timespan long_duration = 10s;
  RGWReshardWait long_waiter(long_duration);
  RGWReshardWait short_waiter(short_duration);
  boost::asio::io_context context;
  spawn::spawn(context,
    [&] (yield_context yield) {
      EXPECT_EQ(-ECANCELED, long_waiter.wait(optional_yield{context, yield}));
    });
  const auto start = Clock::now();
  EXPECT_EQ(1u, context.poll()); // spawn
  EXPECT_FALSE(context.stopped()); // coroutine suspended inside wait()
  EXPECT_EQ(0, short_waiter.wait(null_yield));
  long_waiter.stop(); // cancel long waiter
  EXPECT_EQ(1u, context.run_one_for(short_duration)); // timeout
  EXPECT_TRUE(context.stopped());
  const ceph::timespan elapsed = Clock::now() - start;
  EXPECT_LE(short_duration, elapsed); // waited at least short_duration (50ms)
  EXPECT_GT(long_duration, elapsed); // waited less than 10s
  short_waiter.stop();
}
// one stop() call must cancel every waiter at once: four blocking threads
// and four suspended coroutines all return -ECANCELED
TEST(ReshardWait, stop_multiple)
{
  constexpr ceph::timespan short_duration = 50ms;
  constexpr ceph::timespan long_duration = 10s;
  RGWReshardWait long_waiter(long_duration);
  RGWReshardWait short_waiter(short_duration);
  // spawn 4 threads
  std::vector<std::thread> threads;
  {
    auto sync_waiter([&long_waiter] {
      EXPECT_EQ(-ECANCELED, long_waiter.wait(null_yield));
    });
    threads.emplace_back(sync_waiter);
    threads.emplace_back(sync_waiter);
    threads.emplace_back(sync_waiter);
    threads.emplace_back(sync_waiter);
  }
  // spawn 4 coroutines
  boost::asio::io_context context;
  {
    auto async_waiter = [&] (yield_context yield) {
      EXPECT_EQ(-ECANCELED, long_waiter.wait(optional_yield{context, yield}));
    };
    spawn::spawn(context, async_waiter);
    spawn::spawn(context, async_waiter);
    spawn::spawn(context, async_waiter);
    spawn::spawn(context, async_waiter);
  }
  const auto start = Clock::now();
  EXPECT_EQ(4u, context.poll()); // spawn
  EXPECT_FALSE(context.stopped()); // all four coroutines suspended
  EXPECT_EQ(0, short_waiter.wait(null_yield));
  long_waiter.stop(); // cancel long waiter
  EXPECT_EQ(4u, context.run_for(short_duration)); // timeout
  EXPECT_TRUE(context.stopped());
  for (auto& thread : threads) {
    thread.join();
  }
  const ceph::timespan elapsed = Clock::now() - start;
  EXPECT_LE(short_duration, elapsed); // waited at least short_duration (50ms)
  EXPECT_GT(long_duration, elapsed); // waited less than 10s
  short_waiter.stop();
}
| 4,586 | 26.8 | 80 | cc |
null | ceph-main/src/test/rgw/test_rgw_string.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "rgw_string.h"
#include <gtest/gtest.h>
// fixtures covering every string-like argument type the helpers accept
const std::string abc{"abc"};
const char *def{"def"}; // const char*
char ghi_arr[] = {'g', 'h', 'i', '\0'};
char *ghi{ghi_arr}; // char* (mutable, NUL-terminated)
constexpr std::string_view jkl{"jkl", 3};
#define mno "mno" // string literal (char[4])
char pqr[] = {'p', 'q', 'r', '\0'}; // char array with explicit NUL
TEST(string_size, types)
{
  // every supported argument type reports a length of 3
  ASSERT_EQ(3u, string_size(abc));
  ASSERT_EQ(3u, string_size(def));
  ASSERT_EQ(3u, string_size(ghi));
  ASSERT_EQ(3u, string_size(jkl));
  ASSERT_EQ(3u, string_size(mno));
  ASSERT_EQ(3u, string_size(pqr));
  // string_view and string-literal sizes are computable at compile time
  constexpr auto view_size = string_size(jkl);
  ASSERT_EQ(3u, view_size);
  constexpr auto literal_size = string_size(mno);
  ASSERT_EQ(3u, literal_size);
  // a char array without a terminating NUL is rejected at runtime
  char unterminated[] = {'a', 'b', 'c'};
  ASSERT_THROW(string_size(unterminated), std::invalid_argument);
}
TEST(string_cat_reserve, types)
{
  // all six string-like fixture types concatenate in argument order
  const auto result = string_cat_reserve(abc, def, ghi, jkl, mno, pqr);
  ASSERT_EQ("abcdefghijklmnopqr", result);
}
TEST(string_cat_reserve, count)
{
  // zero, one, and two arguments
  const auto none = string_cat_reserve();
  ASSERT_EQ("", none);
  const auto one = string_cat_reserve(abc);
  ASSERT_EQ("abc", one);
  const auto two = string_cat_reserve(abc, def);
  ASSERT_EQ("abcdef", two);
}
TEST(string_join_reserve, types)
{
  // all six fixture types join with a ", " delimiter between each pair
  const auto joined = string_join_reserve(", ", abc, def, ghi, jkl, mno, pqr);
  ASSERT_EQ("abc, def, ghi, jkl, mno, pqr", joined);
}
TEST(string_join_reserve, count)
{
  // zero args -> empty; one arg -> no delimiter; two args -> one delimiter
  const auto none = string_join_reserve(", ");
  ASSERT_EQ("", none);
  const auto one = string_join_reserve(", ", abc);
  ASSERT_EQ("abc", one);
  const auto two = string_join_reserve(", ", abc, def);
  ASSERT_EQ("abc, def", two);
}
TEST(string_join_reserve, delim)
{
  // empty, single-char, and string delimiters are all accepted
  ASSERT_EQ("abcdef", string_join_reserve("", abc, def));
  ASSERT_EQ("abc def", string_join_reserve(' ', abc, def));
  ASSERT_EQ("abc\ndef", string_join_reserve('\n', abc, def));
  const std::string foo{"foo"};
  ASSERT_EQ("abcfoodef", string_join_reserve(foo, abc, def));
}
| 2,251 | 28.246753 | 76 | cc |
null | ceph-main/src/test/rgw/test_rgw_throttle.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "rgw_aio_throttle.h"
#include <optional>
#include <thread>
#include "include/scope_guard.h"
#include <spawn/spawn.hpp>
#include <gtest/gtest.h>
// build a raw object reference in the shared test pool
static rgw_raw_obj make_obj(const std::string& oid)
{
  rgw_raw_obj obj = {{"testpool"}, oid};
  return obj;
}
namespace rgw {
// Holds a pending aio op so the test can complete it manually; if still
// pending when the scope exits, the destructor completes it with -ECANCELED.
struct scoped_completion {
  Aio* aio = nullptr;        // set when the throttle invokes the handler
  AioResult* result = nullptr;
  ~scoped_completion() { if (aio) { complete(-ECANCELED); } }
  void complete(int r) {
    result->result = r;
    aio->put(*result);  // hand the result back to the throttle
    aio = nullptr;      // mark done so the destructor won't complete twice
  }
};
// completion handler that parks the op inside the given scoped_completion
auto wait_on(scoped_completion& c) {
  return [&c] (Aio* aio, AioResult& r) {
    c.aio = aio;
    c.result = &r;
  };
}
// completion handler that finishes the op asynchronously once 'duration'
// has elapsed on the given io_context
auto wait_for(boost::asio::io_context& context, ceph::timespan duration) {
  return [&context, duration] (Aio* aio, AioResult& r) {
    using Clock = ceph::coarse_mono_clock;
    using Timer = boost::asio::basic_waitable_timer<Clock>;
    auto t = std::make_unique<Timer>(context);
    t->expires_after(duration);
    // move the timer into its own callback so it stays alive until it fires;
    // the object pointer for async_wait is evaluated before the move
    t->async_wait([aio, &r, t=std::move(t)] (boost::system::error_code ec) {
      if (ec != boost::asio::error::operation_aborted) {
        aio->put(r);
      }
    });
  };
}
// up to 'window' concurrent ops must be admitted without blocking;
// the ops are only completed (with -ECANCELED) when their scope exits,
// so drain() afterwards must return all four
TEST(Aio_Throttle, NoThrottleUpToMax)
{
  BlockingAioThrottle throttle(4);
  auto obj = make_obj(__PRETTY_FUNCTION__);
  {
    scoped_completion ops[4];
    for (auto& op : ops) {
      auto c = throttle.get(obj, wait_on(op), 1, 0);
      EXPECT_TRUE(c.empty());
    }
    // no completions because no ops had to wait
    auto c5 = throttle.poll();
    EXPECT_TRUE(c5.empty());
  }
  auto completions = throttle.drain();
  ASSERT_EQ(4u, completions.size());
  for (auto& c : completions) {
    EXPECT_EQ(-ECANCELED, c.result);
  }
}
// a single op whose cost exceeds the whole window can never be admitted
// and must fail immediately with -EDEADLK
TEST(Aio_Throttle, CostOverWindow)
{
  BlockingAioThrottle throttle(4);
  auto obj = make_obj(__PRETTY_FUNCTION__);
  scoped_completion op;
  auto completions = throttle.get(obj, wait_on(op), 8, 0);
  ASSERT_EQ(1u, completions.size());
  EXPECT_EQ(-EDEADLK, completions.front().result);
}
// flood the throttle with more ops than the window allows; ops complete
// asynchronously on a timer thread, and the observed concurrency must
// never exceed the window
TEST(Aio_Throttle, ThrottleOverMax)
{
  constexpr uint64_t window = 4;
  BlockingAioThrottle throttle(window);
  auto obj = make_obj(__PRETTY_FUNCTION__);
  // issue 32 writes, and verify that max_outstanding <= window
  constexpr uint64_t total = 32;
  uint64_t max_outstanding = 0;
  uint64_t outstanding = 0;
  // timer thread
  boost::asio::io_context context;
  using Executor = boost::asio::io_context::executor_type;
  using Work = boost::asio::executor_work_guard<Executor>;
  std::optional<Work> work(context.get_executor());
  std::thread worker([&context] { context.run(); });
  // release the work guard and join the timer thread on scope exit
  auto g = make_scope_guard([&work, &worker] {
      work.reset();
      worker.join();
    });
  for (uint64_t i = 0; i < total; i++) {
    using namespace std::chrono_literals;
    auto c = throttle.get(obj, wait_for(context, 10ms), 1, 0);
    outstanding++;
    outstanding -= c.size();  // get() may return already-finished ops
    if (max_outstanding < outstanding) {
      max_outstanding = outstanding;
    }
  }
  auto c = throttle.drain();
  outstanding -= c.size();
  EXPECT_EQ(0u, outstanding);
  EXPECT_EQ(window, max_outstanding);
}
// same as CostOverWindow, but for the coroutine-based YieldingAioThrottle
TEST(Aio_Throttle, YieldCostOverWindow)
{
  auto obj = make_obj(__PRETTY_FUNCTION__);
  boost::asio::io_context context;
  spawn::spawn(context,
    [&] (yield_context yield) {
      YieldingAioThrottle throttle(4, context, yield);
      scoped_completion op;
      auto c = throttle.get(obj, wait_on(op), 8, 0);
      ASSERT_EQ(1u, c.size());
      EXPECT_EQ(-EDEADLK, c.front().result);
    });
  context.run();
}
// same as ThrottleOverMax, but driven from a coroutine; poll() runs the
// context until the coroutine blocks at the full window, then run()
// completes the timers and drains the rest
TEST(Aio_Throttle, YieldingThrottleOverMax)
{
  constexpr uint64_t window = 4;
  auto obj = make_obj(__PRETTY_FUNCTION__);
  // issue 32 writes, and verify that max_outstanding <= window
  constexpr uint64_t total = 32;
  uint64_t max_outstanding = 0;
  uint64_t outstanding = 0;
  boost::asio::io_context context;
  spawn::spawn(context,
    [&] (yield_context yield) {
      YieldingAioThrottle throttle(window, context, yield);
      for (uint64_t i = 0; i < total; i++) {
        using namespace std::chrono_literals;
        auto c = throttle.get(obj, wait_for(context, 10ms), 1, 0);
        outstanding++;
        outstanding -= c.size();
        if (max_outstanding < outstanding) {
          max_outstanding = outstanding;
        }
      }
      auto c = throttle.drain();
      outstanding -= c.size();
    });
  context.poll(); // run until we block
  EXPECT_EQ(window, outstanding);
  context.run();
  EXPECT_EQ(0u, outstanding);
  EXPECT_EQ(window, max_outstanding);
}
} // namespace rgw
| 5,124 | 26.116402 | 76 | cc |
null | ceph-main/src/test/rgw/test_rgw_url.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "rgw_url.h"
#include <string>
#include <gtest/gtest.h>
using namespace rgw;
// plain authority: host extracted, no userinfo present
TEST(TestURL, SimpleAuthority)
{
  std::string host, user, password;
  ASSERT_TRUE(parse_url_authority("http://example.com", host, user, password));
  ASSERT_TRUE(user.empty());
  ASSERT_TRUE(password.empty());
  EXPECT_STREQ("example.com", host.c_str());
}
// a trailing slash after the authority must not leak into the host
TEST(TestURL, SimpleAuthority_1)
{
  std::string host, user, password;
  ASSERT_TRUE(parse_url_authority("http://example.com/", host, user, password));
  ASSERT_TRUE(user.empty());
  ASSERT_TRUE(password.empty());
  EXPECT_STREQ("example.com", host.c_str());
}
// IPv4 literal as the authority
TEST(TestURL, IPAuthority)
{
  std::string host, user, password;
  ASSERT_TRUE(parse_url_authority("http://1.2.3.4", host, user, password));
  ASSERT_TRUE(user.empty());
  ASSERT_TRUE(password.empty());
  EXPECT_STREQ("1.2.3.4", host.c_str());
}
// IPv6 literal as the authority
TEST(TestURL, IPv6Authority)
{
  std::string host, user, password;
  ASSERT_TRUE(parse_url_authority("http://FE80:CD00:0000:0CDE:1257:0000:211E:729C",
                                  host, user, password));
  ASSERT_TRUE(user.empty());
  ASSERT_TRUE(password.empty());
  EXPECT_STREQ("FE80:CD00:0000:0CDE:1257:0000:211E:729C", host.c_str());
}
// user:password@host form must split into all three components
TEST(TestURL, AuthorityWithUserinfo)
{
  std::string host, user, password;
  ASSERT_TRUE(parse_url_authority("https://user:password@example.com",
                                  host, user, password));
  EXPECT_STREQ("example.com", host.c_str());
  EXPECT_STREQ("user", user.c_str());
  EXPECT_STREQ("password", password.c_str());
}
// an explicit port stays attached to the host component
TEST(TestURL, AuthorityWithPort)
{
  std::string host, user, password;
  ASSERT_TRUE(parse_url_authority("http://user:password@example.com:1234",
                                  host, user, password));
  EXPECT_STREQ("example.com:1234", host.c_str());
  EXPECT_STREQ("user", user.c_str());
  EXPECT_STREQ("password", password.c_str());
}
// a non-http scheme is accepted as long as the authority is valid
TEST(TestURL, DifferentSchema)
{
  std::string host, user, password;
  ASSERT_TRUE(parse_url_authority("kafka://example.com", host, user, password));
  ASSERT_TRUE(user.empty());
  ASSERT_TRUE(password.empty());
  EXPECT_STREQ("example.com", host.c_str());
}
// an underscore is not legal in a hostname, so parsing must fail
TEST(TestURL, InvalidHost)
{
  std::string host, user, password;
  ASSERT_FALSE(parse_url_authority("http://exa_mple.com", host, user, password));
}
// a URL carrying a path (amqp vhost) must still parse successfully
TEST(TestURL, WithPath)
{
  std::string host, user, password;
  ASSERT_TRUE(parse_url_authority("amqps://www.example.com:1234/vhost_name",
                                  host, user, password));
}
| 3,125 | 26.910714 | 77 | cc |
null | ceph-main/src/test/rgw/test_rgw_xml.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "rgw_xml.h"
#include <gtest/gtest.h>
#include <list>
#include <stdexcept>
// intrusive decoder target for:
//   <NameAndStatus><Name>..</Name><Status>..</Status></NameAndStatus>
struct NameAndStatus {
  // these are sub-tags
  std::string name;
  bool status;
  // intrusive XML decoding API
  bool decode_xml(XMLObj *obj) {
    if (!RGWXMLDecoder::decode_xml("Name", name, obj, true)) {
      // name is mandatory
      return false;
    }
    if (!RGWXMLDecoder::decode_xml("Status", status, obj, false)) {
      // status is optional and defaults to True
      status = true;
    }
    return true;
  }
};
// intrusive decoder target for an <Item>, including its optional
// Date/Comment attributes
struct Item {
  // these are sub-tags
  NameAndStatus name_and_status;
  int value;
  int extra_value;
  // these are attributes
  std::string date;
  std::string comment;
  // intrusive XML decoding API
  bool decode_xml(XMLObj *obj) {
    if (!RGWXMLDecoder::decode_xml("NameAndStatus", name_and_status, obj, true)) {
      // name amd status are mandatory
      return false;
    }
    if (!RGWXMLDecoder::decode_xml("Value", value, obj, true)) {
      // value is mandatory
      return false;
    }
    if (!RGWXMLDecoder::decode_xml("ExtraValue", extra_value, obj, false)) {
      // extra value is optional and defaults to zero
      extra_value = 0;
    }
    // date attribute is optional
    if (!obj->get_attr("Date", date)) {
      date = "no date";
    }
    // comment attribute is optional
    if (!obj->get_attr("Comment", comment)) {
      comment = "no comment";
    }
    return true;
  }
};
// top-level intrusive decoder target: a list of <Item> elements
struct Items {
  // these are sub-tags
  std::list<Item> item_list;
  // intrusive XML decoding API
  bool decode_xml(XMLObj *obj) {
    // collects every <Item> child; an empty list is accepted here
    do_decode_xml_obj(item_list, "Item", obj);
    return true;
  }
};
// in case of non-intrusive decoding class
// hierarchy should reflect the XML hierarchy
// non-intrusive decoder for <Name>: character data is appended straight
// into 'value' (it may arrive in multiple callbacks)
class NameXMLObj: public XMLObj {
protected:
  void xml_handle_data(const char *s, int len) override {
    // no need to set "data", setting "name" directly
    value.append(s, len);
  }
public:
  std::string value;
  ~NameXMLObj() override = default;
};
// non-intrusive decoder for <Status>: parses the text as a boolean.
// NOTE(review): std::boolalpha only accepts lowercase "true"/"false",
// while the test inputs use "False"; on mismatch the extraction fails and
// 'value' is left unchanged -- TODO confirm this is intended
class StatusXMLObj: public XMLObj {
protected:
  void xml_handle_data(const char *s, int len) override {
    std::istringstream is(std::string(s, len));
    is >> std::boolalpha >> value;
  }
public:
  bool value;
  ~StatusXMLObj() override = default;
};
// non-intrusive decoder for <NameAndStatus>: pulls its Name/Status
// children out of the parse tree when the element closes
class NameAndStatusXMLObj: public NameAndStatus, public XMLObj {
public:
  ~NameAndStatusXMLObj() override = default;
  bool xml_end(const char *el) override {
    XMLObjIter iter = find("Name");
    NameXMLObj* _name = static_cast<NameXMLObj*>(iter.get_next());
    if (!_name) {
      // name is mandatory
      return false;
    }
    name = _name->value;
    iter = find("Status");
    StatusXMLObj* _status = static_cast<StatusXMLObj*>(iter.get_next());
    if (!_status) {
      // status is optional and defaults to True
      status = true;
    } else {
      status = _status->value;
    }
    return true;
  }
};
// non-intrusive decoder for <Item>: validates the mandatory children,
// parses the numeric fields, and applies attribute defaults
class ItemXMLObj: public Item, public XMLObj {
public:
  ~ItemXMLObj() override = default;
  bool xml_end(const char *el) override {
    XMLObjIter iter = find("NameAndStatus");
    NameAndStatusXMLObj* _name_and_status = static_cast<NameAndStatusXMLObj*>(iter.get_next());
    if (!_name_and_status) {
      // name and status are mandatory
      return false;
    }
    // slice down to the plain data part of the decoded child
    name_and_status = *static_cast<NameAndStatus*>(_name_and_status);
    iter = find("Value");
    XMLObj* _value = iter.get_next();
    if (!_value) {
      // value is mandatory
      return false;
    }
    try {
      value = std::stoi(_value->get_data());
    } catch (const std::exception& e) {
      // non-numeric text fails the whole item
      return false;
    }
    iter = find("ExtraValue");
    XMLObj* _extra_value = iter.get_next();
    if (_extra_value) {
      // extra value is optional but cannot contain garbage
      try {
        extra_value = std::stoi(_extra_value->get_data());
      } catch (const std::exception& e) {
        return false;
      }
    } else {
      // if not set, it defaults to zero
      extra_value = 0;
    }
    // date attribute is optional
    if (!get_attr("Date", date)) {
      date = "no date";
    }
    // comment attribute is optional
    if (!get_attr("Comment", comment)) {
      comment = "no comment";
    }
    return true;
  }
};
// non-intrusive decoder for the <Items> root: requires at least one item
class ItemsXMLObj: public Items, public XMLObj {
public:
  ~ItemsXMLObj() override = default;
  bool xml_end(const char *el) override {
    XMLObjIter iter = find("Item");
    ItemXMLObj* item_ptr = static_cast<ItemXMLObj*>(iter.get_next());
    // mandatory to have at least one item
    bool item_found = false;
    while (item_ptr) {
      // copy only the plain data part into the result list
      item_list.push_back(*static_cast<Item*>(item_ptr));
      item_ptr = static_cast<ItemXMLObj*>(iter.get_next());
      item_found = true;
    }
    return item_found;
  }
};
// factory parser: allocates the matching XMLObj subclass per tag name;
// returns nullptr for tags it does not recognize (handled by the base)
class ItemsXMLParser: public RGWXMLParser {
  static const int MAX_NAME_LEN = 16;
public:
  XMLObj *alloc_obj(const char *el) override {
    if (strncmp(el, "Items", MAX_NAME_LEN) == 0) {
      // remember the root object so the test can inspect the results
      items = new ItemsXMLObj;
      return items;
    } else if (strncmp(el, "Item", MAX_NAME_LEN) == 0) {
      return new ItemXMLObj;
    } else if (strncmp(el, "NameAndStatus", MAX_NAME_LEN) == 0) {
      return new NameAndStatusXMLObj;
    } else if (strncmp(el, "Name", MAX_NAME_LEN) == 0) {
      return new NameXMLObj;
    } else if (strncmp(el, "Status", MAX_NAME_LEN) == 0) {
      return new StatusXMLObj;
    }
    return nullptr;
  }
  // this is a pointer to the parsed results
  ItemsXMLObj* items;
};
static const char* good_input = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
"<Items>"
"<Item><NameAndStatus><Name>hello</Name></NameAndStatus><Value>1</Value></Item>"
"<Item><ExtraValue>99</ExtraValue><NameAndStatus><Name>world</Name></NameAndStatus><Value>2</Value></Item>"
"<Item><Value>3</Value><NameAndStatus><Name>foo</Name></NameAndStatus></Item>"
"<Item><Value>4</Value><ExtraValue>42</ExtraValue><NameAndStatus><Name>bar</Name><Status>False</Status></NameAndStatus></Item>"
"</Items>";
static const char* expected_output = "((hello,1),1,0),((world,1),2,99),((foo,1),3,0),((bar,0),4,42),";
// render items as "((name,status),value,extra)," for comparison against
// the expected_output constants; bools render as 1/0 like the originals
std::string to_string(const Items& items) {
  std::string out;
  for (const auto& item : items.item_list) {
    out += "((" + item.name_and_status.name + "," +
           std::to_string(item.name_and_status.status) + ")," +
           std::to_string(item.value) + "," +
           std::to_string(item.extra_value) + "),";
  }
  return out;
}
// like to_string(), but with the date and comment attributes prepended
std::string to_string_with_attributes(const Items& items) {
  std::string out;
  for (const auto& item : items.item_list) {
    out += "(" + item.date + "," + item.comment + ",(" +
           item.name_and_status.name + "," +
           std::to_string(item.name_and_status.status) + ")," +
           std::to_string(item.value) + "," +
           std::to_string(item.extra_value) + "),";
  }
  return out;
}
// parse a complete, valid document and verify all four items decoded
TEST(TestParser, BasicParsing)
{
  ItemsXMLParser parser;
  ASSERT_TRUE(parser.init());
  ASSERT_TRUE(parser.parse(good_input, strlen(good_input), 1));
  ASSERT_EQ(parser.items->item_list.size(), 4U);
  ASSERT_STREQ(to_string(*parser.items).c_str(), expected_output);
}
static const char* malformed_input = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
"<Items>"
"<Item><NameAndStatus><Name>hello</Name></NameAndStatus><Value>1</Value><Item>"
"<Item><ExtraValue>99</ExtraValue><NameAndStatus><Name>world</Name></NameAndStatus><Value>2</Value></Item>"
"<Item><Value>3</Value><NameAndStatus><Name>foo</Name></NameAndStatus></Item>"
"<Item><Value>4</Value><ExtraValue>42</ExtraValue><NameAndStatus><Name>bar</Name><Status>False</Status></NameAndStatus></Item>"
"</Items>";
// malformed (unbalanced tags) input must be rejected by the parser
TEST(TestParser, MalformedInput)
{
  ItemsXMLParser parser;
  ASSERT_TRUE(parser.init());
  // bugfix: parse the malformed buffer itself; previously this parsed
  // good_input with strlen(malformed_input), which only "failed" because
  // the length truncated the valid document by one byte
  ASSERT_FALSE(parser.parse(malformed_input, strlen(malformed_input), 1));
}
static const char* missing_value_input = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
"<Items>"
"<Item><NameAndStatus><Name>hello</Name></NameAndStatus><Value>1</Value></Item>"
"<Item><ExtraValue>99</ExtraValue><NameAndStatus><Name>world</Name></NameAndStatus><Value>2</Value></Item>"
"<Item><Value>3</Value><NameAndStatus><Name>foo</Name></NameAndStatus></Item>"
"<Item><ExtraValue>42</ExtraValue><NameAndStatus><Name>bar</Name><Status>False</Status></NameAndStatus></Item>"
"</Items>";
// an <Item> without its mandatory <Value> must fail the parse
TEST(TestParser, MissingMandatoryTag)
{
  ItemsXMLParser parser;
  ASSERT_TRUE(parser.init());
  ASSERT_FALSE(parser.parse(missing_value_input, strlen(missing_value_input), 1));
}
static const char* unknown_tag_input = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
"<Items>"
"<Item><NameAndStatus><Name>hello</Name></NameAndStatus><Value>1</Value></Item>"
"<Item><ExtraValue>99</ExtraValue><NameAndStatus><Name>world</Name></NameAndStatus><Value>2</Value></Item>"
"<Item><Value>3</Value><NameAndStatus><Name>foo</Name></NameAndStatus><Kaboom>0</Kaboom></Item>"
"<Item><Value>4</Value><ExtraValue>42</ExtraValue><NameAndStatus><Name>bar</Name><Status>False</Status></NameAndStatus></Item>"
"<Kaboom>0</Kaboom>"
"</Items>";
// unknown tags (<Kaboom>) are ignored and do not affect the result
TEST(TestParser, UnknownTag)
{
  ItemsXMLParser parser;
  ASSERT_TRUE(parser.init());
  ASSERT_TRUE(parser.parse(unknown_tag_input, strlen(unknown_tag_input), 1));
  ASSERT_EQ(parser.items->item_list.size(), 4U);
  ASSERT_STREQ(to_string(*parser.items).c_str(), expected_output);
}
static const char* invalid_value_input = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
"<Items>"
"<Item><NameAndStatus><Name>hello</Name></NameAndStatus><Value>1</Value></Item>"
"<Item><ExtraValue>kaboom</ExtraValue><NameAndStatus><Name>world</Name></NameAndStatus><Value>2</Value></Item>"
"<Item><Value>3</Value><NameAndStatus><Name>foo</Name></NameAndStatus></Item>"
"<Item><Value>4</Value><ExtraValue>42</ExtraValue><NameAndStatus><Name>bar</Name><Status>False</Status></NameAndStatus></Item>"
"</Items>";
// non-numeric text inside <ExtraValue> must fail the parse
TEST(TestParser, InvalidValue)
{
  ItemsXMLParser parser;
  ASSERT_TRUE(parser.init());
  ASSERT_FALSE(parser.parse(invalid_value_input, strlen(invalid_value_input), 1));
}
static const char* good_input1 = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
"<Items>"
"<Item><NameAndStatus><Name>hello</Name></NameAndStatus><Value>1</Value></Item>"
"<Item><ExtraValue>99</ExtraValue><NameAndStatus><Name>world</Name>";
static const char* good_input2 = "</NameAndStatus><Value>2</Value></Item>"
"<Item><Value>3</Value><NameAndStatus><Name>foo</Name></NameAndStatus></Item>"
"<Item><Value>4</Value><ExtraValue>42</ExtraValue><NameAndStatus><Name>bar</Name><Status>False</Status></NameAndStatus></Item>"
"</Items>";
// feed the document in two chunks; only the last call passes done=1
TEST(TestParser, MultipleChunks)
{
  ItemsXMLParser parser;
  ASSERT_TRUE(parser.init());
  ASSERT_TRUE(parser.parse(good_input1, strlen(good_input1), 0));
  ASSERT_TRUE(parser.parse(good_input2, strlen(good_input2), 1));
  ASSERT_EQ(parser.items->item_list.size(), 4U);
  ASSERT_STREQ(to_string(*parser.items).c_str(), expected_output);
}
static const char* input_with_attributes = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
"<Items>"
"<Item Date=\"Tue Dec 27 17:21:29 2011\" Kaboom=\"just ignore\">"
"<NameAndStatus><Name>hello</Name></NameAndStatus><Value>1</Value>"
"</Item>"
"<Item Comment=\"hello world\">"
"<ExtraValue>99</ExtraValue><NameAndStatus><Name>world</Name></NameAndStatus><Value>2</Value>"
"</Item>"
"<Item><Value>3</Value><NameAndStatus><Name>foo</Name></NameAndStatus></Item>"
"<Item Comment=\"goodbye\" Date=\"Thu Feb 28 10:00:18 UTC 2019 \">"
"<Value>4</Value><ExtraValue>42</ExtraValue><NameAndStatus><Name>bar</Name><Status>False</Status></NameAndStatus>"
"</Item>"
"</Items>";
static const char* expected_output_with_attributes = "(Tue Dec 27 17:21:29 2011,no comment,(hello,1),1,0),"
"(no date,hello world,(world,1),2,99),"
"(no date,no comment,(foo,1),3,0),"
"(Thu Feb 28 10:00:18 UTC 2019 ,goodbye,(bar,0),4,42),";
// Date/Comment attributes are decoded; unknown attributes are ignored
TEST(TestParser, Attributes)
{
  ItemsXMLParser parser;
  ASSERT_TRUE(parser.init());
  ASSERT_TRUE(parser.parse(input_with_attributes, strlen(input_with_attributes), 1));
  ASSERT_EQ(parser.items->item_list.size(), 4U);
  ASSERT_STREQ(to_string_with_attributes(*parser.items).c_str(),
               expected_output_with_attributes);
}
// same document through the intrusive (decode_xml) API
TEST(TestDecoder, BasicParsing)
{
  RGWXMLDecoder::XMLParser parser;
  ASSERT_TRUE(parser.init());
  ASSERT_TRUE(parser.parse(good_input, strlen(good_input), 1));
  Items result;
  ASSERT_NO_THROW({
    ASSERT_TRUE(RGWXMLDecoder::decode_xml("Items", result, &parser, true));
  });
  ASSERT_EQ(result.item_list.size(), 4U);
  ASSERT_STREQ(to_string(result).c_str(), expected_output);
}
// malformed input must be rejected by the decoder's parser as well
// (test name keeps its historical misspelling so filters still match)
TEST(TestDecoder, MalfomedInput)
{
  RGWXMLDecoder::XMLParser parser;
  ASSERT_TRUE(parser.init());
  // bugfix: parse the malformed buffer itself; previously this parsed
  // good_input with strlen(malformed_input), which only failed because
  // the mismatched length truncated the valid document
  ASSERT_FALSE(parser.parse(malformed_input, strlen(malformed_input), 1));
}
// the document parses, but decoding must throw on the missing <Value>
TEST(TestDecoder, MissingMandatoryTag)
{
  RGWXMLDecoder::XMLParser parser;
  ASSERT_TRUE(parser.init());
  ASSERT_TRUE(parser.parse(missing_value_input, strlen(missing_value_input), 1));
  Items result;
  ASSERT_ANY_THROW({
    ASSERT_TRUE(RGWXMLDecoder::decode_xml("Items", result, &parser, true));
  });
}
// the document parses, but decoding must throw on non-numeric ExtraValue
TEST(TestDecoder, InvalidValue)
{
  RGWXMLDecoder::XMLParser parser;
  ASSERT_TRUE(parser.init());
  ASSERT_TRUE(parser.parse(invalid_value_input, strlen(invalid_value_input), 1));
  Items result;
  ASSERT_ANY_THROW({
    ASSERT_TRUE(RGWXMLDecoder::decode_xml("Items", result, &parser, true));
  });
}
// chunked input through the intrusive API; only the last call passes done=1
TEST(TestDecoder, MultipleChunks)
{
  RGWXMLDecoder::XMLParser parser;
  ASSERT_TRUE(parser.init());
  ASSERT_TRUE(parser.parse(good_input1, strlen(good_input1), 0));
  ASSERT_TRUE(parser.parse(good_input2, strlen(good_input2), 1));
  Items result;
  ASSERT_NO_THROW({
    ASSERT_TRUE(RGWXMLDecoder::decode_xml("Items", result, &parser, true));
  });
  ASSERT_EQ(result.item_list.size(), 4U);
  ASSERT_STREQ(to_string(result).c_str(), expected_output);
}
// attributes through the intrusive API, including defaulting behavior
TEST(TestDecoder, Attributes)
{
  RGWXMLDecoder::XMLParser parser;
  ASSERT_TRUE(parser.init());
  ASSERT_TRUE(parser.parse(input_with_attributes, strlen(input_with_attributes), 1));
  Items result;
  ASSERT_NO_THROW({
    ASSERT_TRUE(RGWXMLDecoder::decode_xml("Items", result, &parser, true));
  });
  ASSERT_EQ(result.item_list.size(), 4U);
  ASSERT_STREQ(to_string_with_attributes(result).c_str(),
               expected_output_with_attributes);
}
static const char* expected_xml_output = "<Items xmlns=\"https://www.ceph.com/doc/\">"
"<Item Order=\"0\"><NameAndStatus><Name>hello</Name><Status>True</Status></NameAndStatus><Value>0</Value></Item>"
"<Item Order=\"1\"><NameAndStatus><Name>hello</Name><Status>False</Status></NameAndStatus><Value>1</Value></Item>"
"<Item Order=\"2\"><NameAndStatus><Name>hello</Name><Status>True</Status></NameAndStatus><Value>2</Value></Item>"
"<Item Order=\"3\"><NameAndStatus><Name>hello</Name><Status>False</Status></NameAndStatus><Value>3</Value></Item>"
"<Item Order=\"4\"><NameAndStatus><Name>hello</Name><Status>True</Status></NameAndStatus><Value>4</Value></Item>"
"</Items>";
// encode a list with a per-item "Order" attribute and a namespace on the
// root tag, then compare the raw XML against the expected constant
TEST(TestEncoder, ListWithAttrsAndNS)
{
  XMLFormatter f;
  const auto array_size = 5;
  f.open_array_section_in_ns("Items", "https://www.ceph.com/doc/");
  for (auto i = 0; i < array_size; ++i) {
    FormatterAttrs item_attrs("Order", std::to_string(i).c_str(), NULL);
    f.open_object_section_with_attrs("Item", item_attrs);
    f.open_object_section("NameAndStatus");
    encode_xml("Name", "hello", &f);
    // alternate true/false statuses across the items
    encode_xml("Status", (i%2 == 0), &f);
    f.close_section();
    encode_xml("Value", i, &f);
    f.close_section();
  }
  f.close_section();
  std::stringstream ss;
  f.flush(ss);
  ASSERT_STREQ(ss.str().c_str(), expected_xml_output);
}
| 17,124 | 35.907328 | 156 | cc |
null | ceph-main/src/test/rgw/bucket_notification/__init__.py | import configparser
import os
def setup():
cfg = configparser.RawConfigParser()
try:
path = os.environ['BNTESTS_CONF']
except KeyError:
raise RuntimeError(
'To run tests, point environment '
+ 'variable BNTESTS_CONF to a config file.',
)
cfg.read(path)
if not cfg.defaults():
raise RuntimeError('Your config file is missing the DEFAULT section!')
if not cfg.has_section("s3 main"):
raise RuntimeError('Your config file is missing the "s3 main" section!')
defaults = cfg.defaults()
global default_host
default_host = defaults.get("host")
global default_port
default_port = int(defaults.get("port"))
global main_access_key
main_access_key = cfg.get('s3 main',"access_key")
global main_secret_key
main_secret_key = cfg.get('s3 main',"secret_key")
def get_config_host():
    """Return the endpoint host loaded by setup()."""
    # plain read of the module global; no 'global' statement needed
    return default_host
def get_config_port():
    """Return the endpoint port (int) loaded by setup()."""
    return default_port
def get_access_key():
    """Return the 's3 main' access key loaded by setup()."""
    return main_access_key
def get_secret_key():
    """Return the 's3 main' secret key loaded by setup()."""
    return main_secret_key
| 1,181 | 23.122449 | 80 | py |
null | ceph-main/src/test/rgw/bucket_notification/api.py | import logging
import ssl
import urllib
import hmac
import hashlib
import base64
import xmltodict
from http import client as http_client
from urllib import parse as urlparse
from time import gmtime, strftime
import boto3
from botocore.client import Config
import os
import subprocess
log = logging.getLogger('bucket_notification.tests')
NO_HTTP_BODY = ''
def put_object_tagging(conn, bucket_name, key, tags):
    """Upload a fixed-body object with the given tagging string via boto3."""
    endpoint = 'http://' + conn.host + ':' + str(conn.port)
    client = boto3.client('s3',
                          endpoint_url=endpoint,
                          aws_access_key_id=conn.aws_access_key_id,
                          aws_secret_access_key=conn.aws_secret_access_key)
    return client.put_object(Body='aaaaaaaaaaa', Bucket=bucket_name,
                             Key=key, Tagging=tags)
def make_request(conn, method, resource, parameters=None, sign_parameters=False, extra_parameters=None):
    """Send a raw, AWS-v2-signed HTTP request to radosgw.

    Covers the topics, notifications and subscriptions endpoints.
    Returns a (response-body string, HTTP status code) tuple.
    """
    url_params = ''
    if parameters is not None:
        url_params = urlparse.urlencode(parameters)
        # remove 'None' from keys with no values
        url_params = url_params.replace('=None', '')
        url_params = '?' + url_params
    if extra_parameters is not None:
        url_params = url_params + '&' + extra_parameters
    string_date = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
    # AWS v2 string-to-sign; query params are appended only when the
    # server is expected to include them in the signature as well
    string_to_sign = method + '\n\n\n' + string_date + '\n' + resource
    if sign_parameters:
        string_to_sign += url_params
    signature = base64.b64encode(hmac.new(conn.aws_secret_access_key.encode('utf-8'),
                                          string_to_sign.encode('utf-8'),
                                          hashlib.sha1).digest()).decode('ascii')
    headers = {'Authorization': 'AWS '+conn.aws_access_key_id+':'+signature,
               'Date': string_date,
               'Host': conn.host+':'+str(conn.port)}
    http_conn = http_client.HTTPConnection(conn.host, conn.port)
    if log.getEffectiveLevel() <= 10:
        # at DEBUG level, also dump the raw HTTP exchange
        http_conn.set_debuglevel(5)
    http_conn.request(method, resource+url_params, NO_HTTP_BODY, headers)
    response = http_conn.getresponse()
    data = response.read()
    status = response.status
    http_conn.close()
    return data.decode('utf-8'), status
def delete_all_objects(conn, bucket_name):
    """Delete every object in the bucket (one listing page, up to 1000 keys).

    Handles an already-empty bucket gracefully: 'Contents' is absent from
    the list_objects response in that case (previously this raised a
    KeyError). Returns the delete response, or None when the bucket was
    already empty.
    """
    client = boto3.client('s3',
                          endpoint_url='http://'+conn.host+':'+str(conn.port),
                          aws_access_key_id=conn.aws_access_key_id,
                          aws_secret_access_key=conn.aws_secret_access_key)
    listing = client.list_objects(Bucket=bucket_name)
    objects = [{'Key': key['Key']} for key in listing.get('Contents', [])]
    if not objects:
        return None
    # delete objects from the bucket
    return client.delete_objects(Bucket=bucket_name,
                                 Delete={'Objects': objects})
class PSTopicS3:
    """class to set/list/get/delete a topic
    POST ?Action=CreateTopic&Name=<topic name>[&OpaqueData=<data>[&push-endpoint=<endpoint>&[<arg1>=<value1>...]]]
    POST ?Action=ListTopics
    POST ?Action=GetTopic&TopicArn=<topic-arn>
    POST ?Action=DeleteTopic&TopicArn=<topic-arn>
    """
    def __init__(self, conn, topic_name, region, endpoint_args=None, opaque_data=None):
        self.conn = conn
        self.topic_name = topic_name.strip()
        assert self.topic_name
        self.topic_arn = ''
        self.attributes = {}
        if endpoint_args is not None:
            # endpoint_args is a url-encoded string of name/value pairs
            self.attributes = {nvp[0] : nvp[1] for nvp in urlparse.parse_qsl(endpoint_args, keep_blank_values=True)}
        if opaque_data is not None:
            self.attributes['OpaqueData'] = opaque_data
        protocol = 'https' if conn.is_secure else 'http'
        self.client = boto3.client('sns',
                                   endpoint_url=protocol+'://'+conn.host+':'+str(conn.port),
                                   aws_access_key_id=conn.aws_access_key_id,
                                   aws_secret_access_key=conn.aws_secret_access_key,
                                   region_name=region,
                                   verify='./cert.pem')

    def _signed_post(self, parameters):
        """sign (AWS v2 auth) and POST the given SNS action parameters;
        return (parsed-XML dict, HTTP status). Shared by get_config()
        and get_list(), which previously duplicated this code."""
        body = urlparse.urlencode(parameters)
        string_date = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
        content_type = 'application/x-www-form-urlencoded; charset=utf-8'
        resource = '/'
        method = 'POST'
        string_to_sign = method + '\n\n' + content_type + '\n' + string_date + '\n' + resource
        log.debug('StringTosign: %s', string_to_sign)
        signature = base64.b64encode(hmac.new(self.conn.aws_secret_access_key.encode('utf-8'),
                                              string_to_sign.encode('utf-8'),
                                              hashlib.sha1).digest()).decode('ascii')
        headers = {'Authorization': 'AWS '+self.conn.aws_access_key_id+':'+signature,
                   'Date': string_date,
                   'Host': self.conn.host+':'+str(self.conn.port),
                   'Content-Type': content_type}
        if self.conn.is_secure:
            http_conn = http_client.HTTPSConnection(self.conn.host, self.conn.port,
                                                    context=ssl.create_default_context(cafile='./cert.pem'))
        else:
            http_conn = http_client.HTTPConnection(self.conn.host, self.conn.port)
        http_conn.request(method, resource, body, headers)
        response = http_conn.getresponse()
        data = response.read()
        status = response.status
        http_conn.close()
        return xmltodict.parse(data), status

    def get_config(self):
        """get topic info"""
        return self._signed_post({'Action': 'GetTopic', 'TopicArn': self.topic_arn})

    def set_config(self):
        """set topic"""
        result = self.client.create_topic(Name=self.topic_name, Attributes=self.attributes)
        self.topic_arn = result['TopicArn']
        return self.topic_arn

    def del_config(self, topic_arn=None):
        """delete topic"""
        result = self.client.delete_topic(TopicArn=(topic_arn if topic_arn is not None else self.topic_arn))
        return result['ResponseMetadata']['HTTPStatusCode']

    def get_list(self):
        """list all topics"""
        # note that boto3 supports list_topics(), however, the result only show ARNs
        return self._signed_post({'Action': 'ListTopics'})
class PSNotificationS3:
    """class to set/get/delete an S3 notification
    PUT /<bucket>?notification
    GET /<bucket>?notification[=<notification>]
    DELETE /<bucket>?notification[=<notification>]
    """
    def __init__(self, conn, bucket_name, topic_conf_list):
        self.conn = conn
        assert bucket_name.strip()
        self.bucket_name = bucket_name
        self.resource = '/'+bucket_name
        self.topic_conf_list = topic_conf_list
        self.client = boto3.client('s3',
                                   endpoint_url='http://'+conn.host+':'+str(conn.port),
                                   aws_access_key_id=conn.aws_access_key_id,
                                   aws_secret_access_key=conn.aws_secret_access_key)
    def send_request(self, method, parameters=None):
        """send a raw signed request to radosgw for this bucket resource"""
        return make_request(self.conn, method, self.resource,
                            parameters=parameters, sign_parameters=True)
    def get_config(self, notification=None):
        """get notification info; returns (response, status).
        Without a name, uses boto3 to fetch the whole configuration;
        with a name, uses the raw signed-request path."""
        parameters = None
        if notification is None:
            response = self.client.get_bucket_notification_configuration(Bucket=self.bucket_name)
            status = response['ResponseMetadata']['HTTPStatusCode']
            return response, status
        parameters = {'notification': notification}
        response, status = self.send_request('GET', parameters=parameters)
        dict_response = xmltodict.parse(response)
        return dict_response, status
    def set_config(self):
        """set the bucket notification from self.topic_conf_list"""
        response = self.client.put_bucket_notification_configuration(Bucket=self.bucket_name,
                                                                     NotificationConfiguration={
                                                                         'TopicConfigurations': self.topic_conf_list
                                                                     })
        status = response['ResponseMetadata']['HTTPStatusCode']
        return response, status
    def del_config(self, notification=None):
        """delete a notification by name (or all when None)"""
        parameters = {'notification': notification}
        return self.send_request('DELETE', parameters)
test_path = os.path.normpath(os.path.dirname(os.path.realpath(__file__))) + '/../'
def bash(cmd, **kwargs):
    """Run a command, returning (stdout decoded as utf-8, exit status)."""
    log.debug('running command: %s', ' '.join(cmd))
    kwargs['stdout'] = subprocess.PIPE
    proc = subprocess.Popen(cmd, **kwargs)
    stdout = proc.communicate()[0]
    return (stdout.decode('utf-8'), proc.returncode)
def admin(args, **kwargs):
    """Run a radosgw-admin command through the test wrapper script."""
    wrapper = [test_path + 'test-rgw-call.sh', 'call_rgw_admin', 'noname']
    return bash(wrapper + args, **kwargs)
# ==== file boundary: ceph-main/src/test/rgw/bucket_notification/kafka-security.sh ====
FQDN=localhost
# Generate a self-signed CA plus a signed server certificate for testing
# Kafka over SSL; results go into a JKS key store and trust store.
# NOTE(review): relies on $FQDN being set earlier in the script — confirm.
KEYFILE=server.keystore.jks
TRUSTFILE=server.truststore.jks
CAFILE=y-ca.crt
CAKEYFILE=y-ca.key
REQFILE=$FQDN.req
CERTFILE=$FQDN.crt
MYPW=mypassword
# ~100 years, so test certificates never expire mid-run
VALIDITY=36500
# remove artifacts from any previous run
rm -f $KEYFILE
rm -f $TRUSTFILE
rm -f $CAFILE
rm -f $REQFILE
rm -f $CERTFILE
echo "########## create the request in key store '$KEYFILE'"
keytool -keystore $KEYFILE -alias localhost \
    -dname "CN=$FQDN, OU=Michigan Engineering, O=Red Hat Inc, \
    L=Ann Arbor, ST=Michigan, C=US" \
    -storepass $MYPW -keypass $MYPW \
    -validity $VALIDITY -genkey -keyalg RSA -ext SAN=DNS:"$FQDN"
echo "########## create the CA '$CAFILE'"
openssl req -new -nodes -x509 -keyout $CAKEYFILE -out $CAFILE \
    -days $VALIDITY -subj \
    '/C=US/ST=Michigan/L=Ann Arbor/O=Red Hat Inc/OU=Michigan Engineering/CN=yuval-1'
echo "########## store the CA in trust store '$TRUSTFILE'"
keytool -keystore $TRUSTFILE -storepass $MYPW -alias CARoot \
    -noprompt -importcert -file $CAFILE
echo "########## create a request '$REQFILE' for signing in key store '$KEYFILE'"
keytool -storepass $MYPW -keystore $KEYFILE \
    -alias localhost -certreq -file $REQFILE
echo "########## sign and create certificate '$CERTFILE'"
openssl x509 -req -CA $CAFILE -CAkey $CAKEYFILE -CAcreateserial \
    -days $VALIDITY \
    -in $REQFILE -out $CERTFILE
echo "########## store CA '$CAFILE' in key store '$KEYFILE'"
keytool -storepass $MYPW -keystore $KEYFILE -alias CARoot \
    -noprompt -importcert -file $CAFILE
echo "########## store certificate '$CERTFILE' in key store '$KEYFILE'"
keytool -storepass $MYPW -keystore $KEYFILE -alias localhost \
    -import -file $CERTFILE
# ==== file boundary: ceph-main/src/test/rgw/bucket_notification/setup.py ====
#!/usr/bin/python
# Packaging stub so the bucket-notification test suite can be pip-installed
# (e.g. by teuthology) together with its boto dependencies.
from setuptools import setup, find_packages
setup(
    name='bn_tests',
    version='0.0.1',
    packages=find_packages(),
    author='Kalpesh Pandya',
    author_email='kapandya@redhat.com',
    description='Bucket Notification compatibility tests',
    license='MIT',
    keywords='bn web testing',
    install_requires=[
        'boto >=2.0b4',
        'boto3 >=1.0.0'
    ],
    )
# ==== file boundary: ceph-main/src/test/rgw/bucket_notification/test_bn.py ====
import logging
import json
import tempfile
import random
import threading
import subprocess
import socket
import time
import os
import string
import boto
from botocore.exceptions import ClientError
from http import server as http_server
from random import randint
import hashlib
from nose.plugins.attrib import attr
import boto3
import datetime
from cloudevents.http import from_http
from dateutil import parser
from boto.s3.connection import S3Connection
from . import(
get_config_host,
get_config_port,
get_access_key,
get_secret_key
)
from .api import PSTopicS3, \
PSNotificationS3, \
delete_all_objects, \
put_object_tagging, \
admin
from nose import SkipTest
from nose.tools import assert_not_equal, assert_equal, assert_in
import boto.s3.tagging
# configure logging for the tests module
log = logging.getLogger(__name__)
# suffixes appended to a bucket name to derive topic/notification names
TOPIC_SUFFIX = "_topic"
NOTIFICATION_SUFFIX = "_notif"
# per-run counter and random prefix combined by gen_bucket_name() to make
# bucket names unique across test runs
num_buckets = 0
run_prefix=''.join(random.choice(string.ascii_lowercase) for _ in range(6))
def gen_bucket_name():
    """Return a bucket name unique within and across runs: random run prefix
    plus a monotonically increasing counter."""
    global num_buckets
    num_buckets += 1
    return '-'.join([run_prefix, str(num_buckets)])
def set_contents_from_string(key, content):
    """Best-effort upload of 'content' into the boto key; prints on failure
    instead of raising (used from worker threads)."""
    try:
        key.set_contents_from_string(content)
    except Exception as err:
        print('Error: ' + str(err))
class HTTPPostHandler(http_server.BaseHTTPRequestHandler):
    """HTTP POST handler storing the received notification events in its http server"""
    def do_POST(self):
        """Read the POSTed JSON body, validate it (cloudevents mode only),
        store it on the owning server and acknowledge the request."""
        content_length = int(self.headers['Content-Length'])
        body = self.rfile.read(content_length)
        if self.server.cloudevents:
            # in cloudevents binary mode, cross-check the cloudevents envelope
            # headers against the S3 record carried in the body
            event = from_http(self.headers, body)
            record = json.loads(body)['Records'][0]
            assert_equal(event['specversion'], '1.0')
            assert_equal(event['id'], record['responseElements']['x-amz-request-id'] + '.' + record['responseElements']['x-amz-id-2'])
            assert_equal(event['source'], 'ceph:s3.' + record['awsRegion'] + '.' + record['s3']['bucket']['name'])
            assert_equal(event['type'], 'com.amazonaws.' + record['eventName'])
            assert_equal(event['datacontenttype'], 'application/json')
            assert_equal(event['subject'], record['s3']['object']['key'])
            assert_equal(parser.parse(event['time']), parser.parse(record['eventTime']))
        log.info('HTTP Server (%d) received event: %s', self.server.worker_id, str(body))
        self.server.append(json.loads(body))
        # honor "Expect: 100-continue" clients; otherwise plain OK
        if self.headers.get('Expect') == '100-continue':
            self.send_response(100)
        else:
            self.send_response(200)
        # optional artificial delay to simulate a slow endpoint
        if self.server.delay > 0:
            time.sleep(self.server.delay)
        self.end_headers()
class HTTPServerWithEvents(http_server.HTTPServer):
    """HTTP server that accumulates the events received by its handler."""
    def __init__(self, addr, handler, worker_id, delay=0, cloudevents=False):
        # bind_and_activate=False: a shared, pre-bound socket is injected later
        http_server.HTTPServer.__init__(self, addr, handler, False)
        self.worker_id = worker_id
        self.delay = delay
        self.cloudevents = cloudevents
        self.events = []

    def append(self, event):
        """Record one received event."""
        self.events.append(event)
class HTTPServerThread(threading.Thread):
    """Thread running one HTTP server; all threads reuse the same listening socket."""
    def __init__(self, i, sock, addr, delay=0, cloudevents=False):
        threading.Thread.__init__(self)
        self.i = i
        self.daemon = True
        self.httpd = HTTPServerWithEvents(addr, HTTPPostHandler, i, delay, cloudevents)
        # inject the shared, already-bound socket
        self.httpd.socket = sock
        # prevent the HTTP server from re-binding or closing the shared socket.
        # FIX: the original assigned 'self.server_close' (an attribute of the
        # thread, not the server) and used a 'lambda self:' signature that
        # would raise TypeError when called as an instance attribute; patch
        # both methods on the server object with a zero-argument no-op.
        self.httpd.server_bind = self.httpd.server_close = lambda: None
        self.start()

    def run(self):
        """Serve requests until shutdown() is called from close()."""
        try:
            log.info('HTTP Server (%d) started on: %s', self.i, self.httpd.server_address)
            self.httpd.serve_forever()
            log.info('HTTP Server (%d) ended', self.i)
        except Exception as error:
            # could happen if the server r/w to a closing socket during shutdown
            log.info('HTTP Server (%d) ended unexpectedly: %s', self.i, str(error))

    def close(self):
        """Stop the serve_forever() loop."""
        self.httpd.shutdown()

    def get_events(self):
        """Return the events collected so far by this worker's server."""
        return self.httpd.events

    def reset_events(self):
        """Discard all collected events."""
        self.httpd.events = []
class StreamingHTTPServer:
    """multi-threaded http server class also holding list of events received into the handler
    each thread has its own server, and all servers share the same socket"""
    def __init__(self, host, port, num_workers=100, delay=0, cloudevents=False):
        addr = (host, port)
        # one shared listening socket; each worker thread accepts from it
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind(addr)
        self.sock.listen(num_workers)
        self.workers = [HTTPServerThread(i, self.sock, addr, delay, cloudevents) for i in range(num_workers)]
    # NOTE(review): 'expected_sizes={}' is a mutable default argument; it is
    # only read here, but callers should not rely on mutating it.
    def verify_s3_events(self, keys, exact_match=False, deletions=False, expected_sizes={}):
        """verify stored s3 records agains a list of keys"""
        # gather (and clear) events from all workers before verification
        events = []
        for worker in self.workers:
            events += worker.get_events()
            worker.reset_events()
        verify_s3_records_by_elements(events, keys, exact_match=exact_match, deletions=deletions, expected_sizes=expected_sizes)
    def verify_events(self, keys, exact_match=False, deletions=False):
        """verify stored events agains a list of keys"""
        events = []
        for worker in self.workers:
            events += worker.get_events()
            worker.reset_events()
        verify_events_by_elements(events, keys, exact_match=exact_match, deletions=deletions)
    def get_and_reset_events(self):
        # drain all workers and return the combined event list
        events = []
        for worker in self.workers:
            events += worker.get_events()
            worker.reset_events()
        return events
    def close(self):
        """close all workers in the http server and wait for it to finish"""
        # make sure that the shared socket is closed
        # this is needed in case that one of the threads is blocked on the socket
        self.sock.shutdown(socket.SHUT_RDWR)
        self.sock.close()
        # wait for server threads to finish
        for worker in self.workers:
            worker.close()
            worker.join()
# AMQP endpoint functions
class AMQPReceiver(object):
    """class for receiving and storing messages on a topic from the AMQP broker"""
    def __init__(self, exchange, topic, external_endpoint_address=None, ca_location=None):
        import pika
        import ssl
        # SSL (port 5671) when a CA file is supplied, plaintext (5672) otherwise
        if ca_location:
            ssl_context = ssl.create_default_context()
            ssl_context.load_verify_locations(cafile=ca_location)
            ssl_options = pika.SSLOptions(ssl_context)
            rabbitmq_port = 5671
        else:
            rabbitmq_port = 5672
            ssl_options = None
        if external_endpoint_address:
            if ssl_options:
                # this is currently not working due to: https://github.com/pika/pika/issues/1192
                params = pika.URLParameters(external_endpoint_address, ssl_options=ssl_options)
            else:
                params = pika.URLParameters(external_endpoint_address)
        else:
            hostname = get_ip()
            params = pika.ConnectionParameters(host=hostname, port=rabbitmq_port, ssl_options=ssl_options)
        # connect with retries, since the broker may still be starting up
        remaining_retries = 10
        while remaining_retries > 0:
            try:
                connection = pika.BlockingConnection(params)
                break
            except Exception as error:
                remaining_retries -= 1
                print('failed to connect to rabbitmq (remaining retries '
                    + str(remaining_retries) + '): ' + str(error))
                time.sleep(1)
        if remaining_retries == 0:
            raise Exception('failed to connect to rabbitmq - no retries left')
        # declare a durable topic exchange with an exclusive auto-named queue
        # bound on the given routing key, and start consuming with auto-ack
        self.channel = connection.channel()
        self.channel.exchange_declare(exchange=exchange, exchange_type='topic', durable=True)
        result = self.channel.queue_declare('', exclusive=True)
        queue_name = result.method.queue
        self.channel.queue_bind(exchange=exchange, queue=queue_name, routing_key=topic)
        self.channel.basic_consume(queue=queue_name,
                                   on_message_callback=self.on_message,
                                   auto_ack=True)
        self.events = []
        self.topic = topic

    def on_message(self, ch, method, properties, body):
        """callback invoked when a new message arrive on the topic"""
        log.info('AMQP received event for topic %s:\n %s', self.topic, body)
        self.events.append(json.loads(body))

    # TODO create a base class for the AMQP and HTTP cases
    def verify_s3_events(self, keys, exact_match=False, deletions=False, expected_sizes={}):
        """verify stored s3 records agains a list of keys"""
        verify_s3_records_by_elements(self.events, keys, exact_match=exact_match, deletions=deletions, expected_sizes=expected_sizes)
        self.events = []

    def verify_events(self, keys, exact_match=False, deletions=False):
        """verify stored events agains a list of keys"""
        verify_events_by_elements(self.events, keys, exact_match=exact_match, deletions=deletions)
        self.events = []

    def get_and_reset_events(self):
        # hand the accumulated events to the caller and start fresh
        tmp = self.events
        self.events = []
        return tmp
def amqp_receiver_thread_runner(receiver):
    """Entry point of the AMQP consumer thread: block on the channel until
    consumption is stopped or fails."""
    try:
        log.info('AMQP receiver started')
        receiver.channel.start_consuming()
    except Exception as err:
        log.info('AMQP receiver ended unexpectedly: %s', str(err))
    else:
        log.info('AMQP receiver ended')
def create_amqp_receiver_thread(exchange, topic, external_endpoint_address=None, ca_location=None):
    """Build an AMQPReceiver plus the (not yet started) daemon thread driving it."""
    receiver = AMQPReceiver(exchange, topic, external_endpoint_address, ca_location)
    worker = threading.Thread(target=amqp_receiver_thread_runner, args=(receiver,))
    worker.daemon = True
    return worker, receiver
def stop_amqp_receiver(receiver, task):
    """Ask the AMQP consumer to stop, then wait (bounded) for its thread."""
    try:
        receiver.channel.stop_consuming()
        log.info('stopping AMQP receiver')
    except Exception as err:
        log.info('failed to gracefuly stop AMQP receiver: %s', str(err))
    # bounded join so a stuck consumer cannot hang the test
    task.join(5)
def init_rabbitmq():
    """ start a rabbitmq broker; returns the Popen handle or None on failure """
    # NOTE(review): 'hostname' is computed but never used here — confirm intent
    hostname = get_ip()
    try:
        # first try to stop any existing process
        subprocess.call(['sudo', 'rabbitmqctl', 'stop'])
        time.sleep(5)
        proc = subprocess.Popen(['sudo', '--preserve-env=RABBITMQ_CONFIG_FILE', 'rabbitmq-server'])
    except Exception as error:
        log.info('failed to execute rabbitmq-server: %s', str(error))
        print('failed to execute rabbitmq-server: %s' % str(error))
        return None
    # TODO add rabbitmq checkpoint instead of sleep
    time.sleep(5)
    return proc
def clean_rabbitmq(proc):
    """Stop the rabbitmq broker started by init_rabbitmq(); best-effort.

    :param proc: the subprocess.Popen handle returned by init_rabbitmq()
    """
    try:
        subprocess.call(['sudo', 'rabbitmqctl', 'stop'])
        time.sleep(5)
        proc.terminate()
    except Exception:
        # FIX: was a bare 'except:' which also swallowed KeyboardInterrupt/
        # SystemExit; keep the deliberate best-effort behavior for real errors
        log.info('rabbitmq server already terminated')
def verify_events_by_elements(events, keys, exact_match=False, deletions=False):
    """ verify there is at least one event per element """
    wanted_type = 'OBJECT_DELETE' if deletions else 'OBJECT_CREATE'

    # does this single event match the given key and the wanted event type?
    def event_matches(event, key):
        return event['info']['bucket']['name'] == key.bucket.name and \
               event['info']['key']['name'] == key.name and \
               event['event'] == wanted_type

    for key in keys:
        # 'events' is either a list of batches or a single batch dict
        batches = events if type(events) is list else [events]
        key_found = any(event_matches(event, key)
                        for batch in batches
                        for event in batch['events'])
        if not key_found:
            err = 'no ' + ('deletion' if deletions else 'creation') + ' event found for key: ' + str(key)
            log.error(events)
            assert False, err
    if not len(events) == len(keys):
        err = 'superfluous events are found'
        log.debug(err)
        if exact_match:
            log.error(events)
            assert False, err
META_PREFIX = 'x-amz-meta-'
def verify_s3_records_by_elements(records, keys, exact_match=False, deletions=False, expected_sizes={}, etags=[]):
    """ verify there is at least one record per element """
    # 'records' is either a list of record batches or a single batch dict;
    # for each key a matching ObjectCreated/ObjectRemoved record must exist
    err = ''
    for key in keys:
        key_found = False
        object_size = 0
        if type(records) is list:
            for record_list in records:
                if key_found:
                    break
                for record in record_list['Records']:
                    assert_in('eTag', record['s3']['object'])
                    if record['s3']['bucket']['name'] == key.bucket.name and \
                        record['s3']['object']['key'] == key.name:
                        # Assertion Error needs to be fixed
                        #assert_equal(key.etag[1:-1], record['s3']['object']['eTag'])
                        if etags:
                            assert_in(key.etag[1:-1], etags)
                        if len(record['s3']['object']['metadata']) > 0:
                            for meta in record['s3']['object']['metadata']:
                                assert(meta['key'].startswith(META_PREFIX))
                        if deletions and record['eventName'].startswith('ObjectRemoved'):
                            key_found = True
                            object_size = record['s3']['object']['size']
                            break
                        elif not deletions and record['eventName'].startswith('ObjectCreated'):
                            key_found = True
                            object_size = record['s3']['object']['size']
                            break
        else:
            for record in records['Records']:
                assert_in('eTag', record['s3']['object'])
                if record['s3']['bucket']['name'] == key.bucket.name and \
                    record['s3']['object']['key'] == key.name:
                    # NOTE(review): this branch compares key.etag including its
                    # surrounding quotes, unlike the list branch above which
                    # strips them ([1:-1]) — confirm which form is intended
                    assert_equal(key.etag, record['s3']['object']['eTag'])
                    if etags:
                        assert_in(key.etag[1:-1], etags)
                    if len(record['s3']['object']['metadata']) > 0:
                        for meta in record['s3']['object']['metadata']:
                            assert(meta['key'].startswith(META_PREFIX))
                    if deletions and record['eventName'].startswith('ObjectRemoved'):
                        key_found = True
                        object_size = record['s3']['object']['size']
                        break
                    elif not deletions and record['eventName'].startswith('ObjectCreated'):
                        key_found = True
                        object_size = record['s3']['object']['size']
                        break
        if not key_found:
            err = 'no ' + ('deletion' if deletions else 'creation') + ' event found for key: ' + str(key)
            assert False, err
        elif expected_sizes:
            # also check the reported object size when expectations are given
            assert_equal(object_size, expected_sizes.get(key.name))
    # extra records are only fatal when the caller demands an exact match
    if not len(records) == len(keys):
        err = 'superfluous records are found'
        log.warning(err)
        if exact_match:
            for record_list in records:
                for record in record_list['Records']:
                    log.error(str(record['s3']['bucket']['name']) + ',' + str(record['s3']['object']['key']))
            assert False, err
# Kafka endpoint functions
# hostname of the kafka broker used by KafkaReceiver
kafka_server = 'localhost'
class KafkaReceiver(object):
    """class for receiving and storing messages on a topic from the kafka broker"""
    def __init__(self, topic, security_type):
        from kafka import KafkaConsumer
        remaining_retries = 10
        port = 9092
        # anything other than PLAINTEXT is treated as SSL on port 9093
        if security_type != 'PLAINTEXT':
            security_type = 'SSL'
            port = 9093
        # connect with retries, since the broker may still be starting up
        while remaining_retries > 0:
            try:
                self.consumer = KafkaConsumer(topic,
                        bootstrap_servers = kafka_server+':'+str(port),
                        security_protocol=security_type,
                        consumer_timeout_ms=16000)
                print('Kafka consumer created on topic: '+topic)
                break
            except Exception as error:
                remaining_retries -= 1
                print('failed to connect to kafka (remaining retries '
                    + str(remaining_retries) + '): ' + str(error))
                time.sleep(1)
        if remaining_retries == 0:
            raise Exception('failed to connect to kafka - no retries left')
        self.events = []
        self.topic = topic
        # polled by the consumer thread; set True by stop_kafka_receiver()
        self.stop = False

    def verify_s3_events(self, keys, exact_match=False, deletions=False, etags=[]):
        """verify stored s3 records agains a list of keys"""
        verify_s3_records_by_elements(self.events, keys, exact_match=exact_match, deletions=deletions, etags=etags)
        self.events = []
def kafka_receiver_thread_runner(receiver):
    """Consumer-thread entry point: drain the Kafka topic into receiver.events
    until receiver.stop becomes True."""
    try:
        log.info('Kafka receiver started')
        print('Kafka receiver started')
        while not receiver.stop:
            # the consumer iterator yields until consumer_timeout_ms expires
            for msg in receiver.consumer:
                receiver.events.append(json.loads(msg.value))
            time.sleep(0.1)
        log.info('Kafka receiver ended')
        print('Kafka receiver ended')
    except Exception as err:
        log.info('Kafka receiver ended unexpectedly: %s', str(err))
        print('Kafka receiver ended unexpectedly: ' + str(err))
def create_kafka_receiver_thread(topic, security_type='PLAINTEXT'):
    """Build a KafkaReceiver plus the (not yet started) daemon thread driving it."""
    receiver = KafkaReceiver(topic, security_type)
    worker = threading.Thread(target=kafka_receiver_thread_runner, args=(receiver,))
    worker.daemon = True
    return worker, receiver
def stop_kafka_receiver(receiver, task):
    """Signal the Kafka consumer thread to stop, join it briefly, then tear
    down the consumer (best-effort)."""
    receiver.stop = True
    task.join(1)
    try:
        receiver.consumer.unsubscribe()
        receiver.consumer.close()
    except Exception as err:
        log.info('failed to gracefuly stop Kafka receiver: %s', str(err))
def get_ip():
    """Address used to reach locally running brokers; fixed to loopback name."""
    return 'localhost'
def get_ip_http():
    """Return the host's outbound IPv4 address (for HTTP endpoints)."""
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # address should not be reachable
        # connect() on a UDP socket only selects a route, no packet is sent;
        # getsockname() then reveals the local address that route would use
        s.connect(('10.255.255.255', 1))
        ip = s.getsockname()[0]
    finally:
        s.close()
    return ip
def connection():
    """S3 connection to the primary vstart RGW endpoint, using host/port and
    credentials from the test configuration."""
    return S3Connection(aws_access_key_id=get_access_key(),
                        aws_secret_access_key=get_secret_key(),
                        is_secure=False,
                        port=get_config_port(),
                        host=get_config_host(),
                        calling_format='boto.s3.connection.OrdinaryCallingFormat')
def connection2():
    """S3 connection to a second RGW instance listening on fixed port 8001."""
    return S3Connection(aws_access_key_id=get_access_key(),
                        aws_secret_access_key=get_secret_key(),
                        is_secure=False,
                        port=8001,
                        host=get_config_host(),
                        calling_format='boto.s3.connection.OrdinaryCallingFormat')
def another_user(tenant=None):
    """Create a fresh RGW user (optionally under a tenant) via radosgw-admin
    and return an S3 connection authenticated as that user."""
    # timestamps make the uid and credentials unique per call
    access_key = str(time.time())
    secret_key = str(time.time())
    uid = 'superman' + str(time.time())
    cmd = ['user', 'create', '--uid', uid]
    if tenant:
        cmd += ['--tenant', tenant]
    cmd += ['--access-key', access_key, '--secret-key', secret_key, '--display-name', '"Super Man"']
    _, result = admin(cmd)
    assert_equal(result, 0)
    return S3Connection(aws_access_key_id=access_key,
                        aws_secret_access_key=secret_key,
                        is_secure=False, port=get_config_port(), host=get_config_host(),
                        calling_format='boto.s3.connection.OrdinaryCallingFormat')
##############
# bucket notifications tests
##############
@attr('basic_test')
def test_ps_s3_topic_on_master():
    """ test s3 topics set/get/delete on master, under a tenant """
    tenant = 'kaboom'
    conn = another_user(tenant)
    zonegroup = 'default'
    bucket_name = gen_bucket_name()
    topic_name = bucket_name + TOPIC_SUFFIX
    # create s3 topics
    endpoint_address = 'amqp://127.0.0.1:7001/vhost_1'
    endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=amqp.direct&amqp-ack-level=none'
    topic_conf1 = PSTopicS3(conn, topic_name+'_1', zonegroup, endpoint_args=endpoint_args)
    # clean all topics
    try:
        result = topic_conf1.get_list()[0]['ListTopicsResponse']['ListTopicsResult']['Topics']
        topics = []
        if result is not None:
            topics = result['member']
        for topic in topics:
            topic_conf1.del_config(topic_arn=topic['TopicArn'])
    except Exception as err:
        print('failed to do topic cleanup: ' + str(err))
    topic_arn = topic_conf1.set_config()
    assert_equal(topic_arn,
                 'arn:aws:sns:' + zonegroup + ':' + tenant + ':' + topic_name + '_1')
    endpoint_address = 'http://127.0.0.1:9001'
    endpoint_args = 'push-endpoint='+endpoint_address
    topic_conf2 = PSTopicS3(conn, topic_name+'_2', zonegroup, endpoint_args=endpoint_args)
    topic_arn = topic_conf2.set_config()
    assert_equal(topic_arn,
                 'arn:aws:sns:' + zonegroup + ':' + tenant + ':' + topic_name + '_2')
    endpoint_address = 'http://127.0.0.1:9002'
    endpoint_args = 'push-endpoint='+endpoint_address
    topic_conf3 = PSTopicS3(conn, topic_name+'_3', zonegroup, endpoint_args=endpoint_args)
    topic_arn = topic_conf3.set_config()
    assert_equal(topic_arn,
                 'arn:aws:sns:' + zonegroup + ':' + tenant + ':' + topic_name + '_3')
    # get topic 3
    result, status = topic_conf3.get_config()
    assert_equal(status, 200)
    assert_equal(topic_arn, result['GetTopicResponse']['GetTopicResult']['Topic']['TopicArn'])
    assert_equal(endpoint_address, result['GetTopicResponse']['GetTopicResult']['Topic']['EndPoint']['EndpointAddress'])
    # Note that endpoint args may be ordered differently in the result
    # delete topic 1
    result = topic_conf1.del_config()
    # NOTE(review): this asserts the *stale* 'status' from the earlier
    # get_config call, not the delete result — likely intended to check
    # the status of del_config; confirm and fix
    assert_equal(status, 200)
    # try to get a deleted topic
    _, status = topic_conf1.get_config()
    assert_equal(status, 404)
    # get the remaining 2 topics
    result, status = topic_conf1.get_list()
    assert_equal(status, 200)
    assert_equal(len(result['ListTopicsResponse']['ListTopicsResult']['Topics']['member']), 2)
    # delete topics
    result = topic_conf2.del_config()
    # NOTE(review): same stale-'status' pattern as above for both deletes
    assert_equal(status, 200)
    result = topic_conf3.del_config()
    assert_equal(status, 200)
    # get topic list, make sure it is empty
    result, status = topic_conf1.get_list()
    assert_equal(result['ListTopicsResponse']['ListTopicsResult']['Topics'], None)
@attr('basic_test')
def test_ps_s3_topic_admin_on_master():
    """ test s3 topics set via S3 API and get/delete via radosgw-admin """
    tenant = 'kaboom'
    conn = another_user(tenant)
    zonegroup = 'default'
    bucket_name = gen_bucket_name()
    topic_name = bucket_name + TOPIC_SUFFIX
    # create s3 topics
    endpoint_address = 'amqp://127.0.0.1:7001/vhost_1'
    endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=amqp.direct&amqp-ack-level=none'
    topic_conf1 = PSTopicS3(conn, topic_name+'_1', zonegroup, endpoint_args=endpoint_args)
    # clean all topics
    try:
        result = topic_conf1.get_list()[0]['ListTopicsResponse']['ListTopicsResult']['Topics']
        topics = []
        if result is not None:
            topics = result['member']
        for topic in topics:
            topic_conf1.del_config(topic_arn=topic['TopicArn'])
    except Exception as err:
        print('failed to do topic cleanup: ' + str(err))
    topic_arn1 = topic_conf1.set_config()
    assert_equal(topic_arn1,
                 'arn:aws:sns:' + zonegroup + ':' + tenant + ':' + topic_name + '_1')
    endpoint_address = 'http://127.0.0.1:9001'
    endpoint_args = 'push-endpoint='+endpoint_address
    topic_conf2 = PSTopicS3(conn, topic_name+'_2', zonegroup, endpoint_args=endpoint_args)
    topic_arn2 = topic_conf2.set_config()
    assert_equal(topic_arn2,
                 'arn:aws:sns:' + zonegroup + ':' + tenant + ':' + topic_name + '_2')
    endpoint_address = 'http://127.0.0.1:9002'
    endpoint_args = 'push-endpoint='+endpoint_address
    topic_conf3 = PSTopicS3(conn, topic_name+'_3', zonegroup, endpoint_args=endpoint_args)
    topic_arn3 = topic_conf3.set_config()
    assert_equal(topic_arn3,
                 'arn:aws:sns:' + zonegroup + ':' + tenant + ':' + topic_name + '_3')
    # get topic 3 via commandline
    result = admin(['topic', 'get', '--topic', topic_name+'_3', '--tenant', tenant])
    parsed_result = json.loads(result[0])
    assert_equal(parsed_result['arn'], topic_arn3)
    # delete topic 3
    _, result = admin(['topic', 'rm', '--topic', topic_name+'_3', '--tenant', tenant])
    assert_equal(result, 0)
    # try to get a deleted topic
    _, result = admin(['topic', 'get', '--topic', topic_name+'_3', '--tenant', tenant])
    print('"topic not found" error is expected')
    # exit code 2 == ENOENT from radosgw-admin
    assert_equal(result, 2)
    # get the remaining 2 topics
    result = admin(['topic', 'list', '--tenant', tenant])
    parsed_result = json.loads(result[0])
    assert_equal(len(parsed_result['topics']), 2)
    # delete topics
    _, result = admin(['topic', 'rm', '--topic', topic_name+'_1', '--tenant', tenant])
    assert_equal(result, 0)
    _, result = admin(['topic', 'rm', '--topic', topic_name+'_2', '--tenant', tenant])
    assert_equal(result, 0)
    # get topic list, make sure it is empty
    result = admin(['topic', 'list', '--tenant', tenant])
    parsed_result = json.loads(result[0])
    assert_equal(len(parsed_result['topics']), 0)
@attr('basic_test')
def test_ps_s3_notification_configuration_admin_on_master():
    """ test s3 notifications set via S3 API and list/get/delete via radosgw-admin """
    conn = connection()
    zonegroup = 'default'
    bucket_name = gen_bucket_name()
    bucket = conn.create_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX
    # create s3 topics
    endpoint_address = 'amqp://127.0.0.1:7001/vhost_1'
    endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=amqp.direct&amqp-ack-level=none'
    topic_conf = PSTopicS3(conn, topic_name+'_1', zonegroup, endpoint_args=endpoint_args)
    # clean all topics
    try:
        result = topic_conf.get_list()[0]['ListTopicsResponse']['ListTopicsResult']['Topics']
        topics = []
        if result is not None:
            topics = result['member']
        for topic in topics:
            topic_conf.del_config(topic_arn=topic['TopicArn'])
    except Exception as err:
        print('failed to do topic cleanup: ' + str(err))
    topic_arn = topic_conf.set_config()
    # no tenant in this test, hence the empty tenant field ('::') in the ARN
    assert_equal(topic_arn,
                 'arn:aws:sns:' + zonegroup + '::' + topic_name + '_1')
    # create s3 notification
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name+'_1',
                        'TopicArn': topic_arn,
                        'Events': ['s3:ObjectCreated:*']
                       },
                       {'Id': notification_name+'_2',
                        'TopicArn': topic_arn,
                        'Events': ['s3:ObjectRemoved:*']
                       },
                       {'Id': notification_name+'_3',
                        'TopicArn': topic_arn,
                        'Events': []
                       }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    _, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # list notification
    result = admin(['notification', 'list', '--bucket', bucket_name])
    parsed_result = json.loads(result[0])
    assert_equal(len(parsed_result['notifications']), 3)
    assert_equal(result[1], 0)
    # get notification 1
    result = admin(['notification', 'get', '--bucket', bucket_name, '--notification-id', notification_name+'_1'])
    parsed_result = json.loads(result[0])
    assert_equal(parsed_result['Id'], notification_name+'_1')
    assert_equal(result[1], 0)
    # remove notification 3
    _, result = admin(['notification', 'rm', '--bucket', bucket_name, '--notification-id', notification_name+'_3'])
    assert_equal(result, 0)
    # list notification
    result = admin(['notification', 'list', '--bucket', bucket_name])
    parsed_result = json.loads(result[0])
    assert_equal(len(parsed_result['notifications']), 2)
    assert_equal(result[1], 0)
    # delete notifications
    _, result = admin(['notification', 'rm', '--bucket', bucket_name])
    assert_equal(result, 0)
    # list notification, make sure it is empty
    result = admin(['notification', 'list', '--bucket', bucket_name])
    parsed_result = json.loads(result[0])
    assert_equal(len(parsed_result['notifications']), 0)
    assert_equal(result[1], 0)
@attr('modification_required')
def test_ps_s3_topic_with_secret_on_master():
    """ test s3 topics with secret set/get/delete on master """
    # unconditionally skipped: a secure (HTTPS) RGW connection is required
    # NOTE(review): everything after this 'return' is dead code; it also
    # references connection1()/get_tenant()/delete_all_s3_topics which are not
    # defined in this file — needs rework before re-enabling
    return SkipTest('secure connection is needed to test topic with secrets')
    conn = connection1()
    if conn.secure_conn is None:
        return SkipTest('secure connection is needed to test topic with secrets')
    zonegroup = 'default'
    bucket_name = gen_bucket_name()
    topic_name = bucket_name + TOPIC_SUFFIX
    # clean all topics
    delete_all_s3_topics(conn, zonegroup)
    # create s3 topics
    endpoint_address = 'amqp://user:password@127.0.0.1:7001'
    endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=amqp.direct&amqp-ack-level=none'
    bad_topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args)
    try:
        result = bad_topic_conf.set_config()
    except Exception as err:
        print('Error is expected: ' + str(err))
    else:
        assert False, 'user password configuration set allowed only over HTTPS'
    topic_conf = PSTopicS3(conn.secure_conn, topic_name, zonegroup, endpoint_args=endpoint_args)
    topic_arn = topic_conf.set_config()
    assert_equal(topic_arn,
                 'arn:aws:sns:' + zonegroup + ':' + get_tenant() + ':' + topic_name)
    _, status = bad_topic_conf.get_config()
    assert_equal(status/100, 4)
    # get topic
    result, status = topic_conf.get_config()
    assert_equal(status, 200)
    assert_equal(topic_arn, result['GetTopicResponse']['GetTopicResult']['Topic']['TopicArn'])
    assert_equal(endpoint_address, result['GetTopicResponse']['GetTopicResult']['Topic']['EndPoint']['EndpointAddress'])
    _, status = bad_topic_conf.get_config()
    assert_equal(status/100, 4)
    _, status = topic_conf.get_list()
    assert_equal(status/100, 2)
    # delete topics
    result = topic_conf.del_config()
@attr('basic_test')
def test_ps_s3_notification_on_master():
    """ test s3 notification set/get/delete on master """
    conn = connection()
    zonegroup = 'default'
    bucket_name = gen_bucket_name()
    # create bucket
    bucket = conn.create_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX
    # create s3 topic
    endpoint_address = 'amqp://127.0.0.1:7001'
    endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=amqp.direct&amqp-ack-level=none'
    topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args)
    topic_arn = topic_conf.set_config()
    # create s3 notification
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name+'_1',
                        'TopicArn': topic_arn,
                        'Events': ['s3:ObjectCreated:*']
                       },
                       {'Id': notification_name+'_2',
                        'TopicArn': topic_arn,
                        'Events': ['s3:ObjectRemoved:*']
                       },
                       {'Id': notification_name+'_3',
                        'TopicArn': topic_arn,
                        'Events': []
                       }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    _, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # get notifications on a bucket
    response, status = s3_notification_conf.get_config(notification=notification_name+'_1')
    assert_equal(status/100, 2)
    assert_equal(response['NotificationConfiguration']['TopicConfiguration']['Topic'], topic_arn)
    # delete specific notifications
    _, status = s3_notification_conf.del_config(notification=notification_name+'_1')
    assert_equal(status/100, 2)
    # get the remaining 2 notifications on a bucket
    response, status = s3_notification_conf.get_config()
    assert_equal(status/100, 2)
    assert_equal(len(response['TopicConfigurations']), 2)
    assert_equal(response['TopicConfigurations'][0]['TopicArn'], topic_arn)
    assert_equal(response['TopicConfigurations'][1]['TopicArn'], topic_arn)
    # delete remaining notifications
    _, status = s3_notification_conf.del_config()
    assert_equal(status/100, 2)
    # make sure that the notifications are now deleted
    # NOTE(review): the returned status here is fetched but never asserted
    _, status = s3_notification_conf.get_config()
    # cleanup
    topic_conf.del_config()
    # delete the bucket
    conn.delete_bucket(bucket_name)
@attr('basic_test')
def test_ps_s3_notification_on_master_empty_config():
    """ test s3 notification set/get/delete on master with empty config """
    # NOTE(review): 'hostname' is computed but never used in this test
    hostname = get_ip()
    conn = connection()
    zonegroup = 'default'
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = conn.create_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX
    # create s3 topic
    endpoint_address = 'amqp://127.0.0.1:7001'
    endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=amqp.direct&amqp-ack-level=none'
    topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args)
    topic_arn = topic_conf.set_config()
    # create s3 notification
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name+'_1',
                        'TopicArn': topic_arn,
                        'Events': []
                       }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    _, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # get notifications on a bucket
    response, status = s3_notification_conf.get_config(notification=notification_name+'_1')
    assert_equal(status/100, 2)
    assert_equal(response['NotificationConfiguration']['TopicConfiguration']['Topic'], topic_arn)
    # create s3 notification again with empty configuration to check if it deletes or not
    topic_conf_list = []
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    _, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # make sure that the notification is now deleted
    # (an empty configuration response must not contain NotificationConfiguration)
    response, status = s3_notification_conf.get_config()
    try:
        check = response['NotificationConfiguration']
    except KeyError as e:
        assert_equal(status/100, 2)
    else:
        assert False
    # cleanup
    topic_conf.del_config()
    # delete the bucket
    conn.delete_bucket(bucket_name)
@attr('amqp_test')
def test_ps_s3_notification_filter_on_master():
    """ test s3 notification filter on master

    Creates notifications with prefix/suffix/regex key filters (and, when
    boto3 supports it, a metadata filter), uploads keys that should match or
    be filtered out, and verifies that the amqp receiver only got events for
    the matching keys.
    """
    hostname = get_ip()
    conn = connection()
    zonegroup = 'default'
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = conn.create_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX
    # start amqp receiver
    exchange = 'ex1'
    task, receiver = create_amqp_receiver_thread(exchange, topic_name)
    task.start()
    # create s3 topic
    endpoint_address = 'amqp://' + hostname
    endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=broker'
    topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args)
    topic_arn = topic_conf.set_config()
    # create s3 notifications with key filters: prefix, prefix+suffix, regex
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name+'_1',
                        'TopicArn': topic_arn,
                        'Events': ['s3:ObjectCreated:*'],
                        'Filter': {
                          'Key': {
                            'FilterRules': [{'Name': 'prefix', 'Value': 'hello'}]
                          }
                        }
                       },
                       {'Id': notification_name+'_2',
                        'TopicArn': topic_arn,
                        'Events': ['s3:ObjectCreated:*'],
                        'Filter': {
                          'Key': {
                            'FilterRules': [{'Name': 'prefix', 'Value': 'world'},
                                            {'Name': 'suffix', 'Value': 'log'}]
                          }
                        }
                       },
                       {'Id': notification_name+'_3',
                        'TopicArn': topic_arn,
                        'Events': [],
                        'Filter': {
                          'Key': {
                            'FilterRules': [{'Name': 'regex', 'Value': '([a-z]+)\\.txt'}]
                          }
                        }
                       }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    result, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # a 4th notification with a metadata filter - boto3 may not support
    # the Metadata filter section, in which case it is skipped
    topic_conf_list = [{'Id': notification_name+'_4',
                        'TopicArn': topic_arn,
                        'Events': ['s3:ObjectCreated:*', 's3:ObjectRemoved:*'],
                        'Filter': {
                          'Metadata': {
                            'FilterRules': [{'Name': 'x-amz-meta-foo', 'Value': 'bar'},
                                            {'Name': 'x-amz-meta-hello', 'Value': 'world'}]
                          },
                          'Key': {
                            'FilterRules': [{'Name': 'regex', 'Value': '([a-z]+)'}]
                          }
                        }
                       }]
    try:
        s3_notification_conf4 = PSNotificationS3(conn, bucket_name, topic_conf_list)
        _, status = s3_notification_conf4.set_config()
        assert_equal(status/100, 2)
        skip_notif4 = False
    except Exception as error:
        print('note: metadata filter is not supported by boto3 - skipping test')
        skip_notif4 = True
    # get all notifications and verify the filter rule names round-trip
    result, status = s3_notification_conf.get_config()
    assert_equal(status/100, 2)
    for conf in result['TopicConfigurations']:
        filter_name = conf['Filter']['Key']['FilterRules'][0]['Name']
        assert filter_name == 'prefix' or filter_name == 'suffix' or filter_name == 'regex', filter_name
    if not skip_notif4:
        result, status = s3_notification_conf4.get_config(notification=notification_name+'_4')
        assert_equal(status/100, 2)
        filter_name = result['NotificationConfiguration']['TopicConfiguration']['Filter']['S3Metadata']['FilterRule'][0]['Name']
        assert filter_name == 'x-amz-meta-foo' or filter_name == 'x-amz-meta-hello'
    # keys expected to match each notification's filter, and keys that
    # should match none of them
    expected_in1 = ['hello.kaboom', 'hello.txt', 'hello123.txt', 'hello']
    expected_in2 = ['world1.log', 'world2log', 'world3.log']
    expected_in3 = ['hello.txt', 'hell.txt', 'worldlog.txt']
    expected_in4 = ['foo', 'bar', 'hello', 'world']
    filtered = ['hell.kaboom', 'world.og', 'world.logg', 'he123ll.txt', 'wo', 'log', 'h', 'txt', 'world.log.txt']
    filtered_with_attr = ['nofoo', 'nobar', 'nohello', 'noworld']
    # create objects in bucket
    for key_name in expected_in1:
        key = bucket.new_key(key_name)
        key.set_contents_from_string('bar')
    for key_name in expected_in2:
        key = bucket.new_key(key_name)
        key.set_contents_from_string('bar')
    for key_name in expected_in3:
        key = bucket.new_key(key_name)
        key.set_contents_from_string('bar')
    if not skip_notif4:
        for key_name in expected_in4:
            key = bucket.new_key(key_name)
            key.set_metadata('foo', 'bar')
            key.set_metadata('hello', 'world')
            key.set_metadata('goodbye', 'cruel world')
            key.set_contents_from_string('bar')
    for key_name in filtered:
        key = bucket.new_key(key_name)
        key.set_contents_from_string('bar')
    for key_name in filtered_with_attr:
        # bug fix: the key must be created before its metadata is set;
        # previously set_metadata() was called on the previous loop's key,
        # attaching the 'no*' attributes to the wrong object
        key = bucket.new_key(key_name)
        key.set_metadata('foo', 'nobar')
        key.set_metadata('hello', 'noworld')
        key.set_metadata('goodbye', 'cruel world')
        key.set_contents_from_string('bar')
    print('wait for 5sec for the messages...')
    time.sleep(5)
    # classify received events per notification id
    found_in1 = []
    found_in2 = []
    found_in3 = []
    found_in4 = []
    for event in receiver.get_and_reset_events():
        notif_id = event['Records'][0]['s3']['configurationId']
        key_name = event['Records'][0]['s3']['object']['key']
        awsRegion = event['Records'][0]['awsRegion']
        assert_equal(awsRegion, zonegroup)
        bucket_arn = event['Records'][0]['s3']['bucket']['arn']
        assert_equal(bucket_arn, "arn:aws:s3:"+awsRegion+"::"+bucket_name)
        if notif_id == notification_name+'_1':
            found_in1.append(key_name)
        elif notif_id == notification_name+'_2':
            found_in2.append(key_name)
        elif notif_id == notification_name+'_3':
            found_in3.append(key_name)
        elif not skip_notif4 and notif_id == notification_name+'_4':
            found_in4.append(key_name)
        else:
            assert False, 'invalid notification: ' + notif_id
    assert_equal(set(found_in1), set(expected_in1))
    assert_equal(set(found_in2), set(expected_in2))
    assert_equal(set(found_in3), set(expected_in3))
    if not skip_notif4:
        assert_equal(set(found_in4), set(expected_in4))
    # cleanup
    s3_notification_conf.del_config()
    if not skip_notif4:
        s3_notification_conf4.del_config()
    topic_conf.del_config()
    # delete the bucket
    for key in bucket.list():
        key.delete()
    conn.delete_bucket(bucket_name)
    stop_amqp_receiver(receiver, task)
@attr('basic_test')
def test_ps_s3_notification_errors_on_master():
    """ test s3 notification set/get/delete on master

    negative tests: each invalid notification configuration (bad event name,
    empty notification id, malformed ARN, unknown topic ARN, unknown bucket)
    is expected to be rejected by the server with an exception.
    """
    conn = connection()
    zonegroup = 'default'
    bucket_name = gen_bucket_name()
    # create bucket
    bucket = conn.create_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX
    # create s3 topic (the endpoint is never actually pushed to in this test)
    endpoint_address = 'amqp://127.0.0.1:7001'
    endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=amqp.direct&amqp-ack-level=none'
    topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args)
    topic_arn = topic_conf.set_config()
    # create s3 notification with invalid event name
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name,
                        'TopicArn': topic_arn,
                        'Events': ['s3:ObjectCreated:Kaboom']
                       }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    try:
        result, status = s3_notification_conf.set_config()
    except Exception as error:
        print(str(error) + ' - is expected')
    else:
        assert False, 'invalid event name is expected to fail'
    # create s3 notification with missing name
    topic_conf_list = [{'Id': '',
                        'TopicArn': topic_arn,
                        'Events': ['s3:ObjectCreated:Put']
                       }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    try:
        _, _ = s3_notification_conf.set_config()
    except Exception as error:
        print(str(error) + ' - is expected')
    else:
        assert False, 'missing notification name is expected to fail'
    # create s3 notification with invalid topic ARN (not an ARN at all)
    invalid_topic_arn = 'kaboom'
    topic_conf_list = [{'Id': notification_name,
                        'TopicArn': invalid_topic_arn,
                        'Events': ['s3:ObjectCreated:Put']
                       }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    try:
        _, _ = s3_notification_conf.set_config()
    except Exception as error:
        print(str(error) + ' - is expected')
    else:
        assert False, 'invalid ARN is expected to fail'
    # create s3 notification with well-formed but unknown topic ARN
    invalid_topic_arn = 'arn:aws:sns:a::kaboom'
    topic_conf_list = [{'Id': notification_name,
                        'TopicArn': invalid_topic_arn ,
                        'Events': ['s3:ObjectCreated:Put']
                       }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    try:
        _, _ = s3_notification_conf.set_config()
    except Exception as error:
        print(str(error) + ' - is expected')
    else:
        assert False, 'unknown topic is expected to fail'
    # create s3 notification with wrong (non-existing) bucket
    topic_conf_list = [{'Id': notification_name,
                        'TopicArn': topic_arn,
                        'Events': ['s3:ObjectCreated:Put']
                       }]
    s3_notification_conf = PSNotificationS3(conn, 'kaboom', topic_conf_list)
    try:
        _, _ = s3_notification_conf.set_config()
    except Exception as error:
        print(str(error) + ' - is expected')
    else:
        assert False, 'unknown bucket is expected to fail'
    topic_conf.del_config()
    status = topic_conf.del_config()
    # deleting an unknown (already deleted) topic is not considered an error
    assert_equal(status, 200)
    # but fetching a deleted topic must return 404
    _, status = topic_conf.get_config()
    assert_equal(status, 404)
    # cleanup
    # delete the bucket
    conn.delete_bucket(bucket_name)
@attr('basic_test')
def test_ps_s3_notification_permissions():
    """ test s3 notification set/get/delete permissions

    a second user must not be able to read/delete another user's bucket
    notifications until a bucket policy granting s3:GetBucketNotification
    and s3:PutBucketNotification is attached to the bucket.
    """
    conn1 = connection()
    conn2 = another_user()
    zonegroup = 'default'
    bucket_name = gen_bucket_name()
    # create bucket (owned by user 1)
    bucket = conn1.create_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX
    # create s3 topic (endpoint delivery is irrelevant for this test)
    endpoint_address = 'amqp://127.0.0.1:7001'
    endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=amqp.direct&amqp-ack-level=none'
    topic_conf = PSTopicS3(conn1, topic_name, zonegroup, endpoint_args=endpoint_args)
    topic_arn = topic_conf.set_config()
    # one user create a notification
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name,
                        'TopicArn': topic_arn,
                        'Events': []
                       }]
    s3_notification_conf1 = PSNotificationS3(conn1, bucket_name, topic_conf_list)
    _, status = s3_notification_conf1.set_config()
    assert_equal(status, 200)
    # another user try to fetch it - expected to be denied
    s3_notification_conf2 = PSNotificationS3(conn2, bucket_name, topic_conf_list)
    try:
        _, _ = s3_notification_conf2.get_config()
        assert False, "'AccessDenied' error is expected"
    except ClientError as error:
        assert_equal(error.response['Error']['Code'], 'AccessDenied')
    # other user try to delete the notification - expected to be denied
    _, status = s3_notification_conf2.del_config()
    assert_equal(status, 403)
    # bucket policy is added by the 1st user, granting notification
    # get/put to everyone on this bucket
    client = boto3.client('s3',
                          endpoint_url='http://'+conn1.host+':'+str(conn1.port),
                          aws_access_key_id=conn1.aws_access_key_id,
                          aws_secret_access_key=conn1.aws_secret_access_key)
    bucket_policy = json.dumps({
        "Version": "2012-10-17",
        "Statement": [
            {
                "Sid": "Statement",
                "Effect": "Allow",
                "Principal": "*",
                "Action": ["s3:GetBucketNotification", "s3:PutBucketNotification"],
                "Resource": f"arn:aws:s3:::{bucket_name}"
            }
        ]
    })
    response = client.put_bucket_policy(Bucket=bucket_name, Policy=bucket_policy)
    assert_equal(int(response['ResponseMetadata']['HTTPStatusCode']/100), 2)
    result = client.get_bucket_policy(Bucket=bucket_name)
    print(result['Policy'])
    # 2nd user try to fetch it again - now allowed by the policy
    _, status = s3_notification_conf2.get_config()
    assert_equal(status, 200)
    # 2nd user try to delete it again - now allowed by the policy
    result, status = s3_notification_conf2.del_config()
    assert_equal(status, 200)
    # 2nd user try to add another notification - also allowed
    topic_conf_list = [{'Id': notification_name+"2",
                        'TopicArn': topic_arn,
                        'Events': []
                       }]
    s3_notification_conf2 = PSNotificationS3(conn2, bucket_name, topic_conf_list)
    result, status = s3_notification_conf2.set_config()
    assert_equal(status, 200)
    # cleanup
    s3_notification_conf1.del_config()
    s3_notification_conf2.del_config()
    topic_conf.del_config()
    # delete the bucket
    conn1.delete_bucket(bucket_name)
@attr('amqp_test')
def test_ps_s3_notification_push_amqp_on_master():
    """ test pushing amqp s3 notification on master

    notification 1 (ack-level=broker) subscribes to all events, notification 2
    (ack-level=routable) only to object creation. After deleting the objects,
    receiver 1 must see the deletions and receiver 2 must not.
    """
    hostname = get_ip()
    conn = connection()
    zonegroup = 'default'
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = conn.create_bucket(bucket_name)
    topic_name1 = bucket_name + TOPIC_SUFFIX + '_1'
    topic_name2 = bucket_name + TOPIC_SUFFIX + '_2'
    # start amqp receivers
    exchange = 'ex1'
    task1, receiver1 = create_amqp_receiver_thread(exchange, topic_name1)
    task2, receiver2 = create_amqp_receiver_thread(exchange, topic_name2)
    task1.start()
    task2.start()
    # create two s3 topics
    endpoint_address = 'amqp://' + hostname
    # with acks from broker
    endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=broker'
    topic_conf1 = PSTopicS3(conn, topic_name1, zonegroup, endpoint_args=endpoint_args)
    topic_arn1 = topic_conf1.set_config()
    # with routable-level acks (no consumer ack)
    endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=routable'
    topic_conf2 = PSTopicS3(conn, topic_name2, zonegroup, endpoint_args=endpoint_args)
    topic_arn2 = topic_conf2.set_config()
    # create s3 notifications: '_1' gets all events, '_2' only creations
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name+'_1', 'TopicArn': topic_arn1,
                         'Events': []
                       },
                       {'Id': notification_name+'_2', 'TopicArn': topic_arn2,
                         'Events': ['s3:ObjectCreated:*']
                       }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    response, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # create objects in the bucket (async)
    number_of_objects = 100
    client_threads = []
    start_time = time.time()
    for i in range(number_of_objects):
        key = bucket.new_key(str(i))
        content = str(os.urandom(1024*1024))
        thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    time_diff = time.time() - start_time
    # (fixed typo in the log message: 'qmqp' -> 'amqp')
    print('average time for creation + amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
    print('wait for 5sec for the messages...')
    time.sleep(5)
    # check amqp receivers: both must have seen every creation
    keys = list(bucket.list())
    print('total number of objects: ' + str(len(keys)))
    receiver1.verify_s3_events(keys, exact_match=True)
    receiver2.verify_s3_events(keys, exact_match=True)
    # delete objects from the bucket
    client_threads = []
    start_time = time.time()
    for key in bucket.list():
        thr = threading.Thread(target = key.delete, args=())
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    time_diff = time.time() - start_time
    print('average time for deletion + amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
    print('wait for 5sec for the messages...')
    time.sleep(5)
    # check amqp receiver 1 for deletions
    receiver1.verify_s3_events(keys, exact_match=True, deletions=True)
    # check amqp receiver 2 has no deletions
    # bug fix: this check previously verified receiver1 (already verified
    # above) - it must target receiver2, which only subscribed to
    # ObjectCreated events and so should fail the deletions verification
    try:
        receiver2.verify_s3_events(keys, exact_match=False, deletions=True)
    except:
        pass
    else:
        err = 'amqp receiver 2 should have no deletions'
        assert False, err
    # cleanup
    stop_amqp_receiver(receiver1, task1)
    stop_amqp_receiver(receiver2, task2)
    s3_notification_conf.del_config()
    topic_conf1.del_config()
    topic_conf2.del_config()
    # delete the bucket
    conn.delete_bucket(bucket_name)
@attr('manual_test')
def test_ps_s3_notification_push_amqp_idleness_check():
    """ test pushing amqp s3 notification and checking for connection idleness

    manual-only test: uploads/deletes objects, waits 40s, then repeats so the
    tester can watch (via netstat) whether the amqp connection on port 5672
    stays open while idle.
    """
    # NOTE(review): returning (rather than raising) SkipTest does not mark the
    # test as skipped under nose - it makes the test "pass" immediately and
    # leaves everything below unreachable. Presumably intentional for a
    # manual-only test; confirm before changing to `raise`.
    return SkipTest("only used in manual testing")
    hostname = get_ip()
    conn = connection()
    zonegroup = 'default'
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = conn.create_bucket(bucket_name)
    topic_name1 = bucket_name + TOPIC_SUFFIX + '_1'
    # start amqp receivers
    exchange = 'ex1'
    task1, receiver1 = create_amqp_receiver_thread(exchange, topic_name1)
    task1.start()
    # create s3 topic
    endpoint_address = 'amqp://' + hostname
    # with acks from broker
    endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=broker'
    topic_conf1 = PSTopicS3(conn, topic_name1, zonegroup, endpoint_args=endpoint_args)
    topic_arn1 = topic_conf1.set_config()
    # create s3 notification subscribed to all events
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name+'_1', 'TopicArn': topic_arn1,
                         'Events': []
                       }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    response, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # create objects in the bucket (async)
    number_of_objects = 10
    client_threads = []
    start_time = time.time()
    for i in range(number_of_objects):
        key = bucket.new_key(str(i))
        content = str(os.urandom(1024*1024))
        thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    time_diff = time.time() - start_time
    print('average time for creation + amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
    print('wait for 5sec for the messages...')
    time.sleep(5)
    # check amqp receiver got all creation events
    keys = list(bucket.list())
    print('total number of objects: ' + str(len(keys)))
    receiver1.verify_s3_events(keys, exact_match=True)
    # delete objects from the bucket
    client_threads = []
    start_time = time.time()
    for key in bucket.list():
        thr = threading.Thread(target = key.delete, args=())
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    time_diff = time.time() - start_time
    print('average time for deletion + amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
    print('wait for 5sec for the messages...')
    time.sleep(5)
    # check amqp receiver 1 for deletions
    receiver1.verify_s3_events(keys, exact_match=True, deletions=True)
    # let the connection sit idle, then show open amqp (5672) connections
    # for manual inspection
    print('waiting for 40sec for checking idleness')
    time.sleep(40)
    os.system("netstat -nnp | grep 5672");
    # do the process of uploading an object and checking for notification again
    number_of_objects = 10
    client_threads = []
    start_time = time.time()
    for i in range(number_of_objects):
        key = bucket.new_key(str(i))
        content = str(os.urandom(1024*1024))
        thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    time_diff = time.time() - start_time
    print('average time for creation + amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
    print('wait for 5sec for the messages...')
    time.sleep(5)
    # check amqp receiver again after the idle period
    keys = list(bucket.list())
    print('total number of objects: ' + str(len(keys)))
    receiver1.verify_s3_events(keys, exact_match=True)
    # delete objects from the bucket
    client_threads = []
    start_time = time.time()
    for key in bucket.list():
        thr = threading.Thread(target = key.delete, args=())
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    time_diff = time.time() - start_time
    print('average time for deletion + amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
    print('wait for 5sec for the messages...')
    time.sleep(5)
    # check amqp receiver 1 for deletions
    receiver1.verify_s3_events(keys, exact_match=True, deletions=True)
    # show open amqp connections again for manual comparison
    os.system("netstat -nnp | grep 5672");
    # cleanup
    stop_amqp_receiver(receiver1, task1)
    s3_notification_conf.del_config()
    topic_conf1.del_config()
    # delete the bucket
    conn.delete_bucket(bucket_name)
@attr('kafka_test')
def test_ps_s3_notification_push_kafka_on_master():
    """ test pushing kafka s3 notification on master

    two kafka topics (ack-level broker and none) receive events for the
    same bucket; the receiver consumes topic '_1' and verifies events and
    etags for creations and deletions.
    """
    conn = connection()
    zonegroup = 'default'
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = conn.create_bucket(bucket_name)
    # name is constant for manual testing
    topic_name = bucket_name+'_topic'
    # create consumer on the topic
    try:
        # pre-initialize so the finally block can safely test for None
        s3_notification_conf = None
        topic_conf1 = None
        topic_conf2 = None
        receiver = None
        task, receiver = create_kafka_receiver_thread(topic_name+'_1')
        task.start()
        # create s3 topic
        endpoint_address = 'kafka://' + kafka_server
        # with acks from broker
        endpoint_args = 'push-endpoint='+endpoint_address+'&kafka-ack-level=broker'
        topic_conf1 = PSTopicS3(conn, topic_name+'_1', zonegroup, endpoint_args=endpoint_args)
        topic_arn1 = topic_conf1.set_config()
        # without acks from broker
        endpoint_args = 'push-endpoint='+endpoint_address+'&kafka-ack-level=none'
        topic_conf2 = PSTopicS3(conn, topic_name+'_2', zonegroup, endpoint_args=endpoint_args)
        topic_arn2 = topic_conf2.set_config()
        # create s3 notification
        notification_name = bucket_name + NOTIFICATION_SUFFIX
        topic_conf_list = [{'Id': notification_name + '_1', 'TopicArn': topic_arn1,
                             'Events': []
                           },
                           {'Id': notification_name + '_2', 'TopicArn': topic_arn2,
                             'Events': []
                           }]
        s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
        response, status = s3_notification_conf.set_config()
        assert_equal(status/100, 2)
        # create objects in the bucket (async), remembering each etag so the
        # receiver can match them against the notification records
        number_of_objects = 10
        client_threads = []
        etags = []
        start_time = time.time()
        for i in range(number_of_objects):
            key = bucket.new_key(str(i))
            content = str(os.urandom(1024*1024))
            etag = hashlib.md5(content.encode()).hexdigest()
            etags.append(etag)
            thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
            thr.start()
            client_threads.append(thr)
        [thr.join() for thr in client_threads]
        time_diff = time.time() - start_time
        print('average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
        print('wait for 5sec for the messages...')
        time.sleep(5)
        # verify creation events (with etags)
        keys = list(bucket.list())
        receiver.verify_s3_events(keys, exact_match=True, etags=etags)
        # delete objects from the bucket
        client_threads = []
        start_time = time.time()
        for key in bucket.list():
            thr = threading.Thread(target = key.delete, args=())
            thr.start()
            client_threads.append(thr)
        [thr.join() for thr in client_threads]
        time_diff = time.time() - start_time
        print('average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
        print('wait for 5sec for the messages...')
        time.sleep(5)
        # verify deletion events (with etags)
        receiver.verify_s3_events(keys, exact_match=True, deletions=True, etags=etags)
    except Exception as e:
        print(e)
        assert False
    finally:
        # cleanup - guard each step since any of them may not have been created
        if s3_notification_conf is not None:
            s3_notification_conf.del_config()
        if topic_conf1 is not None:
            topic_conf1.del_config()
        if topic_conf2 is not None:
            topic_conf2.del_config()
        # delete the bucket
        for key in bucket.list():
            key.delete()
        conn.delete_bucket(bucket_name)
        if receiver is not None:
            stop_kafka_receiver(receiver, task)
@attr('http_test')
def test_ps_s3_notification_multi_delete_on_master():
    """ test deletion of multiple keys on master """
    # an ObjectRemoved:* notification is configured, then all objects are
    # removed via a single multi-delete request; every deletion must be
    # reported to the http endpoint
    ip_addr = get_ip()
    s3_conn = connection()
    zonegroup = 'default'
    # pick a random port and spin up the http receiver
    server_host = get_ip()
    server_port = random.randint(10000, 20000)
    num_objects = 10
    http_server = StreamingHTTPServer(server_host, server_port, num_workers=num_objects)
    # bucket under test
    bucket_name = gen_bucket_name()
    bucket = s3_conn.create_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX
    # topic pushing to the http server
    endpoint_address = f'http://{server_host}:{server_port}'
    endpoint_args = f'push-endpoint={endpoint_address}'
    topic_conf = PSTopicS3(s3_conn, topic_name, zonegroup, endpoint_args=endpoint_args)
    topic_arn = topic_conf.set_config()
    # notification for deletion events only
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name,
                        'TopicArn': topic_arn,
                        'Events': ['s3:ObjectRemoved:*']}]
    s3_notification_conf = PSNotificationS3(s3_conn, bucket_name, topic_conf_list)
    response, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # upload objects of random sizes concurrently, remembering each size
    uploaders = []
    objects_size = {}
    for idx in range(num_objects):
        payload = str(os.urandom(randint(1, 1024)))
        obj = bucket.new_key(str(idx))
        objects_size[obj.name] = len(payload)
        worker = threading.Thread(target=set_contents_from_string, args=(obj, payload))
        worker.start()
        uploaders.append(worker)
    for worker in uploaders:
        worker.join()
    keys = list(bucket.list())
    # remove everything with one multi-delete request and time it
    start_time = time.time()
    delete_all_objects(s3_conn, bucket_name)
    time_diff = time.time() - start_time
    print('average time for deletion + http notification is: ' + str(time_diff*1000/num_objects) + ' milliseconds')
    print('wait for 5sec for the messages...')
    time.sleep(5)
    # every deletion should have produced an event with the right size
    http_server.verify_s3_events(keys, exact_match=True, deletions=True, expected_sizes=objects_size)
    # cleanup
    topic_conf.del_config()
    s3_notification_conf.del_config(notification=notification_name)
    # delete the bucket
    s3_conn.delete_bucket(bucket_name)
    http_server.close()
@attr('http_test')
def test_ps_s3_notification_push_http_on_master():
    """ test pushing http s3 notification on master

    a notification subscribed to all events pushes to a local http server;
    both creation and deletion events are verified, including object sizes.
    """
    hostname = get_ip_http()  # NOTE(review): assigned but unused below
    conn = connection()
    zonegroup = 'default'
    # create random port for the http server
    host = get_ip()
    port = random.randint(10000, 20000)
    # start an http server in a separate thread
    number_of_objects = 10
    http_server = StreamingHTTPServer(host, port, num_workers=number_of_objects)
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = conn.create_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX
    # create s3 topic pushing to the http server
    endpoint_address = 'http://'+host+':'+str(port)
    endpoint_args = 'push-endpoint='+endpoint_address
    topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args)
    topic_arn = topic_conf.set_config()
    # create s3 notification subscribed to all events
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name,
                        'TopicArn': topic_arn,
                        'Events': []
                       }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    response, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # create objects in the bucket concurrently, recording each object's size
    # so the http receiver can verify them against the events
    client_threads = []
    objects_size = {}
    start_time = time.time()
    for i in range(number_of_objects):
        content = str(os.urandom(randint(1, 1024)))
        object_size = len(content)
        key = bucket.new_key(str(i))
        objects_size[key.name] = object_size
        thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    time_diff = time.time() - start_time
    print('average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
    print('wait for 5sec for the messages...')
    time.sleep(5)
    # check http receiver for creation events
    keys = list(bucket.list())
    http_server.verify_s3_events(keys, exact_match=True, deletions=False, expected_sizes=objects_size)
    # delete objects from the bucket
    client_threads = []
    start_time = time.time()
    for key in bucket.list():
        thr = threading.Thread(target = key.delete, args=())
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    time_diff = time.time() - start_time
    print('average time for deletion + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
    print('wait for 5sec for the messages...')
    time.sleep(5)
    # check http receiver for deletion events
    http_server.verify_s3_events(keys, exact_match=True, deletions=True, expected_sizes=objects_size)
    # cleanup
    topic_conf.del_config()
    s3_notification_conf.del_config(notification=notification_name)
    # delete the bucket
    conn.delete_bucket(bucket_name)
    http_server.close()
@attr('http_test')
def test_ps_s3_notification_push_cloudevents_on_master():
    """ test pushing cloudevents notification on master

    same flow as the plain-http push test, but the topic is created with
    'cloudevents=true' and the http server parses cloudevents envelopes.
    """
    hostname = get_ip_http()  # NOTE(review): assigned but unused below
    conn = connection()
    zonegroup = 'default'
    # create random port for the http server
    host = get_ip()
    port = random.randint(10000, 20000)
    # start an http server (in cloudevents mode) in a separate thread
    number_of_objects = 10
    http_server = StreamingHTTPServer(host, port, num_workers=number_of_objects, cloudevents=True)
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = conn.create_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX
    # create s3 topic with cloudevents enabled
    endpoint_address = 'http://'+host+':'+str(port)
    endpoint_args = 'push-endpoint='+endpoint_address+'&cloudevents=true'
    topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args)
    topic_arn = topic_conf.set_config()
    # create s3 notification subscribed to all events
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name,
                        'TopicArn': topic_arn,
                        'Events': []
                       }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    response, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # create objects in the bucket concurrently, recording object sizes
    client_threads = []
    objects_size = {}
    start_time = time.time()
    for i in range(number_of_objects):
        content = str(os.urandom(randint(1, 1024)))
        object_size = len(content)
        key = bucket.new_key(str(i))
        objects_size[key.name] = object_size
        thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    time_diff = time.time() - start_time
    print('average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
    print('wait for 5sec for the messages...')
    time.sleep(5)
    # check http receiver for creation events
    keys = list(bucket.list())
    http_server.verify_s3_events(keys, exact_match=True, deletions=False, expected_sizes=objects_size)
    # delete objects from the bucket
    client_threads = []
    start_time = time.time()
    for key in bucket.list():
        thr = threading.Thread(target = key.delete, args=())
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    time_diff = time.time() - start_time
    print('average time for deletion + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
    print('wait for 5sec for the messages...')
    time.sleep(5)
    # check http receiver for deletion events
    http_server.verify_s3_events(keys, exact_match=True, deletions=True, expected_sizes=objects_size)
    # cleanup
    topic_conf.del_config()
    s3_notification_conf.del_config(notification=notification_name)
    # delete the bucket
    conn.delete_bucket(bucket_name)
    http_server.close()
@attr('http_test')
def test_ps_s3_opaque_data_on_master():
    """ test that opaque id set in topic, is sent in notification on master

    a topic is created with an OpaqueData attribute; every event delivered
    to the http server must carry that value in its 'opaqueData' field.
    """
    hostname = get_ip()  # NOTE(review): assigned but unused below
    conn = connection()
    zonegroup = 'default'
    # create random port for the http server
    host = get_ip()
    port = random.randint(10000, 20000)
    # start an http server in a separate thread
    number_of_objects = 10
    http_server = StreamingHTTPServer(host, port, num_workers=number_of_objects)
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = conn.create_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX
    # create s3 topic carrying the opaque data
    endpoint_address = 'http://'+host+':'+str(port)
    endpoint_args = 'push-endpoint='+endpoint_address
    opaque_data = 'http://1.2.3.4:8888'
    topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args, opaque_data=opaque_data)
    topic_arn = topic_conf.set_config()
    # create s3 notification subscribed to all events
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name,
                        'TopicArn': topic_arn,
                        'Events': []
                       }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    response, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # create objects in the bucket concurrently
    client_threads = []
    start_time = time.time()
    content = 'bar'
    for i in range(number_of_objects):
        key = bucket.new_key(str(i))
        thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    time_diff = time.time() - start_time
    print('average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
    print('wait for 5sec for the messages...')
    time.sleep(5)
    # check that every received event carries the topic's opaque data
    keys = list(bucket.list())
    print('total number of objects: ' + str(len(keys)))
    events = http_server.get_and_reset_events()
    for event in events:
        assert_equal(event['Records'][0]['opaqueData'], opaque_data)
    # cleanup
    for key in keys:
        key.delete()
    # NOTE(review): the upload threads were already joined above; this second
    # join is a harmless no-op
    [thr.join() for thr in client_threads]
    topic_conf.del_config()
    s3_notification_conf.del_config(notification=notification_name)
    # delete the bucket
    conn.delete_bucket(bucket_name)
    http_server.close()
@attr('http_test')
def test_ps_s3_lifecycle_on_master():
    """ test that when object is deleted due to lifecycle policy, notification is sent on master

    Creates objects under a common prefix, attaches an already-expired
    lifecycle rule to the bucket, runs 'lc process' and verifies that an
    'ObjectLifecycle:Expiration:Current' notification reached the HTTP
    receiver for every listed key.
    """
    conn = connection()
    zonegroup = 'default'
    # create random port for the http server
    host = get_ip()
    port = random.randint(10000, 20000)
    # start an http server in a separate thread
    number_of_objects = 10
    http_server = StreamingHTTPServer(host, port, num_workers=number_of_objects)
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = conn.create_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX
    # create s3 topic
    endpoint_address = 'http://'+host+':'+str(port)
    endpoint_args = 'push-endpoint='+endpoint_address
    opaque_data = 'http://1.2.3.4:8888'
    topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args, opaque_data=opaque_data)
    topic_arn = topic_conf.set_config()
    # create s3 notification subscribed only to lifecycle expiration events
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name,
                        'TopicArn': topic_arn,
                        'Events': ['s3:ObjectLifecycle:Expiration:*']
                       }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    response, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # create objects in the bucket
    obj_prefix = 'ooo'
    client_threads = []
    start_time = time.time()
    content = 'bar'
    for i in range(number_of_objects):
        key = bucket.new_key(obj_prefix + str(i))
        thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    time_diff = time.time() - start_time
    print('average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
    # create lifecycle policy that expired yesterday, so every object
    # under the prefix is immediately eligible for expiration
    client = boto3.client('s3',
                          endpoint_url='http://'+conn.host+':'+str(conn.port),
                          aws_access_key_id=conn.aws_access_key_id,
                          aws_secret_access_key=conn.aws_secret_access_key)
    yesterday = datetime.date.today() - datetime.timedelta(days=1)
    response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name,
            LifecycleConfiguration={'Rules': [
                {
                    'ID': 'rule1',
                    'Expiration': {'Date': yesterday.isoformat()},
                    'Filter': {'Prefix': obj_prefix},
                    'Status': 'Enabled',
                }
            ]
        }
    )
    # start lifecycle processing
    admin(['lc', 'process'])
    print('wait for 5sec for the messages...')
    time.sleep(5)
    # verify that the http receiver got an expiration event for every key
    keys = list(bucket.list())
    print('total number of objects: ' + str(len(keys)))
    event_keys = []
    events = http_server.get_and_reset_events()
    for event in events:
        assert_equal(event['Records'][0]['eventName'], 'ObjectLifecycle:Expiration:Current')
        event_keys.append(event['Records'][0]['s3']['object']['key'])
    for key in keys:
        # compare against the key *name*: 'key' is a boto Key object while the
        # event carries the object name as a string, so the previous comparison
        # ('event_key == key') could never match
        if key.name not in event_keys:
            err = 'no lifecycle event found for key: ' + str(key)
            log.error(events)
            assert False, err
    # cleanup
    for key in keys:
        key.delete()
    topic_conf.del_config()
    s3_notification_conf.del_config(notification=notification_name)
    # delete the bucket
    conn.delete_bucket(bucket_name)
    http_server.close()
def ps_s3_creation_triggers_on_master(external_endpoint_address=None, ca_location=None, verify_ssl='true'):
    """ test object creation s3 notifications in using put/copy/post on master

    Creates one object each via PUT, COPY and multipart upload, then checks
    that matching S3 notifications (with correct sizes) arrived at an AMQP
    receiver.

    :param external_endpoint_address: full AMQP endpoint URL; when None a
        local rabbitmq-server is started and 'amqp(s)://localhost' is used
    :param ca_location: CA bundle path; when set (and no external endpoint)
        the 'amqps://' scheme is used and 'ca-location' is added to the topic
    :param verify_ssl: 'true'/'false' string forwarded in the endpoint args
    """
    if not external_endpoint_address:
        hostname = 'localhost'
        proc = init_rabbitmq()
        if proc is None:
            # NOTE(review): nose skips on a *raised* SkipTest; returning it
            # matches the rest of the file but may not actually mark the test
            # as skipped — confirm
            return SkipTest('end2end amqp tests require rabbitmq-server installed')
    else:
        # external broker supplied by the caller; nothing to clean up later
        proc = None
    conn = connection()
    hostname = 'localhost'
    zonegroup = 'default'
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = conn.create_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX
    # start amqp receiver
    exchange = 'ex1'
    task, receiver = create_amqp_receiver_thread(exchange, topic_name, external_endpoint_address, ca_location)
    task.start()
    # create s3 topic; scheme depends on whether TLS material was provided
    if external_endpoint_address:
        endpoint_address = external_endpoint_address
    elif ca_location:
        endpoint_address = 'amqps://' + hostname
    else:
        endpoint_address = 'amqp://' + hostname
    endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=broker&verify-ssl='+verify_ssl
    if ca_location:
        endpoint_args += '&ca-location={}'.format(ca_location)
    topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args)
    topic_arn = topic_conf.set_config()
    # create s3 notification for the three creation event types exercised below
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name,'TopicArn': topic_arn,
                        'Events': ['s3:ObjectCreated:Put', 's3:ObjectCreated:Copy', 's3:ObjectCreated:CompleteMultipartUpload']
                       }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    response, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # map of key name -> expected size, verified against the events below
    objects_size = {}
    # create objects in the bucket using PUT
    content = str(os.urandom(randint(1, 1024)))
    key_name = 'put'
    key = bucket.new_key(key_name)
    objects_size[key_name] = len(content)
    key.set_contents_from_string(content)
    # create objects in the bucket using COPY
    key_name = 'copy'
    bucket.copy_key(key_name, bucket.name, key.name)
    objects_size[key_name] = len(content)
    # create objects in the bucket using multi-part upload
    fp = tempfile.NamedTemporaryFile(mode='w+b')
    content = bytearray(os.urandom(10*1024*1024))
    key_name = 'multipart'
    objects_size[key_name] = len(content)
    fp.write(content)
    fp.flush()
    fp.seek(0)
    uploader = bucket.initiate_multipart_upload(key_name)
    uploader.upload_part_from_file(fp, 1)
    uploader.complete_upload()
    fp.close()
    print('wait for 5sec for the messages...')
    time.sleep(5)
    # check amqp receiver: every key must have an event with the right size
    keys = list(bucket.list())
    receiver.verify_s3_events(keys, exact_match=True, expected_sizes=objects_size)
    # cleanup
    stop_amqp_receiver(receiver, task)
    s3_notification_conf.del_config()
    topic_conf.del_config()
    for key in bucket.list():
        key.delete()
    # delete the bucket
    conn.delete_bucket(bucket_name)
    if proc:
        clean_rabbitmq(proc)
@attr('amqp_test')
def test_ps_s3_creation_triggers_on_master():
    """ run the put/copy/multipart creation-notification scenario against a
    local rabbitmq broker addressed explicitly by host and port """
    local_broker = "amqp://localhost:5672"
    ps_s3_creation_triggers_on_master(external_endpoint_address=local_broker)
@attr('amqp_ssl_test')
def test_ps_s3_creation_triggers_on_master_external():
    """ run the creation-notification scenario against an external AMQP broker

    Requires AMQP_EXTERNAL_ENDPOINT in the environment; AMQP_VERIFY_SSL
    optionally controls certificate verification (defaults to enabled).
    """
    if 'AMQP_EXTERNAL_ENDPOINT' not in os.environ:
        return SkipTest("Set AMQP_EXTERNAL_ENDPOINT to a valid external AMQP endpoint url for this test to run")
    # distutils.util.strtobool was removed in Python 3.12 (PEP 632); replicate
    # its falsy tokens here. Anything else — including a missing or invalid
    # value — falls back to verification enabled, matching the original
    # try/except behavior.
    val = os.environ.get('AMQP_VERIFY_SSL', '').strip().lower()
    if val in ('n', 'no', 'f', 'false', 'off', '0'):
        verify_ssl = 'false'
    else:
        verify_ssl = 'true'
    ps_s3_creation_triggers_on_master(
        external_endpoint_address=os.environ['AMQP_EXTERNAL_ENDPOINT'],
        verify_ssl=verify_ssl)
def generate_private_key(tempdir):
    """Create a self-signed CA plus a 'localhost' server certificate under tempdir.

    Writes three PEM files (CA certificate, server certificate, server key)
    into tempdir, relaxes the directory permissions so the message broker
    user can read them, and returns the three file paths.

    :param tempdir: directory that receives the generated PEM files
    :return: tuple (ca_certificate_path, server_certificate_path, server_key_path)
    """
    import datetime
    import stat
    from cryptography import x509
    from cryptography.x509.oid import NameOID
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import serialization
    from cryptography.hazmat.primitives.asymmetric import rsa
    # modify permissions to ensure that the broker user can access them
    os.chmod(tempdir, mode=stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
    ca_cert_file = os.path.join(tempdir, 'ca_certificate.pem')
    server_cert_file = os.path.join(tempdir, 'server_certificate.pem')
    server_key_file = os.path.join(tempdir, 'server_key.pem')

    def new_rsa_key():
        # 2048-bit RSA key with the standard public exponent
        return rsa.generate_private_key(
            public_exponent=65537,
            key_size=2048,
            backend=default_backend())

    def name_for(common_name=None):
        # subject attributes shared by the CA and the server certificate;
        # only the CA carries a common name
        attrs = [
            x509.NameAttribute(NameOID.COUNTRY_NAME, u"UK"),
            x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"Oxfordshire"),
            x509.NameAttribute(NameOID.LOCALITY_NAME, u"Harwell"),
            x509.NameAttribute(NameOID.ORGANIZATION_NAME, u"Rosalind Franklin Institute"),
        ]
        if common_name is not None:
            attrs.append(x509.NameAttribute(NameOID.COMMON_NAME, common_name))
        return x509.Name(attrs)

    # self-signed CA certificate, valid for ten years
    ca_key = new_rsa_key()
    ca_name = name_for(u"RFI CA")
    ca_cert = x509.CertificateBuilder() \
        .subject_name(ca_name) \
        .issuer_name(ca_name) \
        .public_key(ca_key.public_key()) \
        .serial_number(x509.random_serial_number()) \
        .not_valid_before(datetime.datetime.utcnow()) \
        .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=3650)) \
        .add_extension(x509.BasicConstraints(ca=True, path_length=None), critical=True) \
        .sign(ca_key, hashes.SHA256(), default_backend())
    with open(ca_cert_file, "wb") as f:
        f.write(ca_cert.public_bytes(serialization.Encoding.PEM))
    # server key, stored unencrypted in PKCS8 form
    server_key = new_rsa_key()
    with open(server_key_file, "wb") as f:
        f.write(server_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.NoEncryption(),
        ))
    # server certificate for 'localhost', signed by the CA, valid for 30 days
    server_cert = x509.CertificateBuilder() \
        .subject_name(name_for()) \
        .issuer_name(ca_cert.issuer) \
        .public_key(server_key.public_key()) \
        .serial_number(x509.random_serial_number()) \
        .not_valid_before(datetime.datetime.utcnow()) \
        .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=30)) \
        .add_extension(x509.SubjectAlternativeName([x509.DNSName(u"localhost")]),
                       critical=False) \
        .sign(ca_key, hashes.SHA256(), default_backend())
    # Write our certificate out to disk.
    with open(server_cert_file, "wb") as f:
        f.write(server_cert.public_bytes(serialization.Encoding.PEM))
    print("\n\n********private key generated********")
    print(ca_cert_file, server_cert_file, server_key_file)
    print("\n\n")
    return ca_cert_file, server_cert_file, server_key_file
@attr('amqp_ssl_test')
def test_ps_s3_creation_triggers_on_master_ssl():
    """ run the creation-notification scenario over amqps against a local
    rabbitmq configured on the fly with a freshly generated CA and server
    certificate """
    import textwrap
    from tempfile import TemporaryDirectory
    with TemporaryDirectory() as tempdir:
        # generate a throwaway CA + server cert/key pair for the broker
        CACERTFILE, CERTFILE, KEYFILE = generate_private_key(tempdir)
        RABBITMQ_CONF_FILE = os.path.join(tempdir, 'rabbitmq.config')
        with open(RABBITMQ_CONF_FILE, "w") as f:
            # use the old style config format to ensure it also runs on older RabbitMQ versions.
            f.write(textwrap.dedent(f'''
                [
                  {{rabbit, [
                    {{ssl_listeners, [5671]}},
                    {{ssl_options, [{{cacertfile, "{CACERTFILE}"}},
                      {{certfile, "{CERTFILE}"}},
                      {{keyfile, "{KEYFILE}"}},
                      {{verify, verify_peer}},
                      {{fail_if_no_peer_cert, false}}]}}]}}
                ].
            '''))
        # RABBITMQ_CONFIG_FILE expects the path *without* the extension;
        # the broker started inside the helper picks it up from the env
        os.environ['RABBITMQ_CONFIG_FILE'] = os.path.splitext(RABBITMQ_CONF_FILE)[0]
        ps_s3_creation_triggers_on_master(ca_location=CACERTFILE)
        del os.environ['RABBITMQ_CONFIG_FILE']
@attr('amqp_test')
def test_http_post_object_upload():
    """ test that uploads object using HTTP POST

    Uploads via a presigned POST form and verifies that exactly one
    's3:ObjectCreated:Post' notification is delivered to the AMQP receiver.
    """
    import boto3
    from collections import OrderedDict
    import requests
    hostname = get_ip()
    zonegroup = 'default'
    conn = connection()
    endpoint = "http://%s:%d" % (get_config_host(), get_config_port())
    conn1 = boto3.client(service_name='s3',
                         aws_access_key_id=get_access_key(),
                         aws_secret_access_key=get_secret_key(),
                         endpoint_url=endpoint,
                         )
    bucket_name = gen_bucket_name()
    topic_name = bucket_name + TOPIC_SUFFIX
    key_name = 'foo.txt'
    # NOTE(review): the presigned POST is generated before the bucket exists;
    # presumably signing does not require the bucket to exist — confirm
    resp = conn1.generate_presigned_post(Bucket=bucket_name, Key=key_name,)
    url = resp['url']
    bucket = conn1.create_bucket(ACL='public-read-write', Bucket=bucket_name)
    # start amqp receivers
    exchange = 'ex1'
    task1, receiver1 = create_amqp_receiver_thread(exchange, topic_name+'_1')
    task1.start()
    # create s3 topics
    endpoint_address = 'amqp://' + hostname
    endpoint_args = 'push-endpoint=' + endpoint_address + '&amqp-exchange=' + exchange + '&amqp-ack-level=broker'
    topic_conf1 = PSTopicS3(conn, topic_name+'_1', zonegroup, endpoint_args=endpoint_args)
    topic_arn1 = topic_conf1.set_config()
    # create s3 notifications, subscribed to POST-created objects only
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name+'_1', 'TopicArn': topic_arn1,
                        'Events': ['s3:ObjectCreated:Post']
                       }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    response, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # form fields for the POST; 'file' carries the object content
    payload = OrderedDict([("key" , "foo.txt"),("acl" , "public-read"),\
        ("Content-Type" , "text/plain"),('file', ('bar'))])
    # POST upload
    r = requests.post(url, files=payload, verify=True)
    assert_equal(r.status_code, 204)
    # check amqp receiver
    events = receiver1.get_and_reset_events()
    assert_equal(len(events), 1)
    # cleanup
    stop_amqp_receiver(receiver1, task1)
    s3_notification_conf.del_config()
    topic_conf1.del_config()
    conn1.delete_object(Bucket=bucket_name, Key=key_name)
    # delete the bucket
    conn1.delete_bucket(Bucket=bucket_name)
@attr('amqp_test')
def test_ps_s3_multipart_on_master():
    """ test multipart object upload on master

    Three notifications on one bucket: _1 gets all ObjectCreated events,
    _2 only Post, _3 only CompleteMultipartUpload. A multipart upload must
    reach _1 and _3 and must not reach _2.
    """
    hostname = get_ip()
    conn = connection()
    zonegroup = 'default'
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = conn.create_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX
    # start amqp receivers
    exchange = 'ex1'
    task1, receiver1 = create_amqp_receiver_thread(exchange, topic_name+'_1')
    task1.start()
    task2, receiver2 = create_amqp_receiver_thread(exchange, topic_name+'_2')
    task2.start()
    task3, receiver3 = create_amqp_receiver_thread(exchange, topic_name+'_3')
    task3.start()
    # create s3 topics
    endpoint_address = 'amqp://' + hostname
    endpoint_args = 'push-endpoint=' + endpoint_address + '&amqp-exchange=' + exchange + '&amqp-ack-level=broker'
    topic_conf1 = PSTopicS3(conn, topic_name+'_1', zonegroup, endpoint_args=endpoint_args)
    topic_arn1 = topic_conf1.set_config()
    topic_conf2 = PSTopicS3(conn, topic_name+'_2', zonegroup, endpoint_args=endpoint_args)
    topic_arn2 = topic_conf2.set_config()
    topic_conf3 = PSTopicS3(conn, topic_name+'_3', zonegroup, endpoint_args=endpoint_args)
    topic_arn3 = topic_conf3.set_config()
    # create s3 notifications
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name+'_1', 'TopicArn': topic_arn1,
                        'Events': ['s3:ObjectCreated:*']
                       },
                       {'Id': notification_name+'_2', 'TopicArn': topic_arn2,
                        'Events': ['s3:ObjectCreated:Post']
                       },
                       {'Id': notification_name+'_3', 'TopicArn': topic_arn3,
                        'Events': ['s3:ObjectCreated:CompleteMultipartUpload']
                       }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    response, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # create objects in the bucket using multi-part upload
    fp = tempfile.NamedTemporaryFile(mode='w+b')
    object_size = 1024
    content = bytearray(os.urandom(object_size))
    fp.write(content)
    fp.flush()
    fp.seek(0)
    uploader = bucket.initiate_multipart_upload('multipart')
    uploader.upload_part_from_file(fp, 1)
    uploader.complete_upload()
    fp.close()
    print('wait for 5sec for the messages...')
    time.sleep(5)
    # check amqp receiver
    events = receiver1.get_and_reset_events()
    assert_equal(len(events), 1)
    # the POST-only notification must not fire for a multipart upload
    events = receiver2.get_and_reset_events()
    assert_equal(len(events), 0)
    events = receiver3.get_and_reset_events()
    assert_equal(len(events), 1)
    assert_equal(events[0]['Records'][0]['eventName'], 'ObjectCreated:CompleteMultipartUpload')
    assert_equal(events[0]['Records'][0]['s3']['configurationId'], notification_name+'_3')
    assert_equal(events[0]['Records'][0]['s3']['object']['size'], object_size)
    # an eventTime of '0.000000' would indicate an unset timestamp
    assert events[0]['Records'][0]['eventTime'] != '0.000000', 'invalid eventTime'
    # cleanup
    stop_amqp_receiver(receiver1, task1)
    stop_amqp_receiver(receiver2, task2)
    stop_amqp_receiver(receiver3, task3)
    s3_notification_conf.del_config()
    topic_conf1.del_config()
    topic_conf2.del_config()
    topic_conf3.del_config()
    for key in bucket.list():
        key.delete()
    # delete the bucket
    conn.delete_bucket(bucket_name)
@attr('amqp_test')
def test_ps_s3_metadata_filter_on_master():
    """ test s3 notification of metadata on master

    Only objects whose 'meta1' x-amz-meta attribute equals the configured
    filter value should produce create/remove notifications.
    """
    hostname = get_ip()
    conn = connection()
    zonegroup = 'default'
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = conn.create_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX
    # start amqp receivers
    exchange = 'ex1'
    task, receiver = create_amqp_receiver_thread(exchange, topic_name)
    task.start()
    # create s3 topic
    endpoint_address = 'amqp://' + hostname
    endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=routable'
    topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args)
    topic_arn = topic_conf.set_config()
    # create s3 notification with an exact metadata key/value filter
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    meta_key = 'meta1'
    meta_value = 'This is my metadata value'
    topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn,
        'Events': ['s3:ObjectCreated:*', 's3:ObjectRemoved:*'],
        'Filter': {
            'Metadata': {
                'FilterRules': [{'Name': META_PREFIX+meta_key, 'Value': meta_value}]
            }
        }
    }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    _, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    expected_keys = []
    # create objects in the bucket
    key_name = 'foo'
    key = bucket.new_key(key_name)
    key.set_metadata(meta_key, meta_value)
    key.set_contents_from_string('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
    expected_keys.append(key_name)
    # create objects in the bucket using COPY
    # (the copy keeps the source metadata, so it should match the filter)
    key_name = 'copy_of_foo'
    bucket.copy_key(key_name, bucket.name, key.name)
    expected_keys.append(key_name)
    # create another objects in the bucket using COPY
    # but override the metadata value
    key_name = 'another_copy_of_foo'
    bucket.copy_key(key_name, bucket.name, key.name, metadata={meta_key: 'kaboom'})
    # this key is not in the expected keys due to the different meta value
    # create objects in the bucket using multi-part upload
    fp = tempfile.NamedTemporaryFile(mode='w+b')
    chunk_size = 1024*1024*5 # 5MB
    object_size = 10*chunk_size
    content = bytearray(os.urandom(object_size))
    fp.write(content)
    fp.flush()
    fp.seek(0)
    key_name = 'multipart_foo'
    uploader = bucket.initiate_multipart_upload(key_name,
            metadata={meta_key: meta_value})
    for i in range(1,5):
        uploader.upload_part_from_file(fp, i, size=chunk_size)
        fp.seek(i*chunk_size)
    uploader.complete_upload()
    fp.close()
    expected_keys.append(key_name)
    print('wait for 5sec for the messages...')
    time.sleep(5)
    # check amqp receiver: only the matching keys should have generated events
    events = receiver.get_and_reset_events()
    assert_equal(len(events), len(expected_keys))
    for event in events:
        assert(event['Records'][0]['s3']['object']['key'] in expected_keys)
    # delete objects
    for key in bucket.list():
        key.delete()
    print('wait for 5sec for the messages...')
    time.sleep(5)
    # check amqp receiver: removal events must obey the same metadata filter
    events = receiver.get_and_reset_events()
    assert_equal(len(events), len(expected_keys))
    for event in events:
        assert(event['Records'][0]['s3']['object']['key'] in expected_keys)
    # cleanup
    stop_amqp_receiver(receiver, task)
    s3_notification_conf.del_config()
    topic_conf.del_config()
    # delete the bucket
    conn.delete_bucket(bucket_name)
@attr('amqp_test')
def test_ps_s3_metadata_on_master():
    """ test s3 notification of metadata on master

    Without any filter, every create/remove event should carry the object's
    x-amz-meta attributes; verify that 'meta1' round-trips through the
    notification for PUT, COPY and multipart uploads.
    """
    hostname = get_ip()
    conn = connection()
    zonegroup = 'default'
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = conn.create_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX
    # start amqp receivers
    exchange = 'ex1'
    task, receiver = create_amqp_receiver_thread(exchange, topic_name)
    task.start()
    # create s3 topic
    endpoint_address = 'amqp://' + hostname
    endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=routable'
    topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args)
    topic_arn = topic_conf.set_config()
    # create s3 notification (no filter: all create/remove events)
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    meta_key = 'meta1'
    meta_value = 'This is my metadata value'
    topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn,
        'Events': ['s3:ObjectCreated:*', 's3:ObjectRemoved:*'],
    }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    _, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # create objects in the bucket
    key_name = 'foo'
    key = bucket.new_key(key_name)
    key.set_metadata(meta_key, meta_value)
    key.set_contents_from_string('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
    # update the object, adding a second metadata attribute
    another_meta_key = 'meta2'
    key.set_metadata(another_meta_key, meta_value)
    key.set_contents_from_string('bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb')
    # create objects in the bucket using COPY
    key_name = 'copy_of_foo'
    bucket.copy_key(key_name, bucket.name, key.name)
    # create objects in the bucket using multi-part upload
    fp = tempfile.NamedTemporaryFile(mode='w+b')
    chunk_size = 1024*1024*5 # 5MB
    object_size = 10*chunk_size
    content = bytearray(os.urandom(object_size))
    fp.write(content)
    fp.flush()
    fp.seek(0)
    key_name = 'multipart_foo'
    uploader = bucket.initiate_multipart_upload(key_name,
            metadata={meta_key: meta_value})
    for i in range(1,5):
        uploader.upload_part_from_file(fp, i, size=chunk_size)
        fp.seek(i*chunk_size)
    uploader.complete_upload()
    fp.close()
    print('wait for 5sec for the messages...')
    time.sleep(5)
    # check amqp receiver: creation events must carry the metadata value
    events = receiver.get_and_reset_events()
    for event in events:
        value = [x['val'] for x in event['Records'][0]['s3']['object']['metadata'] if x['key'] == META_PREFIX+meta_key]
        assert_equal(value[0], meta_value)
    # delete objects
    for key in bucket.list():
        key.delete()
    print('wait for 5sec for the messages...')
    time.sleep(5)
    # check amqp receiver: removal events must carry the metadata as well
    events = receiver.get_and_reset_events()
    for event in events:
        value = [x['val'] for x in event['Records'][0]['s3']['object']['metadata'] if x['key'] == META_PREFIX+meta_key]
        assert_equal(value[0], meta_value)
    # cleanup
    stop_amqp_receiver(receiver, task)
    s3_notification_conf.del_config()
    topic_conf.del_config()
    # delete the bucket
    conn.delete_bucket(bucket_name)
@attr('amqp_test')
def test_ps_s3_tags_on_master():
    """ test s3 notification of tags on master

    The notification filters on two tags ('hello=world' AND 'ka=boom');
    only keys carrying both values (possibly among multi-valued tags) should
    produce create/remove events, and each event must carry the object's
    full tag set.
    """
    hostname = get_ip()
    conn = connection()
    zonegroup = 'default'
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = conn.create_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX
    # start amqp receiver
    exchange = 'ex1'
    task, receiver = create_amqp_receiver_thread(exchange, topic_name)
    task.start()
    # create s3 topic
    endpoint_address = 'amqp://' + hostname
    endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=routable'
    topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args)
    topic_arn = topic_conf.set_config()
    # create s3 notification with a two-tag filter
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name,'TopicArn': topic_arn,
        'Events': ['s3:ObjectCreated:*', 's3:ObjectRemoved:*'],
        'Filter': {
            'Tags': {
                'FilterRules': [{'Name': 'hello', 'Value': 'world'}, {'Name': 'ka', 'Value': 'boom'}]
            }
        }
    }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    response, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    expected_keys = []
    # create objects in the bucket with tags
    # key 1 has all the tags in the filter
    tags = 'hello=world&ka=boom&hello=helloworld'
    key_name1 = 'key1'
    put_object_tagging(conn, bucket_name, key_name1, tags)
    expected_keys.append(key_name1)
    # key 2 has an additional tag not in the filter
    tags = 'hello=world&foo=bar&ka=boom&hello=helloworld'
    key_name = 'key2'
    put_object_tagging(conn, bucket_name, key_name, tags)
    expected_keys.append(key_name)
    # key 3 has no tags
    key_name3 = 'key3'
    key = bucket.new_key(key_name3)
    key.set_contents_from_string('bar')
    # key 4 has the wrong of the multi value tags
    tags = 'hello=helloworld&ka=boom'
    key_name = 'key4'
    put_object_tagging(conn, bucket_name, key_name, tags)
    # key 5 has the right of the multi value tags
    tags = 'hello=world&ka=boom'
    key_name = 'key5'
    put_object_tagging(conn, bucket_name, key_name, tags)
    expected_keys.append(key_name)
    # key 6 is missing a tag
    tags = 'hello=world'
    key_name = 'key6'
    put_object_tagging(conn, bucket_name, key_name, tags)
    # create objects in the bucket using COPY
    # (the copy keeps the source tags, so it should match the filter too)
    key_name = 'copy_of_'+key_name1
    bucket.copy_key(key_name, bucket.name, key_name1)
    expected_keys.append(key_name)
    print('wait for 5sec for the messages...')
    time.sleep(5)
    event_count = 0
    # tag order in the event is not guaranteed; sort both sides for comparison
    expected_tags1 = [{'key': 'hello', 'val': 'world'}, {'key': 'hello', 'val': 'helloworld'}, {'key': 'ka', 'val': 'boom'}]
    expected_tags1 = sorted(expected_tags1, key=lambda k: k['key']+k['val'])
    for event in receiver.get_and_reset_events():
        key = event['Records'][0]['s3']['object']['key']
        if (key == key_name1):
            obj_tags = sorted(event['Records'][0]['s3']['object']['tags'], key=lambda k: k['key']+k['val'])
            assert_equal(obj_tags, expected_tags1)
        event_count += 1
        assert(key in expected_keys)
    assert_equal(event_count, len(expected_keys))
    # delete the objects
    for key in bucket.list():
        key.delete()
    print('wait for 5sec for the messages...')
    time.sleep(5)
    event_count = 0
    # check amqp receiver
    # removal events must obey the same tag filter and still carry the tags
    for event in receiver.get_and_reset_events():
        key = event['Records'][0]['s3']['object']['key']
        if (key == key_name1):
            obj_tags = sorted(event['Records'][0]['s3']['object']['tags'], key=lambda k: k['key']+k['val'])
            assert_equal(obj_tags, expected_tags1)
        event_count += 1
        assert(key in expected_keys)
    assert(event_count == len(expected_keys))
    # cleanup
    stop_amqp_receiver(receiver, task)
    s3_notification_conf.del_config()
    topic_conf.del_config()
    # delete the bucket
    conn.delete_bucket(bucket_name)
@attr('amqp_test')
def test_ps_s3_versioning_on_master():
    """ test s3 notification of object versions

    On a versioned bucket, two PUTs of the same key plus a COPY of an older
    version should each yield an event carrying the matching versionId.
    """
    hostname = get_ip()
    conn = connection()
    zonegroup = 'default'
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = conn.create_bucket(bucket_name)
    bucket.configure_versioning(True)
    topic_name = bucket_name + TOPIC_SUFFIX
    # start amqp receiver
    exchange = 'ex1'
    task, receiver = create_amqp_receiver_thread(exchange, topic_name)
    task.start()
    # create s3 topic
    endpoint_address = 'amqp://' + hostname
    endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=broker'
    topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args)
    topic_arn = topic_conf.set_config()
    # create notification (empty event list subscribes to all event types)
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn,
                        'Events': []
                       }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    _, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # create objects in the bucket: two versions of 'foo', then a copy of ver1
    key_name = 'foo'
    key = bucket.new_key(key_name)
    key.set_contents_from_string('hello')
    ver1 = key.version_id
    key.set_contents_from_string('world')
    ver2 = key.version_id
    copy_of_key = bucket.copy_key('copy_of_foo', bucket.name, key_name, src_version_id=ver1)
    ver3 = copy_of_key.version_id
    versions = [ver1, ver2, ver3]
    print('wait for 5sec for the messages...')
    time.sleep(5)
    # check amqp receiver
    events = receiver.get_and_reset_events()
    num_of_versions = 0
    for event_list in events:
        for event in event_list['Records']:
            assert event['s3']['object']['key'] in (key_name, copy_of_key.name)
            version = event['s3']['object']['versionId']
            num_of_versions += 1
            if version not in versions:
                print('version mismatch: '+version+' not in: '+str(versions))
                # TODO: copy_key() does not return the version of the copied object
                #assert False
            else:
                print('version ok: '+version+' in: '+str(versions))
    assert_equal(num_of_versions, 3)
    # cleanup
    stop_amqp_receiver(receiver, task)
    s3_notification_conf.del_config()
    topic_conf.del_config()
    # delete the bucket
    # on a versioned bucket each version has to be deleted explicitly
    bucket.delete_key(copy_of_key, version_id=ver3)
    bucket.delete_key(key.name, version_id=ver2)
    bucket.delete_key(key.name, version_id=ver1)
    #conn.delete_bucket(bucket_name)
@attr('amqp_test')
def test_ps_s3_versioned_deletion_on_master():
    """ test s3 notification of deletion markers on master

    On a versioned bucket a plain delete must produce a
    DeleteMarkerCreated event while version-specific deletes produce
    ObjectRemoved:Delete events, each fanned out to every notification
    whose event filter matches.
    """
    hostname = get_ip()
    conn = connection()
    zonegroup = 'default'
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = conn.create_bucket(bucket_name)
    bucket.configure_versioning(True)
    topic_name = bucket_name + TOPIC_SUFFIX
    # start amqp receiver
    exchange = 'ex1'
    task, receiver = create_amqp_receiver_thread(exchange, topic_name)
    task.start()
    # create s3 topic
    endpoint_address = 'amqp://' + hostname
    endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=broker'
    topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args)
    topic_arn = topic_conf.set_config()
    # create s3 notification: _1 sees all removals, _2 only delete markers,
    # _3 only actual deletes — all bound to the same topic
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name+'_1', 'TopicArn': topic_arn,
                        'Events': ['s3:ObjectRemoved:*']
                       },
                       {'Id': notification_name+'_2', 'TopicArn': topic_arn,
                        'Events': ['s3:ObjectRemoved:DeleteMarkerCreated']
                       },
                       {'Id': notification_name+'_3', 'TopicArn': topic_arn,
                        'Events': ['s3:ObjectRemoved:Delete']
                       }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    response, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # create objects in the bucket: two versions with distinct sizes so the
    # events below can be matched back to a specific version
    key = bucket.new_key('foo')
    content = str(os.urandom(512))
    size1 = len(content)
    key.set_contents_from_string(content)
    ver1 = key.version_id
    content = str(os.urandom(511))
    size2 = len(content)
    key.set_contents_from_string(content)
    ver2 = key.version_id
    # create delete marker (non versioned deletion)
    delete_marker_key = bucket.delete_key(key.name)
    versions = [ver1, ver2, delete_marker_key.version_id]
    time.sleep(1)
    # versioned deletion
    bucket.delete_key(key.name, version_id=ver2)
    bucket.delete_key(key.name, version_id=ver1)
    print('wait for 5sec for the messages...')
    time.sleep(5)
    # check amqp receiver
    events = receiver.get_and_reset_events()
    delete_events = 0
    delete_marker_create_events = 0
    for event_list in events:
        for event in event_list['Records']:
            version = event['s3']['object']['versionId']
            size = event['s3']['object']['size']
            if version not in versions:
                print('version mismatch: '+version+' not in: '+str(versions))
                assert False
            else:
                print('version ok: '+version+' in: '+str(versions))
            if event['eventName'] == 'ObjectRemoved:Delete':
                delete_events += 1
                assert size in [size1, size2]
                assert event['s3']['configurationId'] in [notification_name+'_1', notification_name+'_3']
            if event['eventName'] == 'ObjectRemoved:DeleteMarkerCreated':
                delete_marker_create_events += 1
                # size2 is the size of the most recent version of the key
                assert size == size2
                assert event['s3']['configurationId'] in [notification_name+'_1', notification_name+'_2']
    # 2 key versions were deleted
    # notified over the same topic via 2 notifications (1,3)
    assert_equal(delete_events, 2*2)
    # 1 deletion marker was created
    # notified over the same topic over 2 notifications (1,2)
    assert_equal(delete_marker_create_events, 1*2)
    # cleanup
    delete_marker_key.delete()
    stop_amqp_receiver(receiver, task)
    s3_notification_conf.del_config()
    topic_conf.del_config()
    # delete the bucket
    conn.delete_bucket(bucket_name)
@attr('manual_test')
def test_ps_s3_persistent_cleanup():
    """ test reservation cleanup after gateway crash """
    # manual-only: requires killing and restarting radosgw out-of-band,
    # so the body below is unreachable unless the SkipTest is removed
    return SkipTest("only used in manual testing")
    conn = connection()
    zonegroup = 'default'
    # create random port for the http server
    host = get_ip()
    port = random.randint(10000, 20000)
    # start an http server in a separate thread
    number_of_objects = 200
    http_server = StreamingHTTPServer(host, port, num_workers=number_of_objects)
    gw = conn
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = gw.create_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX
    # create s3 topic with a persistent (queued) push endpoint
    endpoint_address = 'http://'+host+':'+str(port)
    endpoint_args = 'push-endpoint='+endpoint_address+'&persistent=true'
    topic_conf = PSTopicS3(gw, topic_name, zonegroup, endpoint_args=endpoint_args)
    topic_arn = topic_conf.set_config()
    # create s3 notification for object creation only
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn,
                        'Events': ['s3:ObjectCreated:Put']
                        }]
    s3_notification_conf = PSNotificationS3(gw, bucket_name, topic_conf_list)
    response, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    client_threads = []
    start_time = time.time()
    for i in range(number_of_objects):
        key = bucket.new_key(str(i))
        content = str(os.urandom(1024*1024))
        thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
        thr.start()
        client_threads.append(thr)
    # stop gateway while clients are sending, leaving stale queue reservations
    os.system("killall -9 radosgw");
    print('wait for 10 sec for before restarting the gateway')
    time.sleep(10)
    # TODO: start the radosgw
    [thr.join() for thr in client_threads]
    keys = list(bucket.list())
    # delete objects from the bucket
    client_threads = []
    start_time = time.time()
    for key in bucket.list():
        thr = threading.Thread(target = key.delete, args=())
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    # check http receiver; some events are expected to be lost due to the crash
    events = http_server.get_and_reset_events()
    print(str(len(events) ) + " events found out of " + str(number_of_objects))
    # make sure that things are working now
    client_threads = []
    start_time = time.time()
    for i in range(number_of_objects):
        key = bucket.new_key(str(i))
        content = str(os.urandom(1024*1024))
        thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    keys = list(bucket.list())
    # delete objects from the bucket
    client_threads = []
    start_time = time.time()
    for key in bucket.list():
        thr = threading.Thread(target = key.delete, args=())
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    # reservations made before the crash must age out before queue deletion
    print('wait for 180 sec for reservations to be stale before queue deletion')
    time.sleep(180)
    # check http receiver
    events = http_server.get_and_reset_events()
    print(str(len(events)) + " events found out of " + str(number_of_objects))
    # cleanup
    s3_notification_conf.del_config()
    topic_conf.del_config()
    gw.delete_bucket(bucket_name)
    http_server.close()
@attr('basic_test')
def test_ps_s3_persistent_topic_stats():
    """ test persistent topic stats

    Creates a persistent topic pointing at an endpoint that is never
    started, so every notification stays queued, then verifies that
    'radosgw-admin topic stats' reports the expected number of queue
    entries after creations and deletions.
    """
    conn = connection()
    zonegroup = 'default'
    # create random port for the http server
    # NOTE: no server is started on this port on purpose - the endpoint is
    # unreachable so queue entries accumulate instead of being delivered
    host = get_ip()
    port = random.randint(10000, 20000)
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = conn.create_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX
    # create s3 topic
    endpoint_address = 'http://'+host+':'+str(port)
    endpoint_args = 'push-endpoint='+endpoint_address+'&persistent=true'
    topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args)
    topic_arn = topic_conf.set_config()
    # create s3 notification
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn,
                        'Events': []
                        }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    response, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # topic stats: check the admin return code before parsing its output so a
    # failed command surfaces as a clear assertion and not a JSON decode error
    result = admin(['topic', 'stats', '--topic', topic_name])
    assert_equal(result[1], 0)
    parsed_result = json.loads(result[0])
    assert_equal(parsed_result['Topic Stats']['Entries'], 0)
    # create objects in the bucket (async)
    number_of_objects = 10
    client_threads = []
    start_time = time.time()
    for i in range(number_of_objects):
        key = bucket.new_key('key-'+str(i))
        content = str(os.urandom(1024*1024))
        thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    time_diff = time.time() - start_time
    print('average time for creation + async http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
    # topic stats: one queued entry per created object
    result = admin(['topic', 'stats', '--topic', topic_name])
    assert_equal(result[1], 0)
    parsed_result = json.loads(result[0])
    assert_equal(parsed_result['Topic Stats']['Entries'], number_of_objects)
    # delete objects from the bucket
    client_threads = []
    start_time = time.time()
    count = 0
    for key in bucket.list():
        count += 1
        thr = threading.Thread(target = key.delete, args=())
        thr.start()
        client_threads.append(thr)
        if count%100 == 0:
            # join in batches to bound the number of live threads
            [thr.join() for thr in client_threads]
    time_diff = time.time() - start_time
    print('average time for deletion + async http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
    client_threads = []
    start_time = time.time()
    # topic stats: creations + deletions are both queued
    result = admin(['topic', 'stats', '--topic', topic_name])
    assert_equal(result[1], 0)
    parsed_result = json.loads(result[0])
    assert_equal(parsed_result['Topic Stats']['Entries'], 2*number_of_objects)
    # cleanup
    s3_notification_conf.del_config()
    topic_conf.del_config()
    # delete the bucket
    conn.delete_bucket(bucket_name)
@attr('manual_test')
def test_ps_s3_persistent_notification_pushback():
    """ test pushing persistent notification pushback """
    # manual-only: generates a large, slow-to-drain queue (the http server
    # delays each response by 0.5s) and is too long for automated runs
    return SkipTest("only used in manual testing")
    conn = connection()
    zonegroup = 'default'
    # create random port for the http server
    host = get_ip()
    port = random.randint(10000, 20000)
    # start an http server in a separate thread; the per-request delay
    # causes delivery to fall behind and exercises queue pushback
    http_server = StreamingHTTPServer(host, port, num_workers=10, delay=0.5)
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = conn.create_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX
    # create s3 topic
    endpoint_address = 'http://'+host+':'+str(port)
    endpoint_args = 'push-endpoint='+endpoint_address+'&persistent=true'
    topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args)
    topic_arn = topic_conf.set_config()
    # create s3 notification
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn,
                        'Events': []
                        }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    response, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # create objects in the bucket (async), in 100 batches of random size
    for j in range(100):
        number_of_objects = randint(500, 1000)
        client_threads = []
        start_time = time.time()
        for i in range(number_of_objects):
            key = bucket.new_key(str(j)+'-'+str(i))
            content = str(os.urandom(1024*1024))
            thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
            thr.start()
            client_threads.append(thr)
        [thr.join() for thr in client_threads]
        time_diff = time.time() - start_time
        print('average time for creation + async http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
    keys = list(bucket.list())
    delay = 30
    print('wait for '+str(delay)+'sec for the messages...')
    time.sleep(delay)
    # delete objects from the bucket
    client_threads = []
    start_time = time.time()
    count = 0
    for key in bucket.list():
        count += 1
        thr = threading.Thread(target = key.delete, args=())
        thr.start()
        client_threads.append(thr)
        if count%100 == 0:
            # join in batches to bound the number of live threads
            [thr.join() for thr in client_threads]
    time_diff = time.time() - start_time
    print('average time for deletion + async http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
    client_threads = []
    start_time = time.time()
    print('wait for '+str(delay)+'sec for the messages...')
    time.sleep(delay)
    # cleanup
    s3_notification_conf.del_config()
    topic_conf.del_config()
    # delete the bucket
    conn.delete_bucket(bucket_name)
    time.sleep(delay)
    http_server.close()
@attr('kafka_test')
def test_ps_s3_notification_kafka_idle_behaviour():
    """ test pushing kafka s3 notification idle behaviour check """
    # TODO convert this test to actual running test by changing
    # os.system call to verify the process idleness
    conn = connection()
    zonegroup = 'default'
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = conn.create_bucket(bucket_name)
    # name is constant for manual testing
    topic_name = bucket_name+'_topic'
    # create consumer on the topic
    task, receiver = create_kafka_receiver_thread(topic_name+'_1')
    task.start()
    # create s3 topic
    endpoint_address = 'kafka://' + kafka_server
    # with acks from broker
    endpoint_args = 'push-endpoint='+endpoint_address+'&kafka-ack-level=broker'
    topic_conf1 = PSTopicS3(conn, topic_name+'_1', zonegroup, endpoint_args=endpoint_args)
    topic_arn1 = topic_conf1.set_config()
    # create s3 notification
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name + '_1', 'TopicArn': topic_arn1,
                        'Events': []
                        }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    response, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # create objects in the bucket (async)
    number_of_objects = 10
    client_threads = []
    etags = []
    start_time = time.time()
    for i in range(number_of_objects):
        key = bucket.new_key(str(i))
        content = str(os.urandom(1024*1024))
        etag = hashlib.md5(content.encode()).hexdigest()
        etags.append(etag)
        thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    time_diff = time.time() - start_time
    print('average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
    print('wait for 5sec for the messages...')
    time.sleep(5)
    keys = list(bucket.list())
    receiver.verify_s3_events(keys, exact_match=True, etags=etags)
    # delete objects from the bucket
    client_threads = []
    start_time = time.time()
    for key in bucket.list():
        thr = threading.Thread(target = key.delete, args=())
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    time_diff = time.time() - start_time
    print('average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
    print('wait for 5sec for the messages...')
    time.sleep(5)
    receiver.verify_s3_events(keys, exact_match=True, deletions=True, etags=etags)
    # poll netstat until no radosgw<->kafka (port 9092) connection remains,
    # i.e. the gateway dropped its idle broker connection
    # NOTE(review): assumes netstat is installed and kafka listens on 9092
    is_idle = False
    while not is_idle:
        print('waiting for 10sec for checking idleness')
        time.sleep(10)
        cmd = "netstat -nnp | grep 9092 | grep radosgw"
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
        out = proc.communicate()[0]
        if len(out) == 0:
            is_idle = True
        else:
            print("radosgw<->kafka connection is not idle")
            print(out.decode('utf-8'))
    # do the process of uploading an object and checking for notification again
    # (verifies the connection is re-established after going idle)
    number_of_objects = 10
    client_threads = []
    etags = []
    start_time = time.time()
    for i in range(number_of_objects):
        key = bucket.new_key(str(i))
        content = str(os.urandom(1024*1024))
        etag = hashlib.md5(content.encode()).hexdigest()
        etags.append(etag)
        thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    time_diff = time.time() - start_time
    print('average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
    print('wait for 5sec for the messages...')
    time.sleep(5)
    keys = list(bucket.list())
    receiver.verify_s3_events(keys, exact_match=True, etags=etags)
    # delete objects from the bucket
    client_threads = []
    start_time = time.time()
    for key in bucket.list():
        thr = threading.Thread(target = key.delete, args=())
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    time_diff = time.time() - start_time
    print('average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
    print('wait for 5sec for the messages...')
    time.sleep(5)
    receiver.verify_s3_events(keys, exact_match=True, deletions=True, etags=etags)
    # cleanup
    s3_notification_conf.del_config()
    topic_conf1.del_config()
    # delete the bucket
    conn.delete_bucket(bucket_name)
    stop_kafka_receiver(receiver, task)
@attr('modification_required')
def test_ps_s3_persistent_gateways_recovery():
    """ test gateway recovery of persistent notifications """
    # requires a second gateway (connection2) plus manual stop/restart of
    # gateway 2; the body below is unreachable unless the SkipTest is removed
    return SkipTest('This test requires two gateways.')
    conn = connection()
    zonegroup = 'default'
    # create random port for the http server
    host = get_ip()
    port = random.randint(10000, 20000)
    # start an http server in a separate thread
    number_of_objects = 10
    http_server = StreamingHTTPServer(host, port, num_workers=number_of_objects)
    gw1 = conn
    gw2 = connection2()
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = gw1.create_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX
    # create two s3 topics, one owned by each gateway, tagged via OpaqueData
    endpoint_address = 'http://'+host+':'+str(port)
    endpoint_args = 'push-endpoint='+endpoint_address+'&persistent=true'
    topic_conf1 = PSTopicS3(gw1, topic_name+'_1', zonegroup, endpoint_args=endpoint_args+'&OpaqueData=fromgw1')
    topic_arn1 = topic_conf1.set_config()
    topic_conf2 = PSTopicS3(gw2, topic_name+'_2', zonegroup, endpoint_args=endpoint_args+'&OpaqueData=fromgw2')
    topic_arn2 = topic_conf2.set_config()
    # create two s3 notifications: gw1 handles creations, gw2 handles deletions
    notification_name = bucket_name + NOTIFICATION_SUFFIX+'_1'
    topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn1,
                        'Events': ['s3:ObjectCreated:Put']
                        }]
    s3_notification_conf1 = PSNotificationS3(gw1, bucket_name, topic_conf_list)
    response, status = s3_notification_conf1.set_config()
    assert_equal(status/100, 2)
    notification_name = bucket_name + NOTIFICATION_SUFFIX+'_2'
    topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn2,
                        'Events': ['s3:ObjectRemoved:Delete']
                        }]
    s3_notification_conf2 = PSNotificationS3(gw2, bucket_name, topic_conf_list)
    response, status = s3_notification_conf2.set_config()
    assert_equal(status/100, 2)
    # stop gateway 2 (manual step in this test)
    print('stopping gateway2...')
    client_threads = []
    start_time = time.time()
    for i in range(number_of_objects):
        key = bucket.new_key(str(i))
        content = str(os.urandom(1024*1024))
        thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    keys = list(bucket.list())
    # delete objects from the bucket
    client_threads = []
    start_time = time.time()
    for key in bucket.list():
        thr = threading.Thread(target = key.delete, args=())
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    print('wait for 60 sec for before restarting the gateway')
    time.sleep(60)
    # check http receiver: after recovery each key must have exactly one
    # creation event and one deletion event
    events = http_server.get_and_reset_events()
    for key in keys:
        creations = 0
        deletions = 0
        for event in events:
            if event['Records'][0]['eventName'] == 'ObjectCreated:Put' and \
                    key.name == event['Records'][0]['s3']['object']['key']:
                creations += 1
            elif event['Records'][0]['eventName'] == 'ObjectRemoved:Delete' and \
                    key.name == event['Records'][0]['s3']['object']['key']:
                deletions += 1
        assert_equal(creations, 1)
        assert_equal(deletions, 1)
    # cleanup
    s3_notification_conf1.del_config()
    topic_conf1.del_config()
    gw1.delete_bucket(bucket_name)
    time.sleep(10)
    s3_notification_conf2.del_config()
    topic_conf2.del_config()
    http_server.close()
@attr('modification_required')
def test_ps_s3_persistent_multiple_gateways():
    """ test pushing persistent notification via two gateways """
    # requires a second gateway (connection2); the body below is
    # unreachable unless the SkipTest is removed
    return SkipTest('This test requires two gateways.')
    conn = connection()
    zonegroup = 'default'
    # create random port for the http server
    host = get_ip()
    port = random.randint(10000, 20000)
    # start an http server in a separate thread
    number_of_objects = 10
    http_server = StreamingHTTPServer(host, port, num_workers=number_of_objects)
    gw1 = conn
    gw2 = connection2()
    # create bucket; both gateways operate on the same bucket
    bucket_name = gen_bucket_name()
    bucket1 = gw1.create_bucket(bucket_name)
    bucket2 = gw2.get_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX
    # create two s3 topics, distinguished by their OpaqueData tag
    endpoint_address = 'http://'+host+':'+str(port)
    endpoint_args = 'push-endpoint='+endpoint_address+'&persistent=true'
    topic1_opaque = 'fromgw1'
    topic_conf1 = PSTopicS3(gw1, topic_name+'_1', zonegroup, endpoint_args=endpoint_args+'&OpaqueData='+topic1_opaque)
    topic_arn1 = topic_conf1.set_config()
    topic2_opaque = 'fromgw2'
    topic_conf2 = PSTopicS3(gw2, topic_name+'_2', zonegroup, endpoint_args=endpoint_args+'&OpaqueData='+topic2_opaque)
    topic_arn2 = topic_conf2.set_config()
    # create two s3 notifications
    notification_name = bucket_name + NOTIFICATION_SUFFIX+'_1'
    topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn1,
                        'Events': []
                        }]
    s3_notification_conf1 = PSNotificationS3(gw1, bucket_name, topic_conf_list)
    response, status = s3_notification_conf1.set_config()
    assert_equal(status/100, 2)
    notification_name = bucket_name + NOTIFICATION_SUFFIX+'_2'
    topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn2,
                        'Events': []
                        }]
    s3_notification_conf2 = PSNotificationS3(gw2, bucket_name, topic_conf_list)
    response, status = s3_notification_conf2.set_config()
    assert_equal(status/100, 2)
    # upload through both gateways concurrently
    client_threads = []
    start_time = time.time()
    for i in range(number_of_objects):
        key = bucket1.new_key('gw1_'+str(i))
        content = str(os.urandom(1024*1024))
        thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
        thr.start()
        client_threads.append(thr)
        key = bucket2.new_key('gw2_'+str(i))
        content = str(os.urandom(1024*1024))
        thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    keys = list(bucket1.list())
    delay = 30
    print('wait for '+str(delay)+'sec for the messages...')
    time.sleep(delay)
    # each key must yield exactly one creation event per topic
    events = http_server.get_and_reset_events()
    for key in keys:
        topic1_count = 0
        topic2_count = 0
        for event in events:
            if event['Records'][0]['eventName'] == 'ObjectCreated:Put' and \
                    key.name == event['Records'][0]['s3']['object']['key'] and \
                    topic1_opaque == event['Records'][0]['opaqueData']:
                topic1_count += 1
            elif event['Records'][0]['eventName'] == 'ObjectCreated:Put' and \
                    key.name == event['Records'][0]['s3']['object']['key'] and \
                    topic2_opaque == event['Records'][0]['opaqueData']:
                topic2_count += 1
        assert_equal(topic1_count, 1)
        assert_equal(topic2_count, 1)
    # delete objects from the bucket
    client_threads = []
    start_time = time.time()
    for key in bucket1.list():
        thr = threading.Thread(target = key.delete, args=())
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    print('wait for '+str(delay)+'sec for the messages...')
    time.sleep(delay)
    # each key must yield exactly one deletion event per topic
    events = http_server.get_and_reset_events()
    for key in keys:
        topic1_count = 0
        topic2_count = 0
        for event in events:
            if event['Records'][0]['eventName'] == 'ObjectRemoved:Delete' and \
                    key.name == event['Records'][0]['s3']['object']['key'] and \
                    topic1_opaque == event['Records'][0]['opaqueData']:
                topic1_count += 1
            elif event['Records'][0]['eventName'] == 'ObjectRemoved:Delete' and \
                    key.name == event['Records'][0]['s3']['object']['key'] and \
                    topic2_opaque == event['Records'][0]['opaqueData']:
                topic2_count += 1
        assert_equal(topic1_count, 1)
        assert_equal(topic2_count, 1)
    # cleanup
    s3_notification_conf1.del_config()
    topic_conf1.del_config()
    s3_notification_conf2.del_config()
    topic_conf2.del_config()
    gw1.delete_bucket(bucket_name)
    http_server.close()
@attr('http_test')
def test_ps_s3_persistent_multiple_endpoints():
    """ test pushing persistent notification when one of the endpoints has error """
    conn = connection()
    zonegroup = 'default'
    # create random port for the http server
    host = get_ip()
    port = random.randint(10000, 20000)
    # start an http server in a separate thread
    number_of_objects = 10
    http_server = StreamingHTTPServer(host, port, num_workers=number_of_objects)
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = conn.create_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX
    # create two s3 topics: one working, one pointing at an unresolvable host
    # ('kaboom') to verify that a broken endpoint does not block the good one
    endpoint_address = 'http://'+host+':'+str(port)
    endpoint_args = 'push-endpoint='+endpoint_address+'&persistent=true'
    topic_conf1 = PSTopicS3(conn, topic_name+'_1', zonegroup, endpoint_args=endpoint_args)
    topic_arn1 = topic_conf1.set_config()
    endpoint_address = 'http://kaboom:9999'
    endpoint_args = 'push-endpoint='+endpoint_address+'&persistent=true'
    topic_conf2 = PSTopicS3(conn, topic_name+'_2', zonegroup, endpoint_args=endpoint_args)
    topic_arn2 = topic_conf2.set_config()
    # create two s3 notifications
    notification_name = bucket_name + NOTIFICATION_SUFFIX+'_1'
    topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn1,
                        'Events': []
                        }]
    s3_notification_conf1 = PSNotificationS3(conn, bucket_name, topic_conf_list)
    response, status = s3_notification_conf1.set_config()
    assert_equal(status/100, 2)
    notification_name = bucket_name + NOTIFICATION_SUFFIX+'_2'
    topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn2,
                        'Events': []
                        }]
    s3_notification_conf2 = PSNotificationS3(conn, bucket_name, topic_conf_list)
    response, status = s3_notification_conf2.set_config()
    assert_equal(status/100, 2)
    client_threads = []
    start_time = time.time()
    for i in range(number_of_objects):
        key = bucket.new_key(str(i))
        content = str(os.urandom(1024*1024))
        thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    keys = list(bucket.list())
    delay = 30
    print('wait for '+str(delay)+'sec for the messages...')
    time.sleep(delay)
    # only the working endpoint is verified; delivery order/duplication is
    # not guaranteed over http, hence exact_match=False
    http_server.verify_s3_events(keys, exact_match=False, deletions=False)
    # delete objects from the bucket
    client_threads = []
    start_time = time.time()
    for key in bucket.list():
        thr = threading.Thread(target = key.delete, args=())
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    print('wait for '+str(delay)+'sec for the messages...')
    time.sleep(delay)
    http_server.verify_s3_events(keys, exact_match=False, deletions=True)
    # cleanup
    s3_notification_conf1.del_config()
    topic_conf1.del_config()
    s3_notification_conf2.del_config()
    topic_conf2.del_config()
    conn.delete_bucket(bucket_name)
    http_server.close()
def persistent_notification(endpoint_type):
    """ test pushing persistent notification

    Shared driver for the http/amqp/kafka persistent-notification tests:
    creates a bucket, a persistent topic for the given endpoint type and a
    catch-all notification, then verifies creation and deletion events at
    the matching receiver.

    :param endpoint_type: one of 'http', 'amqp' or 'kafka'
    """
    conn = connection()
    zonegroup = 'default'
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = conn.create_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX
    receiver = {}
    host = get_ip()
    if endpoint_type == 'http':
        # create random port for the http server
        host = get_ip_http()
        port = random.randint(10000, 20000)
        # start an http server in a separate thread
        receiver = StreamingHTTPServer(host, port, num_workers=10)
        endpoint_address = 'http://'+host+':'+str(port)
        endpoint_args = 'push-endpoint='+endpoint_address+'&persistent=true'
        # the http server does not guarantee order, so duplicates are expected
        exact_match = False
    elif endpoint_type == 'amqp':
        # start amqp receiver
        exchange = 'ex1'
        task, receiver = create_amqp_receiver_thread(exchange, topic_name)
        task.start()
        endpoint_address = 'amqp://' + host
        endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange='+exchange+'&amqp-ack-level=broker'+'&persistent=true'
        # amqp broker guarantee ordering
        exact_match = True
    elif endpoint_type == 'kafka':
        # start kafka receiver
        task, receiver = create_kafka_receiver_thread(topic_name)
        task.start()
        endpoint_address = 'kafka://' + host
        endpoint_args = 'push-endpoint='+endpoint_address+'&kafka-ack-level=broker'+'&persistent=true'
        # kafka broker guarantees ordering
        exact_match = True
    else:
        return SkipTest('Unknown endpoint type: ' + endpoint_type)
    # create s3 topic
    topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args)
    topic_arn = topic_conf.set_config()
    # create s3 notification
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn,
                        'Events': []
                        }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    response, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # create objects in the bucket (async)
    number_of_objects = 100
    client_threads = []
    start_time = time.time()
    for i in range(number_of_objects):
        key = bucket.new_key(str(i))
        content = str(os.urandom(1024*1024))
        thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    time_diff = time.time() - start_time
    print('average time for creation + async http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
    keys = list(bucket.list())
    delay = 40
    print('wait for '+str(delay)+'sec for the messages...')
    time.sleep(delay)
    receiver.verify_s3_events(keys, exact_match=exact_match, deletions=False)
    # delete objects from the bucket
    client_threads = []
    start_time = time.time()
    for key in bucket.list():
        thr = threading.Thread(target = key.delete, args=())
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    time_diff = time.time() - start_time
    print('average time for deletion + async http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
    print('wait for '+str(delay)+'sec for the messages...')
    time.sleep(delay)
    receiver.verify_s3_events(keys, exact_match=exact_match, deletions=True)
    # cleanup
    s3_notification_conf.del_config()
    topic_conf.del_config()
    # delete the bucket
    conn.delete_bucket(bucket_name)
    # BUGFIX: a kafka receiver must be stopped with the kafka helper;
    # previously it was handed to stop_amqp_receiver()
    if endpoint_type == 'http':
        receiver.close()
    elif endpoint_type == 'kafka':
        stop_kafka_receiver(receiver, task)
    else:
        stop_amqp_receiver(receiver, task)
@attr('http_test')
def test_ps_s3_persistent_notification_http():
    """ test pushing persistent notification over an http endpoint """
    persistent_notification('http')
@attr('amqp_test')
def test_ps_s3_persistent_notification_amqp():
    """ test pushing persistent notification over an amqp endpoint """
    persistent_notification('amqp')
@attr('kafka_test')
def test_ps_s3_persistent_notification_kafka():
    """ test pushing persistent notification over a kafka endpoint """
    persistent_notification('kafka')
def random_string(length):
    """Return a random string of ASCII letters with the given length."""
    import string
    alphabet = string.ascii_letters
    picked = [random.choice(alphabet) for _ in range(length)]
    return ''.join(picked)
@attr('amqp_test')
def test_ps_s3_persistent_notification_large():
    """ test pushing persistent notification of large notifications """
    # large 2KiB OpaqueData plus 63-char keys inflate each queued
    # notification to exercise large-entry handling in the queue
    conn = connection()
    zonegroup = 'default'
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = conn.create_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX
    receiver = {}
    host = get_ip()
    # start amqp receiver
    exchange = 'ex1'
    task, receiver = create_amqp_receiver_thread(exchange, topic_name)
    task.start()
    endpoint_address = 'amqp://' + host
    opaque_data = random_string(1024*2)
    endpoint_args = 'push-endpoint='+endpoint_address+'&OpaqueData='+opaque_data+'&amqp-exchange='+exchange+'&amqp-ack-level=broker'+'&persistent=true'
    # amqp broker guarantee ordering
    exact_match = True
    # create s3 topic
    topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args)
    topic_arn = topic_conf.set_config()
    # create s3 notification
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn,
                        'Events': []
                        }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    response, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # create objects in the bucket (async)
    number_of_objects = 100
    client_threads = []
    start_time = time.time()
    for i in range(number_of_objects):
        key_value = random_string(63)
        key = bucket.new_key(key_value)
        content = str(os.urandom(1024*1024))
        thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    time_diff = time.time() - start_time
    print('average time for creation + async http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
    keys = list(bucket.list())
    delay = 40
    print('wait for '+str(delay)+'sec for the messages...')
    time.sleep(delay)
    receiver.verify_s3_events(keys, exact_match=exact_match, deletions=False)
    # delete objects from the bucket
    client_threads = []
    start_time = time.time()
    for key in bucket.list():
        thr = threading.Thread(target = key.delete, args=())
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    time_diff = time.time() - start_time
    print('average time for deletion + async http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
    print('wait for '+str(delay)+'sec for the messages...')
    time.sleep(delay)
    receiver.verify_s3_events(keys, exact_match=exact_match, deletions=True)
    # cleanup
    s3_notification_conf.del_config()
    topic_conf.del_config()
    # delete the bucket
    conn.delete_bucket(bucket_name)
    stop_amqp_receiver(receiver, task)
@attr('modification_required')
def test_ps_s3_topic_update():
    """ test updating topic associated with a notification

    Creates an amqp topic, verifies notifications, then re-creates the
    topic with an http endpoint and verifies the notification follows the
    updated endpoint. Currently skipped pending modification.
    """
    return SkipTest('This test is yet to be modified.')
    conn = connection()
    zonegroup = 'default'  # was referenced below but never defined
    bucket_name = gen_bucket_name()
    topic_name = bucket_name+TOPIC_SUFFIX
    # create amqp topic
    hostname = get_ip()
    exchange = 'ex1'
    amqp_task, receiver = create_amqp_receiver_thread(exchange, topic_name)
    amqp_task.start()
    #topic_conf = PSTopic(ps_zone.conn, topic_name,endpoint='amqp://' + hostname,endpoint_args='amqp-exchange=' + exchange + '&amqp-ack-level=none')
    topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args='amqp-exchange=' + exchange + '&amqp-ack-level=none')
    # set_config() already returns the topic ARN; the old json parse of an
    # undefined `result` variable was removed
    topic_arn = topic_conf.set_config()
    # get topic
    result, _ = topic_conf.get_config()
    # verify topic content
    parsed_result = json.loads(result)
    assert_equal(parsed_result['topic']['name'], topic_name)
    assert_equal(parsed_result['topic']['dest']['push_endpoint'], topic_conf.parameters['push-endpoint'])
    # create http server
    port = random.randint(10000, 20000)
    # start an http server in a separate thread
    http_server = StreamingHTTPServer(hostname, port)
    # create bucket on the first of the rados zones
    bucket = conn.create_bucket(bucket_name)
    # create s3 notification
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name,
                        'TopicArn': topic_arn,
                        'Events': ['s3:ObjectCreated:*']
                        }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    _, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # create objects in the bucket
    number_of_objects = 10
    for i in range(number_of_objects):
        key = bucket.new_key(str(i))
        key.set_contents_from_string('bar')
    # wait for sync
    #zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
    keys = list(bucket.list())
    # TODO: use exact match
    receiver.verify_s3_events(keys, exact_match=False)
    # update the same topic with new endpoint (consistent with the other
    # PSTopicS3 call sites: zonegroup arg and 'push-endpoint=' prefix)
    #topic_conf = PSTopic(ps_zone.conn, topic_name,endpoint='http://'+ hostname + ':' + str(port))
    topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args='push-endpoint=http://'+ hostname + ':' + str(port))
    _, status = topic_conf.set_config()
    assert_equal(status/100, 2)
    # get topic
    result, _ = topic_conf.get_config()
    # verify topic content
    parsed_result = json.loads(result)
    assert_equal(parsed_result['topic']['name'], topic_name)
    assert_equal(parsed_result['topic']['dest']['push_endpoint'], topic_conf.parameters['push-endpoint'])
    # delete current objects and create new objects in the bucket
    for key in bucket.list():
        key.delete()
    for i in range(number_of_objects):
        key = bucket.new_key(str(i+100))
        key.set_contents_from_string('bar')
    # wait for sync
    #zone_meta_checkpoint(ps_zone.zone)
    #zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
    keys = list(bucket.list())
    # verify that notifications are still sent to amqp
    # TODO: use exact match
    receiver.verify_s3_events(keys, exact_match=False)
    # update notification to update the endpoint from the topic
    topic_conf_list = [{'Id': notification_name,
                        'TopicArn': topic_arn,
                        'Events': ['s3:ObjectCreated:*']
                        }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    _, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # delete current objects and create new objects in the bucket
    for key in bucket.list():
        key.delete()
    for i in range(number_of_objects):
        key = bucket.new_key(str(i+200))
        key.set_contents_from_string('bar')
    # wait for sync
    #zone_meta_checkpoint(ps_zone.zone)
    #zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
    keys = list(bucket.list())
    # check that updates switched to http
    # TODO: use exact match
    http_server.verify_s3_events(keys, exact_match=False)
    # cleanup
    # delete objects from the bucket
    stop_amqp_receiver(receiver, amqp_task)
    for key in bucket.list():
        key.delete()
    s3_notification_conf.del_config()
    topic_conf.del_config()
    conn.delete_bucket(bucket_name)
    http_server.close()
@attr('modification_required')
def test_ps_s3_notification_update():
    """ test updating the topic of a notification

    Create a notification that points at an amqp topic and verify that
    object-creation events arrive over amqp; then re-point the same
    notification (same Id) at an http topic and verify that subsequent
    events arrive over http instead.
    """
    return SkipTest('This test is yet to be modified.')
    # NOTE: everything below is unreachable until the SkipTest above is removed
    hostname = get_ip()
    conn = connection()
    ps_zone = None  # legacy pubsub-zone placeholder; no longer used below
    bucket_name = gen_bucket_name()
    topic_name1 = bucket_name+'amqp'+TOPIC_SUFFIX
    topic_name2 = bucket_name+'http'+TOPIC_SUFFIX
    zonegroup = 'default'
    # create topics
    # start amqp receiver in a separate thread
    exchange = 'ex1'
    amqp_task, receiver = create_amqp_receiver_thread(exchange, topic_name1)
    amqp_task.start()
    # create random port for the http server
    http_port = random.randint(10000, 20000)
    # start an http server in a separate thread
    http_server = StreamingHTTPServer(hostname, http_port)
    #topic_conf1 = PSTopic(ps_zone.conn, topic_name1,endpoint='amqp://' + hostname,endpoint_args='amqp-exchange=' + exchange + '&amqp-ack-level=none')
    topic_conf1 = PSTopicS3(conn, topic_name1, zonegroup, endpoint_args='amqp-exchange=' + exchange + '&amqp-ack-level=none')
    result, status = topic_conf1.set_config()
    parsed_result = json.loads(result)
    topic_arn1 = parsed_result['arn']
    assert_equal(status/100, 2)
    #topic_conf2 = PSTopic(ps_zone.conn, topic_name2,endpoint='http://'+hostname+':'+str(http_port))
    # pass the zonegroup explicitly, matching topic_conf1 above (it was
    # missing here and would raise a TypeError once the test is re-enabled)
    topic_conf2 = PSTopicS3(conn, topic_name2, zonegroup, endpoint_args='http://'+hostname+':'+str(http_port))
    result, status = topic_conf2.set_config()
    parsed_result = json.loads(result)
    topic_arn2 = parsed_result['arn']
    assert_equal(status/100, 2)
    # create bucket on the first of the rados zones
    bucket = conn.create_bucket(bucket_name)
    # wait for sync
    #zone_meta_checkpoint(ps_zone.zone)
    # create s3 notification with topic1
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name,
                        'TopicArn': topic_arn1,
                        'Events': ['s3:ObjectCreated:*']
                        }]
    # use the regular S3 connection ('ps_zone' is None in this flow)
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    _, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # create objects in the bucket
    number_of_objects = 10
    for i in range(number_of_objects):
        key = bucket.new_key(str(i))
        key.set_contents_from_string('bar')
    # wait for sync
    #zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
    keys = list(bucket.list())
    # TODO: use exact match
    receiver.verify_s3_events(keys, exact_match=False)
    # update notification to use topic2 (same notification Id, new TopicArn)
    topic_conf_list = [{'Id': notification_name,
                        'TopicArn': topic_arn2,
                        'Events': ['s3:ObjectCreated:*']
                        }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    _, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    # delete current objects and create new objects in the bucket
    for key in bucket.list():
        key.delete()
    for i in range(number_of_objects):
        key = bucket.new_key(str(i+100))
        key.set_contents_from_string('bar')
    # wait for sync
    #zone_meta_checkpoint(ps_zone.zone)
    #zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
    keys = list(bucket.list())
    # check that updates switched to http
    # TODO: use exact match
    http_server.verify_s3_events(keys, exact_match=False)
    # cleanup
    # delete objects from the bucket
    stop_amqp_receiver(receiver, amqp_task)
    for key in bucket.list():
        key.delete()
    s3_notification_conf.del_config()
    topic_conf1.del_config()
    topic_conf2.del_config()
    conn.delete_bucket(bucket_name)
    http_server.close()
@attr('modification_required')
def test_ps_s3_multiple_topics_notification():
    """ test notification creation with multiple topics

    Create two notifications on one bucket, each pointing at a different
    topic (amqp and http), and verify that object-creation events reach
    both endpoints.
    """
    return SkipTest('This test is yet to be modified.')
    # NOTE: everything below is unreachable until the SkipTest above is removed
    hostname = get_ip()
    zonegroup = 'default'
    conn = connection()
    ps_zone = None  # legacy pubsub-zone placeholder; PSSubscription below still expects it
    bucket_name = gen_bucket_name()
    topic_name1 = bucket_name+'amqp'+TOPIC_SUFFIX
    topic_name2 = bucket_name+'http'+TOPIC_SUFFIX
    # create topics
    # start amqp receiver in a separate thread
    exchange = 'ex1'
    amqp_task, receiver = create_amqp_receiver_thread(exchange, topic_name1)
    amqp_task.start()
    # create random port for the http server
    http_port = random.randint(10000, 20000)
    # start an http server in a separate thread
    http_server = StreamingHTTPServer(hostname, http_port)
    #topic_conf1 = PSTopic(ps_zone.conn, topic_name1,endpoint='amqp://' + hostname,endpoint_args='amqp-exchange=' + exchange + '&amqp-ack-level=none')
    topic_conf1 = PSTopicS3(conn, topic_name1, zonegroup, endpoint_args='amqp-exchange=' + exchange + '&amqp-ack-level=none')
    result, status = topic_conf1.set_config()
    parsed_result = json.loads(result)
    topic_arn1 = parsed_result['arn']
    assert_equal(status/100, 2)
    #topic_conf2 = PSTopic(ps_zone.conn, topic_name2,endpoint='http://'+hostname+':'+str(http_port))
    topic_conf2 = PSTopicS3(conn, topic_name2, zonegroup, endpoint_args='http://'+hostname+':'+str(http_port))
    result, status = topic_conf2.set_config()
    parsed_result = json.loads(result)
    topic_arn2 = parsed_result['arn']
    assert_equal(status/100, 2)
    # create bucket on the first of the rados zones
    bucket = conn.create_bucket(bucket_name)
    # wait for sync
    #zone_meta_checkpoint(ps_zone.zone)
    # create s3 notification
    notification_name1 = bucket_name + NOTIFICATION_SUFFIX + '_1'
    notification_name2 = bucket_name + NOTIFICATION_SUFFIX + '_2'
    topic_conf_list = [
        {
            'Id': notification_name1,
            'TopicArn': topic_arn1,
            'Events': ['s3:ObjectCreated:*']
        },
        {
            'Id': notification_name2,
            'TopicArn': topic_arn2,
            'Events': ['s3:ObjectCreated:*']
        }]
    # use the regular S3 connection ('ps_zone' is None in this flow)
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    _, status = s3_notification_conf.set_config()
    assert_equal(status/100, 2)
    result, _ = s3_notification_conf.get_config()
    assert_equal(len(result['TopicConfigurations']), 2)
    assert_equal(result['TopicConfigurations'][0]['Id'], notification_name1)
    assert_equal(result['TopicConfigurations'][1]['Id'], notification_name2)
    # get auto-generated subscriptions
    # TODO(review): PSSubscription still targets the removed pubsub zone
    # ('ps_zone' is None here) - must be ported before the test can run
    sub_conf1 = PSSubscription(ps_zone.conn, notification_name1,
                               topic_name1)
    _, status = sub_conf1.get_config()
    assert_equal(status/100, 2)
    sub_conf2 = PSSubscription(ps_zone.conn, notification_name2,
                               topic_name2)
    _, status = sub_conf2.get_config()
    assert_equal(status/100, 2)
    # create objects in the bucket
    number_of_objects = 10
    for i in range(number_of_objects):
        key = bucket.new_key(str(i))
        key.set_contents_from_string('bar')
    # wait for sync
    #zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
    # get the events from both of the subscription
    result, _ = sub_conf1.get_events()
    records = json.loads(result)
    for record in records['Records']:
        log.debug(record)
    keys = list(bucket.list())
    # TODO: use exact match
    verify_s3_records_by_elements(records, keys, exact_match=False)
    receiver.verify_s3_events(keys, exact_match=False)
    result, _ = sub_conf2.get_events()
    parsed_result = json.loads(result)
    for record in parsed_result['Records']:
        log.debug(record)
    keys = list(bucket.list())
    # TODO: use exact match
    # verify the records fetched from the second subscription (the original
    # code mistakenly re-checked 'records' from the first subscription)
    verify_s3_records_by_elements(parsed_result, keys, exact_match=False)
    http_server.verify_s3_events(keys, exact_match=False)
    # cleanup
    stop_amqp_receiver(receiver, amqp_task)
    s3_notification_conf.del_config()
    topic_conf1.del_config()
    topic_conf2.del_config()
    # delete objects from the bucket
    for key in bucket.list():
        key.delete()
    conn.delete_bucket(bucket_name)
    http_server.close()
def kafka_security(security_type, mechanism='PLAIN'):
    """ test pushing kafka s3 notifications securely to the master zone

    security_type: one of 'SSL', 'SASL_SSL' or 'SASL_PLAINTEXT', selecting
        the kafka listener port (9093/9094/9095 respectively) and whether
        credentials are embedded in the endpoint URL
    mechanism: SASL mechanism passed to the endpoint ('PLAIN' by default,
        e.g. 'SCRAM-SHA-256'); only appended for the SASL_* types
    """
    conn = connection()
    zonegroup = 'default'
    # create bucket
    bucket_name = gen_bucket_name()
    bucket = conn.create_bucket(bucket_name)
    # name is constant for manual testing
    topic_name = bucket_name+'_topic'
    # create s3 topic
    # map the requested security type to the matching kafka listener port;
    # the SASL variants embed the alice/alice-secret credentials in the URL
    if security_type == 'SASL_SSL':
        endpoint_address = 'kafka://alice:alice-secret@' + kafka_server + ':9094'
    elif security_type == 'SSL':
        endpoint_address = 'kafka://' + kafka_server + ':9093'
    elif security_type == 'SASL_PLAINTEXT':
        endpoint_address = 'kafka://alice:alice-secret@' + kafka_server + ':9095'
    else:
        assert False, 'unknown security method '+security_type
    # build the push-endpoint args; the SSL variants read the CA certificate
    # from the kafka installation, so KAFKA_DIR must be set in the environment
    if security_type == 'SASL_PLAINTEXT':
        endpoint_args = 'push-endpoint='+endpoint_address+'&kafka-ack-level=broker&use-ssl=false&mechanism='+mechanism
    elif security_type == 'SASL_SSL':
        KAFKA_DIR = os.environ['KAFKA_DIR']
        endpoint_args = 'push-endpoint='+endpoint_address+'&kafka-ack-level=broker&use-ssl=true&ca-location='+KAFKA_DIR+'/y-ca.crt&mechanism='+mechanism
    else:
        KAFKA_DIR = os.environ['KAFKA_DIR']
        endpoint_args = 'push-endpoint='+endpoint_address+'&kafka-ack-level=broker&use-ssl=true&ca-location='+KAFKA_DIR+'/y-ca.crt'
    topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args)
    # create consumer on the topic
    task, receiver = create_kafka_receiver_thread(topic_name)
    task.start()
    topic_arn = topic_conf.set_config()
    # create s3 notification subscribed to all events (empty 'Events' list)
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn,
                         'Events': []
                       }]
    s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list)
    s3_notification_conf.set_config()
    # create objects in the bucket (async)
    number_of_objects = 10
    client_threads = []
    start_time = time.time()
    for i in range(number_of_objects):
        key = bucket.new_key(str(i))
        content = str(os.urandom(1024*1024))
        thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
        thr.start()
        client_threads.append(thr)
    [thr.join() for thr in client_threads]
    time_diff = time.time() - start_time
    print('average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
    try:
        # give the kafka consumer time to receive the creation events
        print('wait for 5sec for the messages...')
        time.sleep(5)
        keys = list(bucket.list())
        receiver.verify_s3_events(keys, exact_match=True)
        # delete objects from the bucket
        client_threads = []
        start_time = time.time()
        for key in bucket.list():
            thr = threading.Thread(target = key.delete, args=())
            thr.start()
            client_threads.append(thr)
        [thr.join() for thr in client_threads]
        time_diff = time.time() - start_time
        print('average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
        print('wait for 5sec for the messages...')
        time.sleep(5)
        # 'keys' was captured before the deletions, so it lists exactly the
        # objects whose deletion events we expect
        receiver.verify_s3_events(keys, exact_match=True, deletions=True)
    except Exception as err:
        # fail the test with the original error; cleanup still runs below
        assert False, str(err)
    finally:
        # cleanup
        s3_notification_conf.del_config()
        topic_conf.del_config()
        # delete the bucket
        for key in bucket.list():
            key.delete()
        conn.delete_bucket(bucket_name)
        stop_kafka_receiver(receiver, task)
@attr('kafka_security_test')
def test_ps_s3_notification_push_kafka_security_ssl():
    """ kafka notifications over SSL (port 9093), no SASL """
    kafka_security('SSL')
@attr('kafka_security_test')
def test_ps_s3_notification_push_kafka_security_ssl_sasl():
    """ kafka notifications over SASL_SSL (port 9094), PLAIN mechanism """
    kafka_security('SASL_SSL')
@attr('kafka_security_test')
def test_ps_s3_notification_push_kafka_security_sasl():
    """ kafka notifications over SASL_PLAINTEXT (port 9095), PLAIN mechanism """
    kafka_security('SASL_PLAINTEXT')
@attr('kafka_security_test')
def test_ps_s3_notification_push_kafka_security_ssl_sasl_scram():
    """ kafka notifications over SASL_SSL (port 9094), SCRAM-SHA-256 mechanism """
    kafka_security('SASL_SSL', mechanism='SCRAM-SHA-256')
@attr('kafka_security_test')
def test_ps_s3_notification_push_kafka_security_sasl_scram():
    """ kafka notifications over SASL_PLAINTEXT (port 9095), SCRAM-SHA-256 mechanism """
    kafka_security('SASL_PLAINTEXT', mechanism='SCRAM-SHA-256')
| 160,414 | 36.869452 | 170 | py |
null | ceph-main/src/test/rgw/rgw_multi/__init__.py | 0 | 0 | 0 | py | |
null | ceph-main/src/test/rgw/rgw_multi/conn.py | import boto
import boto.s3.connection
import boto.iam.connection
def get_gateway_connection(gateway, credentials):
""" connect to the given gateway """
if gateway.connection is None:
gateway.connection = boto.connect_s3(
aws_access_key_id = credentials.access_key,
aws_secret_access_key = credentials.secret,
host = gateway.host,
port = gateway.port,
is_secure = False,
calling_format = boto.s3.connection.OrdinaryCallingFormat())
return gateway.connection
def get_gateway_secure_connection(gateway, credentials):
""" secure connect to the given gateway """
if gateway.ssl_port == 0:
return None
if gateway.secure_connection is None:
gateway.secure_connection = boto.connect_s3(
aws_access_key_id = credentials.access_key,
aws_secret_access_key = credentials.secret,
host = gateway.host,
port = gateway.ssl_port,
is_secure = True,
validate_certs=False,
calling_format = boto.s3.connection.OrdinaryCallingFormat())
return gateway.secure_connection
def get_gateway_iam_connection(gateway, credentials):
""" connect to iam api of the given gateway """
if gateway.iam_connection is None:
gateway.iam_connection = boto.connect_iam(
aws_access_key_id = credentials.access_key,
aws_secret_access_key = credentials.secret,
host = gateway.host,
port = gateway.port,
is_secure = False)
return gateway.iam_connection
| 1,641 | 38.095238 | 76 | py |
null | ceph-main/src/test/rgw/rgw_multi/multisite.py | from abc import ABCMeta, abstractmethod
from io import StringIO
import json
from .conn import get_gateway_connection, get_gateway_iam_connection, get_gateway_secure_connection
class Cluster:
""" interface to run commands against a distinct ceph cluster """
__metaclass__ = ABCMeta
@abstractmethod
def admin(self, args = None, **kwargs):
""" execute a radosgw-admin command """
pass
class Gateway:
""" interface to control a single radosgw instance """
__metaclass__ = ABCMeta
def __init__(self, host = None, port = None, cluster = None, zone = None, ssl_port = 0):
self.host = host
self.port = port
self.cluster = cluster
self.zone = zone
self.connection = None
self.secure_connection = None
self.ssl_port = ssl_port
self.iam_connection = None
@abstractmethod
def start(self, args = []):
""" start the gateway with the given args """
pass
@abstractmethod
def stop(self):
""" stop the gateway """
pass
def endpoint(self):
return 'http://%s:%d' % (self.host, self.port)
class SystemObject:
""" interface for system objects, represented in json format and
manipulated with radosgw-admin commands """
__metaclass__ = ABCMeta
def __init__(self, data = None, uuid = None):
self.data = data
self.id = uuid
if data:
self.load_from_json(data)
@abstractmethod
def build_command(self, command):
""" return the command line for the given command, including arguments
to specify this object """
pass
@abstractmethod
def load_from_json(self, data):
""" update internal state based on json data """
pass
def command(self, cluster, cmd, args = None, **kwargs):
""" run the given command and return the output and retcode """
args = self.build_command(cmd) + (args or [])
return cluster.admin(args, **kwargs)
def json_command(self, cluster, cmd, args = None, **kwargs):
""" run the given command, parse the output and return the resulting
data and retcode """
s, r = self.command(cluster, cmd, args or [], **kwargs)
if r == 0:
data = json.loads(s)
self.load_from_json(data)
self.data = data
return self.data, r
# mixins for supported commands
class Create(object):
def create(self, cluster, args = None, **kwargs):
""" create the object with the given arguments """
return self.json_command(cluster, 'create', args, **kwargs)
class Delete(object):
def delete(self, cluster, args = None, **kwargs):
""" delete the object """
# not json_command() because delete has no output
_, r = self.command(cluster, 'delete', args, **kwargs)
if r == 0:
self.data = None
return r
class Get(object):
def get(self, cluster, args = None, **kwargs):
""" read the object from storage """
kwargs['read_only'] = True
return self.json_command(cluster, 'get', args, **kwargs)
class Set(object):
def set(self, cluster, data, args = None, **kwargs):
""" set the object by json """
kwargs['stdin'] = StringIO(json.dumps(data))
return self.json_command(cluster, 'set', args, **kwargs)
class Modify(object):
def modify(self, cluster, args = None, **kwargs):
""" modify the object with the given arguments """
return self.json_command(cluster, 'modify', args, **kwargs)
class CreateDelete(Create, Delete): pass
class GetSet(Get, Set): pass
class Zone(SystemObject, SystemObject.CreateDelete, SystemObject.GetSet, SystemObject.Modify):
def __init__(self, name, zonegroup = None, cluster = None, data = None, zone_id = None, gateways = None):
self.name = name
self.zonegroup = zonegroup
self.cluster = cluster
self.gateways = gateways or []
super(Zone, self).__init__(data, zone_id)
def zone_arg(self):
""" command-line argument to specify this zone """
return ['--rgw-zone', self.name]
def zone_args(self):
""" command-line arguments to specify this zone/zonegroup/realm """
args = self.zone_arg()
if self.zonegroup:
args += self.zonegroup.zonegroup_args()
return args
def build_command(self, command):
""" build a command line for the given command and args """
return ['zone', command] + self.zone_args()
def load_from_json(self, data):
""" load the zone from json """
self.id = data['id']
self.name = data['name']
def start(self, args = None):
""" start all gateways """
for g in self.gateways:
g.start(args)
def stop(self):
""" stop all gateways """
for g in self.gateways:
g.stop()
def period(self):
return self.zonegroup.period if self.zonegroup else None
def realm(self):
return self.zonegroup.realm() if self.zonegroup else None
def is_read_only(self):
return False
def tier_type(self):
raise NotImplementedError
def syncs_from(self, zone_name):
return zone_name != self.name
def has_buckets(self):
return True
def has_roles(self):
return True
def get_conn(self, credentials):
return ZoneConn(self, credentials) # not implemented, but can be used
class ZoneConn(object):
def __init__(self, zone, credentials):
self.zone = zone
self.name = zone.name
""" connect to the zone's first gateway """
if isinstance(credentials, list):
self.credentials = credentials[0]
else:
self.credentials = credentials
if self.zone.gateways is not None:
self.conn = get_gateway_connection(self.zone.gateways[0], self.credentials)
self.secure_conn = get_gateway_secure_connection(self.zone.gateways[0], self.credentials)
self.iam_conn = get_gateway_iam_connection(self.zone.gateways[0], self.credentials)
# create connections for the rest of the gateways (if exist)
for gw in list(self.zone.gateways):
get_gateway_connection(gw, self.credentials)
get_gateway_secure_connection(gw, self.credentials)
get_gateway_iam_connection(gw, self.credentials)
def get_connection(self):
return self.conn
def get_iam_connection(self):
return self.iam_conn
def get_bucket(self, bucket_name, credentials):
raise NotImplementedError
def check_bucket_eq(self, zone, bucket_name):
raise NotImplementedError
class ZoneGroup(SystemObject, SystemObject.CreateDelete, SystemObject.GetSet, SystemObject.Modify):
def __init__(self, name, period = None, data = None, zonegroup_id = None, zones = None, master_zone = None):
self.name = name
self.period = period
self.zones = zones or []
self.master_zone = master_zone
super(ZoneGroup, self).__init__(data, zonegroup_id)
self.rw_zones = []
self.ro_zones = []
self.zones_by_type = {}
for z in self.zones:
if z.is_read_only():
self.ro_zones.append(z)
else:
self.rw_zones.append(z)
def zonegroup_arg(self):
""" command-line argument to specify this zonegroup """
return ['--rgw-zonegroup', self.name]
def zonegroup_args(self):
""" command-line arguments to specify this zonegroup/realm """
args = self.zonegroup_arg()
realm = self.realm()
if realm:
args += realm.realm_arg()
return args
def build_command(self, command):
""" build a command line for the given command and args """
return ['zonegroup', command] + self.zonegroup_args()
def zone_by_id(self, zone_id):
""" return the matching zone by id """
for zone in self.zones:
if zone.id == zone_id:
return zone
return None
def load_from_json(self, data):
""" load the zonegroup from json """
self.id = data['id']
self.name = data['name']
master_id = data['master_zone']
if not self.master_zone or master_id != self.master_zone.id:
self.master_zone = self.zone_by_id(master_id)
def add(self, cluster, zone, args = None, **kwargs):
""" add an existing zone to the zonegroup """
args = zone.zone_arg() + (args or [])
data, r = self.json_command(cluster, 'add', args, **kwargs)
if r == 0:
zone.zonegroup = self
self.zones.append(zone)
return data, r
def remove(self, cluster, zone, args = None, **kwargs):
""" remove an existing zone from the zonegroup """
args = zone.zone_arg() + (args or [])
data, r = self.json_command(cluster, 'remove', args, **kwargs)
if r == 0:
zone.zonegroup = None
self.zones.remove(zone)
return data, r
def realm(self):
return self.period.realm if self.period else None
class Period(SystemObject, SystemObject.Get):
def __init__(self, realm = None, data = None, period_id = None, zonegroups = None, master_zonegroup = None):
self.realm = realm
self.zonegroups = zonegroups or []
self.master_zonegroup = master_zonegroup
super(Period, self).__init__(data, period_id)
def zonegroup_by_id(self, zonegroup_id):
""" return the matching zonegroup by id """
for zonegroup in self.zonegroups:
if zonegroup.id == zonegroup_id:
return zonegroup
return None
def build_command(self, command):
""" build a command line for the given command and args """
return ['period', command]
def load_from_json(self, data):
""" load the period from json """
self.id = data['id']
master_id = data['master_zonegroup']
if not self.master_zonegroup or master_id != self.master_zonegroup.id:
self.master_zonegroup = self.zonegroup_by_id(master_id)
def update(self, zone, args = None, **kwargs):
""" run 'radosgw-admin period update' on the given zone """
assert(zone.cluster)
args = zone.zone_args() + (args or [])
if kwargs.pop('commit', False):
args.append('--commit')
return self.json_command(zone.cluster, 'update', args, **kwargs)
def commit(self, zone, args = None, **kwargs):
""" run 'radosgw-admin period commit' on the given zone """
assert(zone.cluster)
args = zone.zone_args() + (args or [])
return self.json_command(zone.cluster, 'commit', args, **kwargs)
class Realm(SystemObject, SystemObject.CreateDelete, SystemObject.GetSet):
def __init__(self, name, period = None, data = None, realm_id = None):
self.name = name
self.current_period = period
super(Realm, self).__init__(data, realm_id)
def realm_arg(self):
""" return the command-line arguments that specify this realm """
return ['--rgw-realm', self.name]
def build_command(self, command):
""" build a command line for the given command and args """
return ['realm', command] + self.realm_arg()
def load_from_json(self, data):
""" load the realm from json """
self.id = data['id']
def pull(self, cluster, gateway, credentials, args = [], **kwargs):
""" pull an existing realm from the given gateway """
args += ['--url', gateway.endpoint()]
args += credentials.credential_args()
return self.json_command(cluster, 'pull', args, **kwargs)
def master_zonegroup(self):
""" return the current period's master zonegroup """
if self.current_period is None:
return None
return self.current_period.master_zonegroup
def meta_master_zone(self):
""" return the current period's metadata master zone """
zonegroup = self.master_zonegroup()
if zonegroup is None:
return None
return zonegroup.master_zone
class Credentials:
def __init__(self, access_key, secret):
self.access_key = access_key
self.secret = secret
def credential_args(self):
return ['--access-key', self.access_key, '--secret', self.secret]
class User(SystemObject):
def __init__(self, uid, data = None, name = None, credentials = None, tenant = None):
self.name = name
self.credentials = credentials or []
self.tenant = tenant
super(User, self).__init__(data, uid)
def user_arg(self):
""" command-line argument to specify this user """
args = ['--uid', self.id]
if self.tenant:
args += ['--tenant', self.tenant]
return args
def build_command(self, command):
""" build a command line for the given command and args """
return ['user', command] + self.user_arg()
def load_from_json(self, data):
""" load the user from json """
self.id = data['user_id']
self.name = data['display_name']
self.credentials = [Credentials(k['access_key'], k['secret_key']) for k in data['keys']]
def create(self, zone, args = None, **kwargs):
""" create the user with the given arguments """
assert(zone.cluster)
args = zone.zone_args() + (args or [])
return self.json_command(zone.cluster, 'create', args, **kwargs)
def info(self, zone, args = None, **kwargs):
""" read the user from storage """
assert(zone.cluster)
args = zone.zone_args() + (args or [])
kwargs['read_only'] = True
return self.json_command(zone.cluster, 'info', args, **kwargs)
def delete(self, zone, args = None, **kwargs):
""" delete the user """
assert(zone.cluster)
args = zone.zone_args() + (args or [])
return self.command(zone.cluster, 'delete', args, **kwargs)
| 14,263 | 33.960784 | 113 | py |
null | ceph-main/src/test/rgw/rgw_multi/tests.py | import json
import random
import string
import sys
import time
import logging
import errno
import dateutil.parser
from itertools import combinations
from itertools import zip_longest
from io import StringIO
import boto
import boto.s3.connection
from boto.s3.website import WebsiteConfiguration
from boto.s3.cors import CORSConfiguration
from nose.tools import eq_ as eq
from nose.tools import assert_not_equal, assert_equal
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
from .multisite import Zone, ZoneGroup, Credentials
from .conn import get_gateway_connection
from .tools import assert_raises
class Config:
""" test configuration """
def __init__(self, **kwargs):
# by default, wait up to 5 minutes before giving up on a sync checkpoint
self.checkpoint_retries = kwargs.get('checkpoint_retries', 60)
self.checkpoint_delay = kwargs.get('checkpoint_delay', 5)
# allow some time for realm reconfiguration after changing master zone
self.reconfigure_delay = kwargs.get('reconfigure_delay', 5)
self.tenant = kwargs.get('tenant', '')
# rgw multisite tests, written against the interfaces provided in rgw_multi.
# these tests must be initialized and run by another module that provides
# implementations of these interfaces by calling init_multi()
realm = None
user = None
config = None
def init_multi(_realm, _user, _config=None):
global realm
realm = _realm
global user
user = _user
global config
config = _config or Config()
realm_meta_checkpoint(realm)
def get_user():
return user.id if user is not None else ''
def get_tenant():
return config.tenant if config is not None and config.tenant is not None else ''
def get_realm():
return realm
log = logging.getLogger('rgw_multi.tests')
num_buckets = 0
run_prefix=''.join(random.choice(string.ascii_lowercase) for _ in range(6))
num_roles = 0
def get_zone_connection(zone, credentials):
""" connect to the zone's first gateway """
if isinstance(credentials, list):
credentials = credentials[0]
return get_gateway_connection(zone.gateways[0], credentials)
def mdlog_list(zone, period = None):
cmd = ['mdlog', 'list']
if period:
cmd += ['--period', period]
(mdlog_json, _) = zone.cluster.admin(cmd, read_only=True)
return json.loads(mdlog_json)
def mdlog_autotrim(zone):
zone.cluster.admin(['mdlog', 'autotrim'])
def datalog_list(zone, args = None):
cmd = ['datalog', 'list'] + (args or [])
(datalog_json, _) = zone.cluster.admin(cmd, read_only=True)
return json.loads(datalog_json)
def datalog_status(zone):
cmd = ['datalog', 'status']
(datalog_json, _) = zone.cluster.admin(cmd, read_only=True)
return json.loads(datalog_json)
def datalog_autotrim(zone):
zone.cluster.admin(['datalog', 'autotrim'])
def bilog_list(zone, bucket, args = None):
cmd = ['bilog', 'list', '--bucket', bucket] + (args or [])
cmd += ['--tenant', config.tenant, '--uid', user.name] if config.tenant else []
bilog, _ = zone.cluster.admin(cmd, read_only=True)
return json.loads(bilog)
def bilog_autotrim(zone, args = None):
zone.cluster.admin(['bilog', 'autotrim'] + (args or []))
def bucket_layout(zone, bucket, args = None):
(bl_output,_) = zone.cluster.admin(['bucket', 'layout', '--bucket', bucket] + (args or []))
return json.loads(bl_output)
def parse_meta_sync_status(meta_sync_status_json):
log.debug('current meta sync status=%s', meta_sync_status_json)
sync_status = json.loads(meta_sync_status_json)
sync_info = sync_status['sync_status']['info']
global_sync_status = sync_info['status']
num_shards = sync_info['num_shards']
period = sync_info['period']
realm_epoch = sync_info['realm_epoch']
sync_markers=sync_status['sync_status']['markers']
log.debug('sync_markers=%s', sync_markers)
assert(num_shards == len(sync_markers))
markers={}
for i in range(num_shards):
# get marker, only if it's an incremental marker for the same realm epoch
if realm_epoch > sync_markers[i]['val']['realm_epoch'] or sync_markers[i]['val']['state'] == 0:
markers[i] = ''
else:
markers[i] = sync_markers[i]['val']['marker']
return period, realm_epoch, num_shards, markers
def meta_sync_status(zone):
for _ in range(config.checkpoint_retries):
cmd = ['metadata', 'sync', 'status'] + zone.zone_args()
meta_sync_status_json, retcode = zone.cluster.admin(cmd, check_retcode=False, read_only=True)
if retcode == 0:
return parse_meta_sync_status(meta_sync_status_json)
assert(retcode == 2) # ENOENT
time.sleep(config.checkpoint_delay)
assert False, 'failed to read metadata sync status for zone=%s' % zone.name
def meta_master_log_status(master_zone):
cmd = ['mdlog', 'status'] + master_zone.zone_args()
mdlog_status_json, retcode = master_zone.cluster.admin(cmd, read_only=True)
mdlog_status = json.loads(mdlog_status_json)
markers = {i: s['marker'] for i, s in enumerate(mdlog_status)}
log.debug('master meta markers=%s', markers)
return markers
def compare_meta_status(zone, log_status, sync_status):
if len(log_status) != len(sync_status):
log.error('len(log_status)=%d, len(sync_status)=%d', len(log_status), len(sync_status))
return False
msg = ''
for i, l, s in zip(log_status, log_status.values(), sync_status.values()):
if l > s:
if len(msg):
msg += ', '
msg += 'shard=' + str(i) + ' master=' + l + ' target=' + s
if len(msg) > 0:
log.warning('zone %s behind master: %s', zone.name, msg)
return False
return True
def zone_meta_checkpoint(zone, meta_master_zone = None, master_status = None):
if not meta_master_zone:
meta_master_zone = zone.realm().meta_master_zone()
if not master_status:
master_status = meta_master_log_status(meta_master_zone)
current_realm_epoch = realm.current_period.data['realm_epoch']
log.info('starting meta checkpoint for zone=%s', zone.name)
for _ in range(config.checkpoint_retries):
period, realm_epoch, num_shards, sync_status = meta_sync_status(zone)
if realm_epoch < current_realm_epoch:
log.warning('zone %s is syncing realm epoch=%d, behind current realm epoch=%d',
zone.name, realm_epoch, current_realm_epoch)
else:
log.debug('log_status=%s', master_status)
log.debug('sync_status=%s', sync_status)
if compare_meta_status(zone, master_status, sync_status):
log.info('finish meta checkpoint for zone=%s', zone.name)
return
time.sleep(config.checkpoint_delay)
assert False, 'failed meta checkpoint for zone=%s' % zone.name
def zonegroup_meta_checkpoint(zonegroup, meta_master_zone = None, master_status = None):
if not meta_master_zone:
meta_master_zone = zonegroup.realm().meta_master_zone()
if not master_status:
master_status = meta_master_log_status(meta_master_zone)
for zone in zonegroup.zones:
if zone == meta_master_zone:
continue
zone_meta_checkpoint(zone, meta_master_zone, master_status)
def realm_meta_checkpoint(realm):
    """Checkpoint metadata sync for every zonegroup in the realm's current period."""
    log.info('meta checkpoint')
    master = realm.meta_master_zone()
    status = meta_master_log_status(master)
    for zg in realm.current_period.zonegroups:
        zonegroup_meta_checkpoint(zg, master, status)
def parse_data_sync_status(data_sync_status_json):
    """Parse `data sync status` JSON and return (num_shards, {shard: marker})."""
    log.debug('current data sync status=%s', data_sync_status_json)
    status = json.loads(data_sync_status_json)
    info = status['sync_status']['info']
    global_sync_status = info['status']
    num_shards = info['num_shards']
    shard_markers = status['sync_status']['markers']
    log.debug('sync_markers=%s', shard_markers)
    assert(num_shards == len(shard_markers))
    markers = {shard: shard_markers[shard]['val']['marker'] for shard in range(num_shards)}
    return (num_shards, markers)
def data_sync_status(target_zone, source_zone):
    """Poll `data sync status` on target_zone against source_zone.

    Returns (num_shards, {shard: marker}), or None when target == source.
    Retries on ENOENT (sync status objects not yet written), asserting
    after config.checkpoint_retries attempts.
    """
    if target_zone == source_zone:
        return None
    for _ in range(config.checkpoint_retries):
        cmd = ['data', 'sync', 'status'] + target_zone.zone_args()
        cmd += ['--source-zone', source_zone.name]
        data_sync_status_json, retcode = target_zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            return parse_data_sync_status(data_sync_status_json)
        assert(retcode == 2) # ENOENT
        time.sleep(config.checkpoint_delay)
    assert False, 'failed to read data sync status for target_zone=%s source_zone=%s' % \
            (target_zone.name, source_zone.name)
def bucket_sync_status(target_zone, source_zone, bucket_name):
    """Return {shard_key: incremental marker} for a bucket's sync from
    source_zone to target_zone, or None when target == source.

    Loops forever on ENOENT until the bucket sync markers exist.
    """
    if target_zone == source_zone:
        return None
    cmd = ['bucket', 'sync', 'markers'] + target_zone.zone_args()
    cmd += ['--source-zone', source_zone.name]
    cmd += ['--bucket', bucket_name]
    cmd += ['--tenant', config.tenant, '--uid', user.name] if config.tenant else []
    while True:
        bucket_sync_status_json, retcode = target_zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            break
        assert(retcode == 2) # ENOENT
    sync_status = json.loads(bucket_sync_status_json)
    markers={}
    for entry in sync_status:
        val = entry['val']
        pos = val['inc_marker']['position'].split('#')[-1] # get rid of shard id; e.g., 6#00000000002.132.3 -> 00000000002.132.3
        markers[entry['key']] = pos
    return markers
def data_source_log_status(source_zone):
    """Return {shard_id: max marker} from `datalog status` on the source zone."""
    cmd = ['datalog', 'status'] + source_zone.zone_args()
    datalog_status_json, retcode = source_zone.cluster.admin(cmd, read_only=True)
    shard_markers = {}
    for shard_id, entry in enumerate(json.loads(datalog_status_json)):
        shard_markers[shard_id] = entry['marker']
    log.debug('data markers for zone=%s markers=%s', source_zone.name, shard_markers)
    return shard_markers
def bucket_source_log_status(source_zone, bucket_name):
    """Return {shard_key: max bilog marker} for a bucket on its source zone.

    Buckets that have no bilog entries yet yield an empty dict.
    """
    cmd = ['bilog', 'status'] + source_zone.zone_args()
    cmd += ['--bucket', bucket_name]
    cmd += ['--tenant', config.tenant, '--uid', user.name] if config.tenant else []
    source_cluster = source_zone.cluster
    bilog_status_json, retcode = source_cluster.admin(cmd, read_only=True)
    bilog_status = json.loads(bilog_status_json)
    markers = {}
    # 'markers' is absent when the bucket has no bilog yet; the original bare
    # `except: pass` also hid unrelated errors, so look it up explicitly
    for s in bilog_status.get('markers', []):
        markers[s['key']] = s['val']
    log.debug('bilog markers for zone=%s bucket=%s markers=%s', source_zone.name, bucket_name, markers)
    return markers
def compare_data_status(target_zone, source_zone, log_status, sync_status):
    """Return True when target_zone's data sync markers have reached the
    source zone's datalog markers; log and return False otherwise."""
    if len(log_status) != len(sync_status):
        log.error('len(log_status)=%d len(sync_status)=%d', len(log_status), len(sync_status))
        return False
    behind = []
    for shard, source_marker, synced_marker in zip(log_status, log_status.values(), sync_status.values()):
        if source_marker > synced_marker:
            behind.append('shard=' + str(shard) + ' master=' + source_marker + ' target=' + synced_marker)
    if behind:
        log.warning('data of zone %s behind zone %s: %s', target_zone.name, source_zone.name, ', '.join(behind))
        return False
    return True
def compare_bucket_status(target_zone, source_zone, bucket_name, log_status, sync_status):
    """Return True when the bucket's sync markers on target_zone have reached
    the source zone's bilog markers; log and return False otherwise."""
    if len(log_status) != len(sync_status):
        log.error('len(log_status)=%d len(sync_status)=%d', len(log_status), len(sync_status))
        return False
    behind = []
    for shard, source_marker, synced_marker in zip(log_status, log_status.values(), sync_status.values()):
        if source_marker > synced_marker:
            behind.append('shard=' + str(shard) + ' master=' + source_marker + ' target=' + synced_marker)
    if behind:
        log.warning('bucket %s zone %s behind zone %s: %s', bucket_name, target_zone.name, source_zone.name, ', '.join(behind))
        return False
    return True
def zone_data_checkpoint(target_zone, source_zone):
    """Block until target_zone's data sync from source_zone reaches the
    source's current datalog markers; asserts after checkpoint_retries."""
    # nothing to wait for if this zone doesn't sync from the source
    if not target_zone.syncs_from(source_zone.name):
        return
    log_status = data_source_log_status(source_zone)
    log.info('starting data checkpoint for target_zone=%s source_zone=%s', target_zone.name, source_zone.name)
    for _ in range(config.checkpoint_retries):
        num_shards, sync_status = data_sync_status(target_zone, source_zone)
        log.debug('log_status=%s', log_status)
        log.debug('sync_status=%s', sync_status)
        if compare_data_status(target_zone, source_zone, log_status, sync_status):
            log.info('finished data checkpoint for target_zone=%s source_zone=%s',
                     target_zone.name, source_zone.name)
            return
        time.sleep(config.checkpoint_delay)
    assert False, 'failed data checkpoint for target_zone=%s source_zone=%s' % \
            (target_zone.name, source_zone.name)
def zonegroup_data_checkpoint(zonegroup_conns):
    """Data-sync checkpoint every zone against every read-write source zone."""
    for source in zonegroup_conns.rw_zones:
        for target in zonegroup_conns.zones:
            if source.zone == target.zone:
                continue
            log.debug('data checkpoint: source=%s target=%s', source.zone.name, target.zone.name)
            zone_data_checkpoint(target.zone, source.zone)
def zone_bucket_checkpoint(target_zone, source_zone, bucket_name):
    """Run `bucket sync checkpoint` on target_zone for one bucket from
    source_zone, letting radosgw-admin drive the retry loop."""
    if not target_zone.syncs_from(source_zone.name):
        return
    cmd = ['bucket', 'sync', 'checkpoint']
    cmd += ['--bucket', bucket_name, '--source-zone', source_zone.name]
    # give the admin command the same retry budget as our python checkpoints
    retry_delay_ms = config.checkpoint_delay * 1000
    timeout_sec = config.checkpoint_retries * config.checkpoint_delay
    cmd += ['--retry-delay-ms', str(retry_delay_ms), '--timeout-sec', str(timeout_sec)]
    cmd += target_zone.zone_args()
    target_zone.cluster.admin(cmd, debug_rgw=1)
def zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name):
    """Checkpoint one bucket across all zone pairs, then verify contents match."""
    for source in zonegroup_conns.rw_zones:
        for target in zonegroup_conns.zones:
            if source.zone == target.zone:
                continue
            log.debug('bucket checkpoint: source=%s target=%s bucket=%s', source.zone.name, target.zone.name, bucket_name)
            zone_bucket_checkpoint(target.zone, source.zone, bucket_name)
    # pairwise content comparison, skipping zones that don't store bucket data
    for source, target in combinations(zonegroup_conns.zones, 2):
        if target.zone.has_buckets():
            target.check_bucket_eq(source, bucket_name)
def set_master_zone(zone):
    """Promote `zone` to master, commit a new period, and wait for gateways
    to pick up the configuration change."""
    zone.modify(zone.cluster, ['--master'])
    zg = zone.zonegroup
    zg.period.update(zone, commit=True)
    zg.master_zone = zone
    log.info('Set master zone=%s, waiting %ds for reconfiguration..', zone.name, config.reconfigure_delay)
    time.sleep(config.reconfigure_delay)
def set_sync_from_all(zone, flag):
    """Toggle the zone's sync_from_all flag, commit the period, and wait."""
    value = 'true' if flag else 'false'
    zone.modify(zone.cluster, ['--sync-from-all={}'.format(value)])
    zone.zonegroup.period.update(zone, commit=True)
    log.info('Set sync_from_all flag on zone %s to %s', zone.name, value)
    time.sleep(config.reconfigure_delay)
def set_redirect_zone(zone, redirect_zone):
    """Point `zone` at `redirect_zone` (clear the redirect when None), commit, wait."""
    target_id = redirect_zone.id if redirect_zone else ''
    zone.modify(zone.cluster, ['--redirect-zone={}'.format(target_id)])
    zone.zonegroup.period.update(zone, commit=True)
    log.info('Set redirect_zone zone %s to "%s"', zone.name, target_id)
    time.sleep(config.reconfigure_delay)
def enable_bucket_sync(zone, bucket_name):
    """Re-enable multisite sync for a single bucket via radosgw-admin."""
    zone.cluster.admin(['bucket', 'sync', 'enable', '--bucket', bucket_name] + zone.zone_args())
def disable_bucket_sync(zone, bucket_name):
    """Disable multisite sync for a single bucket via radosgw-admin."""
    zone.cluster.admin(['bucket', 'sync', 'disable', '--bucket', bucket_name] + zone.zone_args())
def check_buckets_sync_status_obj_not_exist(zone, buckets):
    """Wait until none of `buckets` appears in the zone's `log list` output
    (i.e. their sync-status log objects are gone); asserts on timeout."""
    for _ in range(config.checkpoint_retries):
        # NOTE(review): uses zone.zone_arg() here while siblings use zone_args() — confirm both exist
        cmd = ['log', 'list'] + zone.zone_arg()
        log_list, ret = zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        for bucket in buckets:
            if log_list.find(':'+bucket+":") >= 0:
                break
        else:
            # no bucket matched: all sync status objects are gone
            return
        time.sleep(config.checkpoint_delay)
    assert False
def gen_bucket_name():
    """Return a unique bucket name scoped to this test run."""
    global num_buckets
    num_buckets += 1
    return '{}-{}'.format(run_prefix, num_buckets)
def gen_role_name():
    """Return a unique role name scoped to this test run."""
    global num_roles
    num_roles += 1
    return 'roles-{}-{}'.format(run_prefix, num_roles)
class ZonegroupConns:
    """S3 connections for every zone of a zonegroup, partitioned into
    read-only and read-write zones, with the master zone singled out."""
    def __init__(self, zonegroup):
        self.zonegroup = zonegroup
        self.zones = []
        self.ro_zones = []
        self.rw_zones = []
        self.master_zone = None
        for z in zonegroup.zones:
            conn = z.get_conn(user.credentials)
            self.zones.append(conn)
            # partition by zone mode
            (self.ro_zones if z.is_read_only() else self.rw_zones).append(conn)
            if z == zonegroup.master_zone:
                self.master_zone = conn
def check_all_buckets_exist(zone_conn, buckets):
    """Return True when every named bucket is visible through this zone
    connection; metadata-only zones pass trivially."""
    if not zone_conn.zone.has_buckets():
        return True
    for name in buckets:
        try:
            zone_conn.get_bucket(name)
        except:
            log.critical('zone %s does not contain bucket %s', zone_conn.zone.name, name)
            return False
    return True
def check_all_buckets_dont_exist(zone_conn, buckets):
    """Return True when none of the named buckets is visible through this
    zone connection; metadata-only zones pass trivially."""
    if not zone_conn.zone.has_buckets():
        return True
    for b in buckets:
        try:
            zone_conn.get_bucket(b)
        except:
            continue
        # bug fix: the original logged the undefined name `zone` (zone.zone),
        # raising NameError instead of reporting the leftover bucket
        log.critical('zone %s contains bucket %s', zone_conn.zone.name, b)
        return False
    return True
def create_role_per_zone(zonegroup_conns, roles_per_zone = 1):
    """Create `roles_per_zone` IAM roles in every rw zone.

    Returns (role_names, [(zone_conn, role), ...]).
    """
    role_names = []
    zone_roles = []
    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":[\"arn:aws:iam:::user/testuser\"]},\"Action\":[\"sts:AssumeRole\"]}]}"
    for zone in zonegroup_conns.rw_zones:
        for _ in range(roles_per_zone):
            name = gen_role_name()
            log.info('create role zone=%s name=%s', zone.name, name)
            role = zone.create_role("", name, policy_document, "")
            role_names.append(name)
            zone_roles.append((zone, role))
    return role_names, zone_roles
def create_bucket_per_zone(zonegroup_conns, buckets_per_zone = 1):
    """Create `buckets_per_zone` buckets in every rw zone.

    Returns (bucket_names, [(zone_conn, bucket), ...]).
    """
    names = []
    zone_buckets = []
    for zone in zonegroup_conns.rw_zones:
        for _ in range(buckets_per_zone):
            name = gen_bucket_name()
            log.info('create bucket zone=%s name=%s', zone.name, name)
            bucket = zone.create_bucket(name)
            names.append(name)
            zone_buckets.append((zone, bucket))
    return names, zone_buckets
def create_bucket_per_zone_in_realm():
    """Create one bucket per rw zone across every zonegroup in the realm."""
    all_names = []
    all_zone_buckets = []
    for zonegroup in realm.current_period.zonegroups:
        names, zone_buckets = create_bucket_per_zone(ZonegroupConns(zonegroup))
        all_names.extend(names)
        all_zone_buckets.extend(zone_buckets)
    return all_names, all_zone_buckets
def test_bucket_create():
    """Buckets created on each rw zone appear on every zone after meta sync."""
    zonegroup = realm.master_zonegroup()
    conns = ZonegroupConns(zonegroup)
    buckets, _ = create_bucket_per_zone(conns)
    zonegroup_meta_checkpoint(zonegroup)
    for zone_conn in conns.zones:
        assert check_all_buckets_exist(zone_conn, buckets)
def test_bucket_recreate():
    """Re-creating an existing bucket must be a no-op and not delete it anywhere."""
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, _ = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)
    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)
    # recreate buckets on all zones, make sure they weren't removed
    for zone in zonegroup_conns.rw_zones:
        for bucket_name in buckets:
            bucket = zone.create_bucket(bucket_name)
    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)
    zonegroup_meta_checkpoint(zonegroup)
    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)
def test_bucket_remove():
    """Bucket deletion on the owning zone propagates to every zone."""
    zonegroup = realm.master_zonegroup()
    conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(conns)
    zonegroup_meta_checkpoint(zonegroup)
    for zone_conn in conns.zones:
        assert check_all_buckets_exist(zone_conn, buckets)
    for zone_conn, bucket_name in zone_bucket:
        zone_conn.conn.delete_bucket(bucket_name)
    zonegroup_meta_checkpoint(zonegroup)
    for zone_conn in conns.zones:
        assert check_all_buckets_dont_exist(zone_conn, buckets)
def get_bucket(zone, bucket_name):
    """Fetch an existing bucket through the zone's S3 connection."""
    return zone.conn.get_bucket(bucket_name)
def get_key(zone, bucket_name, obj_name):
    """Look up an existing object key within a bucket on this zone."""
    return get_bucket(zone, bucket_name).get_key(obj_name)
def new_key(zone, bucket_name, obj_name):
    """Create a fresh (unwritten) object key within a bucket on this zone."""
    return get_bucket(zone, bucket_name).new_key(obj_name)
def check_bucket_eq(zone_conn1, zone_conn2, bucket):
    """Verify bucket contents match between two zones (skipped when the
    second zone doesn't store bucket data)."""
    if not zone_conn2.zone.has_buckets():
        return
    zone_conn2.check_bucket_eq(zone_conn1, bucket.name)
def check_role_eq(zone_conn1, zone_conn2, role):
    """Verify an IAM role matches between two zones (skipped when the
    second zone doesn't store roles)."""
    if not zone_conn2.zone.has_roles():
        return
    role_name = role['create_role_response']['create_role_result']['role']['role_name']
    zone_conn2.check_role_eq(zone_conn1, role_name)
def test_object_sync():
    """Objects (including awkward names) written on any rw zone sync to all zones."""
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    # object names chosen to exercise escaping/encoding in sync
    objnames = [ 'myobj', '_myobj', ':', '&' ]
    content = 'asdasd'
    # don't wait for meta sync just yet
    for zone, bucket_name in zone_bucket:
        for objname in objnames:
            k = new_key(zone, bucket_name, objname)
            k.set_contents_from_string(content)
    zonegroup_meta_checkpoint(zonegroup)
    for source_conn, bucket in zone_bucket:
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue
            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)
def test_object_delete():
    """Object deletion on the owning zone propagates to every other zone."""
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    objname = 'myobj'
    content = 'asdasd'
    # don't wait for meta sync just yet
    for zone, bucket in zone_bucket:
        k = new_key(zone, bucket, objname)
        k.set_contents_from_string(content)
    zonegroup_meta_checkpoint(zonegroup)
    # check object exists
    for source_conn, bucket in zone_bucket:
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue
            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)
    # check object removal
    for source_conn, bucket in zone_bucket:
        k = get_key(source_conn, bucket, objname)
        k.delete()
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue
            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)
def get_latest_object_version(key):
    """Return the version marked `is_latest` for this key, or None if absent."""
    return next((v for v in key.bucket.list_versions(key.name) if v.is_latest), None)
def test_versioned_object_incremental_sync():
    """Exercise incremental sync of versioned objects: interleaved creates,
    version deletes, and metadata-only (ACL) changes from every rw zone."""
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    # enable versioning
    for _, bucket in zone_bucket:
        bucket.configure_versioning(True)
    zonegroup_meta_checkpoint(zonegroup)
    # upload a dummy object to each bucket and wait for sync. this forces each
    # bucket to finish a full sync and switch to incremental
    for source_conn, bucket in zone_bucket:
        new_key(source_conn, bucket, 'dummy').set_contents_from_string('')
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue
            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
    for _, bucket in zone_bucket:
        # create and delete multiple versions of an object from each zone
        for zone_conn in zonegroup_conns.rw_zones:
            obj = 'obj-' + zone_conn.name
            k = new_key(zone_conn, bucket, obj)
            k.set_contents_from_string('version1')
            log.debug('version1 id=%s', k.version_id)
            # don't delete version1 - this tests that the initial version
            # doesn't get squashed into later versions
            # create and delete the following object versions to test that
            # the operations don't race with each other during sync
            k.set_contents_from_string('version2')
            log.debug('version2 id=%s', k.version_id)
            k.bucket.delete_key(obj, version_id=k.version_id)
            k.set_contents_from_string('version3')
            log.debug('version3 id=%s', k.version_id)
            k.bucket.delete_key(obj, version_id=k.version_id)
    for _, bucket in zone_bucket:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
    for _, bucket in zone_bucket:
        # overwrite the acls to test that metadata-only entries are applied
        for zone_conn in zonegroup_conns.rw_zones:
            obj = 'obj-' + zone_conn.name
            k = new_key(zone_conn, bucket.name, obj)
            v = get_latest_object_version(k)
            v.make_public()
    for _, bucket in zone_bucket:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
def test_concurrent_versioned_object_incremental_sync():
    """Race many concurrent versions of the same object from every rw zone
    and verify sync converges."""
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    zone = zonegroup_conns.rw_zones[0]
    # create a versioned bucket
    bucket = zone.create_bucket(gen_bucket_name())
    log.debug('created bucket=%s', bucket.name)
    bucket.configure_versioning(True)
    zonegroup_meta_checkpoint(zonegroup)
    # upload a dummy object and wait for sync. this forces each zone to finish
    # a full sync and switch to incremental
    new_key(zone, bucket, 'dummy').set_contents_from_string('')
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
    # create several concurrent versions on each zone and let them race to sync
    obj = 'obj'
    for i in range(10):
        for zone_conn in zonegroup_conns.rw_zones:
            k = new_key(zone_conn, bucket, obj)
            k.set_contents_from_string('version1')
            log.debug('zone=%s version=%s', zone_conn.zone.name, k.version_id)
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
    zonegroup_data_checkpoint(zonegroup_conns)
def test_version_suspended_incremental_sync():
    """Sync an object through the unversioned -> versioned -> suspended
    transitions, checkpointing after each state change."""
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    zone = zonegroup_conns.rw_zones[0]
    # create a non-versioned bucket
    bucket = zone.create_bucket(gen_bucket_name())
    log.debug('created bucket=%s', bucket.name)
    zonegroup_meta_checkpoint(zonegroup)
    # upload an initial object
    key1 = new_key(zone, bucket, 'obj')
    key1.set_contents_from_string('')
    log.debug('created initial version id=%s', key1.version_id)
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
    # enable versioning
    bucket.configure_versioning(True)
    zonegroup_meta_checkpoint(zonegroup)
    # re-upload the object as a new version
    key2 = new_key(zone, bucket, 'obj')
    key2.set_contents_from_string('')
    log.debug('created new version id=%s', key2.version_id)
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
    # suspend versioning
    bucket.configure_versioning(False)
    zonegroup_meta_checkpoint(zonegroup)
    # re-upload the object as a 'null' version
    key3 = new_key(zone, bucket, 'obj')
    key3.set_contents_from_string('')
    log.debug('created null version id=%s', key3.version_id)
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
def test_delete_marker_full_sync():
    """Delete markers created before the first sync must survive full sync."""
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    # enable versioning
    for _, bucket in zone_bucket:
        bucket.configure_versioning(True)
    zonegroup_meta_checkpoint(zonegroup)
    for zone, bucket in zone_bucket:
        # upload an initial object
        key1 = new_key(zone, bucket, 'obj')
        key1.set_contents_from_string('')
        # create a delete marker
        key2 = new_key(zone, bucket, 'obj')
        key2.delete()
    # wait for full sync
    for _, bucket in zone_bucket:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
def test_suspended_delete_marker_full_sync():
    """Like test_delete_marker_full_sync, but with versioning suspended
    before the objects and delete markers are created."""
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    # enable/suspend versioning
    for _, bucket in zone_bucket:
        bucket.configure_versioning(True)
        bucket.configure_versioning(False)
    zonegroup_meta_checkpoint(zonegroup)
    for zone, bucket in zone_bucket:
        # upload an initial object
        key1 = new_key(zone, bucket, 'obj')
        key1.set_contents_from_string('')
        # create a delete marker
        key2 = new_key(zone, bucket, 'obj')
        key2.delete()
    # wait for full sync
    for _, bucket in zone_bucket:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
def test_bucket_versioning():
    """Enabling versioning is reflected in each bucket's versioning status."""
    _, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        bucket.configure_versioning(True)
        status = bucket.get_versioning_status()
        assert(status.get('Versioning') == 'Enabled')
def test_bucket_acl():
    """Applying the canned public-read ACL adds a second (AllUsers) grant."""
    _, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        assert(len(bucket.get_acl().acl.grants) == 1) # just the owner grant
        bucket.set_acl('public-read')
        assert(len(bucket.get_acl().acl.grants) == 2) # owner + AllUsers
def test_bucket_cors():
    """A CORS configuration set on a bucket reads back unchanged."""
    _, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        cfg = CORSConfiguration()
        cfg.add_rule(['DELETE'], 'https://www.example.com', allowed_header='*', max_age_seconds=3000)
        bucket.set_cors(cfg)
        assert(bucket.get_cors().to_xml() == cfg.to_xml())
def test_bucket_delete_notempty():
    """Deleting a bucket that still holds an (unsynced) object must fail
    with 409 BucketNotEmpty, and the bucket must survive on the master."""
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)
    for zone_conn, bucket_name in zone_bucket:
        # upload an object to each bucket on its own zone
        conn = zone_conn.get_connection()
        bucket = conn.get_bucket(bucket_name)
        k = bucket.new_key('foo')
        k.set_contents_from_string('bar')
        # attempt to delete the bucket before this object can sync
        try:
            conn.delete_bucket(bucket_name)
        except boto.exception.S3ResponseError as e:
            assert(e.error_code == 'BucketNotEmpty')
            continue
        assert False # expected 409 BucketNotEmpty
    # assert that each bucket still exists on the master
    c1 = zonegroup_conns.master_zone.conn
    for _, bucket_name in zone_bucket:
        assert c1.get_bucket(bucket_name)
def test_multi_period_incremental_sync():
    """Cycle the master zone across three periods while one zone is down,
    then verify that the restarted zone replays all periods' mdlogs and
    that mdlog autotrim empties them everywhere."""
    zonegroup = realm.master_zonegroup()
    if len(zonegroup.zones) < 3:
        raise SkipTest("test_multi_period_incremental_sync skipped. Requires 3 or more zones in master zonegroup.")
    # periods to include in mdlog comparison
    mdlog_periods = [realm.current_period.id]
    # create a bucket in each zone
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)
    z1, z2, z3 = zonegroup.zones[0:3]
    assert(z1 == zonegroup.master_zone)
    # kill zone 3 gateways to freeze sync status to incremental in first period
    z3.stop()
    # change master to zone 2 -> period 2
    set_master_zone(z2)
    mdlog_periods += [realm.current_period.id]
    for zone_conn, _ in zone_bucket:
        if zone_conn.zone == z3:
            continue
        bucket_name = gen_bucket_name()
        log.info('create bucket zone=%s name=%s', zone_conn.name, bucket_name)
        bucket = zone_conn.conn.create_bucket(bucket_name)
        buckets.append(bucket_name)
    # wait for zone 1 to sync
    zone_meta_checkpoint(z1)
    # change master back to zone 1 -> period 3
    set_master_zone(z1)
    mdlog_periods += [realm.current_period.id]
    for zone_conn, bucket_name in zone_bucket:
        if zone_conn.zone == z3:
            continue
        bucket_name = gen_bucket_name()
        log.info('create bucket zone=%s name=%s', zone_conn.name, bucket_name)
        zone_conn.conn.create_bucket(bucket_name)
        buckets.append(bucket_name)
    # restart zone 3 gateway and wait for sync
    z3.start()
    zonegroup_meta_checkpoint(zonegroup)
    # verify that we end up with the same objects
    for bucket_name in buckets:
        for source_conn, _ in zone_bucket:
            for target_conn in zonegroup_conns.zones:
                if source_conn.zone == target_conn.zone:
                    continue
                if target_conn.zone.has_buckets():
                    target_conn.check_bucket_eq(source_conn, bucket_name)
    # verify that mdlogs are not empty and match for each period
    for period in mdlog_periods:
        master_mdlog = mdlog_list(z1, period)
        assert len(master_mdlog) > 0
        for zone in zonegroup.zones:
            if zone == z1:
                continue
            mdlog = mdlog_list(zone, period)
            assert len(mdlog) == len(master_mdlog)
    # autotrim mdlogs for master zone
    mdlog_autotrim(z1)
    # autotrim mdlogs for peers
    for zone in zonegroup.zones:
        if zone == z1:
            continue
        mdlog_autotrim(zone)
    # verify that mdlogs are empty for each period
    for period in mdlog_periods:
        for zone in zonegroup.zones:
            mdlog = mdlog_list(zone, period)
            assert len(mdlog) == 0
def test_datalog_autotrim():
    """After a full data checkpoint, datalog autotrim must leave only
    entries newer than each shard's pre-trim high-water timestamp."""
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    # upload an object to each zone to generate a datalog entry
    for zone, bucket in zone_bucket:
        k = new_key(zone, bucket.name, 'key')
        k.set_contents_from_string('body')
    # wait for metadata and data sync to catch up
    zonegroup_meta_checkpoint(zonegroup)
    zonegroup_data_checkpoint(zonegroup_conns)
    # trim each datalog
    for zone, _ in zone_bucket:
        # read max markers for each shard
        status = datalog_status(zone.zone)
        datalog_autotrim(zone.zone)
        for shard_id, shard_status in enumerate(status):
            try:
                before_trim = dateutil.parser.isoparse(shard_status['last_update'])
            except: # empty timestamps look like "0.000000" and will fail here
                continue
            entries = datalog_list(zone.zone, ['--shard-id', str(shard_id), '--max-entries', '1'])
            if not len(entries):
                continue
            after_trim = dateutil.parser.isoparse(entries[0]['timestamp'])
            assert before_trim < after_trim, "any datalog entries must be newer than trim"
def test_multi_zone_redirect():
    """With sync disabled on the second zone, reads there must fail until a
    redirect zone is configured, after which they are served by zone 1."""
    zonegroup = realm.master_zonegroup()
    if len(zonegroup.rw_zones) < 2:
        # bug fix: the skip message named the wrong test and the wrong
        # requirement (said 3 zones; the guard checks for 2 rw zones)
        raise SkipTest("test_multi_zone_redirect skipped. Requires 2 or more rw zones in master zonegroup.")
    zonegroup_conns = ZonegroupConns(zonegroup)
    (zc1, zc2) = zonegroup_conns.rw_zones[0:2]
    z1, z2 = (zc1.zone, zc2.zone)
    set_sync_from_all(z2, False)
    # create a bucket on the first zone
    bucket_name = gen_bucket_name()
    log.info('create bucket zone=%s name=%s', z1.name, bucket_name)
    bucket = zc1.conn.create_bucket(bucket_name)
    obj = 'testredirect'
    key = bucket.new_key(obj)
    data = 'A'*512
    key.set_contents_from_string(data)
    zonegroup_meta_checkpoint(zonegroup)
    # try to read object from second zone (should fail)
    bucket2 = get_bucket(zc2, bucket_name)
    assert_raises(boto.exception.S3ResponseError, bucket2.get_key, obj)
    set_redirect_zone(z2, z1)
    key2 = bucket2.get_key(obj)
    eq(data, key2.get_contents_as_string(encoding='ascii'))
    key = bucket.new_key(obj)
    for x in ['a', 'b', 'c', 'd']:
        data = x*512
        key.set_contents_from_string(data)
        eq(data, key2.get_contents_as_string(encoding='ascii'))
    # revert config changes
    set_sync_from_all(z2, True)
    set_redirect_zone(z2, None)
def test_zonegroup_remove():
    """Add a zone to the zonegroup, remove it, and verify a second removal
    fails with ENOENT; commit a clean period afterwards."""
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    if len(zonegroup.zones) < 2:
        raise SkipTest("test_zonegroup_remove skipped. Requires 2 or more zones in master zonegroup.")
    zonegroup_meta_checkpoint(zonegroup)
    z1, z2 = zonegroup.zones[0:2]
    c1, c2 = (z1.cluster, z2.cluster)
    # get admin credentials out of existing zone
    system_key = z1.data['system_key']
    admin_creds = Credentials(system_key['access_key'], system_key['secret_key'])
    # create a new zone in zonegroup on c2 and commit
    zone = Zone('remove', zonegroup, c2)
    zone.create(c2, admin_creds.credential_args())
    zonegroup.zones.append(zone)
    zonegroup.period.update(zone, commit=True)
    zonegroup.remove(c1, zone)
    # another 'zonegroup remove' should fail with ENOENT
    _, retcode = zonegroup.remove(c1, zone, check_retcode=False)
    assert(retcode == 2) # ENOENT
    # delete the new zone
    zone.delete(c2)
    # validate the resulting period
    zonegroup.period.update(z1, commit=True)
def test_zg_master_zone_delete():
    """A period commit must fail (EINVAL) while a zonegroup's master zone is
    deleted, and succeed once the dangling zonegroup itself is removed."""
    master_zg = realm.master_zonegroup()
    master_zone = master_zg.master_zone
    assert(len(master_zg.zones) >= 1)
    master_cluster = master_zg.zones[0].cluster
    rm_zg = ZoneGroup('remove_zg')
    rm_zg.create(master_cluster)
    rm_zone = Zone('remove', rm_zg, master_cluster)
    rm_zone.create(master_cluster)
    master_zg.period.update(master_zone, commit=True)
    rm_zone.delete(master_cluster)
    # Period update: This should now fail as the zone will be the master zone
    # in that zg
    _, retcode = master_zg.period.update(master_zone, check_retcode=False)
    assert(retcode == errno.EINVAL)
    # Proceed to delete the zonegroup as well, previous period now does not
    # contain a dangling master_zone, this must succeed
    rm_zg.delete(master_cluster)
    master_zg.period.update(master_zone, commit=True)
def test_set_bucket_website():
    """A website configuration set on a bucket reads back unchanged.

    Skips when the gateway rejects website config (static websites disabled).
    """
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        website_cfg = WebsiteConfiguration(suffix='index.html',error_key='error.html')
        try:
            bucket.set_website_configuration(website_cfg)
        except boto.exception.S3ResponseError as e:
            # NOTE(review): other S3 errors are swallowed here and the assert
            # below still runs — confirm that is intended
            if e.error_code == 'MethodNotAllowed':
                raise SkipTest("test_set_bucket_website skipped. Requires rgw_enable_static_website = 1.")
        assert(bucket.get_website_configuration_with_xml()[1] == website_cfg.to_xml())
def test_set_bucket_policy():
    """A bucket policy set on a bucket reads back byte-for-byte identical."""
    policy = '''{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Allow",
    "Principal": "*"
  }]
}'''
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        bucket.set_policy(policy)
        assert(bucket.get_policy().decode('ascii') == policy)
@attr('bucket_sync_disable')
def test_bucket_sync_disable():
    """After disabling per-bucket sync, the buckets' sync-status log objects
    must disappear on every zone."""
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)
    for bucket_name in buckets:
        disable_bucket_sync(realm.meta_master_zone(), bucket_name)
    for zone in zonegroup.zones:
        check_buckets_sync_status_obj_not_exist(zone, buckets)
    zonegroup_data_checkpoint(zonegroup_conns)
@attr('bucket_sync_disable')
def test_bucket_sync_enable_right_after_disable():
    """Disable and immediately re-enable bucket sync; objects uploaded after
    the toggle must still sync everywhere."""
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    objnames = ['obj1', 'obj2', 'obj3', 'obj4']
    content = 'asdasd'
    for zone, bucket in zone_bucket:
        for objname in objnames:
            k = new_key(zone, bucket.name, objname)
            k.set_contents_from_string(content)
    zonegroup_meta_checkpoint(zonegroup)
    for bucket_name in buckets:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name)
    # toggle sync off and straight back on
    for bucket_name in buckets:
        disable_bucket_sync(realm.meta_master_zone(), bucket_name)
        enable_bucket_sync(realm.meta_master_zone(), bucket_name)
    objnames_2 = ['obj5', 'obj6', 'obj7', 'obj8']
    for zone, bucket in zone_bucket:
        for objname in objnames_2:
            k = new_key(zone, bucket.name, objname)
            k.set_contents_from_string(content)
    for bucket_name in buckets:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name)
    zonegroup_data_checkpoint(zonegroup_conns)
@attr('bucket_sync_disable')
def test_bucket_sync_disable_enable():
    """Upload objects while bucket sync is disabled, then re-enable and
    verify those objects catch up on every zone."""
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    objnames = [ 'obj1', 'obj2', 'obj3', 'obj4' ]
    content = 'asdasd'
    for zone, bucket in zone_bucket:
        for objname in objnames:
            k = new_key(zone, bucket.name, objname)
            k.set_contents_from_string(content)
    zonegroup_meta_checkpoint(zonegroup)
    for bucket_name in buckets:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name)
    for bucket_name in buckets:
        disable_bucket_sync(realm.meta_master_zone(), bucket_name)
    zonegroup_meta_checkpoint(zonegroup)
    # these uploads happen while sync is disabled
    objnames_2 = [ 'obj5', 'obj6', 'obj7', 'obj8' ]
    for zone, bucket in zone_bucket:
        for objname in objnames_2:
            k = new_key(zone, bucket.name, objname)
            k.set_contents_from_string(content)
    for bucket_name in buckets:
        enable_bucket_sync(realm.meta_master_zone(), bucket_name)
    for bucket_name in buckets:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name)
    zonegroup_data_checkpoint(zonegroup_conns)
def test_multipart_object_sync():
    """A completed multipart upload syncs to every zone."""
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    _, bucket = zone_bucket[0]
    # initiate a multipart upload
    upload = bucket.initiate_multipart_upload('MULTIPART')
    mp = boto.s3.multipart.MultiPartUpload(bucket)
    mp.key_name = upload.key_name
    mp.id = upload.id
    part_size = 5 * 1024 * 1024 # 5M min part size
    mp.upload_part_from_file(StringIO('a' * part_size), 1)
    mp.upload_part_from_file(StringIO('b' * part_size), 2)
    mp.upload_part_from_file(StringIO('c' * part_size), 3)
    mp.upload_part_from_file(StringIO('d' * part_size), 4)
    mp.complete_upload()
    zonegroup_meta_checkpoint(zonegroup)
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
def test_encrypted_object_sync():
    """
    Upload SSE-C and SSE-KMS encrypted objects in one zone and verify they
    can be read back (decrypted) from a second zone after sync.
    Requires at least two read-write zones in the master zonegroup.
    """
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    if len(zonegroup.rw_zones) < 2:
        # bug fix: the skip message used to name the wrong test
        # ("test_zonegroup_remove")
        raise SkipTest("test_encrypted_object_sync skipped. Requires 2 or more zones in master zonegroup.")
    (zone1, zone2) = zonegroup_conns.rw_zones[0:2]
    # create a bucket on the first zone
    bucket_name = gen_bucket_name()
    log.info('create bucket zone=%s name=%s', zone1.name, bucket_name)
    bucket = zone1.conn.create_bucket(bucket_name)
    # upload an object with sse-c encryption
    sse_c_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
    }
    key = bucket.new_key('testobj-sse-c')
    data = 'A'*512
    key.set_contents_from_string(data, headers=sse_c_headers)
    # upload an object with sse-kms encryption
    sse_kms_headers = {
        'x-amz-server-side-encryption': 'aws:kms',
        # testkey-1 must be present in 'rgw crypt s3 kms encryption keys' (vstart.sh adds this)
        'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-1',
    }
    key = bucket.new_key('testobj-sse-kms')
    key.set_contents_from_string(data, headers=sse_kms_headers)
    # wait for the bucket metadata and data to sync
    zonegroup_meta_checkpoint(zonegroup)
    zone_bucket_checkpoint(zone2.zone, zone1.zone, bucket_name)
    # read the encrypted objects from the second zone
    bucket2 = get_bucket(zone2, bucket_name)
    # SSE-C reads must supply the same customer key headers
    key = bucket2.get_key('testobj-sse-c', headers=sse_c_headers)
    eq(data, key.get_contents_as_string(headers=sse_c_headers, encoding='ascii'))
    # SSE-KMS decryption is server-side; no extra headers needed on read
    key = bucket2.get_key('testobj-sse-kms')
    eq(data, key.get_contents_as_string(encoding='ascii'))
def test_bucket_index_log_trim():
    """
    Exercise bilog autotrim's active/cold bucket selection: only buckets
    considered 'active' are trimmed under restrictive limits, and raising
    min-cold-buckets trims everything.
    """
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    zone = zonegroup_conns.rw_zones[0]
    # create a test bucket, upload some objects, and wait for sync
    def make_test_bucket():
        name = gen_bucket_name()
        log.info('create bucket zone=%s name=%s', zone.name, name)
        bucket = zone.conn.create_bucket(name)
        for objname in ('a', 'b', 'c', 'd'):
            k = new_key(zone, name, objname)
            k.set_contents_from_string('foo')
        zonegroup_meta_checkpoint(zonegroup)
        zonegroup_bucket_checkpoint(zonegroup_conns, name)
        return bucket
    # create a 'cold' bucket
    cold_bucket = make_test_bucket()
    # trim with max-buckets=0 to clear counters for cold bucket. this should
    # prevent it from being considered 'active' by the next autotrim
    bilog_autotrim(zone.zone, [
        '--rgw-sync-log-trim-max-buckets', '0',
    ])
    # create an 'active' bucket
    active_bucket = make_test_bucket()
    # trim with max-buckets=1 min-cold-buckets=0 to trim active bucket only
    bilog_autotrim(zone.zone, [
        '--rgw-sync-log-trim-max-buckets', '1',
        '--rgw-sync-log-trim-min-cold-buckets', '0',
    ])
    # verify active bucket has empty bilog
    active_bilog = bilog_list(zone.zone, active_bucket.name)
    assert(len(active_bilog) == 0)
    # verify cold bucket has nonempty bilog
    cold_bilog = bilog_list(zone.zone, cold_bucket.name)
    assert(len(cold_bilog) > 0)
    # trim with min-cold-buckets=999 to trim all buckets
    bilog_autotrim(zone.zone, [
        '--rgw-sync-log-trim-max-buckets', '999',
        '--rgw-sync-log-trim-min-cold-buckets', '999',
    ])
    # verify cold bucket has empty bilog
    cold_bilog = bilog_list(zone.zone, cold_bucket.name)
    assert(len(cold_bilog) == 0)
def test_bucket_reshard_index_log_trim():
    """
    Verify that each reshard adds a bucket index log generation, and that
    successive bilog autotrim passes remove one old generation at a time,
    never the current one.
    """
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    zone = zonegroup_conns.rw_zones[0]
    # create a test bucket, upload some objects, and wait for sync
    def make_test_bucket():
        name = gen_bucket_name()
        log.info('create bucket zone=%s name=%s', zone.name, name)
        bucket = zone.conn.create_bucket(name)
        for objname in ('a', 'b', 'c', 'd'):
            k = new_key(zone, name, objname)
            k.set_contents_from_string('foo')
        zonegroup_meta_checkpoint(zonegroup)
        zonegroup_bucket_checkpoint(zonegroup_conns, name)
        return bucket
    # create a 'test' bucket
    test_bucket = make_test_bucket()
    # checking bucket layout before resharding
    json_obj_1 = bucket_layout(zone.zone, test_bucket.name)
    assert(len(json_obj_1['layout']['logs']) == 1)
    first_gen = json_obj_1['layout']['current_index']['gen']
    # the 4 uploads above are logged in the first generation
    before_reshard_bilog = bilog_list(zone.zone, test_bucket.name, ['--gen', str(first_gen)])
    assert(len(before_reshard_bilog) == 4)
    # Resharding the bucket
    zone.zone.cluster.admin(['bucket', 'reshard',
        '--bucket', test_bucket.name,
        '--num-shards', '3',
        '--yes-i-really-mean-it'])
    # checking bucket layout after 1st resharding
    json_obj_2 = bucket_layout(zone.zone, test_bucket.name)
    assert(len(json_obj_2['layout']['logs']) == 2)
    second_gen = json_obj_2['layout']['current_index']['gen']
    # the new generation starts empty until something is written
    after_reshard_bilog = bilog_list(zone.zone, test_bucket.name, ['--gen', str(second_gen)])
    assert(len(after_reshard_bilog) == 0)
    # upload more objects
    for objname in ('e', 'f', 'g', 'h'):
        k = new_key(zone, test_bucket.name, objname)
        k.set_contents_from_string('foo')
    zonegroup_bucket_checkpoint(zonegroup_conns, test_bucket.name)
    # Resharding the bucket again
    zone.zone.cluster.admin(['bucket', 'reshard',
        '--bucket', test_bucket.name,
        '--num-shards', '3',
        '--yes-i-really-mean-it'])
    # checking bucket layout after 2nd resharding
    json_obj_3 = bucket_layout(zone.zone, test_bucket.name)
    assert(len(json_obj_3['layout']['logs']) == 3)
    zonegroup_bucket_checkpoint(zonegroup_conns, test_bucket.name)
    bilog_autotrim(zone.zone)
    # checking bucket layout after 1st bilog autotrim: one old generation gone
    json_obj_4 = bucket_layout(zone.zone, test_bucket.name)
    assert(len(json_obj_4['layout']['logs']) == 2)
    bilog_autotrim(zone.zone)
    # checking bucket layout after 2nd bilog autotrim: only current gen left
    json_obj_5 = bucket_layout(zone.zone, test_bucket.name)
    assert(len(json_obj_5['layout']['logs']) == 1)
    bilog_autotrim(zone.zone)
    # upload more objects
    for objname in ('i', 'j', 'k', 'l'):
        k = new_key(zone, test_bucket.name, objname)
        k.set_contents_from_string('foo')
    zonegroup_bucket_checkpoint(zonegroup_conns, test_bucket.name)
    # verify the bucket has non-empty bilog: the current generation is
    # never trimmed away
    test_bilog = bilog_list(zone.zone, test_bucket.name)
    assert(len(test_bilog) > 0)
@attr('bucket_reshard')
def test_bucket_reshard_incremental():
    """
    Reshard a bucket while zones are in incremental sync, then verify
    objects written after the reshard still replicate.
    """
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    zone = zonegroup_conns.rw_zones[0]
    # create a bucket
    bucket = zone.create_bucket(gen_bucket_name())
    log.debug('created bucket=%s', bucket.name)
    zonegroup_meta_checkpoint(zonegroup)
    # upload some objects
    for objname in ('a', 'b', 'c', 'd'):
        k = new_key(zone, bucket.name, objname)
        k.set_contents_from_string('foo')
    # checkpoint ensures every zone reached incremental sync before reshard
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
    # reshard in each zone
    for z in zonegroup_conns.rw_zones:
        z.zone.cluster.admin(['bucket', 'reshard',
            '--bucket', bucket.name,
            '--num-shards', '3',
            '--yes-i-really-mean-it'])
    # upload more objects
    for objname in ('e', 'f', 'g', 'h'):
        k = new_key(zone, bucket.name, objname)
        k.set_contents_from_string('foo')
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
@attr('bucket_reshard')
def test_bucket_reshard_full():
    """
    Reshard a bucket while peer zones are down (forcing them into bucket
    full sync on restart) and verify all objects replicate.
    """
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    zone = zonegroup_conns.rw_zones[0]
    # create a bucket
    bucket = zone.create_bucket(gen_bucket_name())
    log.debug('created bucket=%s', bucket.name)
    zonegroup_meta_checkpoint(zonegroup)
    # stop gateways in other zones so we can force the bucket to full sync
    for z in zonegroup_conns.rw_zones[1:]:
        z.zone.stop()
    # use try-finally to restart gateways even if something fails
    try:
        # upload some objects
        for objname in ('a', 'b', 'c', 'd'):
            k = new_key(zone, bucket.name, objname)
            k.set_contents_from_string('foo')
        # reshard on first zone
        zone.zone.cluster.admin(['bucket', 'reshard',
            '--bucket', bucket.name,
            '--num-shards', '3',
            '--yes-i-really-mean-it'])
        # upload more objects
        for objname in ('e', 'f', 'g', 'h'):
            k = new_key(zone, bucket.name, objname)
            k.set_contents_from_string('foo')
    finally:
        for z in zonegroup_conns.rw_zones[1:]:
            z.zone.start()
    # checkpoint only passes once the restarted zones have caught up
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
def test_bucket_creation_time():
    """
    Verify every pair of rw zones agrees on bucket names and creation
    timestamps after metadata sync.
    """
    zg = realm.master_zonegroup()
    conns = ZonegroupConns(zg)
    zonegroup_meta_checkpoint(zg)
    per_zone_listings = [c.get_connection().get_all_buckets()
                         for c in conns.rw_zones]
    for listing_a, listing_b in combinations(per_zone_listings, 2):
        for lhs, rhs in zip(listing_a, listing_b):
            eq(lhs.name, rhs.name)
            eq(lhs.creation_date, rhs.creation_date)
def get_bucket_shard_objects(zone, num_shards):
    """
    Get one object for each shard of the bucket index log.
    Returns the 'objs' list from 'radosgw-admin bucket shard objects'.
    """
    command = ['bucket', 'shard', 'objects'] + zone.zone_args() + [
        '--num-shards', str(num_shards)]
    output, status = zone.cluster.admin(command, read_only=True)
    assert status == 0
    return json.loads(output)['objs']
def write_most_shards(zone, bucket_name, num_shards):
    """
    Write one object to most (but not all) bucket index shards.

    Fetches one representative object name per index shard, drops ~10% of
    them at random, and writes the rest.
    """
    objs = get_bucket_shard_objects(zone.zone, num_shards)
    random.shuffle(objs)
    # Bug fix: when len(objs) < 10 the old code did `del objs[-0:]`,
    # which deletes the ENTIRE list and writes nothing. Only trim when
    # there is actually something to drop.
    drop = len(objs) // 10
    if drop:
        del objs[-drop:]
    for obj in objs:
        k = new_key(zone, bucket_name, obj)
        k.set_contents_from_string('foo')
def reshard_bucket(zone, bucket_name, num_shards):
    """
    Reshard a bucket to the given shard count via radosgw-admin.
    """
    command = ['bucket', 'reshard'] + zone.zone_args() + [
        '--bucket', bucket_name,
        '--num-shards', str(num_shards),
        '--yes-i-really-mean-it',
    ]
    zone.cluster.admin(command)
def get_obj_names(zone, bucket_name, maxobjs):
    """
    Get names of objects in a bucket (up to maxobjs entries) via
    'radosgw-admin bucket list'.
    """
    command = ['bucket', 'list'] + zone.zone_args() + [
        '--bucket', bucket_name,
        '--max-entries', str(maxobjs),
    ]
    listing_json, _ = zone.cluster.admin(command, read_only=True)
    return [entry['name'] for entry in json.loads(listing_json)]
def bucket_keys_eq(zone1, zone2, bucket_name):
    """
    Ensure that two buckets have the same keys, but get the lists through
    radosgw-admin rather than S3 so it can be used when radosgw isn't running.
    Only works for buckets of 10,000 objects since the tests calling it don't
    need more, and the output from bucket list doesn't have an obvious marker
    with which to continue.
    """
    keys1 = get_obj_names(zone1, bucket_name, 10000)
    keys2 = get_obj_names(zone2, bucket_name, 10000)
    for key1, key2 in zip_longest(keys1, keys2):
        # Entries are plain strings (names), not key objects. The old code
        # logged `key1.name` when key1 was None, raising AttributeError
        # instead of the intended diagnostic; log the key that IS present.
        if key1 is None:
            log.critical('key=%s is missing from zone=%s', key2, zone1.name)
            assert False
        if key2 is None:
            log.critical('key=%s is missing from zone=%s', key1, zone2.name)
            assert False
        # Both lists come back in bucket-index order, so matching
        # positions must hold the same key name.
        if key1 != key2:
            log.critical('key mismatch: zone=%s has key=%s, zone=%s has key=%s',
                         zone1.name, key1, zone2.name, key2)
            assert False
@attr('bucket_reshard')
def test_bucket_sync_run_basic_incremental():
    """
    Create several generations of objects, then run bucket sync
    run to ensure they're all processed.
    """
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    primary = zonegroup_conns.rw_zones[0]
    # create a bucket write objects to it and wait for them to sync, ensuring
    # we are in incremental.
    bucket = primary.create_bucket(gen_bucket_name())
    log.debug('created bucket=%s', bucket.name)
    zonegroup_meta_checkpoint(zonegroup)
    write_most_shards(primary, bucket.name, 11)
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
    try:
        # stop gateways in other zones so we can rely on bucket sync run
        for secondary in zonegroup_conns.rw_zones[1:]:
            secondary.zone.stop()
        # build up multiple generations each with some objects written to
        # them.
        generations = [17, 19, 23, 29, 31, 37]
        for num_shards in generations:
            reshard_bucket(primary.zone, bucket.name, num_shards)
            write_most_shards(primary, bucket.name, num_shards)
        # bucket sync run on every secondary
        for secondary in zonegroup_conns.rw_zones[1:]:
            cmd = ['bucket', 'sync', 'run'] + secondary.zone.zone_args()
            cmd += ['--bucket', bucket.name, '--source-zone', primary.name]
            secondary.zone.cluster.admin(cmd)
            # compare key lists via radosgw-admin since the gateway is down
            bucket_keys_eq(primary.zone, secondary.zone, bucket.name)
    finally:
        # Restart so bucket_checkpoint can actually fetch things from the
        # secondaries. Put this in a finally block so they restart even on
        # error.
        for secondary in zonegroup_conns.rw_zones[1:]:
            secondary.zone.start()
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
def trash_bucket(zone, bucket_name):
    """
    Remove objects through radosgw-admin, zapping bilog to prevent the deletes
    from replicating.
    """
    # delete every object in the bucket (up to the 10k helper limit)
    for name in get_obj_names(zone, bucket_name, 10000):
        rm_cmd = ['object', 'rm'] + zone.zone_args() + [
            '--bucket', bucket_name,
            '--object', name,
        ]
        zone.cluster.admin(rm_cmd)
    # wipe the bucket index log so the deletions never sync out
    trim_cmd = ['bilog', 'trim'] + zone.zone_args() + [
        '--bucket', bucket_name,
    ]
    zone.cluster.admin(trim_cmd)
@attr('bucket_reshard')
def test_zap_init_bucket_sync_run():
    """
    Create several generations of objects, trash them, then run bucket sync init
    and bucket sync run.
    """
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    primary = zonegroup_conns.rw_zones[0]
    bucket = primary.create_bucket(gen_bucket_name())
    log.debug('created bucket=%s', bucket.name)
    zonegroup_meta_checkpoint(zonegroup)
    # Write zeroth generation
    for obj in range(1, 6):
        k = new_key(primary, bucket.name, f'obj{obj * 11}')
        k.set_contents_from_string('foo')
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
    # Write several more generations (each reshard starts a new one)
    generations = [17, 19, 23, 29, 31, 37]
    for num_shards in generations:
        reshard_bucket(primary.zone, bucket.name, num_shards)
        for obj in range(1, 6):
            k = new_key(primary, bucket.name, f'obj{obj * num_shards}')
            k.set_contents_from_string('foo')
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
    # Stop gateways, trash bucket, init, sync, and restart for every secondary
    for secondary in zonegroup_conns.rw_zones[1:]:
        try:
            secondary.zone.stop()
            # delete the secondary's copy without letting the deletes sync
            trash_bucket(secondary.zone, bucket.name)
            # restart bucket sync from scratch against the primary
            cmd = ['bucket', 'sync', 'init'] + secondary.zone.zone_args()
            cmd += ['--bucket', bucket.name]
            cmd += ['--source-zone', primary.name]
            secondary.zone.cluster.admin(cmd)
            cmd = ['bucket', 'sync', 'run'] + secondary.zone.zone_args()
            cmd += ['--bucket', bucket.name, '--source-zone', primary.name]
            secondary.zone.cluster.admin(cmd)
            bucket_keys_eq(primary.zone, secondary.zone, bucket.name)
        finally:
            # Do this as a finally so we bring the zone back up even on error.
            secondary.zone.start()
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
def test_role_sync():
    """
    Create one IAM role per zone and verify each role replicates to all
    other zones after metadata sync.
    """
    zg = realm.master_zonegroup()
    conns = ZonegroupConns(zg)
    _, zone_role = create_role_per_zone(conns)
    zonegroup_meta_checkpoint(zg)
    for src_conn, role in zone_role:
        for dst_conn in conns.zones:
            # skip comparing a zone against itself
            if src_conn.zone != dst_conn.zone:
                check_role_eq(src_conn, dst_conn, role)
@attr('data_sync_init')
def test_bucket_full_sync_after_data_sync_init():
    """
    After 'data sync init' on a stopped secondary, objects written in the
    meantime must replicate via bucket full sync once it restarts.
    """
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    primary = zonegroup_conns.rw_zones[0]
    secondary = zonegroup_conns.rw_zones[1]
    bucket = primary.create_bucket(gen_bucket_name())
    log.debug('created bucket=%s', bucket.name)
    zonegroup_meta_checkpoint(zonegroup)
    try:
        # stop secondary zone before it starts a bucket full sync
        secondary.zone.stop()
        # write some objects that don't sync yet
        for obj in range(1, 6):
            k = new_key(primary, bucket.name, f'obj{obj * 11}')
            k.set_contents_from_string('foo')
        # reset the secondary's data sync status against the primary
        cmd = ['data', 'sync', 'init'] + secondary.zone.zone_args()
        cmd += ['--source-zone', primary.name]
        secondary.zone.cluster.admin(cmd)
    finally:
        # Do this as a finally so we bring the zone back up even on error.
        secondary.zone.start()
    # expect all objects to replicate via 'bucket full sync'
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
    zonegroup_data_checkpoint(zonegroup_conns)
@attr('data_sync_init')
@attr('bucket_reshard')
def test_resharded_bucket_full_sync_after_data_sync_init():
    """
    Like test_bucket_full_sync_after_data_sync_init, but the bucket is
    resharded several times while the secondary is down, so full sync must
    handle multiple index-log generations.
    """
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    primary = zonegroup_conns.rw_zones[0]
    secondary = zonegroup_conns.rw_zones[1]
    bucket = primary.create_bucket(gen_bucket_name())
    log.debug('created bucket=%s', bucket.name)
    zonegroup_meta_checkpoint(zonegroup)
    try:
        # stop secondary zone before it starts a bucket full sync
        secondary.zone.stop()
        # Write zeroth generation
        for obj in range(1, 6):
            k = new_key(primary, bucket.name, f'obj{obj * 11}')
            k.set_contents_from_string('foo')
        # Write several more generations (one reshard each)
        generations = [17, 19, 23, 29, 31, 37]
        for num_shards in generations:
            reshard_bucket(primary.zone, bucket.name, num_shards)
            for obj in range(1, 6):
                k = new_key(primary, bucket.name, f'obj{obj * num_shards}')
                k.set_contents_from_string('foo')
        # reset the secondary's data sync status against the primary
        cmd = ['data', 'sync', 'init'] + secondary.zone.zone_args()
        cmd += ['--source-zone', primary.name]
        secondary.zone.cluster.admin(cmd)
    finally:
        # Do this as a finally so we bring the zone back up even on error.
        secondary.zone.start()
    # expect all objects to replicate via 'bucket full sync'
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
    zonegroup_data_checkpoint(zonegroup_conns)
@attr('data_sync_init')
def test_bucket_incremental_sync_after_data_sync_init():
    """
    After 'data sync init' on a stopped secondary that had already reached
    incremental sync, the remaining objects must replicate via bucket
    incremental sync on restart.
    """
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    primary = zonegroup_conns.rw_zones[0]
    secondary = zonegroup_conns.rw_zones[1]
    bucket = primary.create_bucket(gen_bucket_name())
    log.debug('created bucket=%s', bucket.name)
    zonegroup_meta_checkpoint(zonegroup)
    # upload a dummy object and wait for sync. this forces each zone to finish
    # a full sync and switch to incremental
    k = new_key(primary, bucket, 'dummy')
    k.set_contents_from_string('foo')
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
    try:
        # stop secondary zone before it syncs the rest
        secondary.zone.stop()
        # Write more objects to primary
        for obj in range(1, 6):
            k = new_key(primary, bucket.name, f'obj{obj * 11}')
            k.set_contents_from_string('foo')
        # reset the secondary's data sync status against the primary
        cmd = ['data', 'sync', 'init'] + secondary.zone.zone_args()
        cmd += ['--source-zone', primary.name]
        secondary.zone.cluster.admin(cmd)
    finally:
        # Do this as a finally so we bring the zone back up even on error.
        secondary.zone.start()
    # expect remaining objects to replicate via 'bucket incremental sync'
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
    zonegroup_data_checkpoint(zonegroup_conns)
@attr('data_sync_init')
@attr('bucket_reshard')
def test_resharded_bucket_incremental_sync_latest_after_data_sync_init():
    """
    Let the secondary catch up to the latest index-log generation, then
    'data sync init' while it is down; new writes to that latest generation
    must replicate via bucket incremental sync.
    """
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    primary = zonegroup_conns.rw_zones[0]
    secondary = zonegroup_conns.rw_zones[1]
    bucket = primary.create_bucket(gen_bucket_name())
    log.debug('created bucket=%s', bucket.name)
    zonegroup_meta_checkpoint(zonegroup)
    # Write zeroth generation to primary
    for obj in range(1, 6):
        k = new_key(primary, bucket.name, f'obj{obj * 11}')
        k.set_contents_from_string('foo')
    # Write several more generations (one reshard each)
    generations = [17, 19, 23, 29, 31, 37]
    for num_shards in generations:
        reshard_bucket(primary.zone, bucket.name, num_shards)
        for obj in range(1, 6):
            k = new_key(primary, bucket.name, f'obj{obj * num_shards}')
            k.set_contents_from_string('foo')
    # wait for the secondary to catch up to the latest gen
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
    try:
        # stop secondary zone before it syncs the rest
        secondary.zone.stop()
        # write some more objects to the last gen
        for obj in range(1, 6):
            k = new_key(primary, bucket.name, f'obj{obj * generations[-1]}')
            k.set_contents_from_string('foo')
        # reset the secondary's data sync status against the primary
        cmd = ['data', 'sync', 'init'] + secondary.zone.zone_args()
        cmd += ['--source-zone', primary.name]
        secondary.zone.cluster.admin(cmd)
    finally:
        # Do this as a finally so we bring the zone back up even on error.
        secondary.zone.start()
    # expect remaining objects in last gen to replicate via 'bucket incremental sync'
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
    zonegroup_data_checkpoint(zonegroup_conns)
@attr('data_sync_init')
@attr('bucket_reshard')
def test_resharded_bucket_incremental_sync_oldest_after_data_sync_init():
    """
    Secondary catches up only to the oldest generation, then all reshards
    happen while it is down followed by 'data sync init'; every later
    generation must replicate via bucket incremental sync on restart.
    """
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    primary = zonegroup_conns.rw_zones[0]
    secondary = zonegroup_conns.rw_zones[1]
    bucket = primary.create_bucket(gen_bucket_name())
    log.debug('created bucket=%s', bucket.name)
    zonegroup_meta_checkpoint(zonegroup)
    # Write zeroth generation to primary
    for obj in range(1, 6):
        k = new_key(primary, bucket.name, f'obj{obj * 11}')
        k.set_contents_from_string('foo')
    # wait for the secondary to catch up
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
    try:
        # stop secondary zone before it syncs later generations
        secondary.zone.stop()
        # Write several more generations (one reshard each)
        generations = [17, 19, 23, 29, 31, 37]
        for num_shards in generations:
            reshard_bucket(primary.zone, bucket.name, num_shards)
            for obj in range(1, 6):
                k = new_key(primary, bucket.name, f'obj{obj * num_shards}')
                k.set_contents_from_string('foo')
        # reset the secondary's data sync status against the primary
        cmd = ['data', 'sync', 'init'] + secondary.zone.zone_args()
        cmd += ['--source-zone', primary.name]
        secondary.zone.cluster.admin(cmd)
    finally:
        # Do this as a finally so we bring the zone back up even on error.
        secondary.zone.start()
    # expect all generations to replicate via 'bucket incremental sync'
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
    zonegroup_data_checkpoint(zonegroup_conns)
def sync_info(cluster, bucket = None):
    """
    Run 'radosgw-admin sync info' (optionally scoped to a bucket) and
    return the parsed JSON output. Asserts on a nonzero exit code.
    """
    cmd = ['sync', 'info']
    if bucket:
        cmd += ['--bucket', bucket]
    (result_json, retcode) = cluster.admin(cmd)
    if retcode != 0:
        # bug fix: the message used to say 'failed to get sync policy',
        # copied from get_sync_policy()
        assert False, 'failed to get sync info'
    return json.loads(result_json)
def get_sync_policy(cluster, bucket = None):
    """
    Fetch the sync policy (zonegroup-wide, or for a single bucket) and
    return it as parsed JSON. Asserts on a nonzero exit code.
    """
    cmd = ['sync', 'policy', 'get']
    if bucket:
        cmd.extend(['--bucket', bucket])
    out, rc = cluster.admin(cmd)
    assert rc == 0, 'failed to get sync policy'
    return json.loads(out)
def create_sync_policy_group(cluster, group, status = "allowed", bucket = None):
    """
    Create a sync policy group (zonegroup-wide unless 'bucket' is given)
    and return the parsed JSON result. Asserts on a nonzero exit code.
    """
    cmd = ['sync', 'group', 'create', '--group-id', group, '--status', status]
    if bucket:
        cmd.extend(['--bucket', bucket])
    out, rc = cluster.admin(cmd)
    assert rc == 0, 'failed to create sync policy group id=%s, bucket=%s' % (group, bucket)
    return json.loads(out)
def set_sync_policy_group_status(cluster, group, status, bucket = None):
    """
    Change an existing sync policy group's status ('allowed', 'enabled',
    'forbidden') and return the parsed JSON result.
    """
    cmd = ['sync', 'group', 'modify', '--group-id', group, '--status', status]
    if bucket:
        cmd.extend(['--bucket', bucket])
    out, rc = cluster.admin(cmd)
    assert rc == 0, 'failed to set sync policy group id=%s, bucket=%s' % (group, bucket)
    return json.loads(out)
def get_sync_policy_group(cluster, group, bucket = None):
    """
    Fetch a single sync policy group by id and return the parsed JSON.
    Asserts on a nonzero exit code.
    """
    cmd = ['sync', 'group', 'get', '--group-id', group]
    if bucket:
        cmd.extend(['--bucket', bucket])
    out, rc = cluster.admin(cmd)
    assert rc == 0, 'failed to get sync policy group id=%s, bucket=%s' % (group, bucket)
    return json.loads(out)
def remove_sync_policy_group(cluster, group, bucket = None):
    """
    Remove a sync policy group by id and return the parsed JSON result.
    Asserts on a nonzero exit code.
    """
    cmd = ['sync', 'group', 'remove', '--group-id', group]
    if bucket:
        cmd.extend(['--bucket', bucket])
    out, rc = cluster.admin(cmd)
    assert rc == 0, 'failed to remove sync policy group id=%s, bucket=%s' % (group, bucket)
    return json.loads(out)
def create_sync_group_flow_symmetrical(cluster, group, flow_id, zones, bucket = None):
    """
    Add a symmetrical (bidirectional) flow between the given comma-separated
    zones to a sync policy group. Returns the parsed JSON result.
    """
    cmd = ['sync', 'group', 'flow', 'create', '--group-id', group,
           '--flow-id', flow_id, '--flow-type', 'symmetrical',
           '--zones=%s' % zones]
    if bucket:
        cmd.extend(['--bucket', bucket])
    out, rc = cluster.admin(cmd)
    assert rc == 0, 'failed to create sync group flow symmetrical groupid=%s, flow_id=%s, zones=%s, bucket=%s' % (group, flow_id, zones, bucket)
    return json.loads(out)
def create_sync_group_flow_directional(cluster, group, flow_id, src_zones, dest_zones, bucket = None):
    """
    Add a directional (one-way) flow from src_zones to dest_zones to a sync
    policy group. Returns the parsed JSON result.
    """
    cmd = ['sync', 'group', 'flow', 'create', '--group-id', group,
           '--flow-id', flow_id, '--flow-type', 'directional',
           '--source-zone=%s' % src_zones, '--dest-zone=%s' % dest_zones]
    if bucket:
        cmd.extend(['--bucket', bucket])
    out, rc = cluster.admin(cmd)
    assert rc == 0, 'failed to create sync group flow directional groupid=%s, flow_id=%s, src_zones=%s, dest_zones=%s, bucket=%s' % (group, flow_id, src_zones, dest_zones, bucket)
    return json.loads(out)
def remove_sync_group_flow_symmetrical(cluster, group, flow_id, zones = None, bucket = None):
    """
    Remove a symmetrical flow from a sync policy group; 'zones' optionally
    restricts removal to the listed zones. Returns the parsed JSON result.
    """
    cmd = ['sync', 'group', 'flow', 'remove', '--group-id', group,
           '--flow-id', flow_id, '--flow-type', 'symmetrical']
    if zones:
        cmd.append('--zones=%s' % zones)
    if bucket:
        cmd.extend(['--bucket', bucket])
    out, rc = cluster.admin(cmd)
    assert rc == 0, 'failed to remove sync group flow symmetrical groupid=%s, flow_id=%s, zones=%s, bucket=%s' % (group, flow_id, zones, bucket)
    return json.loads(out)
def remove_sync_group_flow_directional(cluster, group, flow_id, src_zones, dest_zones, bucket = None):
    """
    Remove a directional flow (src_zones -> dest_zones) from a sync policy
    group. Returns the parsed JSON result.
    """
    cmd = ['sync', 'group', 'flow', 'remove', '--group-id', group,
           '--flow-id', flow_id, '--flow-type', 'directional',
           '--source-zone=%s' % src_zones, '--dest-zone=%s' % dest_zones]
    if bucket:
        cmd.extend(['--bucket', bucket])
    out, rc = cluster.admin(cmd)
    assert rc == 0, 'failed to remove sync group flow directional groupid=%s, flow_id=%s, src_zones=%s, dest_zones=%s, bucket=%s' % (group, flow_id, src_zones, dest_zones, bucket)
    return json.loads(out)
def create_sync_group_pipe(cluster, group, pipe_id, src_zones, dest_zones, bucket = None, args = None):
    """
    Add a pipe (src_zones -> dest_zones) to a sync policy group. Extra
    radosgw-admin arguments may be passed as a list via 'args'.
    Returns the parsed JSON result; asserts on a nonzero exit code.

    Fix: default for 'args' changed from a shared mutable list literal
    ([]) to None — it was only ever read, so behavior is unchanged.
    """
    cmd = ['sync', 'group', 'pipe', 'create', '--group-id', group,
           '--pipe-id', pipe_id, '--source-zones=%s' % src_zones,
           '--dest-zones=%s' % dest_zones]
    if bucket:
        cmd.append('--bucket=' + bucket)
    if args:
        cmd += args
    (result_json, retcode) = cluster.admin(cmd)
    if retcode != 0:
        assert False, 'failed to create sync group pipe groupid=%s, pipe_id=%s, src_zones=%s, dest_zones=%s, bucket=%s' % (group, pipe_id, src_zones, dest_zones, bucket)
    return json.loads(result_json)
def remove_sync_group_pipe(cluster, group, pipe_id, bucket = None, args = None):
    """
    Remove a pipe from a sync policy group. Extra radosgw-admin arguments
    may be passed via 'args' (a list or a single string).
    Returns the parsed JSON result; asserts on a nonzero exit code.
    """
    cmd = ['sync', 'group', 'pipe', 'remove', '--group-id', group,
           '--pipe-id', pipe_id]
    if bucket:
        cmd.append('--bucket=' + bucket)
    if args:
        # fix: cmd.append(args) pushed a whole list as ONE argv element;
        # splice lists, append single strings
        cmd += args if isinstance(args, list) else [args]
    (result_json, retcode) = cluster.admin(cmd)
    if retcode != 0:
        # fix: the old message referenced src_zones/dest_zones, which do
        # not exist in this function, raising NameError on failure
        assert False, 'failed to remove sync group pipe groupid=%s, pipe_id=%s, bucket=%s' % (group, pipe_id, bucket)
    return json.loads(result_json)
def create_zone_bucket(zone):
    """
    Create a bucket with a generated name in the given zone connection and
    return it.
    """
    name = gen_bucket_name()
    log.info('create bucket zone=%s name=%s', zone.name, name)
    return zone.create_bucket(name)
def create_object(zone_conn, bucket, objname, content):
    """
    Upload a single object with the given content into 'bucket' through
    the given zone connection.
    """
    key = new_key(zone_conn, bucket.name, objname)
    key.set_contents_from_string(content)
def create_objects(zone_conn, bucket, obj_arr, content):
    """
    Upload every object name in 'obj_arr' with identical content.
    """
    for name in obj_arr:
        create_object(zone_conn, bucket, name, content)
def check_object_exists(bucket, objname, content = None):
    """
    Assert that 'objname' exists in 'bucket'; when 'content' is given,
    also assert the stored body matches it.
    """
    key = bucket.get_key(objname)
    assert_not_equal(key, None)
    if content is not None:
        assert_equal(key.get_contents_as_string(encoding='ascii'), content)
def check_objects_exist(bucket, obj_arr, content = None):
    """
    Assert that every name in 'obj_arr' exists in 'bucket' (and matches
    'content' if given).
    """
    for name in obj_arr:
        check_object_exists(bucket, name, content)
def check_object_not_exists(bucket, objname):
    """
    Assert that 'objname' is absent from 'bucket'.
    """
    assert_equal(bucket.get_key(objname), None)
def check_objects_not_exist(bucket, obj_arr):
    """
    Assert that every name in 'obj_arr' is absent from 'bucket'.
    """
    for name in obj_arr:
        check_object_not_exists(bucket, name)
@attr('sync_policy')
def test_sync_policy_config_zonegroup():
    """
    test_sync_policy_config_zonegroup:
        test configuration of all sync commands
    """
    zonegroup = realm.master_zonegroup()
    zonegroup_meta_checkpoint(zonegroup)
    zonegroup_conns = ZonegroupConns(zonegroup)
    z1, z2 = zonegroup.zones[0:2]
    c1, c2 = (z1.cluster, z2.cluster)
    zones = z1.name+","+z2.name
    c1.admin(['sync', 'policy', 'get'])
    # (a) zonegroup level: create group, flows and pipe, then commit
    create_sync_policy_group(c1, "sync-group")
    set_sync_policy_group_status(c1, "sync-group", "enabled")
    get_sync_policy_group(c1, "sync-group")
    get_sync_policy(c1)
    create_sync_group_flow_symmetrical(c1, "sync-group", "sync-flow1", zones)
    create_sync_group_flow_directional(c1, "sync-group", "sync-flow2", z1.name, z2.name)
    create_sync_group_pipe(c1, "sync-group", "sync-pipe", zones, zones)
    get_sync_policy_group(c1, "sync-group")
    # zonegroup-level policy changes only take effect on period commit
    zonegroup.period.update(z1, commit=True)
    # (b) bucket level: same command set scoped to one bucket
    zc1, zc2 = zonegroup_conns.zones[0:2]
    bucket = create_zone_bucket(zc1)
    bucket_name = bucket.name
    create_sync_policy_group(c1, "sync-bucket", "allowed", bucket_name)
    set_sync_policy_group_status(c1, "sync-bucket", "enabled", bucket_name)
    get_sync_policy_group(c1, "sync-bucket", bucket_name)
    get_sync_policy(c1, bucket_name)
    create_sync_group_flow_symmetrical(c1, "sync-bucket", "sync-flow1", zones, bucket_name)
    create_sync_group_flow_directional(c1, "sync-bucket", "sync-flow2", z1.name, z2.name, bucket_name)
    create_sync_group_pipe(c1, "sync-bucket", "sync-pipe", zones, zones, bucket_name)
    get_sync_policy_group(c1, "sync-bucket", bucket_name)
    # bucket-level policy syncs via metadata, no period commit needed
    zonegroup_meta_checkpoint(zonegroup)
    # tear down bucket-level config
    remove_sync_group_pipe(c1, "sync-bucket", "sync-pipe", bucket_name)
    remove_sync_group_flow_directional(c1, "sync-bucket", "sync-flow2", z1.name, z2.name, bucket_name)
    remove_sync_group_flow_symmetrical(c1, "sync-bucket", "sync-flow1", zones, bucket_name)
    remove_sync_policy_group(c1, "sync-bucket", bucket_name)
    get_sync_policy(c1, bucket_name)
    zonegroup_meta_checkpoint(zonegroup)
    # tear down zonegroup-level config and commit again
    remove_sync_group_pipe(c1, "sync-group", "sync-pipe")
    remove_sync_group_flow_directional(c1, "sync-group", "sync-flow2", z1.name, z2.name)
    remove_sync_group_flow_symmetrical(c1, "sync-group", "sync-flow1")
    remove_sync_policy_group(c1, "sync-group")
    get_sync_policy(c1)
    zonegroup.period.update(z1, commit=True)
    return
@attr('sync_policy')
def test_sync_flow_symmetrical_zonegroup_all():
    """
    test_sync_flow_symmetrical_zonegroup_all:
        allows sync from all the zones to all other zones (default case)
    """
    zonegroup = realm.master_zonegroup()
    zonegroup_meta_checkpoint(zonegroup)
    zonegroup_conns = ZonegroupConns(zonegroup)
    (zoneA, zoneB) = zonegroup.zones[0:2]
    (zcA, zcB) = zonegroup_conns.zones[0:2]
    c1 = zoneA.cluster
    c1.admin(['sync', 'policy', 'get'])
    # symmetrical flow + pipe covering both zones, then commit the period
    zones = zoneA.name + ',' + zoneB.name
    create_sync_policy_group(c1, "sync-group")
    create_sync_group_flow_symmetrical(c1, "sync-group", "sync-flow1", zones)
    create_sync_group_pipe(c1, "sync-group", "sync-pipe", zones, zones)
    set_sync_policy_group_status(c1, "sync-group", "enabled")
    zonegroup.period.update(zoneA, commit=True)
    get_sync_policy(c1)
    objnames = [ 'obj1', 'obj2' ]
    content = 'asdasd'
    buckets = []
    # create bucket & object in all zones
    bucketA = create_zone_bucket(zcA)
    buckets.append(bucketA)
    create_object(zcA, bucketA, objnames[0], content)
    bucketB = create_zone_bucket(zcB)
    buckets.append(bucketB)
    create_object(zcB, bucketB, objnames[1], content)
    zonegroup_meta_checkpoint(zonegroup)
    # 'zonegroup_data_checkpoint' currently fails for the zones not
    # allowed to sync. So as a workaround, data checkpoint is done
    # for only the ones configured.
    zone_data_checkpoint(zoneB, zoneA)
    # verify if objects are synced accross the zone
    bucket = get_bucket(zcB, bucketA.name)
    check_object_exists(bucket, objnames[0], content)
    bucket = get_bucket(zcA, bucketB.name)
    check_object_exists(bucket, objnames[1], content)
    # cleanup so later tests start from the default (no policy) state
    remove_sync_policy_group(c1, "sync-group")
    return
@attr('sync_policy')
def test_sync_flow_symmetrical_zonegroup_select():
    """
    test_sync_flow_symmetrical_zonegroup_select:
        allow sync between zoneA & zoneB
        verify zoneC doesnt sync the data
    """
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    if len(zonegroup.zones) < 3:
        raise SkipTest("test_sync_flow_symmetrical_zonegroup_select skipped. Requires 3 or more zones in master zonegroup.")
    zonegroup_meta_checkpoint(zonegroup)
    (zoneA, zoneB, zoneC) = zonegroup.zones[0:3]
    (zcA, zcB, zcC) = zonegroup_conns.zones[0:3]
    c1 = zoneA.cluster
    # configure sync policy: symmetrical flow limited to zoneA and zoneB
    zones = zoneA.name + ',' + zoneB.name
    c1.admin(['sync', 'policy', 'get'])
    create_sync_policy_group(c1, "sync-group")
    create_sync_group_flow_symmetrical(c1, "sync-group", "sync-flow", zones)
    create_sync_group_pipe(c1, "sync-group", "sync-pipe", zones, zones)
    set_sync_policy_group_status(c1, "sync-group", "enabled")
    zonegroup.period.update(zoneA, commit=True)
    get_sync_policy(c1)
    buckets = []
    content = 'asdasd'
    # create bucketA & objects in zoneA
    objnamesA = [ 'obj1', 'obj2', 'obj3' ]
    bucketA = create_zone_bucket(zcA)
    buckets.append(bucketA)
    create_objects(zcA, bucketA, objnamesA, content)
    # create bucketB & objects in zoneB
    objnamesB = [ 'obj4', 'obj5', 'obj6' ]
    bucketB = create_zone_bucket(zcB)
    buckets.append(bucketB)
    create_objects(zcB, bucketB, objnamesB, content)
    zonegroup_meta_checkpoint(zonegroup)
    # data checkpoints only for the allowed pair (zoneC is excluded)
    zone_data_checkpoint(zoneB, zoneA)
    zone_data_checkpoint(zoneA, zoneB)
    # verify if objnamesA synced to only zoneB but not zoneC
    bucket = get_bucket(zcB, bucketA.name)
    check_objects_exist(bucket, objnamesA, content)
    bucket = get_bucket(zcC, bucketA.name)
    check_objects_not_exist(bucket, objnamesA)
    # verify if objnamesB synced to only zoneA but not zoneC
    bucket = get_bucket(zcA, bucketB.name)
    check_objects_exist(bucket, objnamesB, content)
    bucket = get_bucket(zcC, bucketB.name)
    check_objects_not_exist(bucket, objnamesB)
    # cleanup so later tests start from the default (no policy) state
    remove_sync_policy_group(c1, "sync-group")
    return
@attr('sync_policy')
def test_sync_flow_directional_zonegroup_select():
    """
    test_sync_flow_directional_zonegroup_select:
        allow sync from only zoneA to zoneB
        verify that data doesn't get synced to zoneC and
        zoneA shouldn't sync data from zoneB either
    """
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    if len(zonegroup.zones) < 3:
        # fix: the skip message previously named the *symmetrical* test
        raise SkipTest("test_sync_flow_directional_zonegroup_select skipped. Requires 3 or more zones in master zonegroup.")
    zonegroup_meta_checkpoint(zonegroup)
    (zoneA, zoneB, zoneC) = zonegroup.zones[0:3]
    (zcA, zcB, zcC) = zonegroup_conns.zones[0:3]
    c1 = zoneA.cluster
    # configure sync policy: directional flow zoneA -> zoneB only
    zones = zoneA.name + ',' + zoneB.name
    c1.admin(['sync', 'policy', 'get'])
    create_sync_policy_group(c1, "sync-group")
    create_sync_group_flow_directional(c1, "sync-group", "sync-flow", zoneA.name, zoneB.name)
    create_sync_group_pipe(c1, "sync-group", "sync-pipe", zoneA.name, zoneB.name)
    set_sync_policy_group_status(c1, "sync-group", "enabled")
    zonegroup.period.update(zoneA, commit=True)
    get_sync_policy(c1)
    buckets = []
    content = 'asdasd'
    # create bucketA & objects in zoneA
    objnamesA = [ 'obj1', 'obj2', 'obj3' ]
    bucketA = create_zone_bucket(zcA)
    buckets.append(bucketA)
    create_objects(zcA, bucketA, objnamesA, content)
    # create bucketB & objects in zoneB
    objnamesB = [ 'obj4', 'obj5', 'obj6' ]
    bucketB = create_zone_bucket(zcB)
    buckets.append(bucketB)
    create_objects(zcB, bucketB, objnamesB, content)
    zonegroup_meta_checkpoint(zonegroup)
    zone_data_checkpoint(zoneB, zoneA)
    # verify if objnamesA synced to only zoneB but not zoneC
    bucket = get_bucket(zcB, bucketA.name)
    check_objects_exist(bucket, objnamesA, content)
    bucket = get_bucket(zcC, bucketA.name)
    check_objects_not_exist(bucket, objnamesA)
    # verify if objnamesB are not synced to either zoneA or zoneC
    bucket = get_bucket(zcA, bucketB.name)
    check_objects_not_exist(bucket, objnamesB)
    bucket = get_bucket(zcC, bucketB.name)
    check_objects_not_exist(bucket, objnamesB)
    """
    verify the same at bucketA level
    configure another policy at bucketA level with src and dest
    zones specified to zoneA and zoneB resp.
    verify zoneA bucketA syncs to zoneB BucketA but not viceversa.
    """
    # reconfigure zonegroup pipe & flow
    remove_sync_group_pipe(c1, "sync-group", "sync-pipe")
    remove_sync_group_flow_directional(c1, "sync-group", "sync-flow", zoneA.name, zoneB.name)
    create_sync_group_flow_symmetrical(c1, "sync-group", "sync-flow1", zones)
    create_sync_group_pipe(c1, "sync-group", "sync-pipe", zones, zones)
    # change state to allowed so bucket-level policies take effect
    set_sync_policy_group_status(c1, "sync-group", "allowed")
    zonegroup.period.update(zoneA, commit=True)
    get_sync_policy(c1)
    # configure sync policy for only bucketA and enable it
    create_sync_policy_group(c1, "sync-bucket", "allowed", bucketA.name)
    create_sync_group_flow_symmetrical(c1, "sync-bucket", "sync-flowA", zones, bucketA.name)
    args = ['--source-bucket=*', '--dest-bucket=*']
    create_sync_group_pipe(c1, "sync-bucket", "sync-pipe", zoneA.name, zoneB.name, bucketA.name, args)
    set_sync_policy_group_status(c1, "sync-bucket", "enabled", bucketA.name)
    get_sync_policy(c1, bucketA.name)
    zonegroup_meta_checkpoint(zonegroup)
    # create objects in bucketA in zoneA and zoneB
    objnamesC = [ 'obj7', 'obj8', 'obj9' ]
    objnamesD = [ 'obj10', 'obj11', 'obj12' ]
    create_objects(zcA, bucketA, objnamesC, content)
    create_objects(zcB, bucketA, objnamesD, content)
    zonegroup_meta_checkpoint(zonegroup)
    zone_data_checkpoint(zoneB, zoneA)
    # verify that objnamesC are synced to bucketA in zoneB
    bucket = get_bucket(zcB, bucketA.name)
    check_objects_exist(bucket, objnamesC, content)
    # verify that objnamesD are not synced to bucketA in zoneA
    bucket = get_bucket(zcA, bucketA.name)
    check_objects_not_exist(bucket, objnamesD)
    # cleanup policies
    remove_sync_policy_group(c1, "sync-bucket", bucketA.name)
    remove_sync_policy_group(c1, "sync-group")
    return
@attr('sync_policy')
def test_sync_single_bucket():
    """
    test_sync_single_bucket:
        Allow data sync for only bucketA but not for other buckets via
        below 2 methods
        (a) zonegroup: symmetrical flow but configure pipe for only bucketA.
        (b) bucket level: configure policy for bucketA
    """
    zonegroup = realm.master_zonegroup()
    zonegroup_meta_checkpoint(zonegroup)
    zonegroup_conns = ZonegroupConns(zonegroup)
    (zoneA, zoneB) = zonegroup.zones[0:2]
    (zcA, zcB) = zonegroup_conns.zones[0:2]
    c1 = zoneA.cluster
    c1.admin(['sync', 'policy', 'get'])
    zones = zoneA.name + ',' + zoneB.name
    get_sync_policy(c1)
    objnames = [ 'obj1', 'obj2', 'obj3' ]
    content = 'asdasd'
    buckets = []
    # create bucketA & bucketB in zoneA
    bucketA = create_zone_bucket(zcA)
    buckets.append(bucketA)
    bucketB = create_zone_bucket(zcA)
    buckets.append(bucketB)
    zonegroup_meta_checkpoint(zonegroup)
    """
    Method (a): configure pipe for only bucketA
    """
    # configure sync policy & pipe for only bucketA
    create_sync_policy_group(c1, "sync-group")
    create_sync_group_flow_symmetrical(c1, "sync-group", "sync-flow1", zones)
    args = ['--source-bucket=' + bucketA.name, '--dest-bucket=' + bucketA.name]
    create_sync_group_pipe(c1, "sync-group", "sync-pipe", zones, zones, None, args)
    set_sync_policy_group_status(c1, "sync-group", "enabled")
    get_sync_policy(c1)
    zonegroup.period.update(zoneA, commit=True)
    sync_info(c1)
    # create objects in bucketA & bucketB
    create_objects(zcA, bucketA, objnames, content)
    # fix: was create_object() with a list argument; use the plural helper
    create_objects(zcA, bucketB, objnames, content)
    zonegroup_meta_checkpoint(zonegroup)
    zone_data_checkpoint(zoneB, zoneA)
    # verify if bucketA objects are synced
    bucket = get_bucket(zcB, bucketA.name)
    check_objects_exist(bucket, objnames, content)
    # bucketB objects should not be synced
    bucket = get_bucket(zcB, bucketB.name)
    check_objects_not_exist(bucket, objnames)
    """
    Method (b): configure policy at only bucketA level
    """
    # reconfigure group pipe to cover all buckets again
    remove_sync_group_pipe(c1, "sync-group", "sync-pipe")
    create_sync_group_pipe(c1, "sync-group", "sync-pipe", zones, zones)
    # change state to allowed so the bucket-level policy takes effect
    set_sync_policy_group_status(c1, "sync-group", "allowed")
    zonegroup.period.update(zoneA, commit=True)
    get_sync_policy(c1)
    # configure sync policy for only bucketA and enable it
    create_sync_policy_group(c1, "sync-bucket", "allowed", bucketA.name)
    create_sync_group_flow_symmetrical(c1, "sync-bucket", "sync-flowA", zones, bucketA.name)
    create_sync_group_pipe(c1, "sync-bucket", "sync-pipe", zones, zones, bucketA.name)
    set_sync_policy_group_status(c1, "sync-bucket", "enabled", bucketA.name)
    get_sync_policy(c1, bucketA.name)
    # create object in bucketA
    create_object(zcA, bucketA, objnames[2], content)
    # create object in bucketB too (should stay local to zoneA)
    create_object(zcA, bucketB, objnames[2], content)
    zonegroup_meta_checkpoint(zonegroup)
    zone_data_checkpoint(zoneB, zoneA)
    # verify if bucketA objects are synced
    bucket = get_bucket(zcB, bucketA.name)
    check_object_exists(bucket, objnames[2], content)
    # bucketB objects should not be synced
    bucket = get_bucket(zcB, bucketB.name)
    check_object_not_exists(bucket, objnames[2])
    # cleanup policies
    remove_sync_policy_group(c1, "sync-bucket", bucketA.name)
    remove_sync_policy_group(c1, "sync-group")
    return
@attr('sync_policy')
def test_sync_different_buckets():
    """
    test_sync_different_buckets:
        sync zoneA bucketA to zoneB bucketB via below methods
        (a) zonegroup: directional flow but configure pipe for zoneA bucketA to zoneB bucketB
        (b) bucket: configure another policy at bucketA level with pipe set to
            another bucket(bucketB) in target zone.
        sync zoneA bucketA from zoneB bucketB
        (c) configure another policy at bucketA level with pipe set from
            another bucket(bucketB) in source zone.
    """
    zonegroup = realm.master_zonegroup()
    zonegroup_meta_checkpoint(zonegroup)
    zonegroup_conns = ZonegroupConns(zonegroup)
    (zoneA, zoneB) = zonegroup.zones[0:2]
    (zcA, zcB) = zonegroup_conns.zones[0:2]
    zones = zoneA.name + ',' + zoneB.name
    c1 = zoneA.cluster
    c1.admin(['sync', 'policy', 'get'])
    objnames = [ 'obj1', 'obj2' ]
    # fix: removed unused local objnamesB (= ['obj3', 'obj4']); nothing read it
    content = 'asdasd'
    buckets = []
    # create bucketA & bucketB in zoneA
    bucketA = create_zone_bucket(zcA)
    buckets.append(bucketA)
    bucketB = create_zone_bucket(zcA)
    buckets.append(bucketB)
    zonegroup_meta_checkpoint(zonegroup)
    """
    Method (a): zonegroup - configure pipe for only bucketA
    """
    # configure pipe from zoneA bucketA to zoneB bucketB
    create_sync_policy_group(c1, "sync-group")
    create_sync_group_flow_symmetrical(c1, "sync-group", "sync-flow1", zones)
    args = ['--source-bucket=' + bucketA.name, '--dest-bucket=' + bucketB.name]
    create_sync_group_pipe(c1, "sync-group", "sync-pipe", zoneA.name, zoneB.name, None, args)
    set_sync_policy_group_status(c1, "sync-group", "enabled")
    zonegroup.period.update(zoneA, commit=True)
    get_sync_policy(c1)
    # create objects in bucketA
    create_objects(zcA, bucketA, objnames, content)
    zonegroup_meta_checkpoint(zonegroup)
    zone_data_checkpoint(zoneB, zoneA)
    # verify that objects are synced to bucketB in zoneB
    # but not to bucketA
    bucket = get_bucket(zcB, bucketA.name)
    check_objects_not_exist(bucket, objnames)
    bucket = get_bucket(zcB, bucketB.name)
    check_objects_exist(bucket, objnames, content)
    """
    Method (b): configure policy at only bucketA level with pipe
    set to bucketB in target zone
    """
    remove_sync_group_pipe(c1, "sync-group", "sync-pipe")
    create_sync_group_pipe(c1, "sync-group", "sync-pipe", zones, zones)
    # change state to allowed so bucket-level policies take effect
    set_sync_policy_group_status(c1, "sync-group", "allowed")
    zonegroup.period.update(zoneA, commit=True)
    get_sync_policy(c1)
    # configure sync policy for only bucketA and enable it
    create_sync_policy_group(c1, "sync-bucket", "allowed", bucketA.name)
    create_sync_group_flow_symmetrical(c1, "sync-bucket", "sync-flowA", zones, bucketA.name)
    args = ['--source-bucket=*', '--dest-bucket=' + bucketB.name]
    create_sync_group_pipe(c1, "sync-bucket", "sync-pipeA", zones, zones, bucketA.name, args)
    set_sync_policy_group_status(c1, "sync-bucket", "enabled", bucketA.name)
    get_sync_policy(c1, bucketA.name)
    objnamesC = [ 'obj5', 'obj6' ]
    zonegroup_meta_checkpoint(zonegroup)
    # create objects in bucketA
    create_objects(zcA, bucketA, objnamesC, content)
    zonegroup_meta_checkpoint(zonegroup)
    zone_data_checkpoint(zoneB, zoneA)
    """
    # verify that objects are synced to bucketB in zoneB
    # but not to bucketA
    """
    bucket = get_bucket(zcB, bucketA.name)
    check_objects_not_exist(bucket, objnamesC)
    bucket = get_bucket(zcB, bucketB.name)
    check_objects_exist(bucket, objnamesC, content)
    remove_sync_policy_group(c1, "sync-bucket", bucketA.name)
    zonegroup_meta_checkpoint(zonegroup)
    get_sync_policy(c1, bucketA.name)
    """
    Method (c): configure policy at only bucketA level with pipe
    set from bucketB in source zone
    verify zoneA bucketA syncs from zoneB BucketB but not bucketA
    """
    # configure sync policy for only bucketA and enable it
    create_sync_policy_group(c1, "sync-bucket", "allowed", bucketA.name)
    create_sync_group_flow_symmetrical(c1, "sync-bucket", "sync-flowA", zones, bucketA.name)
    args = ['--source-bucket=' + bucketB.name, '--dest-bucket=' + '*']
    create_sync_group_pipe(c1, "sync-bucket", "sync-pipe", zones, zones, bucketA.name, args)
    set_sync_policy_group_status(c1, "sync-bucket", "enabled", bucketA.name)
    get_sync_policy(c1, bucketA.name)
    # create objects in bucketA & B in ZoneB
    objnamesD = [ 'obj7', 'obj8' ]
    objnamesE = [ 'obj9', 'obj10' ]
    create_objects(zcB, bucketA, objnamesD, content)
    create_objects(zcB, bucketB, objnamesE, content)
    zonegroup_meta_checkpoint(zonegroup)
    zone_data_checkpoint(zoneA, zoneB)
    """
    # verify that objects from only bucketB are synced to
    # bucketA in zoneA
    """
    bucket = get_bucket(zcA, bucketA.name)
    check_objects_not_exist(bucket, objnamesD)
    check_objects_exist(bucket, objnamesE, content)
    # cleanup policies
    remove_sync_policy_group(c1, "sync-bucket", bucketA.name)
    remove_sync_policy_group(c1, "sync-group")
    return
@attr('sync_policy')
def test_sync_multiple_buckets_to_single():
    """
    test_sync_multiple_buckets_to_single:
        directional flow
        (a) pipe: sync zoneA bucketA,bucketB to zoneB bucketB
        (b) configure another policy at bucketA level with pipe configured
            to sync from multiple buckets (bucketA & bucketB)
        verify zoneA bucketA & bucketB syncs to zoneB BucketB
    """
    zonegroup = realm.master_zonegroup()
    zonegroup_meta_checkpoint(zonegroup)
    zonegroup_conns = ZonegroupConns(zonegroup)
    (zoneA, zoneB) = zonegroup.zones[0:2]
    (zcA, zcB) = zonegroup_conns.zones[0:2]
    zones = zoneA.name + ',' + zoneB.name
    c1 = zoneA.cluster
    c1.admin(['sync', 'policy', 'get'])
    objnamesA = [ 'obj1', 'obj2' ]
    objnamesB = [ 'obj3', 'obj4' ]
    content = 'asdasd'
    buckets = []
    # create bucketA & bucketB in zoneA
    bucketA = create_zone_bucket(zcA)
    buckets.append(bucketA)
    bucketB = create_zone_bucket(zcA)
    buckets.append(bucketB)
    zonegroup_meta_checkpoint(zonegroup)
    # configure pipe from zoneA bucketA,bucketB to zoneB bucketB
    create_sync_policy_group(c1, "sync-group")
    create_sync_group_flow_directional(c1, "sync-group", "sync-flow", zoneA.name, zoneB.name)
    source_buckets = [ bucketA.name, bucketB.name ]
    for source_bucket in source_buckets:
        args = ['--source-bucket=' + source_bucket, '--dest-bucket=' + bucketB.name]
        create_sync_group_pipe(c1, "sync-group", "sync-pipe-%s" % source_bucket, zoneA.name, zoneB.name, None, args)
    set_sync_policy_group_status(c1, "sync-group", "enabled")
    zonegroup.period.update(zoneA, commit=True)
    get_sync_policy(c1)
    # create objects in bucketA & bucketB
    create_objects(zcA, bucketA, objnamesA, content)
    create_objects(zcA, bucketB, objnamesB, content)
    zonegroup_meta_checkpoint(zonegroup)
    zone_data_checkpoint(zoneB, zoneA)
    # verify that both zoneA bucketA & bucketB objects are synced to
    # bucketB in zoneB but not to bucketA
    bucket = get_bucket(zcB, bucketA.name)
    check_objects_not_exist(bucket, objnamesA)
    check_objects_not_exist(bucket, objnamesB)
    bucket = get_bucket(zcB, bucketB.name)
    check_objects_exist(bucket, objnamesA, content)
    check_objects_exist(bucket, objnamesB, content)
    """
    Method (b): configure at bucket level
    """
    # reconfigure pipe & flow
    for source_bucket in source_buckets:
        remove_sync_group_pipe(c1, "sync-group", "sync-pipe-%s" % source_bucket)
    remove_sync_group_flow_directional(c1, "sync-group", "sync-flow", zoneA.name, zoneB.name)
    create_sync_group_flow_symmetrical(c1, "sync-group", "sync-flow1", zones)
    create_sync_group_pipe(c1, "sync-group", "sync-pipe", zones, zones)
    # change state to allowed so the bucket-level policy takes effect
    set_sync_policy_group_status(c1, "sync-group", "allowed")
    zonegroup.period.update(zoneA, commit=True)
    get_sync_policy(c1)
    objnamesC = [ 'obj5', 'obj6' ]
    objnamesD = [ 'obj7', 'obj8' ]
    # configure sync policy for only bucketA and enable it
    create_sync_policy_group(c1, "sync-bucket", "allowed", bucketA.name)
    create_sync_group_flow_symmetrical(c1, "sync-bucket", "sync-flowA", zones, bucketA.name)
    source_buckets = [ bucketA.name, bucketB.name ]
    for source_bucket in source_buckets:
        args = ['--source-bucket=' + source_bucket, '--dest-bucket=' + '*']
        create_sync_group_pipe(c1, "sync-bucket", "sync-pipe-%s" % source_bucket, zoneA.name, zoneB.name, bucketA.name, args)
    set_sync_policy_group_status(c1, "sync-bucket", "enabled", bucketA.name)
    get_sync_policy(c1)
    zonegroup_meta_checkpoint(zonegroup)
    # create objects in bucketA & bucketB
    create_objects(zcA, bucketA, objnamesC, content)
    create_objects(zcA, bucketB, objnamesD, content)
    zonegroup_meta_checkpoint(zonegroup)
    zone_data_checkpoint(zoneB, zoneA)
    # verify that both zoneA bucketA & bucketB objects are synced to
    # bucketA in zoneB but not to bucketB
    bucket = get_bucket(zcB, bucketB.name)
    check_objects_not_exist(bucket, objnamesC)
    check_objects_not_exist(bucket, objnamesD)
    bucket = get_bucket(zcB, bucketA.name)
    # fix: the first check previously duplicated objnamesD; verify both sets
    check_objects_exist(bucket, objnamesC, content)
    check_objects_exist(bucket, objnamesD, content)
    # cleanup policies
    remove_sync_policy_group(c1, "sync-bucket", bucketA.name)
    remove_sync_policy_group(c1, "sync-group")
    return
@attr('sync_policy')
def test_sync_single_bucket_to_multiple():
    """
    test_sync_single_bucket_to_multiple:
        directional flow
        (a) pipe: sync zoneA bucketA to zoneB bucketA & bucketB
        (b) configure another policy at bucketA level with pipe configured
            to sync to multiple buckets (bucketA & bucketB)
        verify zoneA bucketA syncs to zoneB bucketA & bucketB
    """
    zonegroup = realm.master_zonegroup()
    zonegroup_meta_checkpoint(zonegroup)
    zonegroup_conns = ZonegroupConns(zonegroup)
    (zoneA, zoneB) = zonegroup.zones[0:2]
    (zcA, zcB) = zonegroup_conns.zones[0:2]
    zones = zoneA.name + ',' + zoneB.name
    c1 = zoneA.cluster
    c1.admin(['sync', 'policy', 'get'])
    objnamesA = [ 'obj1', 'obj2' ]
    content = 'asdasd'
    buckets = []
    # create bucketA & bucketB in zoneA
    bucketA = create_zone_bucket(zcA)
    buckets.append(bucketA)
    bucketB = create_zone_bucket(zcA)
    buckets.append(bucketB)
    zonegroup_meta_checkpoint(zonegroup)
    # configure pipe from zoneA bucketA to zoneB bucketA, bucketB
    create_sync_policy_group(c1, "sync-group")
    create_sync_group_flow_symmetrical(c1, "sync-group", "sync-flow1", zones)
    dest_buckets = [ bucketA.name, bucketB.name ]
    for dest_bucket in dest_buckets:
        args = ['--source-bucket=' + bucketA.name, '--dest-bucket=' + dest_bucket]
        create_sync_group_pipe(c1, "sync-group", "sync-pipe-%s" % dest_bucket, zoneA.name, zoneB.name, None, args)
    # NOTE(review): this extra "sync-pipe" reuses `args` from the last loop
    # iteration (dest = bucketB) and is removed again in Method (b) below;
    # presumably intentional, but confirm it is not a leftover.
    create_sync_group_pipe(c1, "sync-group", "sync-pipe", zoneA.name, zoneB.name, None, args)
    set_sync_policy_group_status(c1, "sync-group", "enabled")
    zonegroup.period.update(zoneA, commit=True)
    get_sync_policy(c1)
    # create objects in bucketA
    create_objects(zcA, bucketA, objnamesA, content)
    zonegroup_meta_checkpoint(zonegroup)
    zone_data_checkpoint(zoneB, zoneA)
    # verify that objects from zoneA bucketA are synced to both
    # bucketA & bucketB in zoneB
    bucket = get_bucket(zcB, bucketA.name)
    check_objects_exist(bucket, objnamesA, content)
    bucket = get_bucket(zcB, bucketB.name)
    check_objects_exist(bucket, objnamesA, content)
    """
    Method (b): configure at bucket level
    """
    remove_sync_group_pipe(c1, "sync-group", "sync-pipe")
    create_sync_group_pipe(c1, "sync-group", "sync-pipe", '*', '*')
    # change state to allowed so the bucket-level policy takes effect
    set_sync_policy_group_status(c1, "sync-group", "allowed")
    zonegroup.period.update(zoneA, commit=True)
    get_sync_policy(c1)
    objnamesB = [ 'obj3', 'obj4' ]
    # configure sync policy for only bucketA and enable it
    create_sync_policy_group(c1, "sync-bucket", "allowed", bucketA.name)
    create_sync_group_flow_symmetrical(c1, "sync-bucket", "sync-flowA", zones, bucketA.name)
    dest_buckets = [ bucketA.name, bucketB.name ]
    for dest_bucket in dest_buckets:
        args = ['--source-bucket=' + '*', '--dest-bucket=' + dest_bucket]
        create_sync_group_pipe(c1, "sync-bucket", "sync-pipe-%s" % dest_bucket, zoneA.name, zoneB.name, bucketA.name, args)
    set_sync_policy_group_status(c1, "sync-bucket", "enabled", bucketA.name)
    get_sync_policy(c1)
    zonegroup_meta_checkpoint(zonegroup)
    # create objects in bucketA
    create_objects(zcA, bucketA, objnamesB, content)
    zonegroup_meta_checkpoint(zonegroup)
    zone_data_checkpoint(zoneB, zoneA)
    # verify that objects from zoneA bucketA are synced to both
    # bucketA & bucketB in zoneB
    bucket = get_bucket(zcB, bucketA.name)
    check_objects_exist(bucket, objnamesB, content)
    bucket = get_bucket(zcB, bucketB.name)
    check_objects_exist(bucket, objnamesB, content)
    # cleanup policies
    remove_sync_policy_group(c1, "sync-bucket", bucketA.name)
    remove_sync_policy_group(c1, "sync-group")
    return
import logging
from nose import SkipTest
from nose.tools import assert_not_equal, assert_equal
from boto.s3.deletemarker import DeleteMarker
from .tests import get_realm, \
ZonegroupConns, \
zonegroup_meta_checkpoint, \
zone_meta_checkpoint, \
zone_bucket_checkpoint, \
zone_data_checkpoint, \
zonegroup_bucket_checkpoint, \
check_bucket_eq, \
gen_bucket_name, \
get_user, \
get_tenant
from .zone_az import print_connection_info
# configure logging for the tests module
log = logging.getLogger(__name__)
##########################################
# utility functions for archive zone tests
##########################################
def check_az_configured():
    """Skip the current test unless exactly one archive zone is configured."""
    zonegroup = get_realm().master_zonegroup()
    archive_zones = zonegroup.zones_by_type.get("archive")
    if archive_zones is None or len(archive_zones) != 1:
        raise SkipTest("Requires one archive zone")
def is_az_zone(zone_conn):
    """Return True when the given zone connection points at an archive zone."""
    return bool(zone_conn) and zone_conn.zone.tier_type() == "archive"
def init_env():
    """initialize the environment

    Splits the master zonegroup's connections into writable non-archive
    zones and archive zones, returning them as a (zones, az_zones) pair.
    Skips the test (via check_az_configured) unless exactly one archive
    zone exists, and asserts both lists are non-empty.
    """
    check_az_configured()
    realm = get_realm()
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    zonegroup_meta_checkpoint(zonegroup)
    az_zones = []
    zones = []
    for conn in zonegroup_conns.zones:
        if is_az_zone(conn):
            # make sure the archive zone's metadata is caught up before use
            zone_meta_checkpoint(conn.zone)
            az_zones.append(conn)
        elif not conn.zone.is_read_only():
            zones.append(conn)
    assert_not_equal(len(zones), 0)
    assert_not_equal(len(az_zones), 0)
    return zones, az_zones
def zone_full_checkpoint(target_zone, source_zone):
    # wait for target_zone to catch up on both metadata and data sync
    # (metadata first, since data sync depends on up-to-date metadata)
    zone_meta_checkpoint(target_zone)
    zone_data_checkpoint(target_zone, source_zone)
def check_bucket_exists_on_zone(zone, bucket_name):
    """Return True if bucket_name can be fetched through zone's connection.

    Best-effort probe: any failure from get_bucket is treated as "absent".
    """
    try:
        zone.conn.get_bucket(bucket_name)
    except Exception:
        # narrowed from bare `except:` so KeyboardInterrupt/SystemExit propagate
        return False
    return True
def check_key_exists(key):
    """Return True if the key's contents can be fetched, False otherwise.

    Best-effort probe: any failure from get_contents_as_string counts as
    "does not exist".
    """
    try:
        key.get_contents_as_string()
    except Exception:
        # narrowed from bare `except:` so KeyboardInterrupt/SystemExit propagate
        return False
    return True
def get_versioning_status(bucket):
    """Return the bucket's 'Versioning' state, or None when it was never set."""
    status = bucket.get_versioning_status()
    return status['Versioning'] if 'Versioning' in status else None
def get_versioned_objs(bucket):
    """List every non-delete-marker version as a {key: info} dict."""
    versions = []
    for entry in bucket.list_versions():
        if isinstance(entry, DeleteMarker):
            continue
        info = {
            'version_id': entry.version_id,
            'size': entry.size,
            'etag': entry.etag,
            'is_latest': entry.is_latest,
        }
        versions.append({entry.key: info})
    return versions
def get_versioned_entries(bucket):
    """Split a versioned listing into (delete_markers, object_versions)."""
    delete_markers = []
    object_versions = []
    for entry in bucket.list_versions():
        if isinstance(entry, DeleteMarker):
            delete_markers.append({entry.name: {
                'version_id': entry.version_id,
                'is_latest': entry.is_latest,
            }})
        else:
            object_versions.append({entry.key: {
                'version_id': entry.version_id,
                'size': entry.size,
                'etag': entry.etag,
                'is_latest': entry.is_latest,
            }})
    return (delete_markers, object_versions)
def get_number_buckets_by_zone(zone):
    """Count the buckets visible through the zone's connection."""
    all_buckets = zone.conn.get_all_buckets()
    return len(all_buckets)
def get_bucket_names_by_zone(zone):
    """Return the names of all buckets visible through the zone's connection."""
    names = []
    for bucket in zone.conn.get_all_buckets():
        names.append(bucket.name)
    return names
def get_full_bucket_name(partial_bucket_name, bucket_names_az):
    """Return the first name in bucket_names_az that starts with the given
    prefix, or None when no name matches."""
    return next((name for name in bucket_names_az
                 if name.startswith(partial_bucket_name)), None)
####################
# archive zone tests
####################
def test_az_info():
    """ log information for manual testing """
    # fix: `return SkipTest(...)` made nose report a pass instead of a skip;
    # raising marks the test as skipped.  The code below is intentionally
    # unreachable and is enabled by hand when connection info is needed.
    raise SkipTest("only used in manual testing")
    zones, az_zones = init_env()
    realm = get_realm()
    zonegroup = realm.master_zonegroup()
    bucket_name = gen_bucket_name()
    # create bucket on the first of the rados zones
    bucket = zones[0].create_bucket(bucket_name)
    # create objects in the bucket
    number_of_objects = 3
    for i in range(number_of_objects):
        key = bucket.new_key(str(i))
        key.set_contents_from_string('bar')
    print('Zonegroup: ' + zonegroup.name)
    print('user: ' + get_user())
    print('tenant: ' + get_tenant())
    print('Master Zone')
    print_connection_info(zones[0].conn)
    print('Archive Zone')
    print_connection_info(az_zones[0].conn)
    print('Bucket: ' + bucket_name)
def test_az_create_empty_bucket():
    """ test empty bucket replication

    A bucket created on a regular zone must appear on the archive zone
    after metadata/data sync, even with no objects in it.
    """
    zones, az_zones = init_env()
    bucket_name = gen_bucket_name()
    # create bucket on the non archive zone
    zones[0].create_bucket(bucket_name)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # bucket exist on the archive zone
    p = check_bucket_exists_on_zone(az_zones[0], bucket_name)
    assert_equal(p, True)
def test_az_check_empty_bucket_versioning():
    """ test bucket vesioning with empty bucket

    While a bucket holds no objects, versioning must stay unset on both
    the source zone and the archive zone (lazy activation happens only
    on first object sync).
    """
    zones, az_zones = init_env()
    bucket_name = gen_bucket_name()
    # create bucket on the non archive zone
    bucket = zones[0].create_bucket(bucket_name)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # get bucket on archive zone
    bucket_az = az_zones[0].conn.get_bucket(bucket_name)
    # check for non bucket versioning
    p1 = get_versioning_status(bucket) is None
    assert_equal(p1, True)
    p2 = get_versioning_status(bucket_az) is None
    assert_equal(p2, True)
def test_az_object_replication():
    """ test object replication

    An object written on a regular zone must be readable with identical
    content from the archive zone after sync.
    """
    zones, az_zones = init_env()
    bucket_name = gen_bucket_name()
    # create bucket on the non archive zone
    bucket = zones[0].create_bucket(bucket_name)
    key = bucket.new_key("foo")
    key.set_contents_from_string("bar")
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check object on archive zone
    bucket_az = az_zones[0].conn.get_bucket(bucket_name)
    key_az = bucket_az.get_key("foo")
    p1 = key_az.get_contents_as_string(encoding='ascii') == "bar"
    assert_equal(p1, True)
def test_az_object_replication_versioning():
    """ test object replication versioning

    The source copy stays unversioned (version_id 'null') while the
    archive-zone copy gets a real version id; both copies must share
    the same etag.
    """
    zones, az_zones = init_env()
    bucket_name = gen_bucket_name()
    # create object on the non archive zone
    bucket = zones[0].create_bucket(bucket_name)
    key = bucket.new_key("foo")
    key.set_contents_from_string("bar")
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check object content on archive zone
    bucket_az = az_zones[0].conn.get_bucket(bucket_name)
    key_az = bucket_az.get_key("foo")
    p1 = key_az.get_contents_as_string(encoding='ascii') == "bar"
    assert_equal(p1, True)
    # grab object versioning and etag (single object: loop ends with its values)
    for b_version in bucket.list_versions():
        b_version_id = b_version.version_id
        b_version_etag = b_version.etag
    for b_az_version in bucket_az.list_versions():
        b_az_version_id = b_az_version.version_id
        b_az_version_etag = b_az_version.etag
    # check: source unversioned, archive copy versioned, etags match
    p2 = b_version_id == 'null'
    assert_equal(p2, True)
    p3 = b_az_version_id != 'null'
    assert_equal(p3, True)
    p4 = b_version_etag == b_az_version_etag
    assert_equal(p4, True)
def test_az_lazy_activation_of_versioned_bucket():
    """ test lazy activation of versioned bucket

    Versioning on the archive-zone bucket is only enabled once the first
    object syncs over, not at bucket creation time.
    """
    zones, az_zones = init_env()
    bucket_name = gen_bucket_name()
    # create object on the non archive zone
    bucket = zones[0].create_bucket(bucket_name)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # get bucket on archive zone
    bucket_az = az_zones[0].conn.get_bucket(bucket_name)
    # check for non bucket versioning (empty bucket: both unset)
    p1 = get_versioning_status(bucket) is None
    assert_equal(p1, True)
    p2 = get_versioning_status(bucket_az) is None
    assert_equal(p2, True)
    # create object on non archive zone
    key = bucket.new_key("foo")
    key.set_contents_from_string("bar")
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check lazy versioned buckets: only the archive zone turns versioning on
    p3 = get_versioning_status(bucket) is None
    assert_equal(p3, True)
    p4 = get_versioning_status(bucket_az) == 'Enabled'
    assert_equal(p4, True)
def test_az_archive_zone_double_object_replication_versioning():
    """ test archive zone double object replication versioning

    Overwriting an object on the source zone must leave a single 'null'
    version there, while the archive zone accumulates both versions with
    real version ids; the latest archive version matches the source etag.
    """
    zones, az_zones = init_env()
    bucket_name = gen_bucket_name()
    # create object on the non archive zone
    bucket = zones[0].create_bucket(bucket_name)
    key = bucket.new_key("foo")
    key.set_contents_from_string("bar")
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # get bucket on archive zone
    bucket_az = az_zones[0].conn.get_bucket(bucket_name)
    # source bucket unversioned; archive bucket lazily versioned after sync
    p1 = get_versioning_status(bucket) is None
    assert_equal(p1, True)
    p2 = get_versioning_status(bucket_az) == 'Enabled'
    assert_equal(p2, True)
    # overwrite object on non archive zone
    key = bucket.new_key("foo")
    key.set_contents_from_string("ouch")
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check lazy versioned buckets
    p3 = get_versioning_status(bucket) is None
    assert_equal(p3, True)
    p4 = get_versioning_status(bucket_az) == 'Enabled'
    assert_equal(p4, True)
    # get versioned objects
    objs = get_versioned_objs(bucket)
    objs_az = get_versioned_objs(bucket_az)
    # check version_id, size, and is_latest on non archive zone
    # ("ouch" is 4 bytes, "bar" is 3)
    p5 = objs[0]['foo']['version_id'] == 'null'
    assert_equal(p5, True)
    p6 = objs[0]['foo']['size'] == 4
    assert_equal(p6, True)
    p7 = objs[0]['foo']['is_latest'] == True
    assert_equal(p7, True)
    # check version_id, size, is_latest on archive zone
    latest_obj_az_etag = None
    for obj_az in objs_az:
        current_obj_az = obj_az['foo']
        if current_obj_az['is_latest'] == True:
            p8 = current_obj_az['size'] == 4
            assert_equal(p8, True)
            latest_obj_az_etag = current_obj_az['etag']
        else:
            p9 = current_obj_az['size'] == 3
            assert_equal(p9, True)
        assert_not_equal(current_obj_az['version_id'], 'null')
    # check last versions' etags
    p10 = objs[0]['foo']['etag'] == latest_obj_az_etag
    assert_equal(p10, True)
def test_az_deleted_object_replication():
    """ test zone deleted object replication

    Deleting an object on the source zone must NOT delete it on the
    archive zone: the archive keeps both historical versions and still
    serves the last content.
    """
    zones, az_zones = init_env()
    bucket_name = gen_bucket_name()
    # create object on the non archive zone
    bucket = zones[0].create_bucket(bucket_name)
    key = bucket.new_key("foo")
    key.set_contents_from_string("bar")
    p1 = key.get_contents_as_string(encoding='ascii') == "bar"
    assert_equal(p1, True)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # update object on non archive zone
    key.set_contents_from_string("soup")
    p2 = key.get_contents_as_string(encoding='ascii') == "soup"
    assert_equal(p2, True)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # delete object on non archive zone
    key.delete()
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check object gone on non archive zone
    p3 = check_key_exists(key) == False
    assert_equal(p3, True)
    # check objects survive on archive zone with the latest content
    bucket_az = az_zones[0].conn.get_bucket(bucket_name)
    key_az = bucket_az.get_key("foo")
    p4 = check_key_exists(key_az) == True
    assert_equal(p4, True)
    p5 = key_az.get_contents_as_string(encoding='ascii') == "soup"
    assert_equal(p5, True)
    # both versions ("bar" and "soup") are retained on the archive zone
    b_ver_az = get_versioned_objs(bucket_az)
    p6 = len(b_ver_az) == 2
    assert_equal(p6, True)
def test_az_bucket_renaming_on_empty_bucket_deletion():
    """ test bucket renaming on empty bucket deletion

    When the source bucket is deleted, the archive zone keeps its copy,
    renamed with a '<name>-deleted-' prefix instead of being removed.
    """
    zones, az_zones = init_env()
    bucket_name = gen_bucket_name()
    # grab number of buckets on non archive zone
    num_buckets = get_number_buckets_by_zone(zones[0])
    # grab number of buckets on archive zone
    num_buckets_az = get_number_buckets_by_zone(az_zones[0])
    # create bucket on non archive zone
    bucket = zones[0].create_bucket(bucket_name)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # delete bucket in non archive zone
    zones[0].delete_bucket(bucket_name)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check no new buckets on non archive zone
    p1 = get_number_buckets_by_zone(zones[0]) == num_buckets
    assert_equal(p1, True)
    # check non deletion on bucket on archive zone
    p2 = get_number_buckets_by_zone(az_zones[0]) == (num_buckets_az + 1)
    assert_equal(p2, True)
    # check bucket renaming: archive copy carries a '-deleted-' suffix prefix
    bucket_names_az = get_bucket_names_by_zone(az_zones[0])
    new_bucket_name = bucket_name + '-deleted-'
    p3 = any(bucket_name.startswith(new_bucket_name) for bucket_name in bucket_names_az)
    assert_equal(p3, True)
def test_az_old_object_version_in_archive_zone():
    """ test old object version in archive zone

    After overwriting, deleting and removing the source bucket, the
    renamed archive bucket must still hold BOTH object versions and
    serve each one by version id.
    """
    zones, az_zones = init_env()
    bucket_name = gen_bucket_name()
    # grab number of buckets on non archive zone
    num_buckets = get_number_buckets_by_zone(zones[0])
    # grab number of buckets on archive zone
    num_buckets_az = get_number_buckets_by_zone(az_zones[0])
    # create bucket on non archive zone
    bucket = zones[0].create_bucket(bucket_name)
    # create object on non archive zone
    key = bucket.new_key("foo")
    key.set_contents_from_string("zero")
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # save the first object's version id on the archive zone for later lookup
    bucket_az = az_zones[0].conn.get_bucket(bucket_name)
    b_ver_az = get_versioned_objs(bucket_az)
    obj_az_version_id = b_ver_az[0]['foo']['version_id']
    # update object on non archive zone
    key.set_contents_from_string("one")
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # delete object on non archive zone
    key.delete()
    # delete bucket on non archive zone
    zones[0].delete_bucket(bucket_name)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check same buckets on non archive zone
    p1 = get_number_buckets_by_zone(zones[0]) == num_buckets
    assert_equal(p1, True)
    # check for new bucket on archive zone
    p2 = get_number_buckets_by_zone(az_zones[0]) == (num_buckets_az + 1)
    assert_equal(p2, True)
    # get new (renamed) bucket name on archive zone
    bucket_names_az = get_bucket_names_by_zone(az_zones[0])
    new_bucket_name_az = get_full_bucket_name(bucket_name + '-deleted-', bucket_names_az)
    p3 = new_bucket_name_az is not None
    assert_equal(p3, True)
    # check number of objects on archive zone (both versions retained)
    new_bucket_az = az_zones[0].conn.get_bucket(new_bucket_name_az)
    new_b_ver_az = get_versioned_objs(new_bucket_az)
    p4 = len(new_b_ver_az) == 2
    assert_equal(p4, True)
    # check versioned objects on archive zone: old by version id, new as latest
    new_key_az = new_bucket_az.get_key("foo", version_id=obj_az_version_id)
    p5 = new_key_az.get_contents_as_string(encoding='ascii') == "zero"
    assert_equal(p5, True)
    new_key_latest_az = new_bucket_az.get_key("foo")
    p6 = new_key_latest_az.get_contents_as_string(encoding='ascii') == "one"
    assert_equal(p6, True)
def test_az_force_bucket_renaming_if_same_bucket_name():
    """ test force bucket renaming if same bucket name """
    # Flow: deleting a bucket on the source zone makes the archive zone rename
    # its copy to '<name>-deleted-<suffix>'.  Creating a source-zone bucket
    # with that generated name must still work, leaving the archive zone with
    # both buckets (the renamed copy plus the new one).
    zones, az_zones = init_env()
    bucket_name = gen_bucket_name()
    # grab number of buckets on non archive zone
    num_buckets = get_number_buckets_by_zone(zones[0])
    # grab number of buckets on archive zone
    num_buckets_az = get_number_buckets_by_zone(az_zones[0])
    # create bucket on non archive zone
    bucket = zones[0].create_bucket(bucket_name)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check same buckets on non archive zone
    p1 = get_number_buckets_by_zone(zones[0]) == (num_buckets + 1)
    assert_equal(p1, True)
    # check for new bucket on archive zone
    p2 = get_number_buckets_by_zone(az_zones[0]) == (num_buckets_az + 1)
    assert_equal(p2, True)
    # delete bucket on non archive zone
    zones[0].delete_bucket(bucket_name)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check number of buckets on non archive zone
    p3 = get_number_buckets_by_zone(zones[0]) == num_buckets
    assert_equal(p3, True)
    # check number of buckets on archive zone (deleted bucket is kept, renamed)
    p4 = get_number_buckets_by_zone(az_zones[0]) == (num_buckets_az + 1)
    assert_equal(p4, True)
    # get new bucket name on archive zone
    bucket_names_az = get_bucket_names_by_zone(az_zones[0])
    new_bucket_name_az = get_full_bucket_name(bucket_name + '-deleted-', bucket_names_az)
    p5 = new_bucket_name_az is not None
    assert_equal(p5, True)
    # create bucket on non archive zone, reusing the archive-generated name
    _ = zones[0].create_bucket(new_bucket_name_az)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check number of buckets on non archive zone
    p6 = get_number_buckets_by_zone(zones[0]) == (num_buckets + 1)
    assert_equal(p6, True)
    # check number of buckets on archive zone: renamed copy + new bucket
    p7 = get_number_buckets_by_zone(az_zones[0]) == (num_buckets_az + 2)
    assert_equal(p7, True)
def test_az_versioning_support_in_zones():
    """ test versioning support on zones

    Fix: the assertion variable p17 was assigned twice in a row, silently
    shadowing the first check's label; later checks are renumbered p18..p20
    so every assertion has a distinct variable.
    """
    zones, az_zones = init_env()
    bucket_name = gen_bucket_name()
    # create bucket on non archive zone
    bucket = zones[0].create_bucket(bucket_name)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # get bucket on archive zone
    bucket_az = az_zones[0].conn.get_bucket(bucket_name)
    # check non versioned buckets
    p1 = get_versioning_status(bucket) is None
    assert_equal(p1, True)
    p2 = get_versioning_status(bucket_az) is None
    assert_equal(p2, True)
    # create object on non archive zone
    key = bucket.new_key("foo")
    key.set_contents_from_string("zero")
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check bucket versioning: archive zone auto-enables it on first object
    p3 = get_versioning_status(bucket) is None
    assert_equal(p3, True)
    p4 = get_versioning_status(bucket_az) == 'Enabled'
    assert_equal(p4, True)
    # enable bucket versioning on non archive zone
    bucket.configure_versioning(True)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check bucket versioning
    p5 = get_versioning_status(bucket) == 'Enabled'
    assert_equal(p5, True)
    p6 = get_versioning_status(bucket_az) == 'Enabled'
    assert_equal(p6, True)
    # delete object on non archive zone
    key.delete()
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check delete-markers and versions on non archive zone
    (b_dm, b_ver) = get_versioned_entries(bucket)
    p7 = len(b_dm) == 1
    assert_equal(p7, True)
    p8 = len(b_ver) == 1
    assert_equal(p8, True)
    # check delete-markers and versions on archive zone
    (b_dm_az, b_ver_az) = get_versioned_entries(bucket_az)
    p9 = len(b_dm_az) == 1
    assert_equal(p9, True)
    p10 = len(b_ver_az) == 1
    assert_equal(p10, True)
    # delete delete-marker on non archive zone
    dm_version_id = b_dm[0]['foo']['version_id']
    bucket.delete_key("foo", version_id=dm_version_id)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check delete-markers and versions on non archive zone
    (b_dm, b_ver) = get_versioned_entries(bucket)
    p11 = len(b_dm) == 0
    assert_equal(p11, True)
    p12 = len(b_ver) == 1
    assert_equal(p12, True)
    # check delete-markers and versions on archive zone
    # (removing the source delete-marker does not remove the archive one)
    (b_dm_az, b_ver_az) = get_versioned_entries(bucket_az)
    p13 = len(b_dm_az) == 1
    assert_equal(p13, True)
    p14 = len(b_ver_az) == 1
    assert_equal(p14, True)
    # delete delete-marker on archive zone
    dm_az_version_id = b_dm_az[0]['foo']['version_id']
    bucket_az.delete_key("foo", version_id=dm_az_version_id)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check delete-markers and versions on non archive zone
    (b_dm, b_ver) = get_versioned_entries(bucket)
    p15 = len(b_dm) == 0
    assert_equal(p15, True)
    p16 = len(b_ver) == 1
    assert_equal(p16, True)
    # check delete-markers and versions on archive zone
    (b_dm_az, b_ver_az) = get_versioned_entries(bucket_az)
    p17 = len(b_dm_az) == 0
    assert_equal(p17, True)
    p18 = len(b_ver_az) == 1
    assert_equal(p18, True)
    # check body in zones
    obj_version_id = b_ver[0]['foo']['version_id']
    key = bucket.get_key("foo", version_id=obj_version_id)
    p19 = key.get_contents_as_string(encoding='ascii') == "zero"
    assert_equal(p19, True)
    obj_az_version_id = b_ver_az[0]['foo']['version_id']
    key_az = bucket_az.get_key("foo", version_id=obj_az_version_id)
    p20 = key_az.get_contents_as_string(encoding='ascii') == "zero"
    assert_equal(p20, True)
| 21,098 | 34.282609 | 89 | py |
null | ceph-main/src/test/rgw/rgw_multi/tests_es.py | import json
import logging
import boto
import boto.s3.connection
import datetime
import dateutil
from itertools import zip_longest # type: ignore
from nose.tools import eq_ as eq
from .multisite import *
from .tests import *
from .zone_es import *
log = logging.getLogger(__name__)
def check_es_configured():
    """Skip the current test unless the master zonegroup contains at least
    one elasticsearch-tier zone."""
    zonegroup = get_realm().master_zonegroup()
    if not zonegroup.zones_by_type.get("elasticsearch"):
        raise SkipTest("Requires at least one ES zone")
def is_es_zone(zone_conn):
    """Return True when the given connection points at an elasticsearch-tier
    zone; a falsy connection is never an ES zone."""
    return bool(zone_conn) and zone_conn.zone.tier_type() == "elasticsearch"
def verify_search(bucket_name, src_keys, result_keys, f):
    """Compare metadata-search results against the subset of src_keys that
    belong to bucket_name (when given) and satisfy predicate f."""
    expected = [k for k in src_keys
                if (not bucket_name or k.bucket.name == bucket_name) and f(k)]
    expected.sort(key=lambda l: (l.bucket.name, l.name, l.version_id))
    log.debug('check keys:' + dump_json(expected))
    log.debug('result keys:' + dump_json(result_keys))
    # zip_longest pads the shorter list with None, so a length mismatch
    # trips one of the asserts below
    for k1, k2 in zip_longest(expected, result_keys):
        assert k1
        assert k2
        check_object_eq(k1, k2)
def do_check_mdsearch(conn, bucket, src_keys, req_str, src_filter):
    """Run metadata-search query req_str (optionally scoped to bucket) and
    verify the result set against src_keys filtered by src_filter."""
    bucket_name = bucket.name if bucket else ''
    results = MDSearch(conn, bucket_name, req_str).search(
        sort_key=lambda k: (k.bucket.name, k.name, k.version_id))
    verify_search(bucket_name, src_keys, results, src_filter)
def init_env(create_obj, num_keys = 5, buckets_per_zone = 1, bucket_init_cb = None):
    """Set up buckets and objects for an elasticsearch sync test.

    Creates buckets_per_zone buckets per zone, fills each with num_keys
    objects via create_obj(key, index) (skipped when create_obj is None),
    waits for sync, and returns (es_targets, rados_sources, buckets, src_keys).
    """
    check_es_configured()
    realm = get_realm()
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns, buckets_per_zone = buckets_per_zone)
    if bucket_init_cb:
        for zone_conn, bucket in zone_bucket:
            bucket_init_cb(zone_conn, bucket)
    src_keys = []
    owner = None
    # random prefix so results from other runs/tests cannot collide
    obj_prefix=''.join(random.choice(string.ascii_lowercase) for _ in range(6))
    # don't wait for meta sync just yet
    for zone, bucket in zone_bucket:
        for count in range(num_keys):
            objname = obj_prefix + str(count)
            k = new_key(zone, bucket.name, objname)
            # k.set_contents_from_string(content + 'x' * count)
            if not create_obj:
                continue
            create_obj(k, count)
            if not owner:
                # grab the owner from the first listed key; get_key() below
                # does not fill it in
                for list_key in bucket.list_versions():
                    owner = list_key.owner
                    break
            k = bucket.get_key(k.name, version_id = k.version_id)
            k.owner = owner # owner is not set when doing get_key()
            src_keys.append(k)
    zonegroup_meta_checkpoint(zonegroup)
    sources = []
    targets = []
    for target_conn in zonegroup_conns.zones:
        if not is_es_zone(target_conn):
            sources.append(target_conn)
            continue
        targets.append(target_conn)
    buckets = []
    # make sure all targets are synced
    for source_conn, bucket in zone_bucket:
        buckets.append(bucket)
        for target_conn in targets:
            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
    return targets, sources, buckets, src_keys
def test_es_object_search():
    """Exercise metadata search on bucket/name/etag/size against every ES
    target zone, with all comparison operators."""
    min_size = 10
    content = 'a' * min_size
    def create_obj(k, i):
        # objects of strictly increasing size so size comparisons are distinct
        k.set_contents_from_string(content + 'x' * i)
    targets, _, buckets, src_keys = init_env(create_obj, num_keys = 5, buckets_per_zone = 2)
    for target_conn in targets:
        # bucket checks
        for bucket in buckets:
            # check name
            do_check_mdsearch(target_conn.conn, None, src_keys , 'bucket == ' + bucket.name, lambda k: k.bucket.name == bucket.name)
            do_check_mdsearch(target_conn.conn, bucket, src_keys , 'bucket == ' + bucket.name, lambda k: k.bucket.name == bucket.name)
        # check on all buckets
        for key in src_keys:
            # limiting to checking specific key name, otherwise could get results from
            # other runs / tests
            do_check_mdsearch(target_conn.conn, None, src_keys , 'name == ' + key.name, lambda k: k.name == key.name)
        # check on specific bucket
        for bucket in buckets:
            for key in src_keys:
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name < ' + key.name, lambda k: k.name < key.name)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name <= ' + key.name, lambda k: k.name <= key.name)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name == ' + key.name, lambda k: k.name == key.name)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name >= ' + key.name, lambda k: k.name >= key.name)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name > ' + key.name, lambda k: k.name > key.name)
            do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name == ' + src_keys[0].name + ' or name >= ' + src_keys[2].name,
                              lambda k: k.name == src_keys[0].name or k.name >= src_keys[2].name)
            # check etag (stored unquoted server-side, hence the [1:-1] strip)
            for key in src_keys:
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'etag < ' + key.etag[1:-1], lambda k: k.etag < key.etag)
            for key in src_keys:
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'etag == ' + key.etag[1:-1], lambda k: k.etag == key.etag)
            for key in src_keys:
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'etag > ' + key.etag[1:-1], lambda k: k.etag > key.etag)
            # check size
            for key in src_keys:
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'size < ' + str(key.size), lambda k: k.size < key.size)
            for key in src_keys:
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'size <= ' + str(key.size), lambda k: k.size <= key.size)
            for key in src_keys:
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'size == ' + str(key.size), lambda k: k.size == key.size)
            for key in src_keys:
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'size >= ' + str(key.size), lambda k: k.size >= key.size)
            for key in src_keys:
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'size > ' + str(key.size), lambda k: k.size > key.size)
def date_from_str(s):
    # Parse a timestamp string (as produced by strftime above) into a
    # datetime so values can be compared chronologically.
    return dateutil.parser.parse(s)
def test_es_object_search_custom():
    """Exercise metadata search on custom (x-amz-meta-*) attributes of
    string, int, and date types, including 'or'/'and' compound queries."""
    min_size = 10
    content = 'a' * min_size
    def bucket_init(zone_conn, bucket):
        # declare the typed custom-metadata fields before objects are written
        req = MDSearchConfig(zone_conn.conn, bucket.name)
        req.set_config('x-amz-meta-foo-str; string, x-amz-meta-foo-int; int, x-amz-meta-foo-date; date')
    def create_obj(k, i):
        date = datetime.datetime.now() + datetime.timedelta(seconds=1) * i
        date_str = date.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
        k.set_contents_from_string(content + 'x' * i, headers = { 'X-Amz-Meta-Foo-Str': str(i * 5),
                                                                  'X-Amz-Meta-Foo-Int': str(i * 5),
                                                                  'X-Amz-Meta-Foo-Date': date_str})
    targets, _, buckets, src_keys = init_env(create_obj, num_keys = 5, buckets_per_zone = 1, bucket_init_cb = bucket_init)
    for target_conn in targets:
        # bucket checks
        for bucket in buckets:
            str_vals = []
            for key in src_keys:
                # check string values
                val = key.get_metadata('foo-str')
                str_vals.append(val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str < ' + val, lambda k: k.get_metadata('foo-str') < val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str <= ' + val, lambda k: k.get_metadata('foo-str') <= val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str == ' + val, lambda k: k.get_metadata('foo-str') == val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str >= ' + val, lambda k: k.get_metadata('foo-str') >= val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str > ' + val, lambda k: k.get_metadata('foo-str') > val)
                # check int values
                sval = key.get_metadata('foo-int')
                val = int(sval)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-int < ' + sval, lambda k: int(k.get_metadata('foo-int')) < val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-int <= ' + sval, lambda k: int(k.get_metadata('foo-int')) <= val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-int == ' + sval, lambda k: int(k.get_metadata('foo-int')) == val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-int >= ' + sval, lambda k: int(k.get_metadata('foo-int')) >= val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-int > ' + sval, lambda k: int(k.get_metadata('foo-int')) > val)
                # check date values
                sval = key.get_metadata('foo-date')
                val = date_from_str(sval)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-date < ' + sval, lambda k: date_from_str(k.get_metadata('foo-date')) < val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-date <= ' + sval, lambda k: date_from_str(k.get_metadata('foo-date')) <= val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-date == ' + sval, lambda k: date_from_str(k.get_metadata('foo-date')) == val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-date >= ' + sval, lambda k: date_from_str(k.get_metadata('foo-date')) >= val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-date > ' + sval, lambda k: date_from_str(k.get_metadata('foo-date')) > val)
            # 'or' query
            # NOTE(review): str_vals[-i] is str_vals[0] when i == 0 — presumably
            # intentional (degenerate 'x <= v0 or x >= v0' query); verify
            for i in range(len(src_keys) // 2):
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str <= ' + str_vals[i] + ' or x-amz-meta-foo-str >= ' + str_vals[-i],
                                  lambda k: k.get_metadata('foo-str') <= str_vals[i] or k.get_metadata('foo-str') >= str_vals[-i] )
            # 'and' query
            for i in range(len(src_keys) // 2):
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str >= ' + str_vals[i] + ' and x-amz-meta-foo-str <= ' + str_vals[i + 1],
                                  lambda k: k.get_metadata('foo-str') >= str_vals[i] and k.get_metadata('foo-str') <= str_vals[i + 1] )
            # more complicated query
            for i in range(len(src_keys) // 2):
                do_check_mdsearch(target_conn.conn, None, src_keys , 'bucket == ' + bucket.name + ' and x-amz-meta-foo-str >= ' + str_vals[i] +
                                  ' and (x-amz-meta-foo-str <= ' + str_vals[i + 1] + ')',
                                  lambda k: k.bucket.name == bucket.name and (k.get_metadata('foo-str') >= str_vals[i] and
                                                                              k.get_metadata('foo-str') <= str_vals[i + 1]) )
def test_es_bucket_conf():
    """Verify the per-bucket mdsearch config round-trip: set three typed
    fields via the init callback, read them back, delete, read back empty."""
    min_size = 0
    def bucket_init(zone_conn, bucket):
        req = MDSearchConfig(zone_conn.conn, bucket.name)
        req.set_config('x-amz-meta-foo-str; string, x-amz-meta-foo-int; int, x-amz-meta-foo-date; date')
    targets, sources, buckets, _ = init_env(None, num_keys = 5, buckets_per_zone = 1, bucket_init_cb = bucket_init)
    for source_conn in sources:
        for bucket in buckets:
            req = MDSearchConfig(source_conn.conn, bucket.name)
            conf = req.get_config()
            d = {}
            for entry in conf:
                d[entry['Key']] = entry['Type']
            eq(len(d), 3)
            eq(d['x-amz-meta-foo-str'], 'str')
            eq(d['x-amz-meta-foo-int'], 'int')
            eq(d['x-amz-meta-foo-date'], 'date')
            req.del_config()
            conf = req.get_config()
            eq(len(conf), 0)
        break # no need to iterate over all zones
| 12,618 | 44.555957 | 164 | py |
null | ceph-main/src/test/rgw/rgw_multi/tools.py | import json
import boto
def append_attr_value(d, attr, attrv):
    """Store attrv under d[attr], skipping falsy or empty values."""
    if not attrv:
        return
    if len(str(attrv)) > 0:
        d[attr] = attrv
def append_attr(d, k, attr):
    """Copy attribute `attr` of object k into dict d, if present and non-empty.

    Fix: the bare `except:` (which also swallowed KeyboardInterrupt and any
    unrelated error raised while reading the attribute) is narrowed to
    AttributeError — the only expected failure for a missing attribute.
    """
    try:
        attrv = getattr(k, attr)
    except AttributeError:
        return
    append_attr_value(d, attr, attrv)
def get_attrs(k, attrs):
    """Build a dict of the named attributes of k, skipping missing or
    empty ones."""
    result = {}
    for name in attrs:
        append_attr(result, k, name)
    return result
def append_query_arg(s, n, v):
    """Append 'n=v' to query string s (with '&' separator); no-op when v
    is falsy, and returns just 'n=v' when s is empty."""
    if not v:
        return s
    pair = '%s=%s' % (n, v)
    return pair if not s else s + '&' + pair
class KeyJSONEncoder(boto.s3.key.Key):
    """Serializes a boto Key into a plain dict for JSON dumping."""
    @staticmethod
    def default(k, versioned=False):
        fields = ['bucket', 'name', 'size', 'last_modified', 'metadata',
                  'cache_control', 'content_type', 'content_disposition',
                  'content_language', 'owner', 'storage_class', 'md5',
                  'version_id', 'encrypted', 'delete_marker', 'expiry_date',
                  'VersionedEpoch', 'RgwxTag']
        result = get_attrs(k, fields)
        result['etag'] = k.etag[1:-1]  # strip the surrounding quotes
        if versioned:
            result['is_latest'] = k.is_latest
        return result
class DeleteMarkerJSONEncoder(boto.s3.key.Key):
    """Serializes a boto delete marker into a plain dict for JSON dumping."""
    @staticmethod
    def default(k):
        result = get_attrs(k, ['name', 'version_id', 'last_modified', 'owner'])
        result['delete_marker'] = True
        result['is_latest'] = k.is_latest
        return result
class UserJSONEncoder(boto.s3.user.User):
    """Serializes a boto User into a plain dict for JSON dumping."""
    @staticmethod
    def default(k):
        return get_attrs(k, ['id', 'display_name'])
class BucketJSONEncoder(boto.s3.bucket.Bucket):
    """Serializes a boto Bucket into a plain dict for JSON dumping."""
    @staticmethod
    def default(k):
        return get_attrs(k, ['name', 'creation_date'])
class BotoJSONEncoder(json.JSONEncoder):
    """JSON encoder that understands the boto S3 listing types."""
    def default(self, obj):
        # dispatch table preserves the original isinstance-check order
        handlers = (
            (boto.s3.key.Key, KeyJSONEncoder.default),
            (boto.s3.deletemarker.DeleteMarker, DeleteMarkerJSONEncoder.default),
            (boto.s3.user.User, UserJSONEncoder.default),
            (boto.s3.prefix.Prefix, lambda x: {'prefix': x.name}),
            (boto.s3.bucket.Bucket, BucketJSONEncoder.default),
        )
        for cls, encode in handlers:
            if isinstance(obj, cls):
                return encode(obj)
        return json.JSONEncoder.default(self, obj)
def dump_json(o, cls=BotoJSONEncoder):
    # Pretty-print o as JSON, using the boto-aware encoder by default.
    return json.dumps(o, cls=cls, indent=4)
def assert_raises(excClass, callableObj, *args, **kwargs):
    """
    Like unittest.TestCase.assertRaises, but returns the exception.
    """
    try:
        callableObj(*args, **kwargs)
    except excClass as e:
        return e
    # no exception (an unrelated one would have propagated above)
    name = excClass.__name__ if hasattr(excClass, '__name__') else str(excClass)
    raise AssertionError("%s not raised" % name)
| 2,842 | 28.010204 | 88 | py |
null | ceph-main/src/test/rgw/rgw_multi/zone_az.py | import logging
from .multisite import Zone
log = logging.getLogger('rgw_multi.tests')
class AZone(Zone): # pylint: disable=too-many-ancestors
""" archive zone class """
def __init__(self, name, zonegroup=None, cluster=None, data=None, zone_id=None, gateways=None):
super(AZone, self).__init__(name, zonegroup, cluster, data, zone_id, gateways)
def is_read_only(self):
return False
def tier_type(self):
return "archive"
def create(self, cluster, args=None, **kwargs):
if args is None:
args = ''
args += ['--tier-type', self.tier_type()]
return self.json_command(cluster, 'create', args)
def has_buckets(self):
return False
def has_roles(self):
return True
class AZoneConfig:
    """ archive zone configuration """
    def __init__(self, cfg, section):
        # Archive zones need no extra settings; the (cfg, section) signature
        # is kept for parity with the other *ZoneConfig classes.
        pass
def print_connection_info(conn):
    """print info of connection"""
    print("Host: %s:%s" % (conn.host, conn.port))
    print("AWS Secret Key: %s" % conn.aws_secret_access_key)
    print("AWS Access Key: %s" % conn.aws_access_key_id)
| 1,115 | 24.953488 | 99 | py |
null | ceph-main/src/test/rgw/rgw_multi/zone_cloud.py | import json
import requests.compat
import logging
import boto
import boto.s3.connection
import dateutil.parser
import datetime
import re
from nose.tools import eq_ as eq
from itertools import zip_longest # type: ignore
from urllib.parse import urlparse
from .multisite import *
from .tools import *
log = logging.getLogger(__name__)
def get_key_ver(k):
    """Return the key's version id, or 'null' for non-versioned keys."""
    return k.version_id or 'null'
def unquote(s):
    """Strip one pair of surrounding double quotes from s, if present.

    Fix: the original indexed s[0] unconditionally, raising IndexError on an
    empty string, and treated a lone '"' as a quoted pair (same character
    matched both ends); both edge cases are now handled.
    """
    if len(s) >= 2 and s[0] == '"' and s[-1] == '"':
        return s[1:-1]
    return s
def check_object_eq(k1, k2, check_extra = True):
    """Assert two keys describe the same object: name, metadata, content
    headers, etag (quote-insensitive), mtime within 1s, size, and version.

    NOTE(review): check_extra is currently unused here — the owner and
    storage-class checks it used to guard are commented out for cloud-tier
    keys (the cloud copy does not preserve them).
    """
    assert k1
    assert k2
    log.debug('comparing key name=%s', k1.name)
    eq(k1.name, k2.name)
    eq(k1.metadata, k2.metadata)
    # eq(k1.cache_control, k2.cache_control)
    eq(k1.content_type, k2.content_type)
    eq(k1.content_encoding, k2.content_encoding)
    eq(k1.content_disposition, k2.content_disposition)
    eq(k1.content_language, k2.content_language)
    eq(unquote(k1.etag), unquote(k2.etag))
    mtime1 = dateutil.parser.parse(k1.last_modified)
    mtime2 = dateutil.parser.parse(k2.last_modified)
    log.debug('k1.last_modified=%s k2.last_modified=%s', k1.last_modified, k2.last_modified)
    assert abs((mtime1 - mtime2).total_seconds()) < 1 # handle different time resolution
    # if check_extra:
    #     eq(k1.owner.id, k2.owner.id)
    #     eq(k1.owner.display_name, k2.owner.display_name)
    # eq(k1.storage_class, k2.storage_class)
    eq(k1.size, k2.size)
    eq(get_key_ver(k1), get_key_ver(k2))
    # eq(k1.encrypted, k2.encrypted)
def make_request(conn, method, bucket, key, query_args, headers):
    """Issue a raw S3 request; raise S3ResponseError on any non-2xx status."""
    result = conn.make_request(method, bucket=bucket, key=key,
                               query_args=query_args, headers=headers)
    if not 200 <= result.status < 300:
        raise boto.exception.S3ResponseError(result.status, result.reason, result.read())
    return result
class CloudKey:
    """Wraps a boto Key stored in the cloud tier, exposing the original
    (source-zone) name/version/mtime/etag recorded in its rgwx-* metadata,
    so it can be compared against the source object."""
    def __init__(self, zone_bucket, k):
        self.zone_bucket = zone_bucket
        # we need two keys: when listing buckets, we get keys that only contain partial data
        # but we need to have the full data so that we could use all the meta-rgwx- headers
        # that are needed in order to create a correct representation of the object
        self.key = k
        self.rgwx_key = k # assuming k has all the meta info on, if not then we'll update it in update()
        self.update()

    def update(self):
        """Populate source-object attributes from the rgwx-* metadata,
        fetching the full key from the cloud bucket if the listing entry
        lacked the metadata."""
        k = self.key
        rk = self.rgwx_key
        self.size = rk.size
        orig_name = rk.metadata.get('rgwx-source-key')
        if not orig_name:
            # listing entry had no metadata; fetch the full object head
            self.rgwx_key = self.zone_bucket.bucket.get_key(k.name, version_id = k.version_id)
            rk = self.rgwx_key
            orig_name = rk.metadata.get('rgwx-source-key')
        self.name = orig_name
        self.version_id = rk.metadata.get('rgwx-source-version-id')
        ve = rk.metadata.get('rgwx-versioned-epoch')
        if ve:
            self.versioned_epoch = int(ve)
        else:
            self.versioned_epoch = 0
        mt = rk.metadata.get('rgwx-source-mtime')
        if mt:
            # source mtime is a unix timestamp; render it in HTTP date format
            self.last_modified = datetime.datetime.utcfromtimestamp(float(mt)).strftime('%a, %d %b %Y %H:%M:%S GMT')
        else:
            self.last_modified = k.last_modified
        et = rk.metadata.get('rgwx-source-etag')
        if rk.etag.find('-') >= 0 or et.find('-') >= 0:
            # in this case we will use the source etag as it was uploaded via multipart upload
            # in one of the zones, so there's no way to make sure etags are calculated the same
            # way. In the other case we'd just want to keep the etag that was generated in the
            # regular upload mechanism, which should be consistent in both ends
            self.etag = et
        else:
            self.etag = rk.etag
        if k.etag[0] == '"' and self.etag[0] != '"': # inconsistent etag quoting when listing bucket vs object get
            self.etag = '"' + self.etag + '"'
        # drop the internal rgwx-* entries from the user-visible metadata
        new_meta = {}
        for meta_key, meta_val in k.metadata.items():
            if not meta_key.startswith('rgwx-'):
                new_meta[meta_key] = meta_val
        self.metadata = new_meta
        self.cache_control = k.cache_control
        self.content_type = k.content_type
        self.content_encoding = k.content_encoding
        self.content_disposition = k.content_disposition
        self.content_language = k.content_language

    def get_contents_as_string(self, encoding=None):
        """Read the object body; also refreshes our cached metadata view."""
        r = self.key.get_contents_as_string(encoding=encoding)
        # the previous call changed the status of the source object, as it loaded
        # its metadata
        self.rgwx_key = self.key
        self.update()
        return r
class CloudZoneBucket:
    """Bucket view over a cloud-tier zone: maps a logical bucket name onto
    the configured target bucket/prefix in the external cloud endpoint."""
    def __init__(self, zone_conn, target_path, name):
        self.zone_conn = zone_conn
        self.name = name
        self.cloud_conn = zone_conn.zone.cloud_conn
        # normalize and expand the target path template
        target_path = target_path[:]
        if target_path[-1] != '/':
            target_path += '/'
        target_path = target_path.replace('${bucket}', name)
        # first path component is the real cloud bucket, the rest is a prefix
        tp = target_path.split('/', 1)
        if len(tp) == 1:
            self.target_bucket = target_path
            self.target_prefix = ''
        else:
            self.target_bucket = tp[0]
            self.target_prefix = tp[1]
        log.debug('target_path=%s target_bucket=%s target_prefix=%s', target_path, self.target_bucket, self.target_prefix)
        self.bucket = self.cloud_conn.get_bucket(self.target_bucket)

    def get_all_versions(self):
        """Yield CloudKeys for every object under our prefix, sorted by
        (name, newest-epoch-first) to mirror a versioned listing."""
        l = []
        for k in self.bucket.get_all_keys(prefix=self.target_prefix):
            new_key = CloudKey(self, k)
            log.debug('appending o=[\'%s\', \'%s\', \'%d\']', new_key.name, new_key.version_id, new_key.versioned_epoch)
            l.append(new_key)
        sort_key = lambda k: (k.name, -k.versioned_epoch)
        l.sort(key = sort_key)
        for new_key in l:
            yield new_key

    def get_key(self, name, version_id=None):
        # fetch a single object (optionally a specific version) as a CloudKey
        return CloudKey(self, self.bucket.get_key(name, version_id=version_id))
def parse_endpoint(endpoint):
    """Split an endpoint URL into (host, port, is_secure), defaulting the
    port to 443 for https and 80 otherwise."""
    parsed = urlparse(endpoint)
    parts = parsed.netloc.split(':')
    host = parts[0]
    port = int(parts[1]) if len(parts) > 1 else parsed.port
    is_secure = (parsed.scheme == 'https')
    if not port:
        port = 443 if is_secure else 80
    return host, port, is_secure
class CloudZone(Zone):
    """A read-only 'cloud'-tier zone that mirrors objects to an external
    S3-compatible endpoint under a configurable target path."""
    def __init__(self, name, cloud_endpoint, credentials, source_bucket, target_path,
                 zonegroup=None, cluster=None, data=None, zone_id=None, gateways=None):
        self.cloud_endpoint = cloud_endpoint
        self.credentials = credentials
        self.source_bucket = source_bucket
        self.target_path = target_path
        # expand placeholders in the configured target path template
        self.target_path = self.target_path.replace('${zone}', name)
        # self.target_path = self.target_path.replace('${zone_id}', zone_id)
        self.target_path = self.target_path.replace('${zonegroup}', zonegroup.name)
        self.target_path = self.target_path.replace('${zonegroup_id}', zonegroup.id)
        log.debug('target_path=%s', self.target_path)
        host, port, is_secure = parse_endpoint(cloud_endpoint)
        self.cloud_conn = boto.connect_s3(
            aws_access_key_id=credentials.access_key,
            aws_secret_access_key=credentials.secret,
            host=host,
            port=port,
            is_secure=is_secure,
            calling_format=boto.s3.connection.OrdinaryCallingFormat())
        super(CloudZone, self).__init__(name, zonegroup, cluster, data, zone_id, gateways)

    def is_read_only(self):
        return True

    def tier_type(self):
        return "cloud"

    def create(self, cluster, args=None, check_retcode=True):
        """ create the object with the given arguments

        Fix: args defaulted to the *string* '' and was then concatenated with
        a list, raising TypeError whenever create() was called without args;
        it now defaults to a list, and the caller's list is not mutated.
        """
        if args is None:
            args = []
        tier_config = ','.join(['connection.endpoint=' + self.cloud_endpoint,
                                'connection.access_key=' + self.credentials.access_key,
                                'connection.secret=' + self.credentials.secret,
                                'target_path=' + re.escape(self.target_path)])
        args = args + ['--tier-type', self.tier_type(), '--tier-config', tier_config]
        return self.json_command(cluster, 'create', args, check_retcode=check_retcode)

    def has_buckets(self):
        return False

    def has_roles(self):
        return False

    class Conn(ZoneConn):
        def __init__(self, zone, credentials):
            super(CloudZone.Conn, self).__init__(zone, credentials)

        def get_bucket(self, bucket_name):
            return CloudZoneBucket(self, self.zone.target_path, bucket_name)

        def create_bucket(self, name):
            # should not be here, a bug in the test suite
            log.critical('Conn.create_bucket() should not be called in cloud zone')
            assert False

        def check_bucket_eq(self, zone_conn, bucket_name):
            """Compare the cloud-mirrored bucket against its rados source."""
            assert(zone_conn.zone.tier_type() == "rados")
            log.info('comparing bucket=%s zones={%s, %s}', bucket_name, self.name, self.name)
            b1 = self.get_bucket(bucket_name)
            b2 = zone_conn.get_bucket(bucket_name)
            log.debug('bucket1 objects:')
            for o in b1.get_all_versions():
                log.debug('o=%s', o.name)
            log.debug('bucket2 objects:')
            for o in b2.get_all_versions():
                log.debug('o=%s', o.name)
            for k1, k2 in zip_longest(b1.get_all_versions(), b2.get_all_versions()):
                if k1 is None:
                    log.critical('key=%s is missing from zone=%s', k2.name, self.name)
                    assert False
                if k2 is None:
                    log.critical('key=%s is missing from zone=%s', k1.name, zone_conn.name)
                    assert False
                check_object_eq(k1, k2)
            log.info('success, bucket identical: bucket=%s zones={%s, %s}', bucket_name, self.name, zone_conn.name)
            return True

        def create_role(self, path, rolename, policy_document, tag_list):
            # cloud zones do not support IAM roles
            assert False

    def get_conn(self, credentials):
        return self.Conn(self, credentials)
class CloudZoneConfig:
    """Config-file section describing a cloud-tier zone (endpoint,
    credentials, optional target_path and source_bucket)."""
    def __init__(self, cfg, section):
        self.endpoint = cfg.get(section, 'endpoint')
        self.credentials = Credentials(cfg.get(section, 'access_key'),
                                       cfg.get(section, 'secret'))
        try:
            self.target_path = cfg.get(section, 'target_path')
        except:
            # optional; default mirrors into a per-zonegroup bucket
            self.target_path = 'rgw-${zonegroup_id}/${bucket}'
        try:
            self.source_bucket = cfg.get(section, 'source_bucket')
        except:
            # optional; default syncs every bucket
            self.source_bucket = '*'
| 10,952 | 32.495413 | 122 | py |
null | ceph-main/src/test/rgw/rgw_multi/zone_es.py | import json
import requests.compat
import logging
import boto
import boto.s3.connection
import dateutil.parser
from nose.tools import eq_ as eq
from itertools import zip_longest # type: ignore
from .multisite import *
from .tools import *
log = logging.getLogger(__name__)
def get_key_ver(k):
    """Return the key's version id, falling back to 'null' when unset."""
    ver = k.version_id
    return ver if ver else 'null'
def check_object_eq(k1, k2, check_extra = True):
    """Assert two keys describe the same object: name, metadata, content
    type, etag, mtime within 1s, owner (when check_extra), size, version."""
    assert k1
    assert k2
    log.debug('comparing key name=%s', k1.name)
    eq(k1.name, k2.name)
    eq(k1.metadata, k2.metadata)
    # eq(k1.cache_control, k2.cache_control)
    eq(k1.content_type, k2.content_type)
    # eq(k1.content_encoding, k2.content_encoding)
    # eq(k1.content_disposition, k2.content_disposition)
    # eq(k1.content_language, k2.content_language)
    eq(k1.etag, k2.etag)
    mtime1 = dateutil.parser.parse(k1.last_modified)
    mtime2 = dateutil.parser.parse(k2.last_modified)
    assert abs((mtime1 - mtime2).total_seconds()) < 1 # handle different time resolution
    if check_extra:
        eq(k1.owner.id, k2.owner.id)
        eq(k1.owner.display_name, k2.owner.display_name)
    # eq(k1.storage_class, k2.storage_class)
    eq(k1.size, k2.size)
    eq(get_key_ver(k1), get_key_ver(k2))
    # eq(k1.encrypted, k2.encrypted)
def make_request(conn, method, bucket, key, query_args, headers):
    """Issue a raw request via the boto connection; raise S3ResponseError
    on any non-2xx status."""
    result = conn.make_request(method, bucket=bucket, key=key,
                               query_args=query_args, headers=headers)
    status_class = result.status // 100
    if status_class != 2:
        raise boto.exception.S3ResponseError(result.status, result.reason, result.read())
    return result
class MDSearch:
    """Helper driving the rgw metadata-search REST API (GET with a 'query'
    argument), with optional pagination via marker/max-keys."""
    def __init__(self, conn, bucket_name, query, query_args=None, marker=None):
        self.conn = conn
        self.bucket_name = bucket_name or ''
        if bucket_name:
            self.bucket = boto.s3.bucket.Bucket(name=bucket_name)
        else:
            self.bucket = None
        self.query = query
        self.query_args = query_args
        self.max_keys = None
        self.marker = marker

    def raw_search(self):
        """Perform a single search request; return (response dict, keys)."""
        q = self.query or ''
        query_args = append_query_arg(self.query_args, 'query', requests.compat.quote_plus(q))
        if self.max_keys is not None:
            query_args = append_query_arg(query_args, 'max-keys', self.max_keys)
        if self.marker:
            query_args = append_query_arg(query_args, 'marker', self.marker)
        query_args = append_query_arg(query_args, 'format', 'json')
        headers = {}
        result = make_request(self.conn, "GET", bucket=self.bucket_name, key='', query_args=query_args, headers=headers)
        l = []
        result_dict = json.loads(result.read())
        for entry in result_dict['Objects']:
            bucket = self.conn.get_bucket(entry['Bucket'], validate = False)
            k = boto.s3.key.Key(bucket, entry['Key'])
            k.version_id = entry['Instance']
            k.etag = entry['ETag']
            k.owner = boto.s3.user.User(id=entry['Owner']['ID'], display_name=entry['Owner']['DisplayName'])
            k.last_modified = entry['LastModified']
            k.size = entry['Size']
            k.content_type = entry['ContentType']
            k.versioned_epoch = entry['VersionedEpoch']
            k.metadata = {}
            for e in entry['CustomMetadata']:
                # int values will return as int, cast to string for
                # compatibility with object meta response
                k.metadata[e['Name']] = str(e['Value'])
            l.append(k)
        return result_dict, l

    def search(self, drain=True, sort=True, sort_key=None):
        """Run the search, draining all pages by default; return the keys.

        Fix: the continuation marker returned by each page is now stored back
        into self.marker; previously it was assigned to an unused local, so
        raw_search() kept requesting the first page and draining a truncated
        result set looped forever.
        """
        l = []
        is_done = False
        while not is_done:
            result, result_keys = self.raw_search()
            l = l + result_keys
            is_done = not (drain and (result['IsTruncated'] == "true"))
            self.marker = result['Marker']
        if sort:
            if not sort_key:
                sort_key = lambda k: (k.name, -k.versioned_epoch)
            l.sort(key = sort_key)
        return l
class MDSearchConfig:
    """Helper for the per-bucket metadata-search configuration API
    (GET/POST/DELETE on the 'mdsearch' sub-resource)."""
    def __init__(self, conn, bucket_name):
        self.conn = conn
        self.bucket_name = bucket_name or ''
        self.bucket = boto.s3.bucket.Bucket(name=bucket_name) if bucket_name else None

    def send_request(self, conf, method):
        """Send an mdsearch request; conf (if given) travels in the
        X-Amz-Meta-Search header."""
        headers = {'X-Amz-Meta-Search': conf} if conf else None
        query_args = append_query_arg('mdsearch', 'format', 'json')
        return make_request(self.conn, method, bucket=self.bucket_name,
                            key='', query_args=query_args, headers=headers)

    def get_config(self):
        return json.loads(self.send_request(None, 'GET').read())

    def set_config(self, conf):
        self.send_request(conf, 'POST')

    def del_config(self):
        self.send_request(None, 'DELETE')
class ESZoneBucket:
    """Bucket-like view of an elasticsearch zone: object listings are
    produced from metadata search results rather than a RADOS bucket
    index."""

    def __init__(self, zone_conn, name, conn):
        self.zone_conn = zone_conn
        self.name = name
        self.conn = conn
        self.bucket = boto.s3.bucket.Bucket(name=name)

    def get_all_versions(self):
        """Yield every object version that elasticsearch knows about for
        this bucket."""
        query = 'bucket == ' + self.name
        req = MDSearch(self.conn, self.name, query, marker=None)
        for key in req.search():
            yield key
class ESZone(Zone):
    """A read-only zone backed by an elasticsearch metadata-search tier.

    Created with tier type 'elasticsearch'; its Conn serves bucket
    listings from ES metadata search and refuses any write operation.
    """
    def __init__(self, name, es_endpoint, zonegroup = None, cluster = None, data = None, zone_id = None, gateways = None):
        self.es_endpoint = es_endpoint
        super(ESZone, self).__init__(name, zonegroup, cluster, data, zone_id, gateways)

    def is_read_only(self):
        return True

    def tier_type(self):
        return "elasticsearch"

    def create(self, cluster, args = None, check_retcode = True):
        """ create the object with the given arguments """
        if args is None:
            # must default to a list: tier-config flags are appended below,
            # and '' += [...] raises TypeError (str cannot concatenate a list)
            args = []
        tier_config = ','.join([ 'endpoint=' + self.es_endpoint, 'explicit_custom_meta=false' ])
        args += [ '--tier-type', self.tier_type(), '--tier-config', tier_config ]
        return self.json_command(cluster, 'create', args, check_retcode=check_retcode)

    def has_buckets(self):
        return False

    def has_roles(self):
        return False

    class Conn(ZoneConn):
        def __init__(self, zone, credentials):
            super(ESZone.Conn, self).__init__(zone, credentials)

        def get_bucket(self, bucket_name):
            return ESZoneBucket(self, bucket_name, self.conn)

        def create_bucket(self, name):
            # should not be here, a bug in the test suite
            log.critical('Conn.create_bucket() should not be called in ES zone')
            assert False

        def check_bucket_eq(self, zone_conn, bucket_name):
            # Compare this ES zone's search-backed view of a bucket against
            # the authoritative rados zone's listing; asserts on mismatch.
            assert(zone_conn.zone.tier_type() == "rados")
            # log both zone names (was logging self.name twice)
            log.info('comparing bucket=%s zones={%s, %s}', bucket_name, self.name, zone_conn.name)
            b1 = self.get_bucket(bucket_name)
            b2 = zone_conn.get_bucket(bucket_name)
            log.debug('bucket1 objects:')
            for o in b1.get_all_versions():
                log.debug('o=%s', o.name)
            log.debug('bucket2 objects:')
            for o in b2.get_all_versions():
                log.debug('o=%s', o.name)
            # zip_longest pads the shorter listing with None, so a missing
            # key on either side shows up as k1/k2 being None below
            for k1, k2 in zip_longest(b1.get_all_versions(), b2.get_all_versions()):
                if k1 is None:
                    log.critical('key=%s is missing from zone=%s', k2.name, self.name)
                    assert False
                if k2 is None:
                    log.critical('key=%s is missing from zone=%s', k1.name, zone_conn.name)
                    assert False
                check_object_eq(k1, k2)
            log.info('success, bucket identical: bucket=%s zones={%s, %s}', bucket_name, self.name, zone_conn.name)
            return True

        def create_role(self, path, rolename, policy_document, tag_list):
            # roles cannot be created in a read-only ES zone
            assert False

    def get_conn(self, credentials):
        return self.Conn(self, credentials)
class ESZoneConfig:
    """Elasticsearch zone settings parsed from the test configuration file."""
    def __init__(self, cfg, section):
        # 'endpoint' is the elasticsearch server URL handed to ESZone
        self.endpoint = cfg.get(section, 'endpoint')
| 8,133 | 30.649805 | 147 | py |
null | ceph-main/src/test/rgw/rgw_multi/zone_rados.py | import logging
from boto.s3.deletemarker import DeleteMarker
from itertools import zip_longest # type: ignore
from nose.tools import eq_ as eq
from .multisite import *
log = logging.getLogger(__name__)
def check_object_eq(k1, k2, check_extra = True):
assert k1
assert k2
log.debug('comparing key name=%s', k1.name)
eq(k1.name, k2.name)
eq(k1.version_id, k2.version_id)
eq(k1.is_latest, k2.is_latest)
eq(k1.last_modified, k2.last_modified)
if isinstance(k1, DeleteMarker):
assert isinstance(k2, DeleteMarker)
return
eq(k1.get_contents_as_string(), k2.get_contents_as_string())
eq(k1.metadata, k2.metadata)
eq(k1.cache_control, k2.cache_control)
eq(k1.content_type, k2.content_type)
eq(k1.content_encoding, k2.content_encoding)
eq(k1.content_disposition, k2.content_disposition)
eq(k1.content_language, k2.content_language)
eq(k1.etag, k2.etag)
if check_extra:
eq(k1.owner.id, k2.owner.id)
eq(k1.owner.display_name, k2.owner.display_name)
eq(k1.storage_class, k2.storage_class)
eq(k1.size, k2.size)
eq(k1.encrypted, k2.encrypted)
class RadosZone(Zone):
def __init__(self, name, zonegroup = None, cluster = None, data = None, zone_id = None, gateways = None):
super(RadosZone, self).__init__(name, zonegroup, cluster, data, zone_id, gateways)
def tier_type(self):
return "rados"
class Conn(ZoneConn):
def __init__(self, zone, credentials):
super(RadosZone.Conn, self).__init__(zone, credentials)
def get_bucket(self, name):
return self.conn.get_bucket(name)
def create_bucket(self, name):
return self.conn.create_bucket(name)
def delete_bucket(self, name):
return self.conn.delete_bucket(name)
def check_bucket_eq(self, zone_conn, bucket_name):
log.info('comparing bucket=%s zones={%s, %s}', bucket_name, self.name, zone_conn.name)
b1 = self.get_bucket(bucket_name)
b2 = zone_conn.get_bucket(bucket_name)
b1_versions = b1.list_versions()
log.debug('bucket1 objects:')
for o in b1_versions:
log.debug('o=%s', o.name)
b2_versions = b2.list_versions()
log.debug('bucket2 objects:')
for o in b2_versions:
log.debug('o=%s', o.name)
for k1, k2 in zip_longest(b1_versions, b2_versions):
if k1 is None:
log.critical('key=%s is missing from zone=%s', k2.name, self.name)
assert False
if k2 is None:
log.critical('key=%s is missing from zone=%s', k1.name, zone_conn.name)
assert False
check_object_eq(k1, k2)
if isinstance(k1, DeleteMarker):
# verify that HEAD sees a delete marker
assert b1.get_key(k1.name) is None
assert b2.get_key(k2.name) is None
else:
# now get the keys through a HEAD operation, verify that the available data is the same
k1_head = b1.get_key(k1.name, version_id=k1.version_id)
k2_head = b2.get_key(k2.name, version_id=k2.version_id)
check_object_eq(k1_head, k2_head, False)
if k1.version_id:
# compare the olh to make sure they agree about the current version
k1_olh = b1.get_key(k1.name)
k2_olh = b2.get_key(k2.name)
# if there's a delete marker, HEAD will return None
if k1_olh or k2_olh:
check_object_eq(k1_olh, k2_olh, False)
log.info('success, bucket identical: bucket=%s zones={%s, %s}', bucket_name, self.name, zone_conn.name)
return True
def get_role(self, role_name):
return self.iam_conn.get_role(role_name)
def check_role_eq(self, zone_conn, role_name):
log.info('comparing role=%s zones={%s, %s}', role_name, self.name, zone_conn.name)
r1 = self.get_role(role_name)
r2 = zone_conn.get_role(role_name)
assert r1
assert r2
log.debug('comparing role name=%s', r1['get_role_response']['get_role_result']['role']['role_name'])
eq(r1['get_role_response']['get_role_result']['role']['role_name'], r2['get_role_response']['get_role_result']['role']['role_name'])
eq(r1['get_role_response']['get_role_result']['role']['role_id'], r2['get_role_response']['get_role_result']['role']['role_id'])
eq(r1['get_role_response']['get_role_result']['role']['path'], r2['get_role_response']['get_role_result']['role']['path'])
eq(r1['get_role_response']['get_role_result']['role']['arn'], r2['get_role_response']['get_role_result']['role']['arn'])
eq(r1['get_role_response']['get_role_result']['role']['max_session_duration'], r2['get_role_response']['get_role_result']['role']['max_session_duration'])
eq(r1['get_role_response']['get_role_result']['role']['assume_role_policy_document'], r2['get_role_response']['get_role_result']['role']['assume_role_policy_document'])
log.info('success, role identical: role=%s zones={%s, %s}', role_name, self.name, zone_conn.name)
return True
def create_role(self, path, rolename, policy_document, tag_list):
return self.iam_conn.create_role(rolename, policy_document, path)
def get_conn(self, credentials):
return self.Conn(self, credentials)
| 5,719 | 41.37037 | 180 | py |
null | ceph-main/src/test/system/cross_process_sem.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "cross_process_sem.h"
#include <errno.h>
#include <semaphore.h>
#include <stdlib.h>
#ifndef _WIN32
#include <sys/mman.h>
#endif
#include "include/ceph_assert.h"
/* We put our cross-process semaphore into a page of memory mapped with mmap. */
struct cross_process_sem_data_t
{
sem_t sem;
};
/* A factory function is a good choice here because we want to be able to
* return an error code. It does force heap allocation, but that is the
* easiest way to use synchronization primitives anyway. Most programmers don't
* care about destroying semaphores before the process finishes. It's pretty
* difficult to get it right and there is usually no benefit.
*/
int CrossProcessSem::
create(int initial_val, CrossProcessSem** res)
{
#ifndef _WIN32
struct cross_process_sem_data_t *data = static_cast < cross_process_sem_data_t*> (
mmap(NULL, sizeof(struct cross_process_sem_data_t),
PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0));
if (data == MAP_FAILED) {
int err = errno;
return err;
}
int ret = sem_init(&data->sem, 1, initial_val);
#else
// We can't use multiple processes on Windows for the time being.
struct cross_process_sem_data_t *data = (cross_process_sem_data_t*)malloc(
sizeof(cross_process_sem_data_t));
int ret = sem_init(&data->sem, 0, initial_val);
#endif /* _WIN32 */
if (ret) {
return ret;
}
*res = new CrossProcessSem(data);
return 0;
}
CrossProcessSem::
~CrossProcessSem()
{
#ifndef _WIN32
munmap(m_data, sizeof(struct cross_process_sem_data_t));
#else
free(m_data);
#endif
m_data = NULL;
}
void CrossProcessSem::
wait()
{
while(true) {
int ret = sem_wait(&m_data->sem);
if (ret == 0)
return;
int err = errno;
if (err == -EINTR)
continue;
ceph_abort();
}
}
void CrossProcessSem::
post()
{
int ret = sem_post(&m_data->sem);
if (ret == -1) {
ceph_abort();
}
}
int CrossProcessSem::
reinit(int dval)
{
if (dval < 0)
return -EINVAL;
int cval;
if (sem_getvalue(&m_data->sem, &cval) == -1)
return errno;
if (cval < dval) {
int diff = dval - cval;
for (int i = 0; i < diff; ++i)
sem_post(&m_data->sem);
}
else {
int diff = cval - dval;
for (int i = 0; i < diff; ++i)
sem_wait(&m_data->sem);
}
return 0;
}
CrossProcessSem::
CrossProcessSem(struct cross_process_sem_data_t *data)
: m_data(data)
{
}
| 2,803 | 21.796748 | 84 | cc |
null | ceph-main/src/test/system/cross_process_sem.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
struct cross_process_sem_data_t;
class CrossProcessSem
{
public:
static int create(int initial_val, CrossProcessSem** ret);
~CrossProcessSem();
/* Initialize the semaphore. Must be called before any operations */
int init();
/* Semaphore wait */
void wait();
/* Semaphore post */
void post();
/* Reinitialize the semaphore to the desired value.
* NOT thread-safe if it is in use at the time!
*/
int reinit(int dval);
private:
explicit CrossProcessSem(struct cross_process_sem_data_t *data);
struct cross_process_sem_data_t *m_data;
};
| 971 | 22.707317 | 70 | h |
null | ceph-main/src/test/system/rados_delete_pools_parallel.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "cross_process_sem.h"
#include "include/rados/librados.h"
#include "st_rados_create_pool.h"
#include "st_rados_delete_pool.h"
#include "st_rados_list_objects.h"
#include "systest_runnable.h"
#include "systest_settings.h"
#include <errno.h>
#include <pthread.h>
#include <semaphore.h>
#include <sstream>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <time.h>
#include <vector>
using std::ostringstream;
using std::string;
using std::vector;
static int g_num_objects = 50;
/*
* rados_delete_pools_parallel
*
* This tests creation and deletion races.
*
* EXPECT: * can delete a pool while another user is using it
* * operations on pools return error codes after the pools
* are deleted
*
* DO NOT EXPECT * hangs, crashes
*/
const char *get_id_str()
{
return "main";
}
int main(int argc, const char **argv)
{
const char *num_objects = getenv("NUM_OBJECTS");
const std::string pool = get_temp_pool_name(argv[0]);
if (num_objects) {
g_num_objects = atoi(num_objects);
if (g_num_objects == 0)
return 100;
}
CrossProcessSem *pool_setup_sem = NULL;
RETURN1_IF_NONZERO(CrossProcessSem::create(0, &pool_setup_sem));
CrossProcessSem *delete_pool_sem = NULL;
RETURN1_IF_NONZERO(CrossProcessSem::create(0, &delete_pool_sem));
CrossProcessSem *deleted_pool_sem = NULL;
RETURN1_IF_NONZERO(CrossProcessSem::create(0, &deleted_pool_sem));
// first test: create a pool, then delete that pool
{
StRadosCreatePool r1(argc, argv, NULL, pool_setup_sem, NULL,
pool, 50, ".obj");
StRadosDeletePool r2(argc, argv, pool_setup_sem, deleted_pool_sem, pool);
vector < SysTestRunnable* > vec;
vec.push_back(&r1);
vec.push_back(&r2);
std::string error = SysTestRunnable::run_until_finished(vec);
if (!error.empty()) {
printf("test1: got error: %s\n", error.c_str());
return EXIT_FAILURE;
}
}
// second test: create a pool, the list objects in that pool while it's
// being deleted.
RETURN1_IF_NONZERO(pool_setup_sem->reinit(0));
RETURN1_IF_NONZERO(delete_pool_sem->reinit(0));
{
StRadosCreatePool r1(argc, argv, deleted_pool_sem, pool_setup_sem, NULL,
pool, g_num_objects, ".obj");
StRadosDeletePool r2(argc, argv, delete_pool_sem, NULL, pool);
StRadosListObjects r3(argc, argv, pool, true, g_num_objects / 2,
pool_setup_sem, NULL, delete_pool_sem);
vector < SysTestRunnable* > vec;
vec.push_back(&r1);
vec.push_back(&r2);
vec.push_back(&r3);
std::string error = SysTestRunnable::run_until_finished(vec);
if (!error.empty()) {
printf("test2: got error: %s\n", error.c_str());
return EXIT_FAILURE;
}
}
printf("******* SUCCESS **********\n");
return EXIT_SUCCESS;
}
| 3,226 | 27.557522 | 78 | cc |
null | ceph-main/src/test/system/rados_list_parallel.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "cross_process_sem.h"
#include "include/rados/librados.h"
#include "include/stringify.h"
#include "st_rados_create_pool.h"
#include "st_rados_list_objects.h"
#include "systest_runnable.h"
#include "systest_settings.h"
#include <errno.h>
#include <map>
#include <pthread.h>
#include <semaphore.h>
#include <sstream>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <time.h>
#include <vector>
#include <sys/types.h>
#include <unistd.h>
using std::ostringstream;
using std::string;
using std::vector;
static int g_num_objects = 50;
static CrossProcessSem *pool_setup_sem = NULL;
static CrossProcessSem *modify_sem = NULL;
class RadosDeleteObjectsR : public SysTestRunnable
{
public:
RadosDeleteObjectsR(int argc, const char **argv,
const std::string &pool_name)
: SysTestRunnable(argc, argv), m_pool_name(pool_name)
{
}
~RadosDeleteObjectsR() override
{
}
int run(void) override
{
int ret_val = 0;
rados_t cl;
RETURN1_IF_NONZERO(rados_create(&cl, NULL));
rados_conf_parse_argv(cl, m_argc, m_argv);
RETURN1_IF_NONZERO(rados_conf_read_file(cl, NULL));
rados_conf_parse_env(cl, NULL);
std::string log_name = SysTestSettings::inst().get_log_name(get_id_str());
if (!log_name.empty())
rados_conf_set(cl, "log_file", log_name.c_str());
RETURN1_IF_NONZERO(rados_connect(cl));
pool_setup_sem->wait();
pool_setup_sem->post();
rados_ioctx_t io_ctx;
rados_pool_create(cl, m_pool_name.c_str());
RETURN1_IF_NONZERO(rados_ioctx_create(cl, m_pool_name.c_str(), &io_ctx));
std::map <int, std::string> to_delete;
for (int i = 0; i < g_num_objects; ++i) {
char oid[128];
snprintf(oid, sizeof(oid), "%d.obj", i);
to_delete[i] = oid;
}
int removed = 0;
while (true) {
if (to_delete.empty())
break;
int r = rand() % to_delete.size();
std::map <int, std::string>::iterator d = to_delete.begin();
for (int i = 0; i < r; ++i)
++d;
if (d == to_delete.end()) {
ret_val = -EDOM;
goto out;
}
std::string oid(d->second);
to_delete.erase(d);
int ret = rados_remove(io_ctx, oid.c_str());
if (ret != 0) {
printf("%s: rados_remove(%s) failed with error %d\n",
get_id_str(), oid.c_str(), ret);
ret_val = ret;
goto out;
}
++removed;
if ((removed % 25) == 0) {
printf("%s: removed %d objects...\n", get_id_str(), removed);
}
if (removed == g_num_objects / 2) {
printf("%s: removed half of the objects\n", get_id_str());
modify_sem->post();
}
}
printf("%s: removed %d objects\n", get_id_str(), removed);
out:
rados_ioctx_destroy(io_ctx);
rados_shutdown(cl);
return ret_val;
}
private:
std::string m_pool_name;
};
class RadosAddObjectsR : public SysTestRunnable
{
public:
RadosAddObjectsR(int argc, const char **argv,
const std::string &pool_name,
const std::string &suffix)
: SysTestRunnable(argc, argv),
m_pool_name(pool_name),
m_suffix(suffix)
{
}
~RadosAddObjectsR() override
{
}
int run(void) override
{
int ret_val = 0;
rados_t cl;
RETURN1_IF_NONZERO(rados_create(&cl, NULL));
rados_conf_parse_argv(cl, m_argc, m_argv);
RETURN1_IF_NONZERO(rados_conf_read_file(cl, NULL));
rados_conf_parse_env(cl, NULL);
std::string log_name = SysTestSettings::inst().get_log_name(get_id_str());
if (!log_name.empty())
rados_conf_set(cl, "log_file", log_name.c_str());
RETURN1_IF_NONZERO(rados_connect(cl));
pool_setup_sem->wait();
pool_setup_sem->post();
rados_ioctx_t io_ctx;
rados_pool_create(cl, m_pool_name.c_str());
RETURN1_IF_NONZERO(rados_ioctx_create(cl, m_pool_name.c_str(), &io_ctx));
std::map <int, std::string> to_add;
for (int i = 0; i < g_num_objects; ++i) {
char oid[128];
snprintf(oid, sizeof(oid), "%d%s", i, m_suffix.c_str());
to_add[i] = oid;
}
int added = 0;
while (true) {
if (to_add.empty())
break;
int r = rand() % to_add.size();
std::map <int, std::string>::iterator d = to_add.begin();
for (int i = 0; i < r; ++i)
++d;
if (d == to_add.end()) {
ret_val = -EDOM;
goto out;
}
std::string oid(d->second);
to_add.erase(d);
std::string buf(StRadosCreatePool::get_random_buf(256));
int ret = rados_write(io_ctx, oid.c_str(), buf.c_str(), buf.size(), 0);
if (ret != 0) {
printf("%s: rados_write(%s) failed with error %d\n",
get_id_str(), oid.c_str(), ret);
ret_val = ret;
goto out;
}
++added;
if ((added % 25) == 0) {
printf("%s: added %d objects...\n", get_id_str(), added);
}
if (added == g_num_objects / 2) {
printf("%s: added half of the objects\n", get_id_str());
modify_sem->post();
}
}
printf("%s: added %d objects\n", get_id_str(), added);
out:
rados_ioctx_destroy(io_ctx);
rados_shutdown(cl);
return ret_val;
}
private:
std::string m_pool_name;
std::string m_suffix;
};
const char *get_id_str()
{
return "main";
}
int main(int argc, const char **argv)
{
const char *num_objects = getenv("NUM_OBJECTS");
const std::string pool = get_temp_pool_name(argv[0]);
if (num_objects) {
g_num_objects = atoi(num_objects);
if (g_num_objects == 0)
return 100;
}
RETURN1_IF_NONZERO(CrossProcessSem::create(0, &pool_setup_sem));
RETURN1_IF_NONZERO(CrossProcessSem::create(1, &modify_sem));
std::string error;
// Test 1... list objects
{
StRadosCreatePool r1(argc, argv, NULL, pool_setup_sem, NULL,
pool, g_num_objects, ".obj");
StRadosListObjects r2(argc, argv, pool, false, g_num_objects,
pool_setup_sem, modify_sem, NULL);
vector < SysTestRunnable* > vec;
vec.push_back(&r1);
vec.push_back(&r2);
error = SysTestRunnable::run_until_finished(vec);
if (!error.empty()) {
printf("got error: %s\n", error.c_str());
return EXIT_FAILURE;
}
}
// Test 2... list objects while they're being deleted
RETURN1_IF_NONZERO(pool_setup_sem->reinit(0));
RETURN1_IF_NONZERO(modify_sem->reinit(0));
{
StRadosCreatePool r1(argc, argv, NULL, pool_setup_sem, NULL,
pool, g_num_objects, ".obj");
StRadosListObjects r2(argc, argv, pool, false, g_num_objects / 2,
pool_setup_sem, modify_sem, NULL);
RadosDeleteObjectsR r3(argc, argv, pool);
vector < SysTestRunnable* > vec;
vec.push_back(&r1);
vec.push_back(&r2);
vec.push_back(&r3);
error = SysTestRunnable::run_until_finished(vec);
if (!error.empty()) {
printf("got error: %s\n", error.c_str());
return EXIT_FAILURE;
}
}
// Test 3... list objects while others are being added
RETURN1_IF_NONZERO(pool_setup_sem->reinit(0));
RETURN1_IF_NONZERO(modify_sem->reinit(0));
{
StRadosCreatePool r1(argc, argv, NULL, pool_setup_sem, NULL,
pool, g_num_objects, ".obj");
StRadosListObjects r2(argc, argv, pool, false, g_num_objects / 2,
pool_setup_sem, modify_sem, NULL);
RadosAddObjectsR r3(argc, argv, pool, ".obj2");
vector < SysTestRunnable* > vec;
vec.push_back(&r1);
vec.push_back(&r2);
vec.push_back(&r3);
error = SysTestRunnable::run_until_finished(vec);
if (!error.empty()) {
printf("got error: %s\n", error.c_str());
return EXIT_FAILURE;
}
}
// Test 4... list objects while others are being added and deleted
RETURN1_IF_NONZERO(pool_setup_sem->reinit(0));
RETURN1_IF_NONZERO(modify_sem->reinit(0));
{
StRadosCreatePool r1(argc, argv, NULL, pool_setup_sem, NULL,
pool, g_num_objects, ".obj");
StRadosListObjects r2(argc, argv, pool, false, g_num_objects / 2,
pool_setup_sem, modify_sem, NULL);
RadosAddObjectsR r3(argc, argv, pool, ".obj2");
RadosAddObjectsR r4(argc, argv, pool, ".obj3");
RadosDeleteObjectsR r5(argc, argv, pool);
vector < SysTestRunnable* > vec;
vec.push_back(&r1);
vec.push_back(&r2);
vec.push_back(&r3);
vec.push_back(&r4);
vec.push_back(&r5);
error = SysTestRunnable::run_until_finished(vec);
if (!error.empty()) {
printf("got error: %s\n", error.c_str());
return EXIT_FAILURE;
}
}
// Test 5... list objects while they are being modified
RETURN1_IF_NONZERO(pool_setup_sem->reinit(0));
RETURN1_IF_NONZERO(modify_sem->reinit(0));
{
StRadosCreatePool r1(argc, argv, NULL, pool_setup_sem, NULL,
pool, g_num_objects, ".obj");
StRadosListObjects r2(argc, argv, pool, false, g_num_objects / 2,
pool_setup_sem, modify_sem, NULL);
// AddObjects with the same 'suffix' as used in StRadosCreatePool
RadosAddObjectsR r3(argc, argv, pool, ".obj");
vector < SysTestRunnable* > vec;
vec.push_back(&r1);
vec.push_back(&r2);
vec.push_back(&r3);
error = SysTestRunnable::run_until_finished(vec);
if (!error.empty()) {
printf("got error: %s\n", error.c_str());
return EXIT_FAILURE;
}
}
rados_t cl;
rados_create(&cl, NULL);
rados_conf_parse_argv(cl, argc, argv);
rados_conf_parse_argv(cl, argc, argv);
rados_conf_read_file(cl, NULL);
rados_conf_parse_env(cl, NULL);
rados_connect(cl);
rados_pool_delete(cl, pool.c_str());
printf("******* SUCCESS **********\n");
return EXIT_SUCCESS;
}
| 9,720 | 26.853868 | 78 | cc |
null | ceph-main/src/test/system/rados_open_pools_parallel.cc |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "cross_process_sem.h"
#include "include/rados/librados.h"
#include "st_rados_create_pool.h"
#include "systest_runnable.h"
#include "systest_settings.h"
#include <errno.h>
#include <pthread.h>
#include <semaphore.h>
#include <sstream>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <time.h>
#include <vector>
using std::ostringstream;
using std::string;
using std::vector;
/*
* rados_open_pools_parallel
*
* This tests creating a pool in one Runnable, and then opening an io context
* based on that pool in another.
*
* EXPECT: * can't create the same pool twice
* * one Runnable can use the pool after the other one creates it
*
* DO NOT EXPECT * hangs, crashes
*/
class StRadosOpenPool : public SysTestRunnable
{
public:
StRadosOpenPool(int argc, const char **argv,
CrossProcessSem *pool_setup_sem,
CrossProcessSem *open_pool_sem,
const std::string& pool_name)
: SysTestRunnable(argc, argv),
m_pool_setup_sem(pool_setup_sem),
m_open_pool_sem(open_pool_sem),
m_pool_name(pool_name)
{
}
~StRadosOpenPool() override
{
}
int run() override
{
rados_t cl;
RETURN1_IF_NONZERO(rados_create(&cl, NULL));
rados_conf_parse_argv(cl, m_argc, m_argv);
std::string log_name = SysTestSettings::inst().get_log_name(get_id_str());
if (!log_name.empty())
rados_conf_set(cl, "log_file", log_name.c_str());
RETURN1_IF_NONZERO(rados_conf_read_file(cl, NULL));
rados_conf_parse_env(cl, NULL);
RETURN1_IF_NONZERO(rados_connect(cl));
if (m_pool_setup_sem)
m_pool_setup_sem->wait();
printf("%s: rados_pool_create.\n", get_id_str());
rados_pool_create(cl, m_pool_name.c_str());
rados_ioctx_t io_ctx;
printf("%s: rados_ioctx_create.\n", get_id_str());
RETURN1_IF_NONZERO(rados_ioctx_create(cl, m_pool_name.c_str(), &io_ctx));
if (m_open_pool_sem)
m_open_pool_sem->post();
rados_ioctx_destroy(io_ctx);
rados_pool_delete(cl, m_pool_name.c_str());
rados_shutdown(cl);
return 0;
}
private:
CrossProcessSem *m_pool_setup_sem;
CrossProcessSem *m_open_pool_sem;
std::string m_pool_name;
};
const char *get_id_str()
{
return "main";
}
int main(int argc, const char **argv)
{
const std::string pool = get_temp_pool_name(argv[0]);
// first test: create a pool, shut down the client, access that
// pool in a different process.
CrossProcessSem *pool_setup_sem = NULL;
RETURN1_IF_NONZERO(CrossProcessSem::create(0, &pool_setup_sem));
StRadosCreatePool r1(argc, argv, NULL, pool_setup_sem, NULL,
pool, 50, ".obj");
StRadosOpenPool r2(argc, argv, pool_setup_sem, NULL, pool);
vector < SysTestRunnable* > vec;
vec.push_back(&r1);
vec.push_back(&r2);
std::string error = SysTestRunnable::run_until_finished(vec);
if (!error.empty()) {
printf("test1: got error: %s\n", error.c_str());
return EXIT_FAILURE;
}
// second test: create a pool, access that
// pool in a different process, THEN shut down the first client.
CrossProcessSem *pool_setup_sem2 = NULL;
RETURN1_IF_NONZERO(CrossProcessSem::create(0, &pool_setup_sem2));
CrossProcessSem *open_pool_sem2 = NULL;
RETURN1_IF_NONZERO(CrossProcessSem::create(0, &open_pool_sem2));
StRadosCreatePool r3(argc, argv, NULL, pool_setup_sem2, open_pool_sem2,
pool, 50, ".obj");
StRadosOpenPool r4(argc, argv, pool_setup_sem2, open_pool_sem2, pool);
vector < SysTestRunnable* > vec2;
vec2.push_back(&r3);
vec2.push_back(&r4);
error = SysTestRunnable::run_until_finished(vec2);
if (!error.empty()) {
printf("test2: got error: %s\n", error.c_str());
return EXIT_FAILURE;
}
printf("******* SUCCESS **********\n");
return EXIT_SUCCESS;
}
| 4,223 | 28.333333 | 84 | cc |
null | ceph-main/src/test/system/rados_watch_notify.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "cross_process_sem.h"
#include "include/rados/librados.h"
#include "st_rados_create_pool.h"
#include "st_rados_delete_pool.h"
#include "st_rados_delete_objs.h"
#include "st_rados_watch.h"
#include "st_rados_notify.h"
#include "systest_runnable.h"
#include "systest_settings.h"
#include "include/stringify.h"
#include <errno.h>
#include <pthread.h>
#include <semaphore.h>
#include <sstream>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <time.h>
#include <vector>
#include <sys/types.h>
#include <unistd.h>
using std::ostringstream;
using std::string;
using std::vector;
/*
* rados_watch_notify
*
* This tests watch/notify with pool and object deletion.
*
* EXPECT: * notifies to a deleted object or pool are not received
* * notifies to existing objects are received
*
* DO NOT EXPECT * hangs, crashes
*/
const char *get_id_str()
{
return "main";
}
int main(int argc, const char **argv)
{
std::string pool = "foo." + stringify(getpid());
CrossProcessSem *setup_sem = NULL;
RETURN1_IF_NONZERO(CrossProcessSem::create(0, &setup_sem));
CrossProcessSem *watch_sem = NULL;
RETURN1_IF_NONZERO(CrossProcessSem::create(0, &watch_sem));
CrossProcessSem *notify_sem = NULL;
RETURN1_IF_NONZERO(CrossProcessSem::create(0, ¬ify_sem));
// create a pool and an object, watch the object, notify.
{
StRadosCreatePool r1(argc, argv, NULL, setup_sem, NULL, pool, 1, ".obj");
StRadosWatch r2(argc, argv, setup_sem, watch_sem, notify_sem,
1, 0, pool, "0.obj");
StRadosNotify r3(argc, argv, setup_sem, watch_sem, notify_sem,
0, pool, "0.obj");
StRadosDeletePool r4(argc, argv, notify_sem, NULL, pool);
vector<SysTestRunnable*> vec;
vec.push_back(&r1);
vec.push_back(&r2);
vec.push_back(&r3);
vec.push_back(&r4);
std::string error = SysTestRunnable::run_until_finished(vec);
if (!error.empty()) {
printf("test1: got error: %s\n", error.c_str());
return EXIT_FAILURE;
}
}
RETURN1_IF_NONZERO(setup_sem->reinit(0));
RETURN1_IF_NONZERO(watch_sem->reinit(0));
RETURN1_IF_NONZERO(notify_sem->reinit(0));
// create a pool and an object, watch a non-existent object,
// notify non-existent object.watch
pool += ".";
{
StRadosCreatePool r1(argc, argv, NULL, setup_sem, NULL, pool, 0, ".obj");
StRadosWatch r2(argc, argv, setup_sem, watch_sem, notify_sem,
0, -ENOENT, pool, "0.obj");
StRadosNotify r3(argc, argv, setup_sem, watch_sem, notify_sem,
-ENOENT, pool, "0.obj");
StRadosDeletePool r4(argc, argv, notify_sem, NULL, pool);
vector<SysTestRunnable*> vec;
vec.push_back(&r1);
vec.push_back(&r2);
vec.push_back(&r3);
vec.push_back(&r4);
std::string error = SysTestRunnable::run_until_finished(vec);
if (!error.empty()) {
printf("test2: got error: %s\n", error.c_str());
return EXIT_FAILURE;
}
}
RETURN1_IF_NONZERO(setup_sem->reinit(0));
RETURN1_IF_NONZERO(watch_sem->reinit(0));
RETURN1_IF_NONZERO(notify_sem->reinit(0));
CrossProcessSem *finished_notifies_sem = NULL;
RETURN1_IF_NONZERO(CrossProcessSem::create(0, &finished_notifies_sem));
CrossProcessSem *deleted_sem = NULL;
RETURN1_IF_NONZERO(CrossProcessSem::create(0, &deleted_sem));
CrossProcessSem *second_pool_sem = NULL;
RETURN1_IF_NONZERO(CrossProcessSem::create(0, &second_pool_sem));
// create a pool and an object, watch the object, notify,
// then delete the pool.
// Create a new pool and write to it to make the osd get the updated map,
// then try notifying on the deleted pool.
pool += ".";
{
StRadosCreatePool r1(argc, argv, NULL, setup_sem, NULL, pool, 1, ".obj");
StRadosWatch r2(argc, argv, setup_sem, watch_sem, finished_notifies_sem,
1, 0, pool, "0.obj");
StRadosNotify r3(argc, argv, setup_sem, watch_sem, notify_sem,
0, pool, "0.obj");
StRadosDeletePool r4(argc, argv, notify_sem, deleted_sem, pool);
StRadosCreatePool r5(argc, argv, deleted_sem, second_pool_sem, NULL,
"bar", 1, ".obj");
StRadosNotify r6(argc, argv, second_pool_sem, NULL, finished_notifies_sem,
0, "bar", "0.obj");
StRadosDeletePool r7(argc, argv, finished_notifies_sem, NULL, "bar");
vector<SysTestRunnable*> vec;
vec.push_back(&r1);
vec.push_back(&r2);
vec.push_back(&r3);
vec.push_back(&r4);
vec.push_back(&r5);
vec.push_back(&r6);
vec.push_back(&r7);
std::string error = SysTestRunnable::run_until_finished(vec);
if (!error.empty()) {
printf("test3: got error: %s\n", error.c_str());
return EXIT_FAILURE;
}
}
RETURN1_IF_NONZERO(setup_sem->reinit(0));
RETURN1_IF_NONZERO(watch_sem->reinit(0));
RETURN1_IF_NONZERO(notify_sem->reinit(0));
RETURN1_IF_NONZERO(finished_notifies_sem->reinit(0));
RETURN1_IF_NONZERO(deleted_sem->reinit(0));
// create a pool and an object, watch the object, notify,
// then delete the object, notify
// this test is enabled for the resolution of bug #2339.
pool += ".";
{
StRadosCreatePool r1(argc, argv, NULL, setup_sem, NULL, pool, 1, ".obj");
StRadosWatch r2(argc, argv, setup_sem, watch_sem, finished_notifies_sem,
1, 0, pool, "0.obj");
StRadosNotify r3(argc, argv, setup_sem, watch_sem, notify_sem,
0, pool, "0.obj");
StRadosDeleteObjs r4(argc, argv, notify_sem, deleted_sem, 1, pool, ".obj");
StRadosNotify r5(argc, argv, setup_sem, deleted_sem, finished_notifies_sem,
-ENOENT, pool, "0.obj");
StRadosDeletePool r6(argc, argv, finished_notifies_sem, NULL, pool);
vector<SysTestRunnable*> vec;
vec.push_back(&r1);
vec.push_back(&r2);
vec.push_back(&r3);
vec.push_back(&r4);
vec.push_back(&r5);
vec.push_back(&r6);
std::string error = SysTestRunnable::run_until_finished(vec);
if (!error.empty()) {
printf("test4: got error: %s\n", error.c_str());
return EXIT_FAILURE;
}
}
printf("******* SUCCESS **********\n");
return EXIT_SUCCESS;
}
| 6,442 | 31.872449 | 79 | cc |
null | ceph-main/src/test/system/rerun.sh | #!/usr/bin/env bash
# Run $EXE repeatedly (default 10 iterations; override with $ITERATIONS),
# collecting per-iteration logs under a fresh temporary directory.
# On the first failing iteration the logs are preserved and their location
# printed; on full success the temporary directory is removed.
[ -z "$ITERATIONS" ] && ITERATIONS=10
TMPDIR=`mktemp -d -t rerun_logs.XXXXXXXXXX` || exit 1
rm -rf $TMPDIR/logs
mkdir $TMPDIR/logs
for i in `seq 1 $ITERATIONS`; do
    echo "********************* iteration $i *********************"
    LOG_FILE_BASE=$TMPDIR/logs $EXE "$@"
    if [ $? -ne 0 ]; then
        # The original called an undefined "die" helper here, which would
        # itself fail; report the error and exit non-zero explicitly.
        echo "failed! logs are in $TMPDIR/logs" >&2
        exit 1
    fi
done
echo "********************* success *********************"
rm -rf $TMPDIR
exit 0
null | ceph-main/src/test/system/st_rados_create_pool.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "cross_process_sem.h"
#include "include/ceph_assert.h"
#include "include/rados/librados.h"
#include "st_rados_create_pool.h"
#include "systest_runnable.h"
#include "systest_settings.h"
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sstream>
#include <string>
using std::ostringstream;
// Produce throwaway object payload: a run of '.' characters whose
// length is pseudo-random in [0, sz).
std::string StRadosCreatePool::
get_random_buf(int sz)
{
  const int len = rand() % sz;    // yep, it's not very random
  return std::string(len, '.');
}
// Construct the pool-creating runnable.  Any of the semaphores may be
// NULL, in which case that synchronization step is skipped in run().
StRadosCreatePool::
StRadosCreatePool(int argc, const char **argv,
		  CrossProcessSem *setup_sem,
		  CrossProcessSem *pool_setup_sem,
		  CrossProcessSem *close_create_pool,
		  const std::string &pool_name,
		  int num_objects,
		  const std::string &suffix)
  : SysTestRunnable(argc, argv),
    m_setup_sem(setup_sem),
    m_pool_setup_sem(pool_setup_sem),
    m_close_create_pool(close_create_pool),
    m_pool_name(pool_name),
    m_num_objects(num_objects),
    m_suffix(suffix)
{
}
// Nothing to release; argv copies are freed by the base class.
StRadosCreatePool::
~StRadosCreatePool()
{
}
// Body of the pool-creating runnable.
//
// Optionally waits on m_setup_sem (re-posting it for siblings), creates
// m_pool_name, writes m_num_objects objects named "<i><suffix>" with
// small random payloads, then posts m_pool_setup_sem and blocks on
// m_close_create_pool before tearing the connection down.
//
// Returns 0 on success, 1 on an early librados setup failure (via
// RETURN1_IF_NONZERO), or the rados_write() error code.
int StRadosCreatePool::
run()
{
  int ret_val = 0;
  rados_t cl;
  RETURN1_IF_NONZERO(rados_create(&cl, NULL));
  // The original called rados_conf_parse_argv() twice in a row; once is
  // sufficient (the call is idempotent in effect, but the duplicate was
  // clearly accidental).
  rados_conf_parse_argv(cl, m_argc, m_argv);
  RETURN1_IF_NONZERO(rados_conf_read_file(cl, NULL));
  std::string log_name = SysTestSettings::inst().get_log_name(get_id_str());
  if (!log_name.empty())
    rados_conf_set(cl, "log_file", log_name.c_str());
  rados_conf_parse_env(cl, NULL);
  if (m_setup_sem) {
    m_setup_sem->wait();
    m_setup_sem->post();
  }
  RETURN1_IF_NONZERO(rados_connect(cl));
  printf("%s: creating pool %s\n", get_id_str(), m_pool_name.c_str());
  rados_pool_create(cl, m_pool_name.c_str());
  rados_ioctx_t io_ctx;
  RETURN1_IF_NONZERO(rados_ioctx_create(cl, m_pool_name.c_str(), &io_ctx));
  for (int i = 0; i < m_num_objects; ++i) {
    char oid[128];
    snprintf(oid, sizeof(oid), "%d%s", i, m_suffix.c_str());
    std::string buf(get_random_buf(256));
    int ret = rados_write(io_ctx, oid, buf.c_str(), buf.size(), 0);
    if (ret != 0) {
      printf("%s: rados_write(%s) failed with error: %d\n",
	     get_id_str(), oid, ret);
      ret_val = ret;
      goto out;
    }
    // Progress output every 25 objects and for the final one.
    if (((i % 25) == 0) || (i == m_num_objects - 1)) {
      printf("%s: created object %d...\n", get_id_str(), i);
    }
  }
out:
  printf("%s: finishing.\n", get_id_str());
  if (m_pool_setup_sem)
    m_pool_setup_sem->post();
  if (m_close_create_pool)
    m_close_create_pool->wait();
  rados_ioctx_destroy(io_ctx);
  rados_shutdown(cl);
  return ret_val;
}
// Build a pool name of the form "<prefix>.<hostname>-<pid>" so that
// concurrent test runs on different hosts or in different processes do
// not collide on pool names.
std::string get_temp_pool_name(const char* prefix)
{
  ceph_assert(prefix);
  char hostname[80];
  const int rc = gethostname(hostname, sizeof(hostname));
  ceph_assert(!rc);
  std::ostringstream name;
  name << prefix << "." << hostname << "-" << getpid();
  return name.str();
}
| 3,445 | 24.909774 | 76 | cc |
null | ceph-main/src/test/system/st_rados_create_pool.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef TEST_SYSTEM_ST_RADOS_CREATE_POOL_H
#define TEST_SYSTEM_ST_RADOS_CREATE_POOL_H
#include "systest_runnable.h"
class CrossProcessSem;
/*
* st_rados_create_pool
*
* Waits, then posts to setup_sem.
* Creates a pool and populates it with some objects.
* Then, calls pool_setup_sem->post()
*/
class StRadosCreatePool : public SysTestRunnable
{
public:
  // Random-length '.' payload in [0, sz); used as object contents.
  static std::string get_random_buf(int sz);
  // Any semaphore may be NULL; a NULL semaphore step is skipped.
  StRadosCreatePool(int argc, const char **argv,
		    CrossProcessSem *setup_sem,
		    CrossProcessSem *pool_setup_sem,
		    CrossProcessSem *close_create_pool_sem,
		    const std::string &pool_name,
		    int num_objects,
		    const std::string &suffix);
  ~StRadosCreatePool() override;
  int run() override;
private:
  CrossProcessSem *m_setup_sem;         // waited on/re-posted before connect
  CrossProcessSem *m_pool_setup_sem;    // posted once objects are written
  CrossProcessSem *m_close_create_pool; // waited on before shutdown
  std::string m_pool_name;              // pool to create
  int m_num_objects;                    // number of objects to write
  std::string m_suffix;                 // object names are "<i><suffix>"
};
std::string get_temp_pool_name(const char* prefix);
#endif
| 1,376 | 24.5 | 70 | h |
null | ceph-main/src/test/system/st_rados_delete_objs.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "cross_process_sem.h"
#include "include/rados/librados.h"
#include "st_rados_delete_objs.h"
#include "systest_runnable.h"
#include "systest_settings.h"
#include <errno.h>
// Construct the object-deleting runnable; deleted_sem may be NULL, in
// which case no completion signal is posted.
StRadosDeleteObjs::StRadosDeleteObjs(int argc, const char **argv,
				     CrossProcessSem *setup_sem,
				     CrossProcessSem *deleted_sem,
				     int num_objs,
				     const std::string &pool_name,
				     const std::string &suffix)
  : SysTestRunnable(argc, argv),
    m_setup_sem(setup_sem),
    m_deleted_sem(deleted_sem),
    m_num_objs(num_objs),
    m_pool_name(pool_name),
    m_suffix(suffix)
{
}
// Nothing to release; argv copies are freed by the base class.
StRadosDeleteObjs::~StRadosDeleteObjs()
{
}
// Delete m_num_objs objects named "<i><suffix>" from m_pool_name.
// Waits on m_setup_sem (re-posting it for sibling runnables) before
// touching the pool, and posts m_deleted_sem once all objects are gone.
// Returns 0 on success, 1 on any librados failure (RETURN1_IF_NONZERO).
//
// NOTE(review): an error return leaks cl/io_ctx; the test process exits
// immediately afterwards, so this is tolerated here.
int StRadosDeleteObjs::run()
{
  rados_t cl;
  RETURN1_IF_NONZERO(rados_create(&cl, NULL));
  rados_conf_parse_argv(cl, m_argc, m_argv);
  RETURN1_IF_NONZERO(rados_conf_read_file(cl, NULL));
  rados_conf_parse_env(cl, NULL);
  RETURN1_IF_NONZERO(rados_connect(cl));
  m_setup_sem->wait();
  m_setup_sem->post();
  rados_ioctx_t io_ctx;
  // Create is a no-op if the pool already exists.
  rados_pool_create(cl, m_pool_name.c_str());
  RETURN1_IF_NONZERO(rados_ioctx_create(cl, m_pool_name.c_str(), &io_ctx));
  for (int i = 0; i < m_num_objs; ++i) {
    char oid[128];
    snprintf(oid, sizeof(oid), "%d%s", i, m_suffix.c_str());
    RETURN1_IF_NONZERO(rados_remove(io_ctx, oid));
    // Progress output every 25 objects and for the final one.
    if (((i % 25) == 0) || (i == m_num_objs - 1)) {
      printf("%s: deleted object %d...\n", get_id_str(), i);
    }
  }
  rados_ioctx_destroy(io_ctx);
  if (m_deleted_sem)
    m_deleted_sem->post();
  rados_shutdown(cl);
  return 0;
}
| 1,910 | 25.541667 | 75 | cc |
null | ceph-main/src/test/system/st_rados_delete_objs.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef TEST_SYSTEM_ST_RADOS_DELETE_OBJS_H
#define TEST_SYSTEM_ST_RADOS_DELETE_OBJS_H
#include "systest_runnable.h"
class CrossProcessSem;
/*
* st_rados_delete_objs
*
* Waits on setup_sem, posts to it,
* deletes num_objs objects from the pool,
* and posts to deleted_sem.
*/
class StRadosDeleteObjs : public SysTestRunnable
{
public:
  // deleted_sem may be NULL; then no completion signal is posted.
  StRadosDeleteObjs(int argc, const char **argv,
		    CrossProcessSem *setup_sem,
		    CrossProcessSem *deleted_sem,
		    int num_objs,
		    const std::string &pool_name,
		    const std::string &suffix);
  ~StRadosDeleteObjs() override;
  int run() override;
private:
  CrossProcessSem *m_setup_sem;   // waited on/re-posted before deleting
  CrossProcessSem *m_deleted_sem; // posted after the last object is removed
  int m_num_objs;                 // number of objects to delete
  std::string m_pool_name;        // pool holding the objects
  std::string m_suffix;           // object names are "<i><suffix>"
};
#endif
| 1,161 | 22.714286 | 70 | h |
null | ceph-main/src/test/system/st_rados_delete_pool.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "cross_process_sem.h"
#include "include/rados/librados.h"
#include "st_rados_delete_pool.h"
#include "systest_runnable.h"
#include "systest_settings.h"
#include <errno.h>
// Construct the pool-deleting runnable; delete_pool_sem may be NULL,
// in which case no completion signal is posted.
StRadosDeletePool::StRadosDeletePool(int argc, const char **argv,
				     CrossProcessSem *pool_setup_sem,
				     CrossProcessSem *delete_pool_sem,
				     const std::string &pool_name)
  : SysTestRunnable(argc, argv),
    m_pool_setup_sem(pool_setup_sem),
    m_delete_pool_sem(delete_pool_sem),
    m_pool_name(pool_name)
{
}
// Nothing to release; argv copies are freed by the base class.
StRadosDeletePool::~StRadosDeletePool()
{
}
// Delete m_pool_name after the setup phase has completed.
// Waits on m_pool_setup_sem (re-posting it for siblings), briefly opens
// and closes an ioctx on the pool, deletes the pool, then posts
// m_delete_pool_sem.  Returns 0 on success, 1 on a librados failure.
int StRadosDeletePool::run()
{
  rados_t cl;
  RETURN1_IF_NONZERO(rados_create(&cl, NULL));
  rados_conf_parse_argv(cl, m_argc, m_argv);
  RETURN1_IF_NONZERO(rados_conf_read_file(cl, NULL));
  rados_conf_parse_env(cl, NULL);
  RETURN1_IF_NONZERO(rados_connect(cl));
  m_pool_setup_sem->wait();
  m_pool_setup_sem->post();
  rados_ioctx_t io_ctx;
  // Create is a no-op if the pool already exists; the open/close pair
  // confirms the pool is reachable before we delete it.
  rados_pool_create(cl, m_pool_name.c_str());
  RETURN1_IF_NONZERO(rados_ioctx_create(cl, m_pool_name.c_str(), &io_ctx));
  rados_ioctx_destroy(io_ctx);
  printf("%s: deleting pool %s\n", get_id_str(), m_pool_name.c_str());
  RETURN1_IF_NONZERO(rados_pool_delete(cl, m_pool_name.c_str()));
  if (m_delete_pool_sem)
    m_delete_pool_sem->post();
  rados_shutdown(cl);
  return 0;
}
| 1,696 | 27.283333 | 75 | cc |
null | ceph-main/src/test/system/st_rados_delete_pool.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef TEST_SYSTEM_ST_RADOS_DELETE_POOL_H
#define TEST_SYSTEM_ST_RADOS_DELETE_POOL_H
#include "systest_runnable.h"
class CrossProcessSem;
/*
* st_rados_delete_pool
*
* Waits on pool_setup_sem, posts to it,
* deletes a pool, and posts to delete_pool_sem.
*/
class StRadosDeletePool : public SysTestRunnable
{
public:
  // delete_pool_sem may be NULL; then no completion signal is posted.
  StRadosDeletePool(int argc, const char **argv,
		    CrossProcessSem *pool_setup_sem,
		    CrossProcessSem *delete_pool_sem,
		    const std::string &pool_name);
  ~StRadosDeletePool() override;
  int run() override;
private:
  CrossProcessSem *m_pool_setup_sem;  // waited on/re-posted before deletion
  CrossProcessSem *m_delete_pool_sem; // posted once the pool is deleted
  std::string m_pool_name;            // pool to delete
};
};
#endif
| 1,066 | 23.25 | 70 | h |
null | ceph-main/src/test/system/st_rados_list_objects.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "cross_process_sem.h"
#include "include/rados/librados.h"
#include "st_rados_list_objects.h"
#include "systest_runnable.h"
#include "systest_settings.h"
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <sstream>
#include <string>
using std::ostringstream;
// Construct the object-listing runnable.  The midway semaphores let the
// test pause the listing after midway_cnt objects; either may be NULL.
StRadosListObjects::
StRadosListObjects(int argc, const char **argv,
		   const std::string &pool_name,
		   bool accept_list_errors,
		   int midway_cnt,
		   CrossProcessSem *pool_setup_sem,
		   CrossProcessSem *midway_sem_wait,
		   CrossProcessSem *midway_sem_post)
  : SysTestRunnable(argc, argv),
    m_pool_name(pool_name),
    m_accept_list_errors(accept_list_errors),
    m_midway_cnt(midway_cnt),
    m_pool_setup_sem(pool_setup_sem),
    m_midway_sem_wait(midway_sem_wait),
    m_midway_sem_post(midway_sem_post)
{
}
// Nothing to release; argv copies are freed by the base class.
StRadosListObjects::
~StRadosListObjects()
{
}
// List all objects in m_pool_name, optionally pausing after
// m_midway_cnt objects (wait on m_midway_sem_wait, then post
// m_midway_sem_post) so another runnable can mutate the pool
// mid-listing.  When m_accept_list_errors is set, listing errors are
// tolerated — but only once the midway point has been passed (or no
// midway post semaphore was supplied); earlier errors still fail.
// Returns 0 on success or the librados error code.
int StRadosListObjects::
run()
{
  int retval = 0;
  rados_t cl;
  RETURN1_IF_NONZERO(rados_create(&cl, NULL));
  rados_conf_parse_argv(cl, m_argc, m_argv);
  RETURN1_IF_NONZERO(rados_conf_read_file(cl, NULL));
  rados_conf_parse_env(cl, NULL);
  RETURN1_IF_NONZERO(rados_connect(cl));
  m_pool_setup_sem->wait();
  m_pool_setup_sem->post();
  rados_ioctx_t io_ctx;
  // Create is a no-op if the pool already exists.
  rados_pool_create(cl, m_pool_name.c_str());
  RETURN1_IF_NONZERO(rados_ioctx_create(cl, m_pool_name.c_str(), &io_ctx));
  int saw = 0;
  const char *obj_name;
  rados_list_ctx_t h;
  printf("%s: listing objects.\n", get_id_str());
  RETURN1_IF_NONZERO(rados_nobjects_list_open(io_ctx, &h));
  while (true) {
    int ret = rados_nobjects_list_next(h, &obj_name, NULL, NULL);
    if (ret == -ENOENT) {
      // -ENOENT signals end-of-listing.
      break;
    }
    else if (ret != 0) {
      // Tolerated error: only after the midway rendezvous (saw >
      // m_midway_cnt) or when no midway post semaphore exists at all.
      if (m_accept_list_errors && (!m_midway_sem_post || saw > m_midway_cnt))
	break;
      printf("%s: rados_objects_list_next error: %d\n", get_id_str(), ret);
      retval = ret;
      goto out;
    }
    if ((saw % 25) == 0) {
      printf("%s: listed object %d...\n", get_id_str(), saw);
    }
    ++saw;
    // Midway rendezvous: pause here until the peer lets us continue.
    if (saw == m_midway_cnt) {
      if (m_midway_sem_wait)
	m_midway_sem_wait->wait();
      if (m_midway_sem_post)
	m_midway_sem_post->post();
    }
  }
  printf("%s: saw %d objects\n", get_id_str(), saw);
out:
  rados_nobjects_list_close(h);
  rados_ioctx_destroy(io_ctx);
  rados_shutdown(cl);
  return retval;
}
| 2,727 | 24.259259 | 77 | cc |
null | ceph-main/src/test/system/st_rados_list_objects.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef TEST_SYSTEM_ST_RADOS_LIST_OBJECTS_H
#define TEST_SYSTEM_ST_RADOS_LIST_OBJECTS_H
#include "systest_runnable.h"
class CrossProcessSem;
/*
* st_rados_list_objects
*
* 1. calls pool_setup_sem->wait()
* 2. calls pool_setup_sem->post()
* 3. list some objects
* 4. modify_sem->wait()
* 5. list some objects
*/
class StRadosListObjects : public SysTestRunnable
{
public:
  static std::string get_random_buf(int sz);
  // midway semaphores may be NULL; they pause the listing after
  // midway_cnt objects so a peer can mutate the pool mid-listing.
  StRadosListObjects(int argc, const char **argv,
		     const std::string &pool_name,
		     bool accept_list_errors,
		     int midway_cnt,
		     CrossProcessSem *pool_setup_sem,
		     CrossProcessSem *midway_sem_wait,
		     CrossProcessSem *midway_sem_post);
  ~StRadosListObjects() override;
  int run() override;
private:
  std::string m_pool_name;            // pool to list
  bool m_accept_list_errors;          // tolerate listing errors past midway
  int m_midway_cnt;                   // pause after this many objects
  CrossProcessSem *m_pool_setup_sem;  // waited on/re-posted before listing
  CrossProcessSem *m_midway_sem_wait; // waited on at the midway point
  CrossProcessSem *m_midway_sem_post; // posted at the midway point
};
#endif
| 1,356 | 24.12963 | 70 | h |
null | ceph-main/src/test/system/st_rados_notify.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef TEST_SYSTEM_ST_RADOS_NOTIFY_H
#define TEST_SYSTEM_ST_RADOS_NOTIFY_H
#include "systest_runnable.h"
class CrossProcessSem;
/*
* st_rados_notify
*
* 1. waits on and then posts to setup_sem
* 2. connects and opens the pool
* 3. waits on and then posts to notify_sem
* 4. notifies on the object
* 5. posts to notified_sem
*/
class StRadosNotify : public SysTestRunnable
{
public:
  // notify_retcode is the return code the notify call is expected to
  // produce (e.g. 0 or -ENOENT for a deleted object).
  StRadosNotify(int argc, const char **argv,
		CrossProcessSem *setup_sem,
		CrossProcessSem *notify_sem,
		CrossProcessSem *notified_sem,
		int notify_retcode,
		const std::string &pool_name,
		const std::string &obj_name);
  ~StRadosNotify() override;
  int run() override;
private:
  CrossProcessSem *m_setup_sem;    // waited on/re-posted before connecting
  CrossProcessSem *m_notify_sem;   // waited on/re-posted before notifying
  CrossProcessSem *m_notified_sem; // posted after the notify completes
  int m_notify_retcode;            // expected notify return code
  std::string m_pool_name;         // pool holding the object
  std::string m_obj_name;          // object to notify on
};
#endif
| 1,266 | 22.90566 | 70 | h |
null | ceph-main/src/test/system/st_rados_watch.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef TEST_SYSTEM_ST_RADOS_WATCH_H
#define TEST_SYSTEM_ST_RADOS_WATCH_H
#include "systest_runnable.h"
class CrossProcessSem;
/*
* st_rados_watch
*
* 1. waits on setup_sem
* 2. posts to setup_sem
* 3. watches an object
* 4. posts to watch_sem
* 5. waits on notify_sem
* 6. posts to notify_sem
* 7. checks that the correct number of notifies were received
*/
class StRadosWatch : public SysTestRunnable
{
public:
  // num_notifies is the number of notifies this watcher expects to
  // receive; watch_retcode is the expected return of the watch call.
  StRadosWatch(int argc, const char **argv,
	       CrossProcessSem *setup_sem,
	       CrossProcessSem *watch_sem,
	       CrossProcessSem *notify_sem,
	       int num_notifies,
	       int watch_retcode,
	       const std::string &pool_name,
	       const std::string &obj_name);
  ~StRadosWatch() override;
  int run() override;
private:
  CrossProcessSem *m_setup_sem;  // waited on/re-posted before watching
  CrossProcessSem *m_watch_sem;  // posted once the watch is established
  CrossProcessSem *m_notify_sem; // waited on/re-posted after notifies
  int m_num_notifies;            // expected notify count
  int m_watch_retcode;           // expected watch return code
  std::string m_pool_name;       // pool holding the object
  std::string m_obj_name;        // object to watch
};
#endif
| 1,372 | 23.087719 | 70 | h |
null | ceph-main/src/test/system/systest_runnable.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/compat.h"
#include "common/errno.h"
#include "systest_runnable.h"
#include "systest_settings.h"
#include <errno.h>
#include <pthread.h>
#include <sstream>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#ifndef _WIN32
#include <sys/syscall.h>
#include <sys/wait.h>
#endif
#include <sys/types.h>
#include <unistd.h>
#include <atomic>
#include <limits>
#include <vector>
using std::ostringstream;
using std::string;
// Return an id for the calling thread, using the platform-appropriate
// primitive: gettid(2) via syscall on Linux, the Win32 thread id on
// Windows, and pthread_getthreadid_np() elsewhere (BSDs).
static pid_t do_gettid(void)
{
#if defined(__linux__)
  return static_cast < pid_t >(syscall(SYS_gettid));
#elif defined(_WIN32)
  return static_cast < pid_t >(GetCurrentThreadId());
#else
  return static_cast < pid_t >(pthread_getthreadid_np());
#endif
}
// Monotonic counter handing out a unique id to each runnable.
std::atomic<unsigned> m_highest_id = { 0 };
// Construct a runnable: assign the next unique id, build its log-prefix
// string, and take a private copy of argv (see set_argv).
SysTestRunnable::
SysTestRunnable(int argc, const char **argv)
  : m_argc(0),
    m_argv(NULL),
    m_argv_orig(NULL)
{
  m_started = false;
  m_id = ++m_highest_id;
  memset(&m_pthread, 0, sizeof(m_pthread));
  update_id_str(false);
  set_argv(argc, argv);
}
// Release the private argv copies (set_argv(0, NULL) frees them).
SysTestRunnable::
~SysTestRunnable()
{
  set_argv(0, NULL);
}
// Return the human-readable identifier built by update_id_str()
// ("thread_N..." / "process_N...").  The pointer stays valid for the
// lifetime of this object.
const char* SysTestRunnable::
get_id_str(void) const
{
  return m_id_str;
}
// Launch the runnable, either as a pthread or as a forked child process
// depending on SysTestSettings::use_threads().  Returns 0 on success,
// -EDOM if already started, a pthread_create error code, or a negative
// value on prefork failure.  Process mode is unavailable on Windows.
int SysTestRunnable::
start()
{
  if (m_started) {
    return -EDOM;
  }
  int ret;
  bool use_threads = SysTestSettings::inst().use_threads();
  if (use_threads) {
    ret = pthread_create(&m_pthread, NULL, systest_runnable_pthread_helper,
			 static_cast<void*>(this));
    if (ret)
      return ret;
    m_started = true;
  } else {
#ifdef _WIN32
    printf("Using separate processes is not supported on Windows.\n");
    return -1;
#else
    std::string err_msg;
    ret = preforker.prefork(err_msg);
    if (ret < 0) {
      printf("prefork failed: %s\n", err_msg.c_str());
      return ret;
    }
    if (preforker.is_child()) {
      // Child: run the body and exit with its return code; this call
      // does not return.
      m_started = true;
      void *retptr = systest_runnable_pthread_helper(static_cast<void*>(this));
      preforker.exit((int)(uintptr_t)retptr);
    } else {
      // Parent: the child carries the work; just mark as started.
      m_started = true;
    }
#endif
  }
  return 0;
}
// Wait for the runnable to finish.  Returns "" on success or a
// human-readable error description: join failures, a non-zero return
// from run() (thread mode), or the child's wait status (process mode).
std::string SysTestRunnable::
join()
{
  if (!m_started) {
    return "SysTestRunnable was never started.";
  }
  int ret;
  bool use_threads = SysTestSettings::inst().use_threads();
  if (use_threads) {
    void *ptrretval;
    ret = pthread_join(m_pthread, &ptrretval);
    if (ret) {
      ostringstream oss;
      oss << "pthread_join failed with error " << ret;
      return oss.str();
    }
    // run()'s int return code travels through the thread's void*.
    int retval = (int)(uintptr_t)ptrretval;
    if (retval != 0) {
      ostringstream oss;
      oss << "ERROR " << retval;
      return oss.str();
    }
    return "";
  } else {
#ifdef _WIN32
    return "Using separate processes is not supported on Windows.\n";
#else
    std::string err_msg;
    ret = preforker.parent_wait(err_msg);
    return err_msg;
#endif
  }
}
// Start every runnable in the vector, then join them all in order.
// Returns "" when every runnable started and finished successfully,
// otherwise a description of the first failure encountered.
std::string SysTestRunnable::
run_until_finished(std::vector < SysTestRunnable * > &runnables)
{
  int index = 0;
  for (SysTestRunnable *runnable : runnables) {
    int start_ret = runnable->start();
    if (start_ret) {
      ostringstream oss;
      oss << "run_until_finished: got error " << start_ret
	  << " when starting runnable " << index;
      return oss.str();
    }
    ++index;
  }
  for (SysTestRunnable *runnable : runnables) {
    std::string join_err = runnable->join();
    if (!join_err.empty()) {
      ostringstream oss;
      oss << "run_until_finished: runnable " << runnable->get_id_str()
	  << ": got error: " << join_err;
      return oss.str();
    }
  }
  printf("*******************************\n");
  return "";
}
// Thread / child-process trampoline: refresh the runnable's id string
// with the live tid/pid, execute its body, and smuggle run()'s int
// return code back through the void* return value.
void *systest_runnable_pthread_helper(void *arg)
{
  SysTestRunnable *st = static_cast < SysTestRunnable * >(arg);
  st->update_id_str(true);
  printf("%s: starting.\n", st->get_id_str());
  int ret = st->run();
  printf("%s: shutting down.\n", st->get_id_str());
  return (void*)(uintptr_t)ret;
}
// Refresh m_id_str, the human-readable identifier used in log output.
// Before start() it is "thread_<id>" / "process_<id>"; once started it
// gains a "_[tid]" / "_[pid]" suffix naming the execution context.
void SysTestRunnable::
update_id_str(bool started)
{
  bool use_threads = SysTestSettings::inst().use_threads();
  // Needs room for "_[", up to digits10+1 decimal digits, "]", and the
  // NUL terminator.  The previous size (digits10 + 1) left no room for
  // the punctuation, so snprintf silently truncated the suffix.
  char extra[std::numeric_limits<int>::digits10 + 5];
  extra[0] = '\0';
  if (started) {
    if (use_threads)
      snprintf(extra, sizeof(extra), "_[%d]", do_gettid());
    else
      snprintf(extra, sizeof(extra), "_[%d]", getpid());
  }
  if (use_threads)
    snprintf(m_id_str, SysTestRunnable::ID_STR_SZ, "thread_%d%s", m_id, extra);
  else
    snprintf(m_id_str, SysTestRunnable::ID_STR_SZ, "process_%d%s", m_id, extra);
}
// Copy argv so that if some fiend decides to modify it, it's ok.
//
// Calling set_argv(0, NULL) frees any previously-copied vector (the
// destructor relies on this).  m_argv_orig owns the strdup'd strings;
// m_argv is a parallel NULL-terminated array handed to library code
// (e.g. rados_conf_parse_argv) which presumably may rearrange it.
void SysTestRunnable::
set_argv(int argc, const char **argv)
{
  if (m_argv_orig != NULL) {
    for (int i = 0; i < m_argc; ++i)
      free((void*)(m_argv_orig[i]));
    delete[] m_argv_orig;
    m_argv_orig = NULL;
    delete[] m_argv;
    m_argv = NULL;
    m_argc = 0;
  }
  if (argv == NULL)
    return;
  m_argc = argc;
  m_argv_orig = new const char*[m_argc+1];
  for (int i = 0; i < m_argc; ++i)
    m_argv_orig[i] = strdup(argv[i]);
  m_argv_orig[argc] = NULL;
  // Note: copies the trailing NULL as well (i <= m_argc).
  m_argv = new const char*[m_argc+1];
  for (int i = 0; i <= m_argc; ++i)
    m_argv[i] = m_argv_orig[i];
}
| 5,575 | 22.82906 | 80 | cc |
null | ceph-main/src/test/system/systest_runnable.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_SYSTEM_TEST_H
#define CEPH_SYSTEM_TEST_H
#include <pthread.h>
#include <stdio.h>
#include <string>
#include <vector>
#ifndef _WIN32
#include "common/Preforker.h"
#endif
#define RETURN1_IF_NOT_VAL(expected, expr) \
do {\
int _rinv_ret = expr;\
if (_rinv_ret != expected) {\
printf("%s: file %s, line %d: expected %d, got %d\n",\
get_id_str(), __FILE__, __LINE__, expected, _rinv_ret);\
return 1; \
}\
} while(0);
#define RETURN1_IF_NONZERO(expr) \
RETURN1_IF_NOT_VAL(0, expr)
extern void* systest_runnable_pthread_helper(void *arg);
std::string get_temp_pool_name(const char* prefix);
/* Represents a single test thread / process.
*
* Inherit from this class and implement the test body in run().
*/
class SysTestRunnable
{
public:
  // Maximum length of the id string, including the NUL terminator.
  static const int ID_STR_SZ = 196;
  SysTestRunnable(int argc, const char **argv);
  virtual ~SysTestRunnable();
  /* Returns 0 on success; error code otherwise. */
  virtual int run() = 0;
  /* Return a string identifying the runnable. */
  const char* get_id_str(void) const;
  /* Start the Runnable */
  int start();
  /* Wait until the Runnable is finished. Returns an error string on failure. */
  std::string join();
  /* Starts a bunch of SystemTestRunnables and waits until they're done.
   *
   * Returns an error string on failure. */
  static std::string run_until_finished(std::vector < SysTestRunnable * >&
					runnables);
protected:
  int m_argc;
  const char **m_argv;
private:
  // Non-copyable: declared but never defined.
  explicit SysTestRunnable(const SysTestRunnable &rhs);
  SysTestRunnable& operator=(const SysTestRunnable &rhs);
  void update_id_str(bool started);
  void set_argv(int argc, const char **argv);
  friend void* systest_runnable_pthread_helper(void *arg);
#ifndef _WIN32
  Preforker preforker;         // fork management for process mode
#endif
  const char **m_argv_orig;    // owns the strdup'd argument strings
  bool m_started;              // set once start() succeeds
  int m_id;                    // unique id from the global counter
  pthread_t m_pthread;         // valid only in thread mode
  char m_id_str[ID_STR_SZ];    // human-readable id for log output
};
#endif
| 2,326 | 23.494737 | 80 | h |
null | ceph-main/src/test/system/systest_settings.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "systest_settings.h"
#include <pthread.h>
#include <sstream>
#include <stdlib.h>
pthread_mutex_t g_system_test_settings_lock = PTHREAD_MUTEX_INITIALIZER;
// Return the process-wide settings singleton, lazily constructed under
// a global mutex on first use.  Never freed; lives for the lifetime of
// the process.
SysTestSettings& SysTestSettings::
inst()
{
  pthread_mutex_lock(&g_system_test_settings_lock);
  if (!m_inst)
    m_inst = new SysTestSettings();
  pthread_mutex_unlock(&g_system_test_settings_lock);
  return *m_inst;
}
// Whether runnables should execute as threads (true) or as forked
// processes (false).  Always true on Windows, which cannot fork;
// elsewhere it reflects the USE_THREADS environment variable.
bool SysTestSettings::
use_threads() const
{
#ifdef _WIN32
  // We can't use multiple processes on Windows for the time being.
  // We'd need a mechanism for spawning those procecesses and also handle
  // the inter-process communication.
  return true;
#else
  return m_use_threads;
#endif
}
// Compose "<LOG_FILE_BASE>.<suffix>" for a per-runnable log file, or
// return "" when no log-file base was configured in the environment.
std::string SysTestSettings::
get_log_name(const std::string &suffix) const
{
  if (m_log_file_base.empty()) {
    return std::string();
  }
  return m_log_file_base + "." + suffix;
}
// Storage for the singleton instance managed by inst().
SysTestSettings* SysTestSettings::
m_inst = NULL;
// Snapshot configuration from the environment: USE_THREADS selects
// thread-based runnables, LOG_FILE_BASE enables per-runnable log files.
SysTestSettings::
SysTestSettings()
{
  m_use_threads = !!getenv("USE_THREADS");
  const char *lfb = getenv("LOG_FILE_BASE");
  if (lfb)
    m_log_file_base.assign(lfb);
}
// Nothing to release.
SysTestSettings::
~SysTestSettings()
{
}
| 1,578 | 20.930556 | 73 | cc |
null | ceph-main/src/test/system/systest_settings.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_SYSTEM_TEST_SETTINGS_H
#define CEPH_SYSTEM_TEST_SETTINGS_H
#include <string>
/* Singleton with settings grabbed from environment variables */
class SysTestSettings
{
public:
  // Access the lazily-created, thread-safe singleton.
  static SysTestSettings& inst();
  // True when runnables run as threads rather than forked processes.
  bool use_threads() const;
  // "<LOG_FILE_BASE>.<suffix>", or "" if LOG_FILE_BASE is unset.
  std::string get_log_name(const std::string &suffix) const;
private:
  static SysTestSettings* m_inst;  // singleton storage
  SysTestSettings();
  ~SysTestSettings();
  bool m_use_threads;              // from USE_THREADS env var
  std::string m_log_file_base;     // from LOG_FILE_BASE env var
};
#endif
| 872 | 22.594595 | 70 | h |
null | ceph-main/src/test/ubuntu-18.04/install-deps.sh | ../../../install-deps.sh | 24 | 24 | 24 | sh |
null | ceph-main/src/tools/RadosDump.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "RadosDump.h"
using std::cerr;
using std::cout;
// Read and decode the fixed-length super_header at the front of the
// dump stream (file_fd) into this->sh.  Returns 0 on success, -EFAULT
// on a short read (unexpected EOF).
int RadosDump::read_super()
{
  bufferlist ebl;
  auto ebliter = ebl.cbegin();
  ssize_t bytes;
  bytes = ebl.read_fd(file_fd, super_header::FIXED_LENGTH);
  if ((size_t)bytes != super_header::FIXED_LENGTH) {
    cerr << "Unexpected EOF" << std::endl;
    return -EFAULT;
  }
  sh.decode(ebliter);
  return 0;
}
// Read and decode a section header (sh.header_size bytes) from file_fd
// into *h.  Returns 0 on success, -EFAULT on a short read.
int RadosDump::get_header(header *h)
{
  // Use ceph_assert for consistency with get_footer(); a plain assert
  // is compiled out under NDEBUG.
  ceph_assert(h != NULL);
  bufferlist ebl;
  auto ebliter = ebl.cbegin();
  ssize_t bytes;
  bytes = ebl.read_fd(file_fd, sh.header_size);
  if ((size_t)bytes != sh.header_size) {
    cerr << "Unexpected EOF" << std::endl;
    return -EFAULT;
  }
  h->decode(ebliter);
  return 0;
}
// Read and decode a section footer (sh.footer_size bytes) from file_fd
// into *f and validate its magic.  Returns 0 on success, -EFAULT on a
// short read or a bad footer magic.
int RadosDump::get_footer(footer *f)
{
  ceph_assert(f != NULL);
  bufferlist ebl;
  auto ebliter = ebl.cbegin();
  ssize_t bytes;
  bytes = ebl.read_fd(file_fd, sh.footer_size);
  if ((size_t)bytes != sh.footer_size) {
    cerr << "Unexpected EOF" << std::endl;
    return -EFAULT;
  }
  f->decode(ebliter);
  if (f->magic != endmagic) {
    cerr << "Bad footer magic" << std::endl;
    return -EFAULT;
  }
  return 0;
}
// Read one complete section from file_fd: its header, its payload
// (into *bl), and — for non-empty sections — the trailing footer.
// *type receives the section tag from the header.  Returns 0 on
// success, -EFAULT on truncation or a bad footer magic.
int RadosDump::read_section(sectiontype_t *type, bufferlist *bl)
{
  header hdr;
  ssize_t bytes;
  int ret = get_header(&hdr);
  if (ret)
    return ret;
  *type = hdr.type;
  bl->clear();
  bytes = bl->read_fd(file_fd, hdr.size);
  if (bytes != hdr.size) {
    cerr << "Unexpected EOF" << std::endl;
    return -EFAULT;
  }
  // Empty sections carry no footer.
  if (hdr.size > 0) {
    footer ft;
    ret = get_footer(&ft);
    if (ret)
      return ret;
  }
  return 0;
}
// Consume and discard sections until the current object's
// TYPE_OBJECT_END marker is reached.  Section tags beyond
// END_OF_TYPES (from a newer format) are skipped with a message;
// any other tag that is not part of an object is an error.
// Returns 0 on success, -EFAULT on a malformed stream.
int RadosDump::skip_object(bufferlist &bl)
{
  bufferlist ebl;
  bool done = false;
  while(!done) {
    sectiontype_t type;
    int ret = read_section(&type, &ebl);
    if (ret)
      return ret;
    if (type >= END_OF_TYPES) {
      cout << "Skipping unknown object section type" << std::endl;
      continue;
    }
    switch(type) {
    case TYPE_DATA:
    case TYPE_ATTRS:
    case TYPE_OMAP_HDR:
    case TYPE_OMAP:
#ifdef DIAGNOSTIC
      cerr << "Skip type " << (int)type << std::endl;
#endif
      break;
    case TYPE_OBJECT_END:
      done = true;
      break;
    default:
      cerr << "Can't skip unknown type: " << type << std::endl;
      return -EFAULT;
    }
  }
  return 0;
}
//Write super_header with its fixed 16 byte length
// Computes header_size/footer_size by encoding a sample header and
// footer, then writes the resulting super_header to file_fd.  No-op in
// dry-run mode.
void RadosDump::write_super()
{
  if (dry_run) {
    return;
  }
  bufferlist superbl;
  super_header sh;
  footer ft;
  // Encode a throwaway header/footer purely to measure their sizes.
  header hdr(TYPE_NONE, 0);
  hdr.encode(superbl);
  sh.magic = super_header::super_magic;
  sh.version = super_header::super_ver;
  sh.header_size = superbl.length();
  superbl.clear();
  ft.encode(superbl);
  sh.footer_size = superbl.length();
  superbl.clear();
  sh.encode(superbl);
  ceph_assert(super_header::FIXED_LENGTH == superbl.length());
  superbl.write_fd(file_fd);
}
| 3,225 | 17.976471 | 70 | cc |
null | ceph-main/src/tools/RadosDump.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef RADOS_DUMP_H_
#define RADOS_DUMP_H_
#include <stdint.h>
#include "include/buffer.h"
#include "include/encoding.h"
#include "osd/osd_types.h"
#include "osd/OSDMap.h"
// Section type tag (one of the TYPE_* enum values below).
typedef uint8_t sectiontype_t;
// On-stream magic values are 32 bits wide.
typedef uint32_t mymagic_t;
// Section payload length, as stored in a section header.
typedef int64_t mysize_t;
// Section types that may appear in an export stream.
enum {
    TYPE_NONE = 0,
    TYPE_PG_BEGIN,
    TYPE_PG_END,
    TYPE_OBJECT_BEGIN,
    TYPE_OBJECT_END,
    TYPE_DATA,
    TYPE_ATTRS,
    TYPE_OMAP_HDR,
    TYPE_OMAP,
    TYPE_PG_METADATA,
    TYPE_POOL_BEGIN,
    TYPE_POOL_END,
    END_OF_TYPES,	//Keep at the end
};
const uint16_t shortmagic = 0xffce;	//goes into stream as "ceff"
//endmagic goes into stream as "ceff ffec"
const mymagic_t endmagic = (0xecff << 16) | shortmagic;
//The first FIXED_LENGTH bytes are a fixed
//portion of the export output. This includes the overall
//version number, and size of header and footer.
//THIS STRUCTURE CAN ONLY BE APPENDED TO. If it needs to expand,
//the version can be bumped and then anything
//can be added to the export format.
struct super_header {
  static const uint32_t super_magic = (shortmagic << 16) | shortmagic;
  // ver = 1, Initial version
  // ver = 2, Add OSDSuperblock to pg_begin
  static const uint32_t super_ver = 2;
  // Encoded size of this struct on disk; asserted in write_super().
  static const uint32_t FIXED_LENGTH = 16;
  uint32_t magic;
  uint32_t version;
  uint32_t header_size;   // encoded size of a section header
  uint32_t footer_size;   // encoded size of a section footer
  super_header() : magic(0), version(0), header_size(0), footer_size(0) { }
  // Raw field-by-field encoding: deliberately NOT wrapped in
  // ENCODE_START/FINISH so the preamble stays exactly FIXED_LENGTH bytes.
  void encode(bufferlist& bl) const {
    using ceph::encode;
    encode(magic, bl);
    encode(version, bl);
    encode(header_size, bl);
    encode(footer_size, bl);
  }
  void decode(bufferlist::const_iterator& bl) {
    using ceph::decode;
    decode(magic, bl);
    decode(version, bl);
    decode(header_size, bl);
    decode(footer_size, bl);
  }
};
// Section header: a type tag plus the payload size that follows it.
struct header {
  sectiontype_t type;
  mysize_t size;
  header(sectiontype_t type, mysize_t size) :
    type(type), size(size) { }
  header(): type(0), size(0) { }
  void encode(bufferlist& bl) const {
    // Pack the type into both high bytes and the short magic into the low
    // 16 bits, so a corrupted or misaligned stream is easy to spot.
    uint32_t debug_type = (type << 24) | (type << 16) | shortmagic;
    ENCODE_START(1, 1, bl);
    encode(debug_type, bl);
    encode(size, bl);
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator& bl) {
    uint32_t debug_type;
    DECODE_START(1, bl);
    decode(debug_type, bl);
    // Recover the type from the top byte of the packed word.
    type = debug_type >> 24;
    decode(size, bl);
    DECODE_FINISH(bl);
  }
};
// Section trailer carrying the end-of-section magic value (endmagic).
struct footer {
  mymagic_t magic;
  footer() : magic(endmagic) { }
  void encode(bufferlist& bl) const {
    ENCODE_START(1, 1, bl);
    encode(magic, bl);
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator& bl) {
    DECODE_START(1, bl);
    decode(magic, bl);
    DECODE_FINISH(bl);
  }
};
// Marks the start of a PG in the export stream; carries the PG id and the
// exporting OSD's superblock (since super_ver 2).
struct pg_begin {
  spg_t pgid;
  OSDSuperblock superblock;
  pg_begin(spg_t pg, const OSDSuperblock& sb):
    pgid(pg), superblock(sb) { }
  pg_begin() { }
  void encode(bufferlist& bl) const {
    // If superblock doesn't include CEPH_FS_FEATURE_INCOMPAT_SHARDS then
    // shard will be NO_SHARD for a replicated pool. This means
    // that we allow the decode by struct_v 2.
    ENCODE_START(3, 2, bl);
    encode(pgid.pgid, bl);
    encode(superblock, bl);
    encode(pgid.shard, bl);
    ENCODE_FINISH(bl);
  }
  // NOTE: New super_ver prevents decode from ver 1
  void decode(bufferlist::const_iterator& bl) {
    DECODE_START(3, bl);
    decode(pgid.pgid, bl);
    if (struct_v > 1) {
      decode(superblock, bl);
    }
    if (struct_v > 2) {
      decode(pgid.shard, bl);
    } else {
      // Older export: assume a replicated pool with no shard.
      pgid.shard = shard_id_t::NO_SHARD;
    }
    DECODE_FINISH(bl);
  }
};
// Marks the start of one object's record in the export stream.
struct object_begin {
  ghobject_t hoid;
  // Duplicate what is in the OI_ATTR so we have it at the start
  // of object processing.
  object_info_t oi;
  explicit object_begin(const ghobject_t &hoid): hoid(hoid) { }
  object_begin() { }
  // If superblock doesn't include CEPH_FS_FEATURE_INCOMPAT_SHARDS then
  // generation will be NO_GEN, shard_id will be NO_SHARD for a replicated
  // pool.  This means we will allow the decode by struct_v 1.
  void encode(bufferlist& bl) const {
    ENCODE_START(3, 1, bl);
    encode(hoid.hobj, bl);
    encode(hoid.generation, bl);
    encode(hoid.shard_id, bl);
    encode(oi, bl, -1);  /* FIXME: we always encode with full features */
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator& bl) {
    DECODE_START(3, bl);
    decode(hoid.hobj, bl);
    if (struct_v > 1) {
      decode(hoid.generation, bl);
      decode(hoid.shard_id, bl);
    } else {
      // Older export: no generation/shard information on the wire.
      hoid.generation = ghobject_t::NO_GEN;
      hoid.shard_id = shard_id_t::NO_SHARD;
    }
    if (struct_v > 2) {
      decode(oi, bl);
    }
    DECODE_FINISH(bl);
  }
};
// One extent of object data: a byte range plus its payload.
struct data_section {
  uint64_t offset;
  uint64_t len;
  bufferlist databl;
  data_section(uint64_t offset, uint64_t len, bufferlist bl):
     offset(offset), len(len), databl(bl) { }
  data_section(): offset(0), len(0) { }
  void encode(bufferlist& bl) const {
    ENCODE_START(1, 1, bl);
    encode(offset, bl);
    encode(len, bl);
    encode(databl, bl);
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator& bl) {
    DECODE_START(1, bl);
    decode(offset, bl);
    decode(len, bl);
    decode(databl, bl);
    DECODE_FINISH(bl);
  }
};
// Object xattrs: attribute name -> value.
struct attr_section {
  using data_t = std::map<std::string,bufferlist,std::less<>>;
  data_t data;
  explicit attr_section(const data_t &data) : data(data) { }
  // Convert a bufferptr-valued attribute map into bufferlist values,
  // wrapping each ptr without copying the underlying buffer.
  explicit attr_section(std::map<std::string, bufferptr, std::less<>> &data_)
  {
    for (auto& [k, v] : data_) {
      bufferlist bl;
      bl.push_back(v);
      data.emplace(k, std::move(bl));
    }
  }
  attr_section() { }
  void encode(bufferlist& bl) const {
    ENCODE_START(1, 1, bl);
    encode(data, bl);
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator& bl) {
    DECODE_START(1, bl);
    decode(data, bl);
    DECODE_FINISH(bl);
  }
};
// The object's omap header blob.
struct omap_hdr_section {
  bufferlist hdr;
  explicit omap_hdr_section(bufferlist hdr) : hdr(hdr) { }
  omap_hdr_section() { }
  void encode(bufferlist& bl) const {
    ENCODE_START(1, 1, bl);
    encode(hdr, bl);
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator& bl) {
    DECODE_START(1, bl);
    decode(hdr, bl);
    DECODE_FINISH(bl);
  }
};
// A batch of the object's omap key/value pairs.
struct omap_section {
  std::map<std::string, bufferlist> omap;
  explicit omap_section(const std::map<std::string, bufferlist> &omap) :
    omap(omap) { }
  omap_section() { }
  void encode(bufferlist& bl) const {
    ENCODE_START(1, 1, bl);
    encode(omap, bl);
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator& bl) {
    DECODE_START(1, bl);
    decode(omap, bl);
    DECODE_FINISH(bl);
  }
};
// Per-PG metadata captured at export time: pg info, log, past intervals,
// the OSDMap of the export epoch, divergent priors and the missing set.
struct metadata_section {
  // struct_ver is the on-disk version of original pg
  __u8 struct_ver;  // for reference
  epoch_t map_epoch;
  pg_info_t info;
  pg_log_t log;
  PastIntervals past_intervals;
  OSDMap osdmap;
  bufferlist osdmap_bl;  // Used in lieu of encoding osdmap due to crc checking
  std::map<eversion_t, hobject_t> divergent_priors;
  pg_missing_t missing;
  metadata_section(
    __u8 struct_ver,
    epoch_t map_epoch,
    const pg_info_t &info,
    const pg_log_t &log,
    const PastIntervals &past_intervals,
    const pg_missing_t &missing)
    : struct_ver(struct_ver),
      map_epoch(map_epoch),
      info(info),
      log(log),
      past_intervals(past_intervals),
      missing(missing) {}
  metadata_section()
    : struct_ver(0),
      map_epoch(0) { }
  void encode(bufferlist& bl) const {
    ENCODE_START(6, 6, bl);
    encode(struct_ver, bl);
    encode(map_epoch, bl);
    encode(info, bl);
    encode(log, bl);
    encode(past_intervals, bl);
    // Equivalent to osdmap.encode(bl, features); but
    // preserving exact layout for CRC checking.
    bl.append(osdmap_bl);
    encode(divergent_priors, bl);
    encode(missing, bl);
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator& bl) {
    DECODE_START(6, bl);
    decode(struct_ver, bl);
    decode(map_epoch, bl);
    decode(info, bl);
    decode(log, bl);
    // The fields below were added over time; gate each on the encoded
    // struct version so older exports remain readable.
    if (struct_v >= 6) {
      decode(past_intervals, bl);
    } else if (struct_v > 1) {
      std::cout << "NOTICE: Older export with classic past_intervals" << std::endl;
    } else {
      std::cout << "NOTICE: Older export without past_intervals" << std::endl;
    }
    if (struct_v > 2) {
      osdmap.decode(bl);
    } else {
      std::cout << "WARNING: Older export without OSDMap information" << std::endl;
    }
    if (struct_v > 3) {
      decode(divergent_priors, bl);
    }
    if (struct_v > 4) {
      decode(missing, bl);
    }
    DECODE_FINISH(bl);
  }
};
/**
 * Superclass for classes that will need to handle a serialized RADOS
 * dump. Requires that the serialized dump be opened with a known FD.
 */
class RadosDump
{
  protected:
    int file_fd;        // fd of the open dump stream
    super_header sh;    // parsed stream preamble (see read_super())
    bool dry_run;       // when set, all write_* methods become no-ops
  public:
    RadosDump(int file_fd_, bool dry_run_)
      : file_fd(file_fd_), dry_run(dry_run_)
    {}
    int read_super();
    int get_header(header *h);
    int get_footer(footer *f);
    int read_section(sectiontype_t *type, bufferlist *bl);
    int skip_object(bufferlist &bl);
    void write_super();
    // Define this in .h because it's templated
    // Writes one complete section: header, encoded payload, footer.
    // Returns 0 on success, or the first failing write_fd() result.
    template <typename T>
    int write_section(sectiontype_t type, const T& obj, int fd) {
      if (dry_run)
	return 0;
      bufferlist blhdr, bl, blftr;
      obj.encode(bl);
      header hdr(type, bl.length());
      hdr.encode(blhdr);
      footer ft;
      ft.encode(blftr);
      int ret = blhdr.write_fd(fd);
      if (ret) return ret;
      ret = bl.write_fd(fd);
      if (ret) return ret;
      ret = blftr.write_fd(fd);
      return ret;
    }
    // Writes a header-only section with size 0; the reader skips the
    // footer for zero-length sections (see read_section()).
    int write_simple(sectiontype_t type, int fd)
    {
      if (dry_run)
	return 0;
      bufferlist hbl;
      header hdr(type, 0);
      hdr.encode(hbl);
      return hbl.write_fd(fd);
    }
};
#endif
| 10,275 | 24.063415 | 83 | h |
null | ceph-main/src/tools/ceph-client-debug.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/ceph_argparse.h"
#include "global/global_init.h"
#include "common/Formatter.h"
#include "common/debug.h"
#include "common/errno.h"
#include "client/Inode.h"
#include "client/Dentry.h"
#include "client/Dir.h"
#include "include/cephfs/libcephfs.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_client
using namespace std;
// Print the command-line synopsis for ceph-client-debug, then the
// generic client option help.
void usage()
{
  std::cout << "Usage: ceph-client-debug [options] <inode number>"
            << std::endl;
  generic_client_usage();
}
/**
* Given an inode, look up the path from the Client cache: assumes
* client cache is fully populated.
*/
/**
 * Walk from *ino toward the root via the client's dentry cache,
 * appending one Dentry per level to 'parts' (leaf first).  Assumes the
 * cache already holds the full trace; stops at the first inode that has
 * no dentries (the root, once lookup_trace() has run).
 */
void traverse_dentries(Inode *ino, std::vector<Dentry*> &parts)
{
  for (Inode *cur = ino; !cur->dentries.empty(); ) {
    Dentry *link = *(cur->dentries.begin());
    parts.push_back(link);
    cur = link->dir->parent_inode;
  }
}
/**
* Given an inode, send lookup requests to the MDS for
* all its ancestors, such that the full trace will be
* populated in client cache.
*/
// Recursively resolves 'ino' via the MDS, then each parent up to the root,
// so that the whole path ends up in the client cache for traverse_dentries.
// Returns 0 on success, or the error from ceph_ll_lookup_inode.
// NOTE(review): the Inode reference obtained from ceph_ll_lookup_inode is
// not released here -- presumably dropped at unmount; confirm.
int lookup_trace(ceph_mount_info *client, inodeno_t const ino)
{
  Inode *inode;
  int r = ceph_ll_lookup_inode(client, ino, &inode);
  if (r != 0) {
    return r;
  } else {
    if (!inode->dentries.empty()) {
      Dentry *dn = *(inode->dentries.begin());
      ceph_assert(dn->dir);
      ceph_assert(dn->dir->parent_inode);
      // Recurse on the parent so the full ancestry gets cached.
      r = lookup_trace(client, dn->dir->parent_inode->ino);
      if (r) {
        return r;
      }
    } else {
      // We reached the root of the tree
      ceph_assert(inode->ino == CEPH_INO_ROOT);
    }
  }
  return r;
}
int main(int argc, const char **argv)
{
// Argument handling
auto args = argv_to_vec(argc, argv);
if (args.empty()) {
cerr << argv[0] << ": -h or --help for usage" << std::endl;
exit(1);
}
if (ceph_argparse_need_usage(args)) {
usage();
exit(0);
}
auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_UNPRIVILEGED_DAEMON_DEFAULTS|
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
common_init_finish(g_ceph_context);
// Expect exactly one positional argument (inode number)
if (args.size() != 1) {
cerr << "missing position argument (inode number)" << std::endl;
exit(1);
}
char const *inode_str = args[0];
inodeno_t inode = strtoll(inode_str, NULL, 0);
if (inode <= 0) {
derr << "Invalid inode: " << inode_str << dendl;
return -1;
}
// Initialize filesystem client
struct ceph_mount_info *client;
int r = ceph_create_with_context(&client, g_ceph_context);
if (r) {
derr << "Error initializing libcephfs: " << cpp_strerror(r) << dendl;
return r;
}
r = ceph_mount(client, "/");
if (r) {
derr << "Error mounting: " << cpp_strerror(r) << dendl;
ceph_shutdown(client);
return r;
}
// Populate client cache with inode of interest & ancestors
r = lookup_trace(client, inode);
if (r) {
derr << "Error looking up inode " << std::hex << inode << std::dec <<
": " << cpp_strerror(r) << dendl;
return -1;
}
// Retrieve inode of interest
struct vinodeno_t vinode;
vinode.ino = inode;
vinode.snapid = CEPH_NOSNAP;
Inode *ino = ceph_ll_get_inode(client, vinode);
// Retrieve dentry trace
std::vector<Dentry*> path;
traverse_dentries(ino, path);
// Print inode and path as a JSON object
JSONFormatter jf(true);
jf.open_object_section("client_debug");
{
jf.open_object_section("inode");
{
ino->dump(&jf);
}
jf.close_section(); // inode
jf.open_array_section("path");
{
for (std::vector<Dentry*>::reverse_iterator p = path.rbegin(); p != path.rend(); ++p) {
jf.open_object_section("dentry");
{
(*p)->dump(&jf);
}
jf.close_section(); // dentry
}
}
jf.close_section(); // path
}
jf.close_section(); // client_debug
jf.flush(std::cout);
std::cout << std::endl;
// Release Inode references
ceph_ll_forget(client, ino, 1);
for (std::vector<Dentry*>::reverse_iterator p = path.rbegin(); p != path.rend(); ++p) {
ceph_ll_forget(client, (*p)->inode.get(), 1);
}
ino = NULL;
path.clear();
// Shut down
r = ceph_unmount(client);
if (r) {
derr << "Error mounting: " << cpp_strerror(r) << dendl;
}
ceph_shutdown(client);
return r;
}
| 4,709 | 23.53125 | 93 | cc |
null | ceph-main/src/tools/ceph-diff-sorted.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* diffsorted -- a utility to compute a line-by-line diff on two
* sorted input files
*
* Copyright © 2019 Red Hat
*
* Author: J. Eric Ivancich
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation.
*/
/*
* SUMMARY
*
* The `diffsorted` utility does a line-by-line diff on two sorted text
* files and indicating lines that are in one file but not the other
* using diff-style notation (although line numbers are not indicated).
*
* USAGE
*
* rgw-diff-sorted file1.txt file2.txt
*
* NOTES
*
* Each files should have its lines in sorted order and should have no
* empty lines.
*
* A potential input file can be sorted using the `sort` utility provided
* that LANG=C to insure byte lexical order. For example:
*
* LANG=C sort unsorted.txt >sorted.txt
*
* or:
*
* export LANG=C
* sort unsorted.txt >sorted.txt
*
* EXIT STATUS
*
* 0 : files same
* 1 : files different
* 2 : usage problem (e.g., wrong number of command-line arguments)
* 3 : problem opening input file
* 4 : bad file content (e.g., unsorted order or empty lines)
*/
#include <iostream>
#include <fstream>
// Wraps a text file, exposing it one line at a time while enforcing the
// invariants diffsorted relies on: no empty lines and byte-lexical sorted
// order.  Invariant violations terminate the program with exit status 4.
struct FileOfLines {
  const char* filename;       // path, kept for error messages
  std::ifstream input;
  std::string this_line, prev_line;
  bool next_eof;              // the line just read was the file's last
  bool is_eof;                // no current line; iteration is finished

  FileOfLines(const char* _filename) :
    filename(_filename),
    input(filename),
    next_eof(false),
    is_eof(false)
  { }

  // Print every remaining line prefixed with 'prefix' (used to flush the
  // tail of the longer input in diff notation).
  void dump(const std::string& prefix) {
    do {
      std::cout << prefix << this_line << std::endl;
      advance();
    } while (!eof());
  }

  bool eof() const {
    return is_eof;
  }

  bool good() const {
    return input.good();
  }

  // Move to the next line; sets eof() once input is exhausted.  Exits with
  // status 4 on an empty line or out-of-order content.
  void advance() {
    if (next_eof) {
      is_eof = true;
      return;
    }
    prev_line = this_line;
    std::getline(input, this_line);
    if (this_line.empty()) {
      if (!input.eof()) {
        std::cerr << "Error: " << filename << " has an empty line." <<
          std::endl;
        exit(4);
      }
      is_eof = true;
      return;
    } else if (input.eof()) {
      next_eof = true;
    }
    if (this_line < prev_line) {
      std::cerr << "Error: " << filename << " is not in sorted order; \"" <<
        this_line << "\" follows \"" << prev_line << "\"." << std::endl;
      exit(4);
    }
  }

  // Return the current line.  Fix: previously this returned
  // `const std::string` BY VALUE, copying the string on every call (and
  // the const return also inhibited moves); return a const reference.
  const std::string& line() const {
    return this_line;
  }
};
int main(int argc, const char* argv[]) {
if (argc != 3) {
std::cerr << "Usage: " << argv[0] << " <file1> <file2>" << std::endl;
exit(2);
}
FileOfLines input1(argv[1]);
if (!input1.good()) {
std::cerr << "Error opening " << argv[1] <<
"." << std::endl;
exit(3);
}
FileOfLines input2(argv[2]);
if (!input2.good()) {
std::cerr << "Error opening " << argv[2] <<
"." << std::endl;
exit(3);
}
bool files_same = true;
input1.advance();
input2.advance();
while (!input1.eof() && !input2.eof()) {
if (input1.line() == input2.line()) {
input1.advance();
input2.advance();
} else if (input1.line() < input2.line()) {
files_same = false;
std::cout << "< " << input1.line() << std::endl;
input1.advance();
} else {
files_same = false;
std::cout << "> " << input2.line() << std::endl;
input2.advance();
}
}
if (!input1.eof()) {
files_same = false;
input1.dump("< ");
} else if (!input2.eof()) {
files_same = false;
input2.dump("> ");
}
if (files_same) {
exit(0);
} else {
exit(1);
}
}
| 3,697 | 20.252874 | 76 | cc |
null | ceph-main/src/tools/ceph-monstore-update-crush.sh | #!/usr/bin/env bash
#
# Copyright (C) 2015 Red Hat <contact@redhat.com>
#
# Author: Kefu Chai <kchai@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
# Chattiness flag; set by --verbose in main().
verbose=
# When run from a build tree, also search the current directory for binaries.
test -d ../src && export PATH=$PATH:.
# jq is required for extracting fields from osdmaptool's JSON output.
if ! which jq ; then
    echo "Missing jq binary!"
    exit 1
fi
# GNU getopt lives outside the base system on FreeBSD.
if [ `uname` = FreeBSD ]; then
    GETOPT=/usr/local/bin/getopt
else
    GETOPT=getopt
fi
# osdmap_get <mon-store> <jq-query> [epoch]
# Extract the osdmap (optionally at a specific epoch) from the mon store,
# dump it as JSON and print the result of the jq query against it.
# Returns non-zero if the map cannot be fetched from the store.
function osdmap_get() {
    local store_path=$1
    local query=$2
    # Only pass "-v <epoch>" when an epoch argument was supplied.
    local epoch=${3:+-v $3}
    local osdmap=`mktemp`
    $CEPH_BIN/ceph-monstore-tool $store_path get osdmap -- \
                                 $epoch -o $osdmap > /dev/null || return
    echo $($CEPH_BIN/osdmaptool --dump json $osdmap 2> /dev/null | \
           jq "$query")
    rm -f $osdmap
}
# test_crush <mon-store> <epoch> <max-osd> <crush-out>
# Extract the crush map from the osdmap at the given epoch into
# <crush-out> and sanity-check it with crushtool.
# Returns 0 when the crush map passes the check, 1 otherwise.
function test_crush() {
    local store_path=$1
    local epoch=$2
    local max_osd=$3
    local crush=$4

    local osdmap=`mktemp`
    $CEPH_BIN/ceph-monstore-tool $store_path get osdmap -- \
                                 -v $epoch -o $osdmap > /dev/null
    $CEPH_BIN/osdmaptool --export-crush $crush $osdmap &> /dev/null

    local good=false
    if $CEPH_BIN/crushtool --test --check $max_osd -i $crush > /dev/null; then
        good=true
    fi
    rm -f $osdmap
    $good || return 1
}
# Print the arguments to stderr and exit with the status of the command
# that ran immediately before the call.
function die() {
    local rc=$?
    echo "$@" >&2
    exit $rc
}
# Print the help text.  With arguments, print them first (as an error
# message) and exit 1; with no arguments exit 0.
function usage() {
    [ $# -gt 0 ] && echo -e "\n$@"
    cat <<EOF
Usage: $0 [options ...] <mon-store>
Search backward for a latest known-good epoch in monstore. Rewrite the osdmap
epochs after it with the crush map in the found epoch if asked to do so. By
default, print out the crush map in the good epoch.
[-h|--help]            display this message
[--out]                write the found crush map to given file (default: stdout)
[--rewrite]            rewrite the monitor storage with the found crush map
[--verbose]            be more chatty
EOF
    [ $# -gt 0 ] && exit 1
    exit 0
}
# Parse options, locate the newest osdmap epoch whose crush map passes
# crushtool's sanity check, and either print it, write it to --out, or
# rewrite the mon store with it (--rewrite).
function main() {
    local temp
    # Fix: every long option handled in the case statement below must also
    # be declared here -- "osdmap-epoch:" was missing, so getopt rejected
    # --osdmap-epoch and it could never actually be passed.
    temp=$($GETOPT -o h --long verbose,help,mon-store:,out:,osdmap-epoch:,rewrite -n $0 -- "$@") || return 1

    eval set -- "$temp"
    local rewrite
    while [ "$1" != "--" ]; do
        case "$1" in
            --verbose)
                verbose=true
                # set -xe
                # PS4='${FUNCNAME[0]}: $LINENO: '
                shift;;
            -h|--help)
                usage
                return 0;;
            --out)
                output=$2
                shift 2;;
            --osdmap-epoch)
                osdmap_epoch=$2
                shift 2;;
            --rewrite)
                rewrite=true
                shift;;
            *)
                usage "unexpected argument $1"
                shift;;
        esac
    done
    shift

    local store_path="$1"
    test $store_path || usage "I need the path to mon-store."

    # try accessing the store; if it fails, likely means a mon is running
    local last_osdmap_epoch
    local max_osd
    last_osdmap_epoch=$(osdmap_get $store_path ".epoch") || \
        die "error accessing mon store at $store_path"
    # get the max_osd # in last osdmap epoch, crushtool will use it to check
    # the crush maps in previous osdmaps
    max_osd=$(osdmap_get $store_path ".max_osd" $last_osdmap_epoch)

    local good_crush
    local good_epoch
    test $verbose && echo "the latest osdmap epoch is $last_osdmap_epoch"
    # Walk backwards from the newest osdmap until a crush map passes the
    # sanity check.
    for epoch in `seq $last_osdmap_epoch -1 1`; do
        local crush_path=`mktemp`
        test $verbose && echo "checking crush map #$epoch"
        if test_crush $store_path $epoch $max_osd $crush_path; then
            test $verbose && echo "crush map version #$epoch works with osdmap epoch #$osdmap_epoch"
            good_epoch=$epoch
            good_crush=$crush_path
            break
        fi
        rm -f $crush_path
    done

    if test $good_epoch; then
        echo "good crush map found at epoch $epoch/$last_osdmap_epoch"
    else
        echo "Unable to find a crush map for osdmap version #$osdmap_epoch." 2>&1
        return 1
    fi

    if test $good_epoch -eq $last_osdmap_epoch; then
        echo "and mon store has no faulty crush maps."
    elif test $output; then
        $CEPH_BIN/crushtool --decompile $good_crush --outfn $output
    elif test $rewrite; then
        $CEPH_BIN/ceph-monstore-tool $store_path rewrite-crush -- \
            --crush $good_crush \
            --good-epoch $good_epoch
    else
        echo
        $CEPH_BIN/crushtool --decompile $good_crush
    fi
    rm -f $good_crush
}

main "$@"
| 4,950 | 27.291429 | 100 | sh |
null | ceph-main/src/tools/ceph_authtool.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <sage@newdream.net>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/ConfUtils.h"
#include "common/ceph_argparse.h"
#include "common/config_proxy.h"
#include "global/global_context.h"
#include "global/global_init.h"
#include "auth/Crypto.h"
#include "auth/Auth.h"
#include "auth/KeyRing.h"
using std::map;
using std::string;
using std::vector;
using std::cerr;
using std::cout;
// Print the full command-line help for ceph-authtool and exit with
// status 1.
void usage()
{
  cout << "usage: ceph-authtool keyringfile [OPTIONS]...\n"
       << "where the options are:\n"
       << "  -l, --list                    will list all keys and capabilities present in\n"
       << "                                the keyring\n"
       << "  -p, --print-key               will print an encoded key for the specified\n"
       << "                                entityname. This is suitable for the\n"
       << "                                'mount -o secret=..' argument\n"
       << "  -C, --create-keyring          will create a new keyring, overwriting any\n"
       << "                                existing keyringfile\n"
       << "  -g, --gen-key                 will generate a new secret key for the\n"
       << "                                specified entityname\n"
       << "  --gen-print-key               will generate a new secret key without set it\n"
       << "                                to the keyringfile, prints the secret to stdout\n"
       << "  --import-keyring FILE         will import the content of a given keyring\n"
       << "                                into the keyringfile\n"
       << "  -n NAME, --name NAME          specify entityname to operate on\n"
       << "  -a BASE64, --add-key BASE64   will add an encoded key to the keyring\n"
       << "  --cap SUBSYSTEM CAPABILITY    will set the capability for given subsystem\n"
       << "  --caps CAPSFILE               will set all of capabilities associated with a\n"
       << "                                given key, for all subsystems\n"
       << "  --mode MODE                   will set the desired file mode to the keyring\n"
       << "                                e.g: '0644', defaults to '0600'"
       << std::endl;
  exit(1);
}
// Entry point for ceph-authtool: parses the command line, then applies the
// requested write operations (create/import/gen-key/add-key/set-caps) to
// the keyring file before running the read operations (list/print-key).
// The keyring file is rewritten only if something was modified.
int main(int argc, const char **argv)
{
  auto args = argv_to_vec(argc, argv);
  std::string add_key;
  std::string caps_fn;
  std::string import_keyring;
  map<string,bufferlist> caps;
  std::string fn;
  if (args.empty()) {
    cerr << argv[0] << ": -h or --help for usage" << std::endl;
    exit(1);
  }
  if (ceph_argparse_need_usage(args)) {
    usage();
    exit(0);
  }
  auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
                         CODE_ENVIRONMENT_UTILITY,
                         CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
  // Flags selected by the command line; at least one action is required.
  bool gen_key = false;
  bool gen_print_key = false;
  bool list = false;
  bool print_key = false;
  bool create_keyring = false;
  int mode = 0600; // keyring file mode
  std::vector<const char*>::iterator i;
  /* Handle options unique to ceph-authtool
   * -n NAME, --name NAME is handled by global_init
   * */
  for (i = args.begin(); i != args.end(); ) {
    std::string val;
    if (ceph_argparse_double_dash(args, i)) {
      break;
    } else if (ceph_argparse_flag(args, i, "-g", "--gen-key", (char*)NULL)) {
      gen_key = true;
    } else if (ceph_argparse_flag(args, i, "--gen-print-key", (char*)NULL)) {
      gen_print_key = true;
    } else if (ceph_argparse_witharg(args, i, &val, "-a", "--add-key", (char*)NULL)) {
      if (val.empty()) {
        cerr << "Option --add-key requires an argument" << std::endl;
        exit(1);
      }
      add_key = val;
    } else if (ceph_argparse_flag(args, i, "-l", "--list", (char*)NULL)) {
      list = true;
    } else if (ceph_argparse_witharg(args, i, &val, "--caps", (char*)NULL)) {
      caps_fn = val;
    } else if (ceph_argparse_witharg(args, i, &val, "--cap", (char*)NULL)) {
      // --cap consumes two arguments: the subsystem and the capability.
      std::string my_key = val;
      if (i == args.end()) {
        cerr << "must give two arguments to --cap: key and val." << std::endl;
        exit(1);
      }
      std::string my_val = *i;
      ++i;
      encode(my_val, caps[my_key]);
    } else if (ceph_argparse_flag(args, i, "-p", "--print-key", (char*)NULL)) {
      print_key = true;
    } else if (ceph_argparse_flag(args, i, "-C", "--create-keyring", (char*)NULL)) {
      create_keyring = true;
    } else if (ceph_argparse_witharg(args, i, &val, "--import-keyring", (char*)NULL)) {
      import_keyring = val;
    } else if (ceph_argparse_witharg(args, i, &val, "--mode", (char*)NULL)) {
      std::string err;
      mode = strict_strtoll(val.c_str(), 8, &err);
      if (!err.empty()) {
        cerr << "Option --mode requires an argument" << std::endl;
        exit(1);
      }
    } else if (fn.empty()) {
      // First non-option argument is the keyring file path.
      fn = *i++;
    } else {
      cerr << argv[0] << ": unexpected '" << *i << "'" << std::endl;
      usage();
    }
  }
  if (fn.empty() && !gen_print_key) {
    cerr << argv[0] << ": must specify filename" << std::endl;
    usage();
  }
  if (!(gen_key ||
        gen_print_key ||
        !add_key.empty() ||
        list ||
        !caps_fn.empty() ||
        !caps.empty() ||
        print_key ||
        create_keyring ||
        !import_keyring.empty())) {
    cerr << "no command specified" << std::endl;
    usage();
  }
  if (gen_key && (!add_key.empty())) {
    cerr << "can't both gen-key and add-key" << std::endl;
    usage();
  }
  common_init_finish(g_ceph_context);
  EntityName ename(g_conf()->name);
  // Enforce the use of gen-key or add-key when creating to avoid ending up
  // with an "empty" key (key = AAAAAAAAAAAAAAAA)
  if (create_keyring && !gen_key && add_key.empty() && !caps.empty()) {
    cerr << "must specify either gen-key or add-key when creating" << std::endl;
    usage();
  }
  // --gen-print-key is standalone: print a fresh secret and exit without
  // touching any keyring file.
  if (gen_print_key) {
    CryptoKey key;
    key.create(g_ceph_context, CEPH_CRYPTO_AES);
    cout << key << std::endl;
    return 0;
  }
  // keyring --------
  bool modified = false;
  bool added_entity = false;
  KeyRing keyring;
  bufferlist bl;
  int r = 0;
  if (create_keyring) {
    cout << "creating " << fn << std::endl;
    modified = true;
  } else {
    // Load and decode the existing keyring file.
    std::string err;
    r = bl.read_file(fn.c_str(), &err);
    if (r >= 0) {
      try {
        auto iter = bl.cbegin();
        decode(keyring, iter);
      } catch (const buffer::error &err) {
        cerr << "error reading file " << fn << std::endl;
        exit(1);
      }
    } else {
      cerr << "can't open " << fn << ": " << err << std::endl;
      exit(1);
    }
  }
  // Validate that "name" actually has an existing key in this keyring if we
  // have not given gen-key or add-key options
  if (!gen_key && add_key.empty() && !caps.empty()) {
    CryptoKey key;
    if (!keyring.get_secret(ename, key)) {
      cerr << "can't find existing key for " << ename
           << " and neither gen-key nor add-key specified" << std::endl;
      exit(1);
    }
  }
  // write commands
  if (!import_keyring.empty()) {
    // Merge the entities of another keyring file into this one.
    KeyRing other;
    bufferlist obl;
    std::string err;
    int r = obl.read_file(import_keyring.c_str(), &err);
    if (r >= 0) {
      try {
        auto iter = obl.cbegin();
        decode(other, iter);
      } catch (const buffer::error &err) {
        cerr << "error reading file " << import_keyring << std::endl;
        exit(1);
      }
      cout << "importing contents of " << import_keyring << " into " << fn << std::endl;
      //other.print(cout);
      keyring.import(g_ceph_context, other);
      modified = true;
    } else {
      cerr << "can't open " << import_keyring << ": " << err << std::endl;
      exit(1);
    }
  }
  if (gen_key) {
    EntityAuth eauth;
    eauth.key.create(g_ceph_context, CEPH_CRYPTO_AES);
    keyring.add(ename, eauth);
    modified = true;
  }
  if (!add_key.empty()) {
    EntityAuth eauth;
    try {
      eauth.key.decode_base64(add_key);
    } catch (const buffer::error &err) {
      cerr << "can't decode key '" << add_key << "'" << std::endl;
      exit(1);
    }
    keyring.add(ename, eauth);
    modified = true;
    cout << "added entity " << ename << " " << eauth << std::endl;
    added_entity = true;
  }
  if (!caps_fn.empty()) {
    // Read per-subsystem caps from an ini-style caps file; only the known
    // subsystem keys in key_names are honoured.  NOTE: this local 'caps'
    // deliberately shadows the outer --cap map.
    ConfFile cf;
    if (cf.parse_file(caps_fn, &cerr) != 0) {
      cerr << "could not parse caps file " << caps_fn << std::endl;
      exit(1);
    }
    map<string, bufferlist> caps;
    const char *key_names[] = { "mon", "osd", "mds", "mgr", NULL };
    for (int i=0; key_names[i]; i++) {
      std::string val;
      if (cf.read("global", key_names[i], val) == 0) {
        bufferlist bl;
        encode(val, bl);
        string s(key_names[i]);
        caps[s] = bl;
      }
    }
    keyring.set_caps(ename, caps);
    modified = true;
  }
  if (!caps.empty()) {
    keyring.set_caps(ename, caps);
    modified = true;
  }
  if (added_entity && caps.size() > 0) {
    cout << "added " << caps.size() << " caps to entity " << ename << std::endl;
  }
  // read commands
  if (list) {
    try {
      keyring.print(cout);
    } catch (ceph::buffer::end_of_buffer &eob) {
      cout << "Exception (end_of_buffer) in print(), exit." << std::endl;
      exit(1);
    }
  }
  if (print_key) {
    CryptoKey key;
    if (keyring.get_secret(ename, key)) {
      cout << key << std::endl;
    } else {
      cerr << "entity " << ename << " not found" << std::endl;
      exit(1);
    }
  }
  // write result?
  if (modified) {
    bufferlist bl;
    keyring.encode_plaintext(bl);
    r = bl.write_file(fn.c_str(), mode);
    if (r < 0) {
      cerr << "could not write " << fn << std::endl;
      exit(1);
    }
    //cout << "wrote " << bl.length() << " bytes to " << fn << std::endl;
  }
  return 0;
}
| 9,844 | 29.862069 | 93 | cc |
null | ceph-main/src/tools/ceph_conf.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2010 Dreamhost
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <iomanip>
#include <string>
#include "common/ceph_argparse.h"
#include "global/global_init.h"
#include "mon/AuthMonitor.h"
#include "common/Formatter.h"
using std::deque;
using std::string;
using std::unique_ptr;
using std::cerr;
using std::cout;
using std::vector;
// Emit the full help text for ceph-conf to the given stream (an ostream
// parameter so callers can direct it to cout or cerr).
static void usage(std::ostream& out)
{
  // TODO: add generic_usage once cerr/derr issues are resolved
  out << R"(Ceph configuration query tool
USAGE
ceph-conf <flags> <action>
ACTIONS
  -L|--list-all-sections          List all sections
  -l|--list-sections <prefix>     List sections with the given prefix
  --filter-key <key>              Filter section list to only include sections
                                  with given key defined.
  --filter-key-value <key>=<val>  Filter section list to only include sections
                                  with given key/value pair.
  --lookup <key>                  Print a configuration setting to stdout.
                                  Returns 0 (success) if the configuration setting is
                                  found; 1 otherwise.
  -r|--resolve-search             search for the first file that exists and
                                  can be opened in the resulted comma
                                  delimited search list.
  -D|--dump-all                   dump all variables.
  --show-config-value <key>       Print the corresponding ceph.conf value
                                  that matches the specified key. Also searches
                                  global defaults.
FLAGS
  --name name                     Set type.id
  [-s <section>]                  Add to list of sections to search
  [--format plain|json|json-pretty]
                                  dump variables in plain text, json or pretty
                                  json
  [--pid <pid>]                   Override the $pid when expanding options
If there is no action given, the action will default to --lookup.
EXAMPLES
$ ceph-conf --name mon.0 -c /etc/ceph/ceph.conf 'mon addr'
Find out what the value of 'mon addr' is for monitor 0.
$ ceph-conf -l mon
List sections beginning with 'mon'.
RETURN CODE
Return code will be 0 on success; error code otherwise.
)";
}
static int list_sections(const std::string &prefix,
const std::list<string>& filter_key,
const std::map<string,string>& filter_key_value)
{
std::vector <std::string> sections;
int ret = g_conf().get_all_sections(sections);
if (ret)
return 2;
for (std::vector<std::string>::const_iterator p = sections.begin();
p != sections.end(); ++p) {
if (strncmp(prefix.c_str(), p->c_str(), prefix.size()))
continue;
std::vector<std::string> sec;
sec.push_back(*p);
int r = 0;
for (std::list<string>::const_iterator q = filter_key.begin(); q != filter_key.end(); ++q) {
string v;
r = g_conf().get_val_from_conf_file(sec, q->c_str(), v, false);
if (r < 0)
break;
}
if (r < 0)
continue;
for (std::map<string,string>::const_iterator q = filter_key_value.begin();
q != filter_key_value.end();
++q) {
string v;
r = g_conf().get_val_from_conf_file(sec, q->first.c_str(), v, false);
if (r < 0 || v != q->second) {
r = -1;
break;
}
}
if (r < 0)
continue;
cout << *p << std::endl;
}
return 0;
}
static int lookup(const std::deque<std::string> §ions,
const std::string &key, bool resolve_search)
{
std::vector<std::string> my_sections{sections.begin(), sections.end()};
for (auto& section : g_conf().get_my_sections()) {
my_sections.push_back(section);
}
std::string val;
int ret = g_conf().get_val_from_conf_file(my_sections, key.c_str(), val, true);
if (ret == -ENOENT)
return 1;
else if (ret == 0) {
if (resolve_search) {
string result;
ret = ceph_resolve_file_search(val, result);
if (!ret)
puts(result.c_str());
}
else {
puts(val.c_str());
}
return 0;
}
else {
cerr << "error looking up '" << key << "': error " << ret << std::endl;
return 2;
}
}
static int dump_all(const string& format)
{
if (format == "" || format == "plain") {
g_conf().show_config(std::cout);
return 0;
} else {
unique_ptr<Formatter> f(Formatter::create(format));
if (f) {
f->open_object_section("ceph-conf");
g_conf().show_config(f.get());
f->close_section();
f->flush(std::cout);
return 0;
}
cerr << "format '" << format << "' not recognized." << std::endl;
usage(cerr);
return 1;
}
}
// Scan the raw argument list for "--pid <pid>" and, if present, export it
// as $PID before the config is parsed, so that metavariable expansion in
// option values uses the override instead of our own pid.
// ceph_argparse_witharg() erases the matched argument pair from 'args'.
static void maybe_override_pid(vector<const char*>& args)
{
  for (auto i = args.begin(); i != args.end(); ++i) {
    string val;
    if (ceph_argparse_witharg(args, i, &val, "--pid", (char*)NULL)) {
      setenv("PID", val.c_str(), 1);
      break;  // only the first --pid is honored
    }
  }
}
// ceph-conf entry point: parse the command line, initialize a minimal
// client context (no daemon actions, no mon config fetch), then dispatch
// to one of the actions: help, lookup, list-sections, or dumpall.
// With no explicit action, the first bare argument becomes a lookup key.
int main(int argc, const char **argv)
{
  deque<std::string> sections;           // -s/--section values, searched in order
  bool resolve_search = false;           // -r: resolve value as file search list
  std::string action;                    // selected sub-command
  std::string lookup_key;                // key for the "lookup" action
  std::string section_list_prefix;       // prefix filter for "list-sections"
  std::list<string> filter_key;          // --filter-key: keys that must exist
  std::map<string,string> filter_key_value;  // --filter-key-value: exact matches
  std::string dump_format;               // --format for "dumpall"
  auto args = argv_to_vec(argc, argv);
  auto orig_args = args;                 // keep a copy for error reporting
  auto cct = [&args] {
    // override the PID before options are expanded
    maybe_override_pid(args);
    std::map<std::string,std::string> defaults = {{"log_to_file", "false"}};
    return global_init(&defaults, args, CEPH_ENTITY_TYPE_CLIENT,
		       CODE_ENVIRONMENT_DAEMON,
		       CINIT_FLAG_NO_DAEMON_ACTIONS |
		       CINIT_FLAG_NO_MON_CONFIG);
  }();
  // do not common_init_finish(); do not start threads; do not do any of thing
  // wonky things the daemon whose conf we are examining would do (like initialize
  // the admin socket).
  //common_init_finish(g_ceph_context);
  std::string val;
  for (std::vector<const char*>::iterator i = args.begin(); i != args.end(); ) {
    if (ceph_argparse_double_dash(args, i)) {
      break;
    } else if (ceph_argparse_witharg(args, i, &val, "-s", "--section", (char*)NULL)) {
      sections.push_back(val);
    } else if (ceph_argparse_flag(args, i, "-r", "--resolve_search", (char*)NULL)) {
      resolve_search = true;
    } else if (ceph_argparse_flag(args, i, "-h", "--help", (char*)NULL)) {
      action = "help";
    } else if (ceph_argparse_witharg(args, i, &val, "--lookup", (char*)NULL)) {
      action = "lookup";
      lookup_key = val;
    } else if (ceph_argparse_flag(args, i, "-L", "--list_all_sections", (char*)NULL)) {
      action = "list-sections";
      section_list_prefix = "";  // empty prefix matches every section
    } else if (ceph_argparse_witharg(args, i, &val, "-l", "--list_sections", (char*)NULL)) {
      action = "list-sections";
      section_list_prefix = val;
    } else if (ceph_argparse_witharg(args, i, &val, "--filter_key", (char*)NULL)) {
      filter_key.push_back(val);
    } else if (ceph_argparse_witharg(args, i, &val, "--filter_key_value", (char*)NULL)) {
      // split "key=value" at the first '='
      size_t pos = val.find_first_of('=');
      if (pos == string::npos) {
	cerr << "expecting argument like 'key=value' for --filter-key-value (not '" << val << "')" << std::endl;
	usage(cerr);
	return EXIT_FAILURE;
      }
      string key(val, 0, pos);
      string value(val, pos+1);
      filter_key_value[key] = value;
    } else if (ceph_argparse_flag(args, i, "-D", "--dump_all", (char*)NULL)) {
      action = "dumpall";
    } else if (ceph_argparse_witharg(args, i, &val, "--format", (char*)NULL)) {
      dump_format = val;
    } else {
      // a bare (unrecognized) argument is taken as the implicit lookup
      // key; a second bare argument is an error
      if (((action == "lookup") || (action == "")) && (lookup_key.empty())) {
	action = "lookup";
	lookup_key = *i++;
      } else {
	cerr << "unable to parse option: '" << *i << "'" << std::endl;
	cerr << "args:";
	for (auto arg : orig_args) {
	  cerr << " " << std::quoted(arg);
	}
	cerr << std::endl;
	usage(cerr);
	return EXIT_FAILURE;
      }
    }
  }
  cct->_log->flush();
  // dispatch the selected action
  if (action == "help") {
    usage(cout);
    return EXIT_SUCCESS;
  } else if (action == "list-sections") {
    return list_sections(section_list_prefix, filter_key, filter_key_value);
  } else if (action == "lookup") {
    return lookup(sections, lookup_key, resolve_search);
  } else if (action == "dumpall") {
    return dump_all(dump_format);
  } else {
    cerr << "You must give an action, such as --lookup or --list-all-sections." << std::endl;
    cerr << "Pass --help for more help." << std::endl;
    return EXIT_FAILURE;
  }
}
| 8,788 | 30.501792 | 105 | cc |
null | ceph-main/src/tools/ceph_dedup_tool.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Author: Myoungwon Oh <ohmyoungwon@gmail.com>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/types.h"
#include "include/rados/buffer.h"
#include "include/rados/librados.hpp"
#include "include/rados/rados_types.hpp"
#include "acconfig.h"
#include "common/Cond.h"
#include "common/Formatter.h"
#include "common/ceph_argparse.h"
#include "common/ceph_crypto.h"
#include "common/config.h"
#include "common/debug.h"
#include "common/errno.h"
#include "common/obj_bencher.h"
#include "global/global_init.h"
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <time.h>
#include <sstream>
#include <errno.h>
#include <dirent.h>
#include <stdexcept>
#include <climits>
#include <locale>
#include <memory>
#include <random>
#include <math.h>
#include "tools/RadosDump.h"
#include "cls/cas/cls_cas_client.h"
#include "cls/cas/cls_cas_internal.h"
#include "include/stringify.h"
#include "global/signal_handler.h"
#include "common/CDC.h"
#include "common/Preforker.h"
#include <boost/program_options/variables_map.hpp>
#include <boost/program_options/parsers.hpp>
using namespace std;
namespace po = boost::program_options;
// Per-target-chunk-size dedup estimation state: one chunker plus the
// fingerprint statistics gathered from every object fed through it.
struct EstimateResult {
  std::unique_ptr<CDC> cdc;   // chunker for this target chunk size
  uint64_t chunk_size;        // target chunk size in bytes

  ceph::mutex lock = ceph::make_mutex("EstimateResult::lock");

  // < fingerprint, <occurrence count, chunk length> >
  map< string, pair <uint64_t, uint64_t> > chunk_statistics;
  uint64_t total_bytes = 0;                  // bytes chunked so far
  std::atomic<uint64_t> total_objects = {0}; // objects chunked so far

  // 'chunk_size' here is log2 of the target size (callers pass cbits()-1).
  EstimateResult(std::string alg, int chunk_size)
    : cdc(CDC::create(alg, chunk_size)),
      chunk_size(1ull << chunk_size) {}

  // Fingerprint one chunk with fp_algo and fold it into the statistics.
  // Thread-safe: the statistics are protected by 'lock'.
  void add_chunk(bufferlist& chunk, const std::string& fp_algo) {
    string fp;
    if (fp_algo == "sha1") {
      sha1_digest_t sha1_val = crypto::digest<crypto::SHA1>(chunk);
      fp = sha1_val.to_str();
    } else if (fp_algo == "sha256") {
      sha256_digest_t sha256_val = crypto::digest<crypto::SHA256>(chunk);
      fp = sha256_val.to_str();
    } else if (fp_algo == "sha512") {
      sha512_digest_t sha512_val = crypto::digest<crypto::SHA512>(chunk);
      fp = sha512_val.to_str();
    } else {
      // callers validate the algorithm up front; anything else is a bug
      ceph_assert(0 == "unsupported fingerprint algorithm");
    }

    std::lock_guard l(lock);
    auto p = chunk_statistics.find(fp);
    if (p != chunk_statistics.end()) {
      p->second.first++;
      if (p->second.second != chunk.length()) {
	cerr << "warning: hash collision on " << fp
	     << ": was " << p->second.second
	     << " now " << chunk.length() << std::endl;
      }
    } else {
      chunk_statistics[fp] = make_pair(1, chunk.length());
    }
    total_bytes += chunk.length();
  }

  // Emit the dedup ratios and chunk-size distribution for this chunker.
  // Safe to call before any chunk was added (all stats report as zero);
  // previously this divided by zero when chunk_statistics was empty.
  void dump(Formatter *f) const {
    f->dump_unsigned("target_chunk_size", chunk_size);

    uint64_t dedup_bytes = 0;
    uint64_t dedup_objects = chunk_statistics.size();
    for (auto& j : chunk_statistics) {
      dedup_bytes += j.second.second;
    }
    f->dump_float("dedup_bytes_ratio",
		  total_bytes ?
		  (double)dedup_bytes / (double)total_bytes : 0.0);
    f->dump_float("dedup_objects_ratio",
		  total_objects ?
		  (double)dedup_objects / (double)total_objects : 0.0);

    // average/stddev of the *unique* chunk sizes
    uint64_t avg = dedup_objects ? total_bytes / dedup_objects : 0;
    uint64_t sqsum = 0;
    for (auto& j : chunk_statistics) {
      sqsum += (avg - j.second.second) * (avg - j.second.second);
    }
    uint64_t stddev = dedup_objects ? sqrt(sqsum / dedup_objects) : 0;
    f->dump_unsigned("chunk_size_average", avg);
    f->dump_unsigned("chunk_size_stddev", stddev);
  }
};
map<uint64_t, EstimateResult> dedup_estimates; // chunk size -> result
using namespace librados;
unsigned default_op_size = 1 << 26;
unsigned default_max_thread = 2;
int32_t default_report_period = 10;
ceph::mutex glock = ceph::make_mutex("glock");
// Build the --help text: one options_description listing the supported
// --op invocations, plus the optional argument descriptions.
// Fixes user-visible typos ("Opational", "deduplciate") in the help output.
po::options_description make_usage() {
  po::options_description desc("Usage");
  desc.add_options()
    ("help,h", ": produce help message")
    ("op estimate --pool <POOL> --chunk-size <CHUNK_SIZE> --chunk-algorithm <ALGO> --fingerprint-algorithm <FP_ALGO>",
     ": estimate how many chunks are redundant")
    ("op chunk-scrub --chunk-pool <POOL>",
     ": perform chunk scrub")
    ("op chunk-get-ref --chunk-pool <POOL> --object <OID> --target-ref <OID> --target-ref-pool-id <POOL_ID>",
     ": get chunk object's reference")
    ("op chunk-put-ref --chunk-pool <POOL> --object <OID> --target-ref <OID> --target-ref-pool-id <POOL_ID>",
     ": put chunk object's reference")
    ("op chunk-repair --chunk-pool <POOL> --object <OID> --target-ref <OID> --target-ref-pool-id <POOL_ID>",
     ": fix mismatched references")
    ("op dump-chunk-refs --chunk-pool <POOL> --object <OID>",
     ": dump chunk object's references")
    ("op chunk-dedup --pool <POOL> --object <OID> --chunk-pool <POOL> --fingerprint-algorithm <FP> --source-off <OFFSET> --source-length <LENGTH>",
     ": perform a chunk dedup---deduplicate only a chunk, which is a part of object.")
    ("op object-dedup --pool <POOL> --object <OID> --chunk-pool <POOL> --fingerprint-algorithm <FP> --dedup-cdc-chunk-size <CHUNK_SIZE> [--snap]",
     ": perform a object dedup---deduplicate the entire object, not a chunk. Related snapshots are also deduplicated if --snap is given")
    ("op sample-dedup --pool <POOL> --chunk-pool <POOL> --chunk-algorithm <ALGO> --fingerprint-algorithm <FP> --daemon --loop",
     ": perform a sample dedup---make crawling threads which crawl objects in base pool and deduplicate them based on their deduplication efficiency")
    ;
  po::options_description op_desc("Optional arguments");
  op_desc.add_options()
    ("op", po::value<std::string>(), ": estimate|chunk-scrub|chunk-get-ref|chunk-put-ref|chunk-repair|dump-chunk-refs|chunk-dedup|object-dedup")
    ("target-ref", po::value<std::string>(), ": set target object")
    ("target-ref-pool-id", po::value<uint64_t>(), ": set target pool id")
    ("object", po::value<std::string>(), ": set object name")
    ("chunk-size", po::value<int>(), ": chunk size (byte)")
    ("chunk-algorithm", po::value<std::string>(), ": <fixed|fastcdc>, set chunk-algorithm")
    ("fingerprint-algorithm", po::value<std::string>(), ": <sha1|sha256|sha512>, set fingerprint-algorithm")
    ("chunk-pool", po::value<std::string>(), ": set chunk pool name")
    ("max-thread", po::value<int>(), ": set max thread")
    ("report-period", po::value<int>(), ": set report-period")
    ("max-seconds", po::value<int>(), ": set max runtime")
    ("max-read-size", po::value<int>(), ": set max read size")
    ("pool", po::value<std::string>(), ": set pool name")
    ("min-chunk-size", po::value<int>(), ": min chunk size (byte)")
    ("max-chunk-size", po::value<int>(), ": max chunk size (byte)")
    ("source-off", po::value<uint64_t>(), ": set source offset")
    ("source-length", po::value<uint64_t>(), ": set source length")
    ("dedup-cdc-chunk-size", po::value<unsigned int>(), ": set dedup chunk size for cdc")
    ("snap", ": deduplicate snapshotted object")
    ("debug", ": enable debug")
    ("pgid", ": set pgid")
    ("chunk-dedup-threshold", po::value<uint32_t>(), ": set the threshold for chunk dedup (number of duplication) ")
    ("sampling-ratio", po::value<int>(), ": set the sampling ratio (percentile)")
    ("daemon", ": execute sample dedup in daemon mode")
    ("loop", ": execute sample dedup in a loop until terminated. Sleeps 'wakeup-period' seconds between iterations")
    ("wakeup-period", po::value<int>(), ": set the wakeup period of crawler thread (sec)")
    ;
  desc.add(op_desc);
  return desc;
}
// Parse the value of a <name,value> option pair as a size with an optional
// IEC suffix (k, M, G, ...) into *val.  On failure, report the offending
// option name on stderr and return -EINVAL; return 0 on success.
template <typename I, typename T>
static int rados_sistrtoll(I &i, T *val) {
  std::string err;
  *val = strict_iecstrtoll(i->second, &err);
  if (err.empty()) {
    return 0;
  }
  cerr << "Invalid value for " << i->first << ": " << err << std::endl;
  return -EINVAL;
}
class EstimateDedupRatio;
class ChunkScrub;
// Base class for the worker threads that iterate over one shard (index n
// of m) of a pool's object listing.  Subclasses implement entry() and
// update the protected counters; the friend declarations below give them
// direct member access, so these names must stay stable.
class CrawlerThread : public Thread
{
  IoCtx io_ctx;
  int n;                        // this worker's shard index
  int m;                        // total number of shards/workers
  ObjectCursor begin;
  ObjectCursor end;
  ceph::mutex m_lock = ceph::make_mutex("CrawlerThread::Locker");
  ceph::condition_variable m_cond;
  int32_t report_period;        // seconds between progress reports (0 = off)
  bool m_stop = false;          // set by signal() to request shutdown
  uint64_t total_bytes = 0;
  uint64_t total_objects = 0;   // pool-wide object count (from pool stats)
  uint64_t examined_objects = 0;
  uint64_t examined_bytes = 0;
  uint64_t max_read_size = 0;   // cap on a single read() when fetching data
  bool debug = false;
#define COND_WAIT_INTERVAL 10
public:
  CrawlerThread(IoCtx& io_ctx, int n, int m,
		ObjectCursor begin, ObjectCursor end, int32_t report_period,
		uint64_t num_objects, uint64_t max_read_size = default_op_size):
    io_ctx(io_ctx), n(n), m(m), begin(begin), end(end),
    report_period(report_period), total_objects(num_objects), max_read_size(max_read_size)
  {}
  // Ask this worker to stop at its next m_stop check and wake any waiter.
  void signal(int signum) {
    std::lock_guard l{m_lock};
    m_stop = true;
    m_cond.notify_all();
  }
  virtual void print_status(Formatter *f, ostream &out) {}
  uint64_t get_examined_objects() { return examined_objects; }
  uint64_t get_examined_bytes() { return examined_bytes; }
  uint64_t get_total_bytes() { return total_bytes; }
  uint64_t get_total_objects() { return total_objects; }
  void set_debug(const bool debug_) { debug = debug_; }
  friend class EstimateDedupRatio;
  friend class ChunkScrub;
};
// Worker that chunks every object in its shard with each configured
// chunker and records fingerprint statistics in the global
// 'dedup_estimates' table (see estimate_dedup_ratio()).
class EstimateDedupRatio : public CrawlerThread
{
  string chunk_algo;     // CDC algorithm name, e.g. "fixed" or "fastcdc"
  string fp_algo;        // "sha1" | "sha256" | "sha512"
  uint64_t chunk_size;
  uint64_t max_seconds;  // stop crawling after this many seconds (0 = off)
public:
  EstimateDedupRatio(
    IoCtx& io_ctx, int n, int m, ObjectCursor begin, ObjectCursor end,
    string chunk_algo, string fp_algo, uint64_t chunk_size, int32_t report_period,
    uint64_t num_objects, uint64_t max_read_size,
    uint64_t max_seconds):
    CrawlerThread(io_ctx, n, m, begin, end, report_period, num_objects,
		  max_read_size),
    chunk_algo(chunk_algo),
    fp_algo(fp_algo),
    chunk_size(chunk_size),
    max_seconds(max_seconds) {
  }
  // Thread entry point: run the estimation crawl to completion.
  void* entry() {
    estimate_dedup_ratio();
    return NULL;
  }
  void estimate_dedup_ratio();
};
// Worker that verifies the reference lists of the chunk objects in its
// shard of the chunk pool, counting chunks with broken references.
class ChunkScrub: public CrawlerThread
{
  IoCtx chunk_io_ctx;
  int damaged_objects = 0;  // chunks with at least one bad reference
public:
  ChunkScrub(IoCtx& io_ctx, int n, int m, ObjectCursor begin, ObjectCursor end,
	     IoCtx& chunk_io_ctx, int32_t report_period, uint64_t num_objects):
    CrawlerThread(io_ctx, n, m, begin, end, report_period, num_objects), chunk_io_ctx(chunk_io_ctx)
  { }
  // Thread entry point: run the scrub to completion.
  void* entry() {
    chunk_scrub_common();
    return NULL;
  }
  void chunk_scrub_common();
  int get_damaged_objects() { return damaged_objects; }
  void print_status(Formatter *f, ostream &out);
};
vector<std::unique_ptr<CrawlerThread>> estimate_threads;
static void print_dedup_estimate(std::ostream& out, std::string chunk_algo)
{
/*
uint64_t total_bytes = 0;
uint64_t total_objects = 0;
*/
uint64_t examined_objects = 0;
uint64_t examined_bytes = 0;
for (auto &et : estimate_threads) {
examined_objects += et->get_examined_objects();
examined_bytes += et->get_examined_bytes();
}
auto f = Formatter::create("json-pretty");
f->open_object_section("results");
f->dump_string("chunk_algo", chunk_algo);
f->open_array_section("chunk_sizes");
for (auto& i : dedup_estimates) {
f->dump_object("chunker", i.second);
}
f->close_section();
f->open_object_section("summary");
f->dump_unsigned("examined_objects", examined_objects);
f->dump_unsigned("examined_bytes", examined_bytes);
/*
f->dump_unsigned("total_objects", total_objects);
f->dump_unsigned("total_bytes", total_bytes);
f->dump_float("examined_ratio", (float)examined_bytes / (float)total_bytes);
*/
f->close_section();
f->close_section();
f->flush(out);
}
// Signal-handler hook: forward the signal to every crawler thread so each
// one stops at its next convenient point.
static void handle_signal(int signum)
{
  std::lock_guard l{glock};
  for (const auto& worker : estimate_threads) {
    worker->signal(signum);
  }
}
// Crawl this worker's shard of the base pool: read each object in full,
// chunk it with every configured chunker, and feed the chunks into the
// global per-chunk-size statistics (dedup_estimates).  Stops early when
// signaled or when --max-seconds elapses.
void EstimateDedupRatio::estimate_dedup_ratio()
{
  ObjectCursor shard_start;
  ObjectCursor shard_end;

  // restrict this thread to shard n of m of the full object listing
  io_ctx.object_list_slice(
    begin,
    end,
    n,
    m,
    &shard_start,
    &shard_end);

  utime_t start = ceph_clock_now();
  utime_t end;
  if (max_seconds) {
    end = start;
    end += max_seconds;
  }

  utime_t next_report;
  if (report_period) {
    next_report = start;
    next_report += report_period;
  }

  ObjectCursor c(shard_start);
  while (c < shard_end)
  {
    std::vector<ObjectItem> result;
    // list the shard in batches of 12 objects
    int r = io_ctx.object_list(c, shard_end, 12, {}, &result, &c);
    if (r < 0 ){
      cerr << "error object_list : " << cpp_strerror(r) << std::endl;
      return;
    }

    unsigned op_size = max_read_size;

    for (const auto & i : result) {
      const auto &oid = i.oid;

      utime_t now = ceph_clock_now();
      // honor the --max-seconds runtime limit
      if (max_seconds && now > end) {
	m_stop = true;
      }
      if (m_stop) {
	return;
      }

      if (n == 0 && // first thread only
	  next_report != utime_t() && now > next_report) {
	cerr << (int)(now - start) << "s : read "
	     << dedup_estimates.begin()->second.total_bytes << " bytes so far..."
	     << std::endl;
	print_dedup_estimate(cerr, chunk_algo);
	next_report = now;
	next_report += report_period;
      }

      // read entire object
      bufferlist bl;
      uint64_t offset = 0;
      while (true) {
	bufferlist t;
	int ret = io_ctx.read(oid, t, op_size, offset);
	if (ret <= 0) {
	  break;  // EOF (0) or read error (<0): proceed with what we have
	}
	offset += ret;
	bl.claim_append(t);
      }
      examined_objects++;
      examined_bytes += bl.length();

      // do the chunking: run every configured chunker over the object
      for (auto& i : dedup_estimates) {
	vector<pair<uint64_t, uint64_t>> chunks;
	i.second.cdc->calc_chunks(bl, &chunks);
	for (auto& p : chunks) {
	  bufferlist chunk;
	  chunk.substr_of(bl, p.first, p.second);
	  i.second.add_chunk(chunk, fp_algo);
	  if (debug) {
	    cout << " " << oid << " " << p.first << "~" << p.second << std::endl;
	  }
	}
	++i.second.total_objects;
      }
    }
  }
}
// Scrub this worker's shard of the chunk pool: for every chunk object,
// decode its reference list and verify that each referencing object still
// exists and still references the chunk.  Problems are reported on stderr
// and counted in damaged_objects; nothing is repaired here.
void ChunkScrub::chunk_scrub_common()
{
  ObjectCursor shard_start;
  ObjectCursor shard_end;
  int ret;
  Rados rados;

  // separate cluster handle, needed to open IoCtxs for the (arbitrary)
  // pools that the chunk references point into
  ret = rados.init_with_context(g_ceph_context);
  if (ret < 0) {
     cerr << "couldn't initialize rados: " << cpp_strerror(ret) << std::endl;
     return;
  }
  ret = rados.connect();
  if (ret) {
     cerr << "couldn't connect to cluster: " << cpp_strerror(ret) << std::endl;
     return;
  }

  chunk_io_ctx.object_list_slice(
    begin,
    end,
    n,
    m,
    &shard_start,
    &shard_end);

  ObjectCursor c(shard_start);
  while(c < shard_end)
  {
    std::vector<ObjectItem> result;
    int r = chunk_io_ctx.object_list(c, shard_end, 12, {}, &result, &c);
    if (r < 0 ){
      cerr << "error object_list : " << cpp_strerror(r) << std::endl;
      return;
    }

    for (const auto & i : result) {
      std::unique_lock l{m_lock};
      // on shutdown, print a final status report before bailing out
      if (m_stop) {
	Formatter *formatter = Formatter::create("json-pretty");
	print_status(formatter, cout);
	delete formatter;
	return;
      }
      auto oid = i.oid;
      cout << oid << std::endl;

      // load and decode the chunk's reference list from its xattr
      chunk_refs_t refs;
      {
	bufferlist t;
	ret = chunk_io_ctx.getxattr(oid, CHUNK_REFCOUNT_ATTR, t);
	if (ret < 0) {
	  continue;  // no refcount attr -> nothing to verify
	}
	auto p = t.cbegin();
	decode(refs, p);
      }

      examined_objects++;
      if (refs.get_type() != chunk_refs_t::TYPE_BY_OBJECT) {
	// we can't do anything here
	continue;
      }

      // check all objects
      chunk_refs_by_object_t *byo =
	static_cast<chunk_refs_by_object_t*>(refs.r.get());
      set<hobject_t> real_refs;

      uint64_t pool_missing = 0;
      uint64_t object_missing = 0;
      uint64_t does_not_ref = 0;
      for (auto& pp : byo->by_object) {
	IoCtx target_io_ctx;
	ret = rados.ioctx_create2(pp.pool, target_io_ctx);
	if (ret < 0) {
	  cerr << oid << " ref " << pp
	       << ": referencing pool does not exist" << std::endl;
	  ++pool_missing;
	  continue;
	}

	// does the referencing object exist and point back at this chunk?
	ret = cls_cas_references_chunk(target_io_ctx, pp.oid.name, oid);
	if (ret == -ENOENT) {
	  cerr << oid << " ref " << pp
	       << ": referencing object missing" << std::endl;
	  ++object_missing;
	} else if (ret == -ENOLINK) {
	  cerr << oid << " ref " << pp
	       << ": referencing object does not reference chunk"
	       << std::endl;
	  ++does_not_ref;
	}
      }
      if (pool_missing || object_missing || does_not_ref) {
	++damaged_objects;
      }
    }
  }
  cout << "--done--" << std::endl;
}
using AioCompRef = unique_ptr<AioCompletion>;
// Worker thread for "--op sample-dedup": crawls one shard of the base
// pool, samples a subset of the objects, chunks and fingerprints them,
// and dedups chunks once their fingerprint has been seen often enough.
class SampleDedupWorkerThread : public Thread
{
public:
  // One CDC chunk of a source object.
  struct chunk_t {
    string oid = "";          // source object name
    size_t start = 0;         // chunk offset within the object
    size_t size = 0;          // chunk length in bytes
    string fingerprint = "";  // fingerprint of the chunk data
    bufferlist data;          // the chunk bytes themselves
  };

  // Fingerprint -> occurrence-count store shared by all workers (via
  // SampleDedupGlobal); guarded by a shared_mutex.
  class FpStore {
  public:
    using dup_count_t = ssize_t;

    // True if this fingerprint has been recorded before.
    // NOTE: find() and add() lock separately, so find()-then-add() is not
    // atomic across threads; callers tolerate the resulting slack.
    bool find(string& fp) {
      std::shared_lock lock(fingerprint_lock);
      auto found_item = fp_map.find(fp);
      return found_item != fp_map.end();
    }

    // Record one more occurrence of the chunk's fingerprint.
    // return true if the chunk is duplicate
    bool add(chunk_t& chunk) {
      std::unique_lock lock(fingerprint_lock);
      auto found_iter = fp_map.find(chunk.fingerprint);
      ssize_t cur_reference = 1;
      if (found_iter == fp_map.end()) {
	fp_map.insert({chunk.fingerprint, 1});
      } else {
	cur_reference = ++found_iter->second;
      }
      // dedup only once the count reaches the threshold (-1 disables)
      return cur_reference >= dedup_threshold && dedup_threshold != -1;
    }

    // Reset the store and (re)arm the dedup threshold.
    void init(size_t dedup_threshold_) {
      std::unique_lock lock(fingerprint_lock);
      fp_map.clear();
      dedup_threshold = dedup_threshold_;
    }
    FpStore(size_t chunk_threshold) : dedup_threshold(chunk_threshold) { }

  private:
    ssize_t dedup_threshold = -1;  // -1 disables deduplication
    std::unordered_map<std::string, dup_count_t> fp_map;
    std::shared_mutex fingerprint_lock;
  };

  // State shared by every sample-dedup worker.
  struct SampleDedupGlobal {
    FpStore fp_store;
    const double sampling_ratio = -1;  // fraction of objects to examine
    SampleDedupGlobal(
      int chunk_threshold,
      int sampling_ratio) :
      fp_store(chunk_threshold),
      sampling_ratio(static_cast<double>(sampling_ratio) / 100) { }
  };

  SampleDedupWorkerThread(
    IoCtx &io_ctx,
    IoCtx &chunk_io_ctx,
    ObjectCursor begin,
    ObjectCursor end,
    size_t chunk_size,
    std::string &fp_algo,
    std::string &chunk_algo,
    SampleDedupGlobal &sample_dedup_global) :
    io_ctx(io_ctx),
    chunk_io_ctx(chunk_io_ctx),
    chunk_size(chunk_size),
    fp_type(pg_pool_t::get_fingerprint_from_str(fp_algo)),
    chunk_algo(chunk_algo),
    sample_dedup_global(sample_dedup_global),
    begin(begin),
    end(end) { }

  ~SampleDedupWorkerThread() { };

protected:
  // Thread entry point: run one crawl over this worker's shard.
  void* entry() override {
    crawl();
    return nullptr;
  }

private:
  void crawl();
  std::tuple<std::vector<ObjectItem>, ObjectCursor> get_objects(
    ObjectCursor current,
    ObjectCursor end,
    size_t max_object_count);
  std::vector<size_t> sample_object(size_t count);
  void try_dedup_and_accumulate_result(ObjectItem &object);
  bool ok_to_dedup_all();
  int do_chunk_dedup(chunk_t &chunk);
  bufferlist read_object(ObjectItem &object);
  std::vector<std::tuple<bufferlist, pair<uint64_t, uint64_t>>> do_cdc(
    ObjectItem &object,
    bufferlist &data);
  std::string generate_fingerprint(bufferlist chunk_data);
  AioCompRef do_async_evict(string oid);

  IoCtx io_ctx;           // base pool
  IoCtx chunk_io_ctx;     // chunk (CAS) pool
  size_t total_duplicated_size = 0;  // bytes seen more than once
  size_t total_object_size = 0;      // bytes examined in total
  std::set<std::string> oid_for_evict;  // deduped objects to tier-evict
  const size_t chunk_size = 0;
  pg_pool_t::fingerprint_t fp_type = pg_pool_t::TYPE_FINGERPRINT_NONE;
  std::string chunk_algo;
  SampleDedupGlobal &sample_dedup_global;  // shared across workers
  ObjectCursor begin;
  ObjectCursor end;
};
void SampleDedupWorkerThread::crawl()
{
cout << "new iteration" << std::endl;
ObjectCursor current_object = begin;
while (current_object < end) {
std::vector<ObjectItem> objects;
// Get the list of object IDs to deduplicate
std::tie(objects, current_object) = get_objects(current_object, end, 100);
// Pick few objects to be processed. Sampling ratio decides how many
// objects to pick. Lower sampling ratio makes crawler have lower crawling
// overhead but find less duplication.
auto sampled_indexes = sample_object(objects.size());
for (size_t index : sampled_indexes) {
ObjectItem target = objects[index];
try_dedup_and_accumulate_result(target);
}
}
vector<AioCompRef> evict_completions(oid_for_evict.size());
int i = 0;
for (auto &oid : oid_for_evict) {
evict_completions[i] = do_async_evict(oid);
i++;
}
for (auto &completion : evict_completions) {
completion->wait_for_complete();
}
cout << "done iteration" << std::endl;
}
// Issue an asynchronous tier-evict for 'oid' on the base pool and return
// the completion so the caller can wait on it.
// NOTE(review): the local Rados instance exists only to call
// aio_create_completion(); it is never init()ed or connected --
// presumably that call does not need a live cluster handle.  Confirm
// against the librados API.
AioCompRef SampleDedupWorkerThread::do_async_evict(string oid)
{
  Rados rados;
  ObjectReadOperation op_tier;
  AioCompRef completion(rados.aio_create_completion());
  op_tier.tier_evict();
  io_ctx.aio_operate(
      oid,
      completion.get(),
      &op_tier,
      NULL);
  return completion;
}
// List up to max_object_count objects from [current, end) on the base
// pool.  Returns the objects found plus the cursor to continue from; on a
// listing error the object vector is returned empty.
std::tuple<std::vector<ObjectItem>, ObjectCursor> SampleDedupWorkerThread::get_objects(
  ObjectCursor current, ObjectCursor end, size_t max_object_count)
{
  std::vector<ObjectItem> found;
  ObjectCursor next_cursor;
  const int r = io_ctx.object_list(
    current,
    end,
    max_object_count,
    {},
    &found,
    &next_cursor);
  if (r < 0) {
    cerr << "error object_list" << std::endl;
    found.clear();
  }
  return std::make_tuple(found, next_cursor);
}
// Pick a random subset of indexes from [0, count); the subset size is
// count * sampling_ratio.  The engine is now seeded from random_device:
// a default-constructed default_random_engine produced the identical
// permutation on every call, so every iteration sampled the same objects.
std::vector<size_t> SampleDedupWorkerThread::sample_object(size_t count)
{
  std::vector<size_t> indexes(count);
  for (size_t i = 0 ; i < count ; i++) {
    indexes[i] = i;
  }
  std::random_device rd;
  default_random_engine generator(rd());
  shuffle(indexes.begin(), indexes.end(), generator);
  size_t sampling_count = static_cast<double>(count) *
    sample_dedup_global.sampling_ratio;
  indexes.resize(sampling_count);
  return indexes;
}
// Chunk one object, count each chunk's fingerprint in the shared store,
// and chunk-dedup the chunks whose count has reached the dedup threshold.
// Also accumulates this worker's duplication statistics.
void SampleDedupWorkerThread::try_dedup_and_accumulate_result(ObjectItem &object)
{
  bufferlist data = read_object(object);
  if (data.length() == 0) {
    cerr << __func__ << " skip object " << object.oid
	 << " read returned size 0" << std::endl;
    return;
  }
  auto chunks = do_cdc(object, data);
  size_t chunk_total_amount = 0;

  // First, check total size of created chunks
  for (auto &chunk : chunks) {
    auto &chunk_data = std::get<0>(chunk);
    chunk_total_amount += chunk_data.length();
  }
  if (chunk_total_amount != data.length()) {
    // chunking lost or duplicated bytes; bail rather than dedup bad data
    cerr << __func__ << " sum of chunked length(" << chunk_total_amount
	 << ") is different from object data length(" << data.length() << ")"
	 << std::endl;
    return;
  }

  size_t duplicated_size = 0;
  list<chunk_t> redundant_chunks;
  for (auto &chunk : chunks) {
    auto &chunk_data = std::get<0>(chunk);
    std::string fingerprint = generate_fingerprint(chunk_data);
    std::pair<uint64_t, uint64_t> chunk_boundary = std::get<1>(chunk);
    chunk_t chunk_info = {
      .oid = object.oid,
      .start = chunk_boundary.first,
      .size = chunk_boundary.second,
      .fingerprint = fingerprint,
      .data = chunk_data
      };

    // seen before -> counts toward the duplicated byte total.
    // NOTE: find() and add() lock separately, so the count may be
    // slightly off under concurrency; this is statistics only.
    if (sample_dedup_global.fp_store.find(fingerprint)) {
      duplicated_size += chunk_data.length();
    }

    // add() returns true when this fingerprint just crossed the dedup
    // threshold; such chunks are deduped below
    if (sample_dedup_global.fp_store.add(chunk_info)) {
      redundant_chunks.push_back(chunk_info);
    }
  }

  size_t object_size = data.length();

  // perform chunk-dedup
  for (auto &p : redundant_chunks) {
    do_chunk_dedup(p);
  }
  total_duplicated_size += duplicated_size;
  total_object_size += object_size;
}
// Read the whole of 'object' from the base pool, default_op_size bytes
// per read.  Returns an empty bufferlist on any read error.
bufferlist SampleDedupWorkerThread::read_object(ObjectItem &object)
{
  bufferlist accum;
  size_t cursor = 0;
  for (;;) {
    bufferlist piece;
    const int r = io_ctx.read(object.oid, piece, default_op_size, cursor);
    if (r == 0) {
      break;  // EOF
    }
    if (r < 0) {
      cerr << "read object error " << object.oid << " offset " << cursor
	   << " size " << default_op_size << " error(" << cpp_strerror(r)
	   << std::endl;
      return bufferlist();
    }
    cursor += r;
    accum.claim_append(piece);
  }
  return accum;
}
// Run the configured CDC algorithm over 'data' and return each chunk as
// a (bytes, (offset, length)) tuple.
std::vector<std::tuple<bufferlist, pair<uint64_t, uint64_t>>> SampleDedupWorkerThread::do_cdc(
  ObjectItem &object,
  bufferlist &data)
{
  unique_ptr<CDC> chunker = CDC::create(chunk_algo, cbits(chunk_size) - 1);
  vector<pair<uint64_t, uint64_t>> boundaries;
  chunker->calc_chunks(data, &boundaries);

  std::vector<std::tuple<bufferlist, pair<uint64_t, uint64_t>>> out;
  out.reserve(boundaries.size());
  for (const auto& [off, len] : boundaries) {
    bufferlist piece;
    piece.substr_of(data, off, len);
    out.emplace_back(std::move(piece), std::make_pair(off, len));
  }
  return out;
}
// Fingerprint a chunk with the configured algorithm (set from the
// --fingerprint-algorithm option at construction time).
std::string SampleDedupWorkerThread::generate_fingerprint(bufferlist chunk_data)
{
  switch (fp_type) {
  case pg_pool_t::TYPE_FINGERPRINT_SHA1:
    return crypto::digest<crypto::SHA1>(chunk_data).to_str();
  case pg_pool_t::TYPE_FINGERPRINT_SHA256:
    return crypto::digest<crypto::SHA256>(chunk_data).to_str();
  case pg_pool_t::TYPE_FINGERPRINT_SHA512:
    return crypto::digest<crypto::SHA512>(chunk_data).to_str();
  default:
    // constructor maps unknown names to TYPE_FINGERPRINT_NONE; reaching
    // this is a programming error
    ceph_assert(0 == "Invalid fp type");
  }
  return std::string();
}
// Ensure the chunk exists in the chunk pool (stored under its fingerprint)
// and redirect the source range of the base object to it via set_chunk.
// Returns the set_chunk result, or the write error.
// Fix: the result of the chunk write was previously ignored, so a failed
// write could still be followed by installing a reference to a chunk
// object that does not exist.
int SampleDedupWorkerThread::do_chunk_dedup(chunk_t &chunk)
{
  uint64_t size;
  time_t mtime;

  int ret = chunk_io_ctx.stat(chunk.fingerprint, &size, &mtime);
  if (ret == -ENOENT) {
    // first occurrence: store the chunk bytes under the fingerprint
    bufferlist bl;
    bl.append(chunk.data);
    ObjectWriteOperation wop;
    wop.write_full(bl);
    ret = chunk_io_ctx.operate(chunk.fingerprint, &wop);
    if (ret < 0) {
      cerr << __func__ << " failed to write chunk " << chunk.fingerprint
	   << ": " << cpp_strerror(ret) << std::endl;
      return ret;
    }
  } else {
    ceph_assert(ret == 0);
  }

  // point [start, start+size) of the base object at the chunk object
  ObjectReadOperation op;
  op.set_chunk(
      chunk.start,
      chunk.size,
      chunk_io_ctx,
      chunk.fingerprint,
      0,
      CEPH_OSD_OP_FLAG_WITH_REFERENCE);
  ret = io_ctx.operate(chunk.oid, &op, nullptr);
  oid_for_evict.insert(chunk.oid);
  return ret;
}
// Emit this scrub thread's progress (totals and damage count) through the
// given Formatter.  Fix: the "chunk_scrub" array section was opened but
// never closed, leaving unbalanced sections at flush time.
void ChunkScrub::print_status(Formatter *f, ostream &out)
{
  if (f) {
    f->open_array_section("chunk_scrub");
    f->dump_string("PID", stringify(get_pid()));
    f->open_object_section("Status");
    f->dump_string("Total object", stringify(total_objects));
    f->dump_string("Examined objects", stringify(examined_objects));
    f->dump_string("damaged objects", stringify(damaged_objects));
    f->close_section();  // Status
    f->close_section();  // chunk_scrub
    f->flush(out);
    cout << std::endl;
  }
}
// Return the required --pool argument, or exit(1) with an error message.
string get_opts_pool_name(const po::variables_map &opts) {
  if (!opts.count("pool")) {
    cerr << "must specify pool name" << std::endl;
    exit(1);
  }
  return opts["pool"].as<string>();
}
// Return the required --chunk-algorithm argument, validating that the CDC
// factory recognizes it; exit(1) when missing or unknown.
string get_opts_chunk_algo(const po::variables_map &opts) {
  if (!opts.count("chunk-algorithm")) {
    cerr << "must specify chunk-algorithm" << std::endl;
    exit(1);
  }
  string chunk_algo = opts["chunk-algorithm"].as<string>();
  // probe with an arbitrary valid chunk-size exponent (12)
  if (!CDC::create(chunk_algo, 12)) {
    cerr << "unrecognized chunk-algorithm " << chunk_algo << std::endl;
    exit(1);
  }
  return chunk_algo;
}
// Return --fingerprint-algorithm, defaulting to "sha1" when absent;
// exit(1) on an unsupported algorithm name.
string get_opts_fp_algo(const po::variables_map &opts) {
  if (!opts.count("fingerprint-algorithm")) {
    cout << "SHA1 is set as fingerprint algorithm by default" << std::endl;
    return string("sha1");
  }
  string fp_algo = opts["fingerprint-algorithm"].as<string>();
  const bool supported =
    fp_algo == "sha1" || fp_algo == "sha256" || fp_algo == "sha512";
  if (!supported) {
    cerr << "unrecognized fingerprint-algorithm " << fp_algo << std::endl;
    exit(1);
  }
  return fp_algo;
}
// Return the required --op argument, or exit(1) with an error message.
string get_opts_op_name(const po::variables_map &opts) {
  if (!opts.count("op")) {
    cerr << "must specify op" << std::endl;
    exit(1);
  }
  return opts["op"].as<string>();
}
// Return the required --chunk-pool argument, or exit(1) with an error.
string get_opts_chunk_pool(const po::variables_map &opts) {
  if (!opts.count("chunk-pool")) {
    cerr << "must specify --chunk-pool" << std::endl;
    exit(1);
  }
  return opts["chunk-pool"].as<string>();
}
// Return the required --object argument, or exit(1) with an error.
string get_opts_object_name(const po::variables_map &opts) {
  if (!opts.count("object")) {
    cerr << "must specify object" << std::endl;
    exit(1);
  }
  return opts["object"].as<string>();
}
// Return --max-thread, defaulting to 2 (with a notice) when absent.
int get_opts_max_thread(const po::variables_map &opts) {
  if (!opts.count("max-thread")) {
    cout << "2 is set as the number of threads by default" << std::endl;
    return 2;
  }
  return opts["max-thread"].as<int>();
}
// Return --report-period in seconds, defaulting to 10 (with a notice).
int get_opts_report_period(const po::variables_map &opts) {
  if (!opts.count("report-period")) {
    cout << "10 seconds is set as report period by default" << std::endl;
    return 10;
  }
  return opts["report-period"].as<int>();
}
// Implements "--op estimate": spawn max_thread crawler threads that chunk
// and fingerprint every object of the pool, then print the aggregate
// dedup statistics.  Returns 0 on success, 1 on error.
//
// Fix: --min-chunk-size and --max-chunk-size were both assigned to
// chunk_size (copy-paste), so min_chunk_size/max_chunk_size were never
// set and the multi-size sweep branch below was unreachable.  They now
// set their own variables, and giving a range without --chunk-size
// clears chunk_size so the sweep actually runs.
int estimate_dedup_ratio(const po::variables_map &opts)
{
  Rados rados;
  IoCtx io_ctx;
  std::string chunk_algo = "fastcdc";
  string fp_algo = "sha1";
  string pool_name;
  uint64_t chunk_size = 8192;
  uint64_t min_chunk_size = 8192;
  uint64_t max_chunk_size = 4*1024*1024;
  unsigned max_thread = default_max_thread;
  uint32_t report_period = default_report_period;
  uint64_t max_read_size = default_op_size;
  uint64_t max_seconds = 0;
  int ret;
  bool debug = false;
  ObjectCursor begin;
  ObjectCursor end;
  librados::pool_stat_t s;
  list<string> pool_names;
  map<string, librados::pool_stat_t> stats;

  pool_name = get_opts_pool_name(opts);
  if (opts.count("chunk-algorithm")) {
    chunk_algo = opts["chunk-algorithm"].as<string>();
    if (!CDC::create(chunk_algo, 12)) {
      cerr << "unrecognized chunk-algorithm " << chunk_algo << std::endl;
      exit(1);
    }
  } else {
    cerr << "must specify chunk-algorithm" << std::endl;
    exit(1);
  }

  fp_algo = get_opts_fp_algo(opts);

  // A fixed --chunk-size estimates a single chunker.  When only a
  // min/max range is given, chunk_size is cleared so every power-of-two
  // size in [min_chunk_size, max_chunk_size] is estimated (see below).
  if (opts.count("chunk-size")) {
    chunk_size = opts["chunk-size"].as<int>();
  } else if (opts.count("min-chunk-size") || opts.count("max-chunk-size")) {
    chunk_size = 0;
  } else {
    cout << "8192 is set as chunk size by default" << std::endl;
  }
  if (opts.count("min-chunk-size")) {
    min_chunk_size = opts["min-chunk-size"].as<int>();
  } else {
    cout << "8192 is set as min chunk size by default" << std::endl;
  }
  if (opts.count("max-chunk-size")) {
    max_chunk_size = opts["max-chunk-size"].as<int>();
  } else {
    cout << "4MB is set as max chunk size by default" << std::endl;
  }

  max_thread = get_opts_max_thread(opts);
  report_period = get_opts_report_period(opts);
  if (opts.count("max-seconds")) {
    max_seconds = opts["max-seconds"].as<int>();
  } else {
    cout << "max seconds is not set" << std::endl;
  }
  if (opts.count("max-read-size")) {
    max_read_size = opts["max-read-size"].as<int>();
  } else {
    cout << default_op_size << " is set as max-read-size by default" << std::endl;
  }
  if (opts.count("debug")) {
    debug = true;
  }
  boost::optional<pg_t> pgid(opts.count("pgid"), pg_t());

  ret = rados.init_with_context(g_ceph_context);
  if (ret < 0) {
    cerr << "couldn't initialize rados: " << cpp_strerror(ret) << std::endl;
    goto out;
  }
  ret = rados.connect();
  if (ret) {
    cerr << "couldn't connect to cluster: " << cpp_strerror(ret) << std::endl;
    ret = -1;
    goto out;
  }
  if (pool_name.empty()) {
    cerr << "--create-pool requested but pool_name was not specified!" << std::endl;
    exit(1);
  }
  ret = rados.ioctx_create(pool_name.c_str(), io_ctx);
  if (ret < 0) {
    cerr << "error opening pool "
	 << pool_name << ": "
	 << cpp_strerror(ret) << std::endl;
    goto out;
  }

  // set up chunkers
  if (chunk_size) {
    // single fixed chunk size
    dedup_estimates.emplace(std::piecewise_construct,
			    std::forward_as_tuple(chunk_size),
			    std::forward_as_tuple(chunk_algo, cbits(chunk_size)-1));
  } else {
    // sweep every power-of-two size in [min_chunk_size, max_chunk_size]
    for (size_t cs = min_chunk_size; cs <= max_chunk_size; cs *= 2) {
      dedup_estimates.emplace(std::piecewise_construct,
			      std::forward_as_tuple(cs),
			      std::forward_as_tuple(chunk_algo, cbits(cs)-1));
    }
  }

  glock.lock();
  begin = io_ctx.object_list_begin();
  end = io_ctx.object_list_end();
  pool_names.push_back(pool_name);
  ret = rados.get_pool_stats(pool_names, stats);
  if (ret < 0) {
    cerr << "error fetching pool stats: " << cpp_strerror(ret) << std::endl;
    glock.unlock();
    return ret;
  }
  if (stats.find(pool_name) == stats.end()) {
    cerr << "stats can not find pool name: " << pool_name << std::endl;
    glock.unlock();
    return ret;
  }
  s = stats[pool_name];

  // launch one crawler per shard
  for (unsigned i = 0; i < max_thread; i++) {
    std::unique_ptr<CrawlerThread> ptr (
      new EstimateDedupRatio(io_ctx, i, max_thread, begin, end,
			     chunk_algo, fp_algo, chunk_size,
			     report_period, s.num_objects, max_read_size,
			     max_seconds));
    ptr->create("estimate_thread");
    ptr->set_debug(debug);
    estimate_threads.push_back(std::move(ptr));
  }
  glock.unlock();

  for (auto &p : estimate_threads) {
    p->join();
  }

  print_dedup_estimate(cout, chunk_algo);

 out:
  return (ret < 0) ? 1 : 0;
}
// Print a summary of a finished chunk-scrub pass: how many objects the
// chunk pool holds, how many were examined, and how many carried damaged
// (dangling) references.
static void print_chunk_scrub()
{
  uint64_t n_total = 0;
  uint64_t n_examined = 0;
  int n_damaged = 0;
  for (const auto &thr : estimate_threads) {
    // every worker reports the same pool-wide total, so take the first one
    if (n_total == 0) {
      n_total = thr->get_total_objects();
    }
    n_examined += thr->get_examined_objects();
    n_damaged += static_cast<ChunkScrub*>(thr.get())->get_damaged_objects();
  }
  cout << " Total object : " << n_total << std::endl;
  cout << " Examined object : " << n_examined << std::endl;
  cout << " Damaged object : " << n_damaged << std::endl;
}
/*
 * Entry point shared by the chunk-pool maintenance ops:
 *  - chunk-get-ref / chunk-put-ref: add or drop one reference from a chunk
 *    object to a target object (identified by --target-ref and
 *    --target-ref-pool-id).
 *  - chunk-repair: compare the chunk object's recorded reference count for
 *    the target against the target's actual references and drop the excess.
 *  - dump-chunk-refs: decode and pretty-print a chunk object's refcount xattr.
 *  - anything else (chunk-scrub): spawn max_thread ChunkScrub workers over
 *    the whole chunk pool and print a damage summary.
 *
 * Returns a shell-style exit code (0 ok / 1 failure) on the scrub path and
 * a negative errno on the early-return paths, matching the original contract.
 */
int chunk_scrub_common(const po::variables_map &opts)
{
  Rados rados;
  IoCtx io_ctx, chunk_io_ctx;
  std::string object_name, target_object_name;
  string chunk_pool_name, op_name;
  int ret;
  unsigned max_thread = default_max_thread;
  std::map<std::string, std::string>::const_iterator i;
  uint32_t report_period = default_report_period;
  ObjectCursor begin;
  ObjectCursor end;
  librados::pool_stat_t s;
  list<string> pool_names;
  map<string, librados::pool_stat_t> stats;
  // all declarations live above the first "goto out" so the jumps are legal
  op_name = get_opts_op_name(opts);
  chunk_pool_name = get_opts_chunk_pool(opts);
  boost::optional<pg_t> pgid(opts.count("pgid"), pg_t());
  ret = rados.init_with_context(g_ceph_context);
  if (ret < 0) {
    cerr << "couldn't initialize rados: " << cpp_strerror(ret) << std::endl;
    goto out;
  }
  ret = rados.connect();
  if (ret) {
    cerr << "couldn't connect to cluster: " << cpp_strerror(ret) << std::endl;
    ret = -1;
    goto out;
  }
  ret = rados.ioctx_create(chunk_pool_name.c_str(), chunk_io_ctx);
  if (ret < 0) {
    cerr << "error opening pool "
         << chunk_pool_name << ": "
         << cpp_strerror(ret) << std::endl;
    goto out;
  }
  if (op_name == "chunk-get-ref" ||
      op_name == "chunk-put-ref" ||
      op_name == "chunk-repair") {
    string target_object_name;
    uint64_t pool_id;
    object_name = get_opts_object_name(opts);
    if (opts.count("target-ref")) {
      target_object_name = opts["target-ref"].as<string>();
    } else {
      cerr << "must specify target ref" << std::endl;
      exit(1);
    }
    if (opts.count("target-ref-pool-id")) {
      pool_id = opts["target-ref-pool-id"].as<uint64_t>();
    } else {
      cerr << "must specify target-ref-pool-id" << std::endl;
      exit(1);
    }
    // the chunk object's hash position pins the hobject_t of the reference
    uint32_t hash;
    ret = chunk_io_ctx.get_object_hash_position2(object_name, &hash);
    if (ret < 0) {
      return ret;
    }
    hobject_t oid(sobject_t(target_object_name, CEPH_NOSNAP), "", hash, pool_id, "");
    // helper: execute a prepared write op against the chunk object
    auto run_op = [] (ObjectWriteOperation& op, hobject_t& oid,
                      string& object_name, IoCtx& chunk_io_ctx) -> int {
      int ret = chunk_io_ctx.operate(object_name, &op);
      if (ret < 0) {
        cerr << " operate fail : " << cpp_strerror(ret) << std::endl;
      }
      return ret;
    };
    ObjectWriteOperation op;
    if (op_name == "chunk-get-ref") {
      cls_cas_chunk_get_ref(op, oid);
      ret = run_op(op, oid, object_name, chunk_io_ctx);
    } else if (op_name == "chunk-put-ref") {
      cls_cas_chunk_put_ref(op, oid);
      ret = run_op(op, oid, object_name, chunk_io_ctx);
    } else if (op_name == "chunk-repair") {
      ret = rados.ioctx_create2(pool_id, io_ctx);
      if (ret < 0) {
        cerr << oid << " ref " << pool_id
             << ": referencing pool does not exist" << std::endl;
        return ret;
      }
      int chunk_ref = -1, base_ref = -1;
      // read object on chunk pool to know how many reference the object has
      bufferlist t;
      ret = chunk_io_ctx.getxattr(object_name, CHUNK_REFCOUNT_ATTR, t);
      if (ret < 0) {
        return ret;
      }
      chunk_refs_t refs;
      auto p = t.cbegin();
      decode(refs, p);
      if (refs.get_type() != chunk_refs_t::TYPE_BY_OBJECT) {
        cerr << " does not supported chunk type " << std::endl;
        return -1;
      }
      chunk_ref =
        static_cast<chunk_refs_by_object_t*>(refs.r.get())->by_object.count(oid);
      if (chunk_ref < 0) {
        cerr << object_name << " has no reference of " << target_object_name
             << std::endl;
        return chunk_ref;
      }
      cout << object_name << " has " << chunk_ref << " references for "
           << target_object_name << std::endl;
      // read object on base pool to know the number of chunk object's references
      base_ref = cls_cas_references_chunk(io_ctx, target_object_name, object_name);
      if (base_ref < 0) {
        // a missing/unlinked base object simply has zero references
        if (base_ref == -ENOENT || base_ref == -ENOLINK) {
          base_ref = 0;
        } else {
          return base_ref;
        }
      }
      cout << target_object_name << " has " << base_ref << " references for "
           << object_name << std::endl;
      if (chunk_ref != base_ref) {
        // the chunk side may only over-count; an under-count is unrepairable
        if (base_ref > chunk_ref) {
          cerr << "error : " << target_object_name << "'s ref. < " << object_name
               << "' ref. " << std::endl;
          return -EINVAL;
        }
        cout << " fix dangling reference from " << chunk_ref << " to " << base_ref
             << std::endl;
        while (base_ref != chunk_ref) {
          ObjectWriteOperation op;
          cls_cas_chunk_put_ref(op, oid);
          chunk_ref--;
          ret = run_op(op, oid, object_name, chunk_io_ctx);
          if (ret < 0) {
            return ret;
          }
        }
      }
    }
    return ret;
  } else if (op_name == "dump-chunk-refs") {
    object_name = get_opts_object_name(opts);
    bufferlist t;
    ret = chunk_io_ctx.getxattr(object_name, CHUNK_REFCOUNT_ATTR, t);
    if (ret < 0) {
      return ret;
    }
    chunk_refs_t refs;
    auto p = t.cbegin();
    decode(refs, p);
    auto f = Formatter::create("json-pretty");
    f->dump_object("refs", refs);
    f->flush(cout);
    return 0;
  }
  // default path: full chunk-pool scrub with worker threads
  max_thread = get_opts_max_thread(opts);
  report_period = get_opts_report_period(opts);
  glock.lock();
  begin = chunk_io_ctx.object_list_begin();
  end = chunk_io_ctx.object_list_end();
  pool_names.push_back(chunk_pool_name);
  ret = rados.get_pool_stats(pool_names, stats);
  if (ret < 0) {
    cerr << "error fetching pool stats: " << cpp_strerror(ret) << std::endl;
    glock.unlock();
    return ret;
  }
  if (stats.find(chunk_pool_name) == stats.end()) {
    cerr << "stats can not find pool name: " << chunk_pool_name << std::endl;
    glock.unlock();
    // bug fix: get_pool_stats() succeeded so ret is 0 here; returning it
    // would report success for a failed run.  Return a real error instead.
    return -ENOENT;
  }
  s = stats[chunk_pool_name];
  for (unsigned i = 0; i < max_thread; i++) {
    std::unique_ptr<CrawlerThread> ptr (
      new ChunkScrub(io_ctx, i, max_thread, begin, end, chunk_io_ctx,
                     report_period, s.num_objects));
    ptr->create("estimate_thread");
    estimate_threads.push_back(std::move(ptr));
  }
  glock.unlock();
  for (auto &p : estimate_threads) {
    cout << "join " << std::endl;
    p->join();
    cout << "joined " << std::endl;
  }
  print_chunk_scrub();
 out:
  return (ret < 0) ? 1 : 0;
}
// Build the JSON mon command that sets pool property `var` to `val` on
// `pool`, e.g.:
//   {"prefix": "osd pool set","pool":"foo","var": "size","val": "3"}
//
// Taking the arguments by const reference avoids three needless string
// copies per call; the produced string is byte-identical to before.
std::string make_pool_str(const std::string& pool, const std::string& var,
                          const std::string& val)
{
  return "{\"prefix\": \"osd pool set\",\"pool\":\"" + pool
    + "\",\"var\": \"" + var + "\",\"val\": \""
    + val + "\"}";
}
string make_pool_str(string pool, string var, int val)
{
return make_pool_str(pool, var, stringify(val));
}
/*
 * Manually deduplicate a single RADOS object.
 *
 * op_name == "chunk-dedup":
 *   copy [source-off, source-off + source-length) of the object into the
 *   chunk pool under its fingerprint name and register that range as a
 *   chunk of the source object via set_chunk().
 * op_name == "object-dedup":
 *   configure the base pool's dedup parameters via mon commands, then
 *   tier-flush and tier-evict the object (and, with --snap, each of its
 *   clones) so the OSD performs the deduplication.
 *
 * Returns 0 on success, 1 on failure (shell-style exit code).
 */
int make_dedup_object(const po::variables_map &opts)
{
  Rados rados;
  IoCtx io_ctx, chunk_io_ctx;
  std::string object_name, chunk_pool_name, op_name, pool_name, fp_algo;
  int ret;
  std::map<std::string, std::string>::const_iterator i;
  op_name = get_opts_op_name(opts);
  pool_name = get_opts_pool_name(opts);
  object_name = get_opts_object_name(opts);
  chunk_pool_name = get_opts_chunk_pool(opts);
  boost::optional<pg_t> pgid(opts.count("pgid"), pg_t());
  ret = rados.init_with_context(g_ceph_context);
  if (ret < 0) {
    cerr << "couldn't initialize rados: " << cpp_strerror(ret) << std::endl;
    goto out;
  }
  ret = rados.connect();
  if (ret) {
    cerr << "couldn't connect to cluster: " << cpp_strerror(ret) << std::endl;
    ret = -1;
    goto out;
  }
  ret = rados.ioctx_create(pool_name.c_str(), io_ctx);
  if (ret < 0) {
    // bug fix: this open is for the base pool, but the message used to
    // print chunk_pool_name (copy/paste from the block below)
    cerr << "error opening pool "
         << pool_name << ": "
         << cpp_strerror(ret) << std::endl;
    goto out;
  }
  ret = rados.ioctx_create(chunk_pool_name.c_str(), chunk_io_ctx);
  if (ret < 0) {
    cerr << "error opening pool "
         << chunk_pool_name << ": "
         << cpp_strerror(ret) << std::endl;
    goto out;
  }
  fp_algo = get_opts_fp_algo(opts);
  if (op_name == "chunk-dedup") {
    uint64_t offset, length;
    string chunk_object;
    if (opts.count("source-off")) {
      offset = opts["source-off"].as<uint64_t>();
    } else {
      cerr << "must specify --source-off" << std::endl;
      exit(1);
    }
    if (opts.count("source-length")) {
      length = opts["source-length"].as<uint64_t>();
    } else {
      cerr << "must specify --source-length" << std::endl;
      exit(1);
    }
    // 1. make a copy from manifest object to chunk object
    bufferlist bl;
    ret = io_ctx.read(object_name, bl, length, offset);
    if (ret < 0) {
      cerr << " reading object in base pool fails : " << cpp_strerror(ret) << std::endl;
      goto out;
    }
    // the chunk object is content-addressed: named by the fingerprint of
    // the data it holds
    chunk_object = [&fp_algo, &bl]() -> string {
      if (fp_algo == "sha1") {
        return ceph::crypto::digest<ceph::crypto::SHA1>(bl).to_str();
      } else if (fp_algo == "sha256") {
        return ceph::crypto::digest<ceph::crypto::SHA256>(bl).to_str();
      } else if (fp_algo == "sha512") {
        return ceph::crypto::digest<ceph::crypto::SHA512>(bl).to_str();
      } else {
        assert(0 == "unrecognized fingerprint type");
        return {};
      }
    }();
    // NOTE(review): the chunk data is written at `offset` inside the chunk
    // object, mirroring its position in the source object -- confirm this
    // is intended rather than writing at offset 0
    ret = chunk_io_ctx.write(chunk_object, bl, length, offset);
    if (ret < 0) {
      cerr << " writing object in chunk pool fails : " << cpp_strerror(ret) << std::endl;
      goto out;
    }
    // 2. call set_chunk
    ObjectReadOperation op;
    op.set_chunk(offset, length, chunk_io_ctx, chunk_object, 0,
                 CEPH_OSD_OP_FLAG_WITH_REFERENCE);
    ret = io_ctx.operate(object_name, &op, NULL);
    if (ret < 0) {
      cerr << " operate fail : " << cpp_strerror(ret) << std::endl;
      goto out;
    }
  } else if (op_name == "object-dedup") {
    unsigned chunk_size = 0;
    bool snap = false;
    if (opts.count("dedup-cdc-chunk-size")) {
      chunk_size = opts["dedup-cdc-chunk-size"].as<unsigned int>();
    } else {
      cerr << "must specify --dedup-cdc-chunk-size" << std::endl;
      exit(1);
    }
    if (opts.count("snap")) {
      snap = true;
    }
    // configure the base pool's dedup settings before flushing
    bufferlist inbl;
    ret = rados.mon_command(
      make_pool_str(pool_name, "fingerprint_algorithm", fp_algo),
      inbl, NULL, NULL);
    if (ret < 0) {
      cerr << " operate fail : " << cpp_strerror(ret) << std::endl;
      return ret;
    }
    ret = rados.mon_command(
      make_pool_str(pool_name, "dedup_tier", chunk_pool_name),
      inbl, NULL, NULL);
    if (ret < 0) {
      cerr << " operate fail : " << cpp_strerror(ret) << std::endl;
      return ret;
    }
    ret = rados.mon_command(
      make_pool_str(pool_name, "dedup_chunk_algorithm", "fastcdc"),
      inbl, NULL, NULL);
    if (ret < 0) {
      cerr << " operate fail : " << cpp_strerror(ret) << std::endl;
      return ret;
    }
    ret = rados.mon_command(
      make_pool_str(pool_name, "dedup_cdc_chunk_size", chunk_size),
      inbl, NULL, NULL);
    if (ret < 0) {
      cerr << " operate fail : " << cpp_strerror(ret) << std::endl;
      return ret;
    }
    // tier-flush dedups the object in place; tier-evict then drops the
    // local (non-deduped) copy
    auto create_new_deduped_object =
      [&io_ctx](string object_name) -> int {
      // tier-flush to perform deduplication
      ObjectReadOperation flush_op;
      flush_op.tier_flush();
      int ret = io_ctx.operate(object_name, &flush_op, NULL);
      if (ret < 0) {
        cerr << " tier_flush fail : " << cpp_strerror(ret) << std::endl;
        return ret;
      }
      // tier-evict
      ObjectReadOperation evict_op;
      evict_op.tier_evict();
      ret = io_ctx.operate(object_name, &evict_op, NULL);
      if (ret < 0) {
        cerr << " tier_evict fail : " << cpp_strerror(ret) << std::endl;
        return ret;
      }
      return ret;
    };
    if (snap) {
      // with --snap, walk every clone of the object and dedup each one
      io_ctx.snap_set_read(librados::SNAP_DIR);
      snap_set_t snap_set;
      int snap_ret;
      ObjectReadOperation op;
      op.list_snaps(&snap_set, &snap_ret);
      // NOTE(review): the operate() return value and snap_ret are not
      // checked here -- a failed list_snaps leaves snap_set empty and the
      // loop below silently does nothing; confirm whether that is intended
      io_ctx.operate(object_name, &op, NULL);
      for (vector<librados::clone_info_t>::const_iterator r = snap_set.clones.begin();
           r != snap_set.clones.end();
           ++r) {
        io_ctx.snap_set_read(r->cloneid);
        ret = create_new_deduped_object(object_name);
        if (ret < 0) {
          goto out;
        }
      }
    } else {
      ret = create_new_deduped_object(object_name);
    }
  }
 out:
  return (ret < 0) ? 1 : 0;
}
/*
 * Run the sample-dedup crawler: configure the base pool's dedup settings
 * via mon commands, then repeatedly (with --loop) shard the base pool
 * across max_thread SampleDedupWorkerThread workers and wait for them.
 * Returns 0 on success, a negative errno / -EINVAL on setup failures.
 */
int make_crawling_daemon(const po::variables_map &opts)
{
  string base_pool_name = get_opts_pool_name(opts);
  string chunk_pool_name = get_opts_chunk_pool(opts);
  unsigned max_thread = get_opts_max_thread(opts);
  bool loop = false;
  if (opts.count("loop")) {
    loop = true;
  }
  // -1 means "not specified"; the worker presumably treats it as a default
  int sampling_ratio = -1;
  if (opts.count("sampling-ratio")) {
    sampling_ratio = opts["sampling-ratio"].as<int>();
  }
  size_t chunk_size = 8192;
  if (opts.count("chunk-size")) {
    chunk_size = opts["chunk-size"].as<int>();
  } else {
    cout << "8192 is set as chunk size by default" << std::endl;
  }
  // NOTE(review): -1 wraps to UINT32_MAX here, i.e. effectively "no
  // threshold" -- looks intentional, but confirm
  uint32_t chunk_dedup_threshold = -1;
  if (opts.count("chunk-dedup-threshold")) {
    chunk_dedup_threshold = opts["chunk-dedup-threshold"].as<uint32_t>();
  }
  std::string chunk_algo = get_opts_chunk_algo(opts);
  Rados rados;
  int ret = rados.init_with_context(g_ceph_context);
  if (ret < 0) {
    cerr << "couldn't initialize rados: " << cpp_strerror(ret) << std::endl;
    return -EINVAL;
  }
  ret = rados.connect();
  if (ret) {
    cerr << "couldn't connect to cluster: " << cpp_strerror(ret) << std::endl;
    return -EINVAL;
  }
  // seconds to sleep between crawl iterations when --loop is given
  int wakeup_period = 100;
  if (opts.count("wakeup-period")) {
    wakeup_period = opts["wakeup-period"].as<int>();
  } else {
    cout << "100 second is set as wakeup period by default" << std::endl;
  }
  std::string fp_algo = get_opts_fp_algo(opts);
  list<string> pool_names;
  IoCtx io_ctx, chunk_io_ctx;
  pool_names.push_back(base_pool_name);
  ret = rados.ioctx_create(base_pool_name.c_str(), io_ctx);
  if (ret < 0) {
    cerr << "error opening base pool "
         << base_pool_name << ": "
         << cpp_strerror(ret) << std::endl;
    return -EINVAL;
  }
  ret = rados.ioctx_create(chunk_pool_name.c_str(), chunk_io_ctx);
  if (ret < 0) {
    cerr << "error opening chunk pool "
         << chunk_pool_name << ": "
         << cpp_strerror(ret) << std::endl;
    return -EINVAL;
  }
  // point the base pool at the chunk pool and set the dedup parameters
  // the OSDs will use
  bufferlist inbl;
  ret = rados.mon_command(
    make_pool_str(base_pool_name, "fingerprint_algorithm", fp_algo),
    inbl, NULL, NULL);
  if (ret < 0) {
    cerr << " operate fail : " << cpp_strerror(ret) << std::endl;
    return ret;
  }
  ret = rados.mon_command(
    make_pool_str(base_pool_name, "dedup_chunk_algorithm", "fastcdc"),
    inbl, NULL, NULL);
  if (ret < 0) {
    cerr << " operate fail : " << cpp_strerror(ret) << std::endl;
    return ret;
  }
  ret = rados.mon_command(
    make_pool_str(base_pool_name, "dedup_cdc_chunk_size", chunk_size),
    inbl, NULL, NULL);
  if (ret < 0) {
    cerr << " operate fail : " << cpp_strerror(ret) << std::endl;
    return ret;
  }
  ret = rados.mon_command(
    make_pool_str(base_pool_name, "dedup_tier", chunk_pool_name),
    inbl, NULL, NULL);
  if (ret < 0) {
    cerr << " operate fail : " << cpp_strerror(ret) << std::endl;
    return ret;
  }
  cout << "SampleRatio : " << sampling_ratio << std::endl
       << "Chunk Dedup Threshold : " << chunk_dedup_threshold << std::endl
       << "Chunk Size : " << chunk_size << std::endl
       << std::endl;
  while (true) {
    // NOTE(review): glock stays held for the entire iteration, including
    // the sleep() below when --loop is set -- confirm nothing else needs
    // the global lock while the daemon sleeps
    lock_guard lock(glock);
    ObjectCursor begin = io_ctx.object_list_begin();
    ObjectCursor end = io_ctx.object_list_end();
    map<string, librados::pool_stat_t> stats;
    ret = rados.get_pool_stats(pool_names, stats);
    if (ret < 0) {
      cerr << "error fetching pool stats: " << cpp_strerror(ret) << std::endl;
      return -EINVAL;
    }
    if (stats.find(base_pool_name) == stats.end()) {
      cerr << "stats can not find pool name: " << base_pool_name << std::endl;
      return -EINVAL;
    }
    SampleDedupWorkerThread::SampleDedupGlobal sample_dedup_global(
      chunk_dedup_threshold, sampling_ratio);
    std::list<SampleDedupWorkerThread> threads;
    // give each worker an equal slice of the pool's object listing
    for (unsigned i = 0; i < max_thread; i++) {
      cout << " add thread.. " << std::endl;
      ObjectCursor shard_start;
      ObjectCursor shard_end;
      io_ctx.object_list_slice(
        begin,
        end,
        i,
        max_thread,
        &shard_start,
        &shard_end);
      threads.emplace_back(
        io_ctx,
        chunk_io_ctx,
        shard_start,
        shard_end,
        chunk_size,
        fp_algo,
        chunk_algo,
        sample_dedup_global);
      threads.back().create("sample_dedup");
    }
    for (auto &p : threads) {
      p.join();
    }
    if (loop) {
      sleep(wakeup_period);
    } else {
      break;
    }
  }
  return 0;
}
/*
 * Dedup-tool entry point: parse command-line options, initialize the ceph
 * context, optionally daemonize via Preforker, install SIGINT/SIGTERM
 * handlers, and dispatch to the handler for the requested op.
 */
int main(int argc, const char **argv)
{
  auto args = argv_to_vec(argc, argv);
  if (args.empty()) {
    cerr << argv[0] << ": -h or --help for usage" << std::endl;
    exit(1);
  }
  po::variables_map opts;
  po::positional_options_description p;
  p.add("command", 1);
  po::options_description desc = make_usage();
  try {
    // allow_unregistered() lets ceph global options pass through untouched
    po::parsed_options parsed =
      po::command_line_parser(argc, argv).options(desc).positional(p).allow_unregistered().run();
    po::store(parsed, opts);
    po::notify(opts);
  } catch(po::error &e) {
    std::cerr << e.what() << std::endl;
    return 1;
  }
  if (opts.count("help") || opts.count("h")) {
    cout<< desc << std::endl;
    exit(0);
  }
  auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
                         CODE_ENVIRONMENT_DAEMON,
                         CINIT_FLAG_UNPRIVILEGED_DAEMON_DEFAULTS);
  // fork early so the daemonized child owns the ceph context; the parent
  // just waits for the child to report readiness and exits
  Preforker forker;
  if (global_init_prefork(g_ceph_context) >= 0) {
    std::string err;
    int r = forker.prefork(err);
    if (r < 0) {
      cerr << err << std::endl;
      return r;
    }
    if (forker.is_parent()) {
      g_ceph_context->_log->start();
      if (forker.parent_wait(err) != 0) {
        return -ENXIO;
      }
      return 0;
    }
    global_init_postfork_start(g_ceph_context);
  }
  common_init_finish(g_ceph_context);
  if (opts.count("daemon")) {
    global_init_postfork_finish(g_ceph_context);
    forker.daemonize();
  }
  init_async_signal_handler();
  register_async_signal_handler_oneshot(SIGINT, handle_signal);
  register_async_signal_handler_oneshot(SIGTERM, handle_signal);
  // dispatch on the requested operation
  string op_name = get_opts_op_name(opts);
  int ret = 0;
  if (op_name == "estimate") {
    ret = estimate_dedup_ratio(opts);
  } else if (op_name == "chunk-scrub" ||
             op_name == "chunk-get-ref" ||
             op_name == "chunk-put-ref" ||
             op_name == "chunk-repair" ||
             op_name == "dump-chunk-refs") {
    ret = chunk_scrub_common(opts);
  } else if (op_name == "chunk-dedup" ||
             op_name == "object-dedup") {
    /*
     * chunk-dedup:
     * using a chunk generated by given source,
     * create a new object in the chunk pool or increase the reference
     * if the object exists
     *
     * object-dedup:
     * perform deduplication on the entire object, not a chunk.
     *
     */
    ret = make_dedup_object(opts);
  } else if (op_name == "sample-dedup") {
    ret = make_crawling_daemon(opts);
  } else {
    cerr << "unrecognized op " << op_name << std::endl;
    exit(1);
  }
  unregister_async_signal_handler(SIGINT, handle_signal);
  unregister_async_signal_handler(SIGTERM, handle_signal);
  shutdown_async_signal_handler();
  return forker.signal_exit(ret);
}
| 51,935 | 28.177528 | 150 | cc |
null | ceph-main/src/tools/ceph_kvstore_tool.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2012 Inktank, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#include <map>
#include <set>
#include <string>
#include <fstream>
#include "common/ceph_argparse.h"
#include "common/config.h"
#include "common/errno.h"
#include "common/strtol.h"
#include "common/url_escape.h"
#include "global/global_context.h"
#include "global/global_init.h"
#include "kvstore_tool.h"
using namespace std;
// Print the command-line help for ceph-kvstore-tool to stdout.
void usage(const char *pname)
{
  // Adjacent string literals are concatenated by the compiler, so this
  // produces exactly the same output as chaining each line through '<<'.
  std::cout << "Usage: " << pname
            << " <rocksdb|bluestore-kv> <store path> command [args...]\n"
               "\n"
               "Commands:\n"
               " list [prefix]\n"
               " list-crc [prefix]\n"
               " dump [prefix]\n"
               " exists <prefix> [key]\n"
               " get <prefix> <key> [out <file>]\n"
               " crc <prefix> <key>\n"
               " get-size [<prefix> <key>]\n"
               " set <prefix> <key> [ver <N>|in <file>]\n"
               " rm <prefix> <key>\n"
               " rm-prefix <prefix>\n"
               " store-copy <path> [num-keys-per-tx] [rocksdb|...] \n"
               " store-crc <path>\n"
               " compact\n"
               " compact-prefix <prefix>\n"
               " compact-range <prefix> <start> <end>\n"
               " destructive-repair (use only as last resort! may corrupt healthy data)\n"
               " stats\n"
               " histogram [prefix]\n"
            << std::endl;
}
int main(int argc, const char *argv[])
{
auto args = argv_to_vec(argc, argv);
if (args.empty()) {
cerr << argv[0] << ": -h or --help for usage" << std::endl;
exit(1);
}
if (ceph_argparse_need_usage(args)) {
usage(argv[0]);
exit(0);
}
map<string,string> defaults = {
{ "debug_rocksdb", "2" }
};
auto cct = global_init(
&defaults, args,
CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
common_init_finish(g_ceph_context);
ceph_assert((int)args.size() < argc);
for(size_t i=0; i<args.size(); i++)
argv[i+1] = args[i];
argc = args.size() + 1;
if (args.size() < 3) {
usage(argv[0]);
return 1;
}
string type(args[0]);
string path(args[1]);
string cmd(args[2]);
if (type != "rocksdb" &&
type != "bluestore-kv") {
std::cerr << "Unrecognized type: " << args[0] << std::endl;
usage(argv[0]);
return 1;
}
bool to_repair = (cmd == "destructive-repair");
bool need_stats = (cmd == "stats");
StoreTool st(type, path, to_repair, need_stats);
if (cmd == "destructive-repair") {
int ret = st.destructive_repair();
if (!ret) {
std::cout << "destructive-repair completed without reporting an error"
<< std::endl;
} else {
std::cout << "destructive-repair failed with " << cpp_strerror(ret)
<< std::endl;
}
return ret;
} else if (cmd == "list" || cmd == "list-crc") {
string prefix;
if (argc > 4)
prefix = url_unescape(argv[4]);
bool do_crc = (cmd == "list-crc");
st.list(prefix, do_crc, false);
} else if (cmd == "dump") {
string prefix;
if (argc > 4)
prefix = url_unescape(argv[4]);
st.list(prefix, false, true);
} else if (cmd == "exists") {
string key;
if (argc < 5) {
usage(argv[0]);
return 1;
}
string prefix(url_unescape(argv[4]));
if (argc > 5)
key = url_unescape(argv[5]);
bool ret = st.exists(prefix, key);
std::cout << "(" << url_escape(prefix) << ", " << url_escape(key) << ") "
<< (ret ? "exists" : "does not exist")
<< std::endl;
return (ret ? 0 : 1);
} else if (cmd == "get") {
if (argc < 6) {
usage(argv[0]);
return 1;
}
string prefix(url_unescape(argv[4]));
string key(url_unescape(argv[5]));
bool exists = false;
bufferlist bl = st.get(prefix, key, exists);
std::cout << "(" << url_escape(prefix) << ", " << url_escape(key) << ")";
if (!exists) {
std::cout << " does not exist" << std::endl;
return 1;
}
std::cout << std::endl;
if (argc >= 7) {
string subcmd(argv[6]);
if (subcmd != "out") {
std::cerr << "unrecognized subcmd '" << subcmd << "'"
<< std::endl;
return 1;
}
if (argc < 8) {
std::cerr << "output path not specified" << std::endl;
return 1;
}
string out(argv[7]);
if (out.empty()) {
std::cerr << "unspecified out file" << std::endl;
return 1;
}
int err = bl.write_file(argv[7], 0644);
if (err < 0) {
std::cerr << "error writing value to '" << out << "': "
<< cpp_strerror(err) << std::endl;
return 1;
}
} else {
ostringstream os;
bl.hexdump(os);
std::cout << os.str() << std::endl;
}
} else if (cmd == "crc") {
if (argc < 6) {
usage(argv[0]);
return 1;
}
string prefix(url_unescape(argv[4]));
string key(url_unescape(argv[5]));
bool exists = false;
bufferlist bl = st.get(prefix, key, exists);
std::cout << "(" << url_escape(prefix) << ", " << url_escape(key) << ") ";
if (!exists) {
std::cout << " does not exist" << std::endl;
return 1;
}
std::cout << " crc " << bl.crc32c(0) << std::endl;
} else if (cmd == "get-size") {
std::cout << "estimated store size: " << st.get_size() << std::endl;
if (argc < 5)
return 0;
if (argc < 6) {
usage(argv[0]);
return 1;
}
string prefix(url_unescape(argv[4]));
string key(url_unescape(argv[5]));
bool exists = false;
bufferlist bl = st.get(prefix, key, exists);
if (!exists) {
std::cerr << "(" << url_escape(prefix) << "," << url_escape(key)
<< ") does not exist" << std::endl;
return 1;
}
std::cout << "(" << url_escape(prefix) << "," << url_escape(key)
<< ") size " << byte_u_t(bl.length()) << std::endl;
} else if (cmd == "set") {
if (argc < 8) {
usage(argv[0]);
return 1;
}
string prefix(url_unescape(argv[4]));
string key(url_unescape(argv[5]));
string subcmd(argv[6]);
bufferlist val;
string errstr;
if (subcmd == "ver") {
version_t v = (version_t) strict_strtoll(argv[7], 10, &errstr);
if (!errstr.empty()) {
std::cerr << "error reading version: " << errstr << std::endl;
return 1;
}
encode(v, val);
} else if (subcmd == "in") {
int ret = val.read_file(argv[7], &errstr);
if (ret < 0 || !errstr.empty()) {
std::cerr << "error reading file: " << errstr << std::endl;
return 1;
}
} else {
std::cerr << "unrecognized subcommand '" << subcmd << "'" << std::endl;
usage(argv[0]);
return 1;
}
bool ret = st.set(prefix, key, val);
if (!ret) {
std::cerr << "error setting ("
<< url_escape(prefix) << "," << url_escape(key) << ")" << std::endl;
return 1;
}
} else if (cmd == "rm") {
if (argc < 6) {
usage(argv[0]);
return 1;
}
string prefix(url_unescape(argv[4]));
string key(url_unescape(argv[5]));
bool ret = st.rm(prefix, key);
if (!ret) {
std::cerr << "error removing ("
<< url_escape(prefix) << "," << url_escape(key) << ")"
<< std::endl;
return 1;
}
} else if (cmd == "rm-prefix") {
if (argc < 5) {
usage(argv[0]);
return 1;
}
string prefix(url_unescape(argv[4]));
bool ret = st.rm_prefix(prefix);
if (!ret) {
std::cerr << "error removing prefix ("
<< url_escape(prefix) << ")"
<< std::endl;
return 1;
}
} else if (cmd == "store-copy") {
int num_keys_per_tx = 128; // magic number that just feels right.
if (argc < 5) {
usage(argv[0]);
return 1;
} else if (argc > 5) {
string err;
num_keys_per_tx = strict_strtol(argv[5], 10, &err);
if (!err.empty()) {
std::cerr << "invalid num_keys_per_tx: " << err << std::endl;
return 1;
}
}
string other_store_type = argv[1];
if (argc > 6) {
other_store_type = argv[6];
}
int ret = st.copy_store_to(argv[1], argv[4], num_keys_per_tx, other_store_type);
if (ret < 0) {
std::cerr << "error copying store to path '" << argv[4]
<< "': " << cpp_strerror(ret) << std::endl;
return 1;
}
} else if (cmd == "store-crc") {
if (argc < 4) {
usage(argv[0]);
return 1;
}
std::ofstream fs(argv[4]);
uint32_t crc = st.traverse(string(), true, false, &fs);
std::cout << "store at '" << argv[4] << "' crc " << crc << std::endl;
} else if (cmd == "compact") {
st.compact();
} else if (cmd == "compact-prefix") {
if (argc < 5) {
usage(argv[0]);
return 1;
}
string prefix(url_unescape(argv[4]));
st.compact_prefix(prefix);
} else if (cmd == "compact-range") {
if (argc < 7) {
usage(argv[0]);
return 1;
}
string prefix(url_unescape(argv[4]));
string start(url_unescape(argv[5]));
string end(url_unescape(argv[6]));
st.compact_range(prefix, start, end);
} else if (cmd == "stats") {
st.print_stats();
} else if (cmd == "histogram") {
string prefix;
if (argc > 4)
prefix = url_unescape(argv[4]);
st.build_size_histogram(prefix);
} else {
std::cerr << "Unrecognized command: " << cmd << std::endl;
return 1;
}
return 0;
}
| 9,644 | 25.570248 | 95 | cc |
null | ceph-main/src/tools/ceph_monstore_tool.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2012 Inktank, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#include <boost/program_options/variables_map.hpp>
#include <boost/program_options/parsers.hpp>
#include <boost/scope_exit.hpp>
#include <stdlib.h>
#include <string>
#include "common/Formatter.h"
#include "common/errno.h"
#include "auth/KeyRing.h"
#include "auth/cephx/CephxKeyServer.h"
#include "global/global_init.h"
#include "include/scope_guard.h"
#include "include/stringify.h"
#include "mgr/mgr_commands.h"
#include "mon/AuthMonitor.h"
#include "mon/MonitorDBStore.h"
#include "mon/Paxos.h"
#include "mon/MonMap.h"
#include "mds/FSMap.h"
#include "mon/MgrMap.h"
#include "osd/OSDMap.h"
#include "crush/CrushCompiler.h"
#include "mon/CreatingPGs.h"
namespace po = boost::program_options;
using namespace std;
/**
 * Sequential reader over a monitor transaction trace file.
 *
 * Each record in the file is a 6-byte header (1-byte version, 1-byte
 * sub-version, 4-byte payload length) followed by an encoded
 * MonitorDBStore::Transaction.  next() advances to the following record;
 * valid() turns false on EOF, short read, or read error.
 *
 * Fix: the class owns a raw file descriptor and closes it in the
 * destructor, but previously left copy construction/assignment implicitly
 * enabled — copying would have double-closed the fd (rule of five).
 * Copies are now explicitly deleted.
 */
class TraceIter {
  int fd;       // trace file descriptor; -1 once EOF or an error is hit
  unsigned idx; // index of the current transaction (wraps -1 -> 0 on first next())
  MonitorDBStore::TransactionRef t;
public:
  explicit TraceIter(string fname) : fd(-1), idx(-1) {
    fd = ::open(fname.c_str(), O_RDONLY|O_BINARY);
    t.reset(new MonitorDBStore::Transaction);
  }
  // non-copyable: copying would double-close fd in the destructor
  TraceIter(const TraceIter&) = delete;
  TraceIter& operator=(const TraceIter&) = delete;
  // true while the iterator still has a readable record
  bool valid() {
    return fd != -1;
  }
  // current transaction; only meaningful while valid()
  MonitorDBStore::TransactionRef cur() {
    ceph_assert(valid());
    return t;
  }
  // zero-based index of the current transaction
  unsigned num() { return idx; }
  // advance to the next transaction; invalidates the iterator on
  // EOF, short read, or read error
  void next() {
    ++idx;
    bufferlist bl;
    // read the fixed 6-byte record header first
    int r = bl.read_fd(fd, 6);
    if (r < 0) {
      std::cerr << "Got error: " << cpp_strerror(r) << " on read_fd"
                << std::endl;
      ::close(fd);
      fd = -1;
      return;
    } else if ((unsigned)r < 6) {
      std::cerr << "short read" << std::endl;
      ::close(fd);
      fd = -1;
      return;
    }
    auto bliter = bl.cbegin();
    uint8_t ver, ver2;
    decode(ver, bliter);
    decode(ver2, bliter);
    uint32_t len;
    decode(len, bliter);
    // then the encoded transaction payload of `len` bytes
    r = bl.read_fd(fd, len);
    if (r < 0) {
      std::cerr << "Got error: " << cpp_strerror(r) << " on read_fd"
                << std::endl;
      ::close(fd);
      fd = -1;
      return;
    } else if ((unsigned)r < len) {
      std::cerr << "short read" << std::endl;
      ::close(fd);
      fd = -1;
      return;
    }
    bliter = bl.cbegin();
    t.reset(new MonitorDBStore::Transaction);
    t->decode(bliter);
  }
  // position on the first record (idx becomes 0)
  void init() {
    next();
  }
  ~TraceIter() {
    if (fd != -1) {
      ::close(fd);
      fd = -1;
    }
  }
};
/**
 * Parse cmd_args into *vm.
 *
 * program_options itself makes no distinction between 'desc' (options we
 * show in usage()) and 'hidden_desc' (options we accept — e.g. the targets
 * of positional arguments — but keep out of the help text); the split only
 * controls what usage() prints.  Parsing therefore runs against the union
 * of both descriptions.
 *
 * @param desc        visible options (required)
 * @param hidden_desc options accepted but not shown in usage() (may be NULL)
 * @param positional  mapping of positional args to options (may be NULL)
 * @param cmd_args    raw arguments to parse
 * @param vm          receives the parsed variables
 * @return 0 on success, -EINVAL on a parse error (message goes to stderr)
 */
int parse_cmd_args(
    po::options_description *desc, /// < visible options description
    po::options_description *hidden_desc, /// < hidden options description
    po::positional_options_description *positional, /// < positional args
    vector<string> &cmd_args, /// < arguments to be parsed
    po::variables_map *vm /// > post-parsing variable map
    )
{
  ceph_assert(desc != NULL);
  // aggregate visible + hidden options for the parser
  po::options_description all_opts;
  all_opts.add(*desc);
  if (hidden_desc != NULL) {
    all_opts.add(*hidden_desc);
  }
  try {
    auto parser = po::command_line_parser(cmd_args).options(all_opts);
    if (positional) {
      parser = parser.positional(*positional);
    }
    po::store(parser.run(), *vm);
    po::notify(*vm);
  } catch (po::error &e) {
    std::cerr << "error: " << e.what() << std::endl;
    return -EINVAL;
  }
  return 0;
}
/**
* usage: ceph-monstore-tool <store-path> <command> [options]
*
* commands:
*
* store-copy < --out arg >
* dump-keys
* compact
* getmonmap < --out arg [ --version arg ] >
* getosdmap < --out arg [ --version arg ] >
* dump-paxos <--dump-start VER> <--dump-end VER>
* dump-trace < --trace-file arg >
* replay-trace
* random-gen
* rewrite-crush
*
* wanted syntax:
*
* ceph-monstore-tool PATH CMD [options]
*
* ceph-monstore-tool PATH store-copy <PATH2 | -o PATH2>
* ceph-monstore-tool PATH dump-keys
* ceph-monstore-tool PATH compact
* ceph-monstore-tool PATH get monmap [VER]
* ceph-monstore-tool PATH get osdmap [VER]
* ceph-monstore-tool PATH dump-paxos STARTVER ENDVER
*
*
*/
// Print the tool's usage to stderr: the general invocation syntax, the
// list of supported commands, the visible option descriptions held in 'd',
// and a couple of caveats about how options must be spelled on the command
// line. 'n' is argv[0], echoed back in the syntax line.
void usage(const char *n, po::options_description &d)
{
  // syntax line plus the per-command summary
  std::cerr << "usage: " << n << " <store-path> <cmd> [args|options]\n";
  std::cerr
    << "\n"
    << "Commands:\n"
    << " store-copy PATH copies store to PATH\n"
    << " compact compacts the store\n"
    << " get monmap [-- options] get monmap (version VER if specified)\n"
    << " (default: last committed)\n"
    << " get osdmap [-- options] get osdmap (version VER if specified)\n"
    << " (default: last committed)\n"
    << " get mdsmap [-- options] get mdsmap (version VER if specified)\n"
    << " (default: last committed)\n"
    << " get mgr [-- options] get mgr map (version VER if specified)\n"
    << " (default: last committed)\n"
    << " get crushmap [-- options] get crushmap (version VER if specified)\n"
    << " (default: last committed)\n"
    << " show-versions [-- options] show the first&last committed version of map\n"
    << " (show-versions -- --help for more info)\n"
    << " dump-keys dumps store keys to FILE\n"
    << " (default: stdout)\n"
    << " dump-paxos [-- options] dump paxos transactions\n"
    << " (dump-paxos -- --help for more info)\n"
    << " dump-trace FILE [-- options] dump contents of trace file FILE\n"
    << " (dump-trace -- --help for more info)\n"
    << " replay-trace FILE [-- options] replay trace from FILE\n"
    << " (replay-trace -- --help for more info)\n"
    << " random-gen [-- options] add randomly generated ops to the store\n"
    << " (random-gen -- --help for more info)\n"
    << " rewrite-crush [-- options] add a rewrite commit to the store\n"
    << " (rewrite-crush -- --help for more info)\n"
    << " rebuild rebuild store\n"
    << " (rebuild -- --help for more info)\n"
    << std::endl;
  // the registered (visible) options
  std::cerr << d << std::endl;
  // caveats about option syntax
  std::cerr
    << "\nPlease Note:\n"
    << "* Ceph-specific options should be in the format --option-name=VAL\n"
    << " (specifically, do not forget the '='!!)\n"
    << "* Command-specific options need to be passed after a '--'\n"
    << " e.g., 'get monmap -- --version 10 --out /tmp/foo'"
    << std::endl;
}
/**
 * Rewrite the stored full and incremental osdmap at version @ver so that
 * both carry the given (known-good) crush map, staging the result into
 * transaction @t (nothing is applied to the store here).
 *
 * @param store  the monitor store to read the maps from
 * @param ver    osdmap epoch to rewrite
 * @param copy   if true, generate a brand-new epoch (ver's full map with
 *               epoch+1) instead of rewriting @ver in place; used to force
 *               the OSDMonitor to reload after the fix commits
 * @param crush  the known-good crush map to inject
 * @param t      output transaction receiving the rewritten maps
 * @return 0 on success, or the (negative) error from the store lookups
 */
int update_osdmap(MonitorDBStore& store, version_t ver, bool copy,
                  std::shared_ptr<CrushWrapper> crush,
                  MonitorDBStore::Transaction* t) {
  const string prefix("osdmap");
  // full
  bufferlist bl;
  int r = 0;
  r = store.get(prefix, store.combine_strings("full", ver), bl);
  if (r) {
    std::cerr << "Error getting full map: " << cpp_strerror(r) << std::endl;
    return r;
  }
  OSDMap osdmap;
  osdmap.decode(bl);
  osdmap.crush = crush;
  if (copy) {
    osdmap.inc_epoch();
  }
  bl.clear();
  // be consistent with OSDMonitor::update_from_paxos()
  osdmap.encode(bl, CEPH_FEATURES_ALL|CEPH_FEATURE_RESERVED);
  t->put(prefix, store.combine_strings("full", osdmap.get_epoch()), bl);
  // incremental
  OSDMap::Incremental inc;
  if (copy) {
    inc.epoch = osdmap.get_epoch();
    inc.fsid = osdmap.get_fsid();
  } else {
    bl.clear();
    r = store.get(prefix, ver, bl);
    if (r) {
      std::cerr << "Error getting inc map: " << cpp_strerror(r) << std::endl;
      return r;
    }
    // BUGFIX: decode into the *outer* 'inc'.  The previous code declared a
    // new local 'OSDMap::Incremental inc(bl);' here, shadowing the outer
    // one: the crush/fullmap rewrite below was thrown away and the
    // default-constructed outer inc (epoch 0) was what got encoded and
    // stored.
    inc = OSDMap::Incremental(bl);
    if (inc.crush.length()) {
      // replace the embedded crush map with the known-good one
      inc.crush.clear();
      crush->encode(inc.crush, CEPH_FEATURES_SUPPORTED_DEFAULT);
    }
    if (inc.fullmap.length()) {
      // an embedded full map gets the same treatment
      OSDMap fullmap;
      fullmap.decode(inc.fullmap);
      fullmap.crush = crush;
      inc.fullmap.clear();
      fullmap.encode(inc.fullmap);
    }
  }
  ceph_assert(osdmap.have_crc());
  inc.full_crc = osdmap.get_crc();
  bl.clear();
  // be consistent with OSDMonitor::update_from_paxos()
  inc.encode(bl, CEPH_FEATURES_ALL|CEPH_FEATURE_RESERVED);
  t->put(prefix, inc.epoch, bl);
  return 0;
}
/**
 * Build a transaction (@t) that replaces every osdmap epoch in
 * (good_version, last_committed] with one carrying a known-good crush map,
 * then appends one extra epoch (last_committed+1) so the OSDMonitor --
 * which caches its in-memory state *before* paxos recovery runs -- picks
 * up the repaired map once the proposal commits.
 *
 * @param store      the monitor store to read from
 * @param version    known-good epoch; if <= 0 it is taken relative to
 *                   last_committed (e.g. -1 => last_committed-1)
 * @param crush_file optional path to a crushmap to inject; when empty, the
 *                   crush map embedded in the known-good full osdmap is used
 * @param t          output transaction (not applied here)
 * @return 0 on success or if there is nothing to rewrite; EINVAL
 *         (positive) on a bad version argument; a negative errno from
 *         store/file reads otherwise
 */
int rewrite_transaction(MonitorDBStore& store, int version,
                        const string& crush_file,
                        MonitorDBStore::Transaction* t) {
  const string prefix("osdmap");
  // calc the known-good epoch
  version_t last_committed = store.get(prefix, "last_committed");
  version_t good_version = 0;
  if (version <= 0) {
    // a non-positive version means "last_committed + version"
    if (last_committed >= (unsigned)-version) {
      good_version = last_committed + version;
    } else {
      std::cerr << "osdmap-version is less than: -" << last_committed << std::endl;
      // NOTE(review): positive EINVAL, unlike the negative errnos returned
      // elsewhere in this file -- callers only test for non-zero, so this
      // is benign, but worth unifying.
      return EINVAL;
    }
  } else {
    good_version = version;
  }
  if (good_version >= last_committed) {
    // nothing after the good epoch: nothing to rewrite
    std::cout << "good epoch is greater or equal to the last committed one: "
              << good_version << " >= " << last_committed << std::endl;
    return 0;
  }
  // load/extract the crush map
  int r = 0;
  std::shared_ptr<CrushWrapper> crush(new CrushWrapper);
  if (crush_file.empty()) {
    // no file given: pull the crush map out of the known-good full osdmap
    bufferlist bl;
    r = store.get(prefix, store.combine_strings("full", good_version), bl);
    if (r) {
      std::cerr << "Error getting map: " << cpp_strerror(r) << std::endl;
      return r;
    }
    OSDMap osdmap;
    osdmap.decode(bl);
    crush = osdmap.crush;
  } else {
    // read and decode the crushmap supplied by the user
    string err;
    bufferlist bl;
    r = bl.read_file(crush_file.c_str(), &err);
    if (r) {
      std::cerr << err << ": " << cpp_strerror(r) << std::endl;
      return r;
    }
    auto p = bl.cbegin();
    crush->decode(p);
  }
  // prepare a transaction to rewrite the epochs
  // (good_version, last_committed]
  // with the good crush map.
  // XXX: may need to break this into several paxos versions?
  ceph_assert(good_version < last_committed);
  for (version_t v = good_version + 1; v <= last_committed; v++) {
    cout << "rewriting epoch #" << v << "/" << last_committed << std::endl;
    r = update_osdmap(store, v, false, crush, t);
    if (r)
      return r;
  }
  // add a new osdmap epoch to store, so monitors will update their current osdmap
  // in addition to the ones stored in epochs.
  //
  // This is needed due to the way the monitor updates from paxos and the
  // facilities we are leveraging to push this update to the rest of the
  // quorum.
  //
  // In a nutshell, we are generating a good version of the osdmap, with a
  // proper crush, and building a transaction that will replace the bad
  // osdmaps with good osdmaps. But this transaction needs to be applied on
  // all nodes, so that the monitors will have good osdmaps to share with
  // clients. We thus leverage Paxos, specifically the recovery mechanism, by
  // creating a pending value that will be committed once the monitors form an
  // initial quorum after being brought back to life.
  //
  // However, the way the monitor works has the paxos services, including the
  // OSDMonitor, updating their state from disk *prior* to the recovery phase
  // begins (so they have an up to date state in memory). This means the
  // OSDMonitor will see the old, broken map, before the new paxos version is
  // applied to disk, and the old version is cached. Even though we have the
  // good map now, and we share the good map with clients, we will still be
  // working on the old broken map. Instead of mucking around the monitor to
  // make this work, we instead opt for adding the same osdmap but with a
  // newer version, so that the OSDMonitor picks up on it when it updates from
  // paxos after the proposal has been committed. This is not elegant, but
  // avoids further unpleasantness that would arise from kludging around the
  // current behavior. Also, has the added benefit of making sure the clients
  // get an updated version of the map (because last_committed+1 >
  // last_committed) :)
  //
  // note: update_osdmap() is handed the *old* last_committed and bumps the
  // epoch itself (copy=true); the post-increment advances our local
  // last_committed to match the new epoch recorded below.
  cout << "adding a new epoch #" << last_committed+1 << std::endl;
  r = update_osdmap(store, last_committed++, true, crush, t);
  if (r)
    return r;
  t->put(prefix, store.combine_strings("full", "latest"), last_committed);
  t->put(prefix, "last_committed", last_committed);
  return 0;
}
/**
* create a new paxos version which carries a proposal to rewrite all epochs
* of incremental and full map of "osdmap" after a faulty crush map is injected.
* so the leader will trigger a recovery and propagate this fix to its peons,
* after the proposal is accepted, and the transaction in it is applied. all
* monitors will rewrite the bad crush map with the good one, and have a new
* osdmap epoch with the good crush map in it.
*/
int rewrite_crush(const char* progname,
vector<string>& subcmds,
MonitorDBStore& store) {
po::options_description op_desc("Allowed 'rewrite-crush' options");
int version = -1;
string crush_file;
op_desc.add_options()
("help,h", "produce this help message")
("crush", po::value<string>(&crush_file),
("path to the crush map file "
"(default: will instead extract it from the known-good osdmap)"))
("good-epoch", po::value<int>(&version),
"known-good epoch of osdmap, if a negative number '-N' is given, the "
"$last_committed-N is used instead (default: -1). "
"Please note, -1 is not necessarily a good epoch, because there are "
"good chance that we have more epochs slipped into the monstore after "
"the one where the crushmap is firstly injected.")
;
po::variables_map op_vm;
int r = parse_cmd_args(&op_desc, NULL, NULL, subcmds, &op_vm);
if (r) {
return -r;
}
if (op_vm.count("help")) {
usage(progname, op_desc);
return 0;
}
MonitorDBStore::Transaction rewrite_txn;
r = rewrite_transaction(store, version, crush_file, &rewrite_txn);
if (r) {
return r;
}
// store the transaction into store as a proposal
const string prefix("paxos");
version_t pending_v = store.get(prefix, "last_committed") + 1;
auto t(std::make_shared<MonitorDBStore::Transaction>());
bufferlist bl;
rewrite_txn.encode(bl);
cout << "adding pending commit " << pending_v
<< " " << bl.length() << " bytes" << std::endl;
t->put(prefix, pending_v, bl);
t->put(prefix, "pending_v", pending_v);
// a large enough yet unique proposal number will probably do the trick
version_t pending_pn = (store.get(prefix, "accepted_pn") / 100 + 4) * 100 + 1;
t->put(prefix, "pending_pn", pending_pn);
store.apply_transaction(t);
return 0;
}
// Import every key found in the keyring file at 'keyring_path' into the
// "auth" paxos service and prime the rotating service secrets, by
// appending one new auth incremental to the store and bumping
// last_committed.  Returns 0 on success, a negative error otherwise.
static int update_auth(MonitorDBStore& st, const string& keyring_path)
{
  // import all keyrings stored in the keyring file
  KeyRing keyring;
  int ret = keyring.load(g_ceph_context, keyring_path);
  if (ret < 0) {
    cerr << "unable to load admin keyring: " << keyring_path << std::endl;
    return ret;
  }
  bufferlist bl;
  __u8 v = 1;
  encode(v, bl);                       // format version byte
  for (const auto& k : keyring.get_keys()) {
    KeyServerData::Incremental auth_inc;
    auth_inc.name = k.first;
    auth_inc.auth = k.second;
    if (auth_inc.auth.caps.empty()) {
      cerr << "no caps granted to: " << auth_inc.name << std::endl;
      return -EINVAL;
    }
    // decode each cap blob into a printable string, for logging only
    map<string,string> caps;
    for (const auto& cap : auth_inc.auth.caps) {
      string decoded;
      auto p = cap.second.cbegin();
      decode(decoded, p);
      caps[cap.first] = decoded;
    }
    cout << "adding auth for '"
         << auth_inc.name << "': " << auth_inc.auth
         << " with caps(" << caps << ")" << std::endl;
    auth_inc.op = KeyServerData::AUTH_INC_ADD;
    // wrap the key-server incremental in an AuthMonitor incremental
    AuthMonitor::Incremental inc;
    inc.inc_type = AuthMonitor::AUTH_DATA;
    encode(auth_inc, inc.auth_data);
    inc.auth_type = CEPH_AUTH_CEPHX;
    inc.encode(bl, CEPH_FEATURES_ALL);
  }
  // prime rotating secrets
  {
    KeyServer ks(g_ceph_context, nullptr);
    KeyServerData::Incremental auth_inc;
    auth_inc.op = KeyServerData::AUTH_INC_SET_ROTATING;
    bool ok = ks.prepare_rotating_update(auth_inc.rotating_bl);
    ceph_assert(ok);
    AuthMonitor::Incremental inc;
    inc.inc_type = AuthMonitor::AUTH_DATA;
    encode(auth_inc, inc.auth_data);
    inc.auth_type = CEPH_AUTH_CEPHX;
    inc.encode(bl, CEPH_FEATURES_ALL);
  }
  // append the incremental and advance last_committed (and seed
  // first_committed on a fresh store)
  const string prefix("auth");
  auto last_committed = st.get(prefix, "last_committed") + 1;
  auto t = make_shared<MonitorDBStore::Transaction>();
  t->put(prefix, last_committed, bl);
  t->put(prefix, "last_committed", last_committed);
  auto first_committed = st.get(prefix, "first_committed");
  if (!first_committed) {
    t->put(prefix, "first_committed", last_committed);
  }
  st.apply_transaction(t);
  return 0;
}
/**
 * Store an initial monmap under the "mkfs" prefix of the rebuilt store.
 *
 * The map is either read from @monmap_path, or seeded from the runtime
 * configuration (mon_host) with the placeholder "noname-*" entries renamed
 * to @mon_ids (or 'a', 'b', 'c', ... when none are given).
 *
 * @return 0 on success; a negative error on a bad monmap file, missing
 *         initial monitors, or a mon_ids count mismatch.
 */
static int update_mkfs(MonitorDBStore& st,
                       const string& monmap_path,
                       const vector<string>& mon_ids)
{
  MonMap monmap;
  if (!monmap_path.empty()) {
    // a monmap file was supplied: use it verbatim
    cout << __func__ << " pulling initial monmap from " << monmap_path << std::endl;
    bufferlist bl;
    string err;
    int r = bl.read_file(monmap_path.c_str(), &err);
    if (r < 0) {
      cerr << "failed to read monmap from " << monmap_path << ": "
           << cpp_strerror(r) << std::endl;
      return r;
    }
    monmap.decode(bl);
  } else {
    // no monmap given: seed one from the ceph.conf / mon_host settings
    cout << __func__ << " generating seed initial monmap" << std::endl;
    int r = monmap.build_initial(g_ceph_context, true, cerr);
    if (r) {
      cerr << "no initial monitors" << std::endl;
      return -EINVAL;
    }
    vector<string> new_names;
    if (!mon_ids.empty()) {
      // caller-provided ids must match the number of seeded monitors
      if (mon_ids.size() != monmap.size()) {
        cerr << "Please pass the same number of <mon-ids> to name the hosts "
             << "listed in 'mon_host'. "
             << mon_ids.size() << " mon-id(s) specified, "
             << "while you have " << monmap.size() << " mon hosts." << std::endl;
        return -EINVAL;
      }
      new_names = mon_ids;
    } else {
      // default names: 'a', 'b', 'c', ... by rank
      for (unsigned rank = 0; rank < monmap.size(); rank++) {
        string new_name{"a"};
        new_name[0] += rank;
        new_names.push_back(std::move(new_name));
      }
    }
    // only rename the auto-generated "noname-*" placeholder entries
    for (unsigned rank = 0; rank < monmap.size(); rank++) {
      auto name = monmap.get_name(rank);
      if (name.compare(0, 7, "noname-") == 0) {
        monmap.rename(name, new_names[rank]);
      }
    }
  }
  monmap.print(cout);
  bufferlist bl;
  monmap.encode(bl, CEPH_FEATURES_ALL);
  // NOTE(review): the epoch is reset *after* the map was encoded, so the
  // blob stored below keeps the original epoch and this set_epoch(0) is a
  // dead store -- confirm whether it should precede encode().
  monmap.set_epoch(0);
  auto t = make_shared<MonitorDBStore::Transaction>();
  t->put("mkfs", "monmap", bl);
  st.apply_transaction(t);
  return 0;
}
// Write the on-disk magic under the "monitor" prefix -- a stripped-down
// version of what Monitor::mkfs() does -- so the rebuilt store is
// recognized as a valid monitor data directory.  Always returns 0.
static int update_monitor(MonitorDBStore& st)
{
  const string prefix("monitor");
  bufferlist magic_bl;
  magic_bl.append(CEPH_MON_ONDISK_MAGIC "\n");
  auto txn = make_shared<MonitorDBStore::Transaction>();
  txn->put(prefix, "magic", magic_bl);
  st.apply_transaction(txn);
  return 0;
}
// rebuild
// - creating_pgs
//
// Reconstruct the "osd_pg_creating" record from the latest full osdmap:
// every pool present in that map is marked as already created, so the
// monitors do not try to re-create its PGs after the store is rebuilt.
static int update_creating_pgs(MonitorDBStore& st)
{
  auto last_osdmap_epoch = st.get("osdmap", "last_committed");
  bufferlist bl;
  int r = st.get("osdmap", st.combine_strings("full", last_osdmap_epoch), bl);
  if (r < 0) {
    cerr << "unable to load osdmap e" << last_osdmap_epoch << std::endl;
    return r;
  }
  OSDMap osdmap;
  osdmap.decode(bl);
  // mark every existing pool as created, stamped with the map's epoch
  creating_pgs_t creating;
  for (const auto& pool : osdmap.get_pools()) {
    creating.created_pools.insert(pool.first);
  }
  creating.last_scan_epoch = last_osdmap_epoch;
  bufferlist encoded;
  encode(creating, encoded, CEPH_FEATURES_ALL);
  auto txn = make_shared<MonitorDBStore::Transaction>();
  txn->put("osd_pg_creating", "creating", encoded);
  st.apply_transaction(txn);
  return 0;
}
// rebuild
// - mgr
// - mgr_command_desc
//
// Recreate a minimal mgr map (epoch 1, seeded with the configured
// 'mgr_initial_modules') plus the mgr command descriptions, so a rebuilt
// store can host a working mgr.  Returns the result of applying the
// transaction.
static int update_mgrmap(MonitorDBStore& st)
{
  auto txn = make_shared<MonitorDBStore::Transaction>();
  {
    MgrMap map;
    // mgr expects epoch > 1
    map.epoch++;
    auto initial_modules =
      get_str_vec(g_ceph_context->_conf.get_val<string>("mgr_initial_modules"));
    map.modules.insert(initial_modules.begin(), initial_modules.end());
    bufferlist bl;
    map.encode(bl, CEPH_FEATURES_ALL);
    txn->put("mgr", map.epoch, bl);
    txn->put("mgr", "last_committed", map.epoch);
  }
  {
    // every mgr command gets the MGR flag before being stored
    auto mgr_command_descs = mgr_commands;
    for (auto& c : mgr_command_descs) {
      c.set_flag(MonCommand::FLAG_MGR);
    }
    bufferlist bl;
    encode(mgr_command_descs, bl);
    txn->put("mgr_command_descs", "", bl);
  }
  return st.apply_transaction(txn);
}
/**
 * Populate the "paxos" prefix of a rebuilt store.
 *
 * Writes a single committed (empty) paxos transaction at a version high
 * enough to dominate any `last_committed` the surviving peons might reply
 * with during recovery, then stages one pending proposal that carries every
 * non-permanent k/v pair we rebuilt.  Committing that proposal is a no-op
 * on the sync provider but replays the rebuilt state on its peers.
 *
 * @return always 0
 */
static int update_paxos(MonitorDBStore& st)
{
  const string prefix("paxos");
  // a large enough version greater than the maximum possible `last_committed`
  // that could be replied by the peons when the leader is collecting paxos
  // transactions during recovery
  constexpr version_t first_committed = 0x42;
  constexpr version_t last_committed = first_committed;
  for (version_t v = first_committed; v < last_committed + 1; v++) {
    auto t = make_shared<MonitorDBStore::Transaction>();
    if (v == first_committed) {
      t->put(prefix, "first_committed", v);
    }
    bufferlist proposal;
    MonitorDBStore::Transaction empty_txn;
    empty_txn.encode(proposal);
    t->put(prefix, v, proposal);
    t->put(prefix, "last_committed", v);
    st.apply_transaction(t);
  }
  // build a pending paxos proposal from all non-permanent k/v pairs. once the
  // proposal is committed, it will gets applied. on the sync provider side, it
  // will be a no-op, but on its peers, the paxos commit will help to build up
  // the necessary epochs.
  bufferlist pending_proposal;
  {
    MonitorDBStore::Transaction t;
    // BUGFIX: "mgr_command_descs" must match the prefix written by
    // update_mgrmap(); it was previously spelled "mgr_command_desc", which
    // silently dropped the mgr command descriptions from the proposal.
    vector<string> prefixes = {"auth", "osdmap",
                               "mgr", "mgr_command_descs"};
    for (const auto& prefix : prefixes) {
      for (auto i = st.get_iterator(prefix); i->valid(); i->next()) {
        auto key = i->raw_key();
        auto val = i->value();
        t.put(key.first, key.second, val);
      }
    }
    t.encode(pending_proposal);
  }
  auto pending_v = last_committed + 1;
  auto t = make_shared<MonitorDBStore::Transaction>();
  t->put(prefix, pending_v, pending_proposal);
  t->put(prefix, "pending_v", pending_v);
  // any sufficiently large, unique proposal number will do
  t->put(prefix, "pending_pn", 400);
  st.apply_transaction(t);
  return 0;
}
/**
 * Handle the 'rebuild' command: reconstruct a usable monitor store from
 * the osdmap epochs it still holds, optionally importing an admin keyring
 * and an initial monmap.
 *
 * Runs, in order: update_auth (if a keyring was given), update_creating_pgs,
 * update_mgrmap, update_paxos (which snapshots the rebuilt k/v pairs into a
 * pending proposal, so it must run after the others), update_mkfs and
 * update_monitor.
 *
 * @return 0 on success, or the first non-zero error from a rebuild step
 */
int rebuild_monstore(const char* progname,
                     vector<string>& subcmds,
                     MonitorDBStore& st)
{
  po::options_description op_desc("Allowed 'rebuild' options");
  string keyring_path;
  string monmap_path;
  vector<string> mon_ids;
  op_desc.add_options()
    ("keyring", po::value<string>(&keyring_path),
     "path to the client.admin key")
    ("monmap", po::value<string>(&monmap_path),
     "path to the initial monmap")
    ("mon-ids", po::value<vector<string>>(&mon_ids)->multitoken(),
     "mon ids, use 'a', 'b', ... if not specified");
  po::positional_options_description pos_desc;
  pos_desc.add("mon-ids", -1);
  po::variables_map op_vm;
  int r = parse_cmd_args(&op_desc, nullptr, &pos_desc, subcmds, &op_vm);
  if (r) {
    return -r;
  }
  if (op_vm.count("help")) {
    usage(progname, op_desc);
    return 0;
  }
  // BUGFIX: propagate failures from update_auth(); its return value used
  // to be silently ignored, unlike every other rebuild step below.
  if (!keyring_path.empty()) {
    if ((r = update_auth(st, keyring_path))) {
      return r;
    }
  }
  if ((r = update_creating_pgs(st))) {
    return r;
  }
  if ((r = update_mgrmap(st))) {
    return r;
  }
  if ((r = update_paxos(st))) {
    return r;
  }
  if ((r = update_mkfs(st, monmap_path, mon_ids))) {
    return r;
  }
  if ((r = update_monitor(st))) {
    return r;
  }
  return 0;
}
/**
 * ceph-monstore-tool entry point.
 *
 * Parses "<store-path> <command> [args|options]" (plus any --ceph-options
 * forwarded to global_init), opens the MonitorDBStore at <store-path>, and
 * dispatches to the requested command.  Command-specific options must come
 * after a '--' separator so this top-level parser does not consume them.
 *
 * Returns 0 on success, 1 on usage errors, EINVAL on command failures.
 */
int main(int argc, char **argv) {
  int err = 0;
  po::options_description desc("Allowed options");
  string store_path, cmd;
  vector<string> subcmds;
  desc.add_options()
    ("help,h", "produce help message")
    ;
  /* Dear Future Developer:
   *
   * for further improvement, should you need to pass specific options to
   * a command (e.g., get osdmap VER --hex), you can expand the current
   * format by creating additional 'po::option_description' and passing
   * 'subcmds' to 'po::command_line_parser', much like what is currently
   * done by default. However, beware: in order to differentiate a
   * command-specific option from the generic/global options, you will need
   * to pass '--' in the command line (so that the first parser, the one
   * below, assumes it has reached the end of all options); e.g.,
   * 'get osdmap VER -- --hex'. Not pretty; far from intuitive; it was as
   * far as I got with this library. Improvements on this format will be
   * left as an exercise for the reader. -Joao
   */
  // positionals: <store-path> <command> [subcmd args...]; kept in a hidden
  // description so they do not clutter --help output
  po::options_description positional_desc("Positional argument options");
  positional_desc.add_options()
    ("store-path", po::value<string>(&store_path),
     "path to monitor's store")
    ("command", po::value<string>(&cmd),
     "Command")
    ("subcmd", po::value<vector<string> >(&subcmds),
     "Command arguments/Sub-Commands")
    ;
  po::positional_options_description positional;
  positional.add("store-path", 1);
  positional.add("command", 1);
  positional.add("subcmd", -1);
  po::options_description all_desc("All options");
  all_desc.add(desc).add(positional_desc);
  vector<string> ceph_option_strings;
  po::variables_map vm;
  try {
    po::parsed_options parsed =
      po::command_line_parser(argc, argv).
      options(all_desc).
      positional(positional).
      allow_unregistered().run();
    po::store(
      parsed,
      vm);
    po::notify(vm);
    // Specifying po::include_positional would have our positional arguments
    // being collected (thus being part of ceph_option_strings and eventually
    // passed on to global_init() below).
    // Instead we specify po::exclude_positional, which has the upside of
    // completely avoid this, but the downside of having to specify ceph
    // options as --VAR=VAL (note the '='); otherwise we will capture the
    // positional 'VAL' as belonging to us, never being collected.
    ceph_option_strings = po::collect_unrecognized(parsed.options,
                                                   po::exclude_positional);
  } catch(po::error &e) {
    std::cerr << "error: " << e.what() << std::endl;
    return 1;
  }
  // parse command structure before calling global_init() and friends.
  if (vm.empty() || vm.count("help") ||
      store_path.empty() || cmd.empty() ||
      *cmd.begin() == '-') {
    usage(argv[0], desc);
    return 1;
  }
  // hand everything we did not recognize to global_init() as ceph options
  vector<const char *> ceph_options;
  ceph_options.reserve(ceph_option_strings.size());
  for (vector<string>::iterator i = ceph_option_strings.begin();
       i != ceph_option_strings.end();
       ++i) {
    ceph_options.push_back(i->c_str());
  }
  auto cct = global_init(
    NULL, ceph_options, CEPH_ENTITY_TYPE_MON,
    CODE_ENVIRONMENT_UTILITY,
    CINIT_FLAG_NO_MON_CONFIG);
  common_init_finish(g_ceph_context);
  cct->_conf.apply_changes(nullptr);
  // this is where we'll write *whatever*, on a per-command basis.
  // not all commands require some place to write their things.
  MonitorDBStore st(store_path);
  if (store_path.size()) {
    stringstream ss;
    int r = st.open(ss);
    if (r < 0) {
      std::cerr << ss.str() << std::endl;
      return EINVAL;
    }
  }
  // make sure the store is closed on every exit path below
  auto close_store = make_scope_guard([&] {
    st.close();
  });
  if (cmd == "dump-keys") {
    // dump every (prefix, key) pair in the store to stdout
    KeyValueDB::WholeSpaceIterator iter = st.get_iterator();
    while (iter->valid()) {
      pair<string,string> key(iter->raw_key());
      cout << key.first << " / " << key.second << std::endl;
      iter->next();
    }
  } else if (cmd == "compact") {
    st.compact();
  } else if (cmd == "get") {
    // get <map-type> [-- --version V --out FILE --readable]
    unsigned v = 0;
    string outpath;
    string map_type;
    // visible options for this command
    po::options_description op_desc("Allowed 'get' options");
    op_desc.add_options()
      ("help,h", "produce this help message")
      ("out,o", po::value<string>(&outpath),
       "output file (default: stdout)")
      ("version,v", po::value<unsigned>(&v),
       "map version to obtain")
      ("readable,r", "print the map information in human readable format")
      ;
    // this is going to be a positional argument; we don't want to show
    // it as an option during --help, but we do want to have it captured
    // when parsing.
    po::options_description hidden_op_desc("Hidden 'get' options");
    hidden_op_desc.add_options()
      ("map-type", po::value<string>(&map_type),
       "map-type")
      ;
    po::positional_options_description op_positional;
    op_positional.add("map-type", 1);
    po::variables_map op_vm;
    int r = parse_cmd_args(&op_desc, &hidden_op_desc, &op_positional,
                           subcmds, &op_vm);
    if (r < 0) {
      return -r;
    }
    if (op_vm.count("help") || map_type.empty()) {
      usage(argv[0], op_desc);
      return 0;
    }
    // default to the last committed version; the crushmap lives inside
    // the osdmap, so its version is the osdmap's
    if (v == 0) {
      if (map_type == "crushmap") {
        v = st.get("osdmap", "last_committed");
      } else {
        v = st.get(map_type, "last_committed");
      }
    }
    int fd = STDOUT_FILENO;
    if (!outpath.empty()){
      fd = ::open(outpath.c_str(), O_WRONLY|O_CREAT|O_TRUNC|O_BINARY, 0666);
      if (fd < 0) {
        std::cerr << "error opening output file: "
          << cpp_strerror(errno) << std::endl;
        return EINVAL;
      }
    }
    // remove a partially-written output file on failure
    auto close_fd = make_scope_guard([&] {
      ::close(fd);
      if (r < 0 && fd != STDOUT_FILENO) {
        ::remove(outpath.c_str());
      }
    });
    bufferlist bl;
    r = 0;
    if (map_type == "osdmap") {
      r = st.get(map_type, st.combine_strings("full", v), bl);
    } else if (map_type == "crushmap") {
      // extract the crush map embedded in the full osdmap
      bufferlist tmp;
      r = st.get("osdmap", st.combine_strings("full", v), tmp);
      if (r >= 0) {
        OSDMap osdmap;
        osdmap.decode(tmp);
        osdmap.crush->encode(bl, CEPH_FEATURES_SUPPORTED_DEFAULT);
      }
    } else {
      r = st.get(map_type, v, bl);
    }
    if (r < 0) {
      std::cerr << "Error getting map: " << cpp_strerror(r) << std::endl;
      return EINVAL;
    }
    if (op_vm.count("readable")) {
      // decode and pretty-print instead of dumping the raw blob
      stringstream ss;
      bufferlist out;
      try {
        if (map_type == "monmap") {
          MonMap monmap;
          monmap.decode(bl);
          monmap.print(ss);
        } else if (map_type == "osdmap") {
          OSDMap osdmap;
          osdmap.decode(bl);
          osdmap.print(cct.get(), ss);
        } else if (map_type == "mdsmap") {
          FSMap fs_map;
          fs_map.decode(bl);
          fs_map.print(ss);
        } else if (map_type == "mgr") {
          MgrMap mgr_map;
          auto p = bl.cbegin();
          mgr_map.decode(p);
          JSONFormatter f;
          f.dump_object("mgrmap", mgr_map);
          f.flush(ss);
        } else if (map_type == "crushmap") {
          CrushWrapper cw;
          auto it = bl.cbegin();
          cw.decode(it);
          CrushCompiler cc(cw, std::cerr, 0);
          cc.decompile(ss);
        } else {
          std::cerr << "This type of readable map does not exist: " << map_type
                    << std::endl << "You can only specify[osdmap|monmap|mdsmap"
                    "|crushmap|mgr]" << std::endl;
        }
      } catch (const buffer::error &err) {
        std::cerr << "Could not decode for human readable output (you may still"
                  " use non-readable mode). Detail: " << err.what() << std::endl;
      }
      out.append(ss);
      out.write_fd(fd);
    } else {
      bl.write_fd(fd);
    }
    if (!outpath.empty()) {
      std::cout << "wrote " << map_type
                << " version " << v << " to " << outpath
                << std::endl;
    }
  } else if (cmd == "show-versions") {
    // print first/last committed version for a given map type
    string map_type; //map type:osdmap,monmap...
    // visible options for this command
    po::options_description op_desc("Allowed 'show-versions' options");
    op_desc.add_options()
      ("help,h", "produce this help message")
      ("map-type", po::value<string>(&map_type), "map_type");
    po::positional_options_description op_positional;
    op_positional.add("map-type", 1);
    po::variables_map op_vm;
    int r = parse_cmd_args(&op_desc, NULL, &op_positional,
                           subcmds, &op_vm);
    if (r < 0) {
      return -r;
    }
    if (op_vm.count("help") || map_type.empty()) {
      usage(argv[0], op_desc);
      return 0;
    }
    unsigned int v_first = 0;
    unsigned int v_last = 0;
    v_first = st.get(map_type, "first_committed");
    v_last = st.get(map_type, "last_committed");
    std::cout << "first committed:\t" << v_first << "\n"
              << "last committed:\t" << v_last << std::endl;
  } else if (cmd == "dump-paxos") {
    // dump the paxos transactions in [start, end] as JSON
    unsigned dstart = 0;
    unsigned dstop = ~0;
    po::options_description op_desc("Allowed 'dump-paxos' options");
    op_desc.add_options()
      ("help,h", "produce this help message")
      ("start,s", po::value<unsigned>(&dstart),
       "starting version (default: 0)")
      ("end,e", po::value<unsigned>(&dstop),
       "finish version (default: ~0)")
      ;
    po::variables_map op_vm;
    int r = parse_cmd_args(&op_desc, NULL, NULL,
                           subcmds, &op_vm);
    if (r < 0) {
      return -r;
    }
    if (op_vm.count("help")) {
      usage(argv[0], op_desc);
      return 0;
    }
    if (dstart > dstop) {
      std::cerr << "error: 'start' version (value: " << dstart << ") "
                << " is greater than 'end' version (value: " << dstop << ")"
                << std::endl;
      return EINVAL;
    }
    version_t v = dstart;
    // stop early at the first missing version
    for (; v <= dstop; ++v) {
      bufferlist bl;
      st.get("paxos", v, bl);
      if (bl.length() == 0)
        break;
      cout << "\n--- " << v << " ---" << std::endl;
      auto tx(std::make_shared<MonitorDBStore::Transaction>());
      Paxos::decode_append_transaction(tx, bl);
      JSONFormatter f(true);
      tx->dump(&f);
      f.flush(cout);
    }
    std::cout << "dumped " << v << " paxos versions" << std::endl;
  } else if (cmd == "dump-trace") {
    // dump transactions [start, stop) from a trace file as JSON
    unsigned dstart = 0;
    unsigned dstop = ~0;
    string outpath;
    // visible options for this command
    po::options_description op_desc("Allowed 'dump-trace' options");
    op_desc.add_options()
      ("help,h", "produce this help message")
      ("start,s", po::value<unsigned>(&dstart),
       "starting version (default: 0)")
      ("end,e", po::value<unsigned>(&dstop),
       "finish version (default: ~0)")
      ;
    // this is going to be a positional argument; we don't want to show
    // it as an option during --help, but we do want to have it captured
    // when parsing.
    // NOTE(review): the positional is registered as "out" but it is the
    // trace file being *read* -- the naming is misleading.
    po::options_description hidden_op_desc("Hidden 'dump-trace' options");
    hidden_op_desc.add_options()
      ("out,o", po::value<string>(&outpath),
       "file to write the dump to")
      ;
    po::positional_options_description op_positional;
    op_positional.add("out", 1);
    po::variables_map op_vm;
    int r = parse_cmd_args(&op_desc, &hidden_op_desc, &op_positional,
                           subcmds, &op_vm);
    if (r < 0) {
      return -r;
    }
    if (op_vm.count("help")) {
      usage(argv[0], op_desc);
      return 0;
    }
    if (outpath.empty()) {
      usage(argv[0], op_desc);
      return EINVAL;
    }
    if (dstart > dstop) {
      std::cerr << "error: 'start' version (value: " << dstart << ") "
                << " is greater than 'stop' version (value: " << dstop << ")"
                << std::endl;
      return EINVAL;
    }
    TraceIter iter(outpath.c_str());
    iter.init();
    while (true) {
      if (!iter.valid())
        break;
      if (iter.num() >= dstop) {
        break;
      }
      if (iter.num() >= dstart) {
        JSONFormatter f(true);
        iter.cur()->dump(&f, false);
        f.flush(std::cout);
        std::cout << std::endl;
      }
      iter.next();
    }
    std::cerr << "Read up to transaction " << iter.num() << std::endl;
  } else if (cmd == "replay-trace") {
    // re-apply the transactions from a trace file, possibly several times
    string inpath;
    unsigned num_replays = 1;
    // visible options for this command
    po::options_description op_desc("Allowed 'replay-trace' options");
    op_desc.add_options()
      ("help,h", "produce this help message")
      ("num-replays,n", po::value<unsigned>(&num_replays),
       "finish version (default: 1)")
      ;
    // this is going to be a positional argument; we don't want to show
    // it as an option during --help, but we do want to have it captured
    // when parsing.
    // NOTE(review): the help string says "file to write the dump to", but
    // this is the trace file being read.
    po::options_description hidden_op_desc("Hidden 'replay-trace' options");
    hidden_op_desc.add_options()
      ("in,i", po::value<string>(&inpath),
       "file to write the dump to")
      ;
    po::positional_options_description op_positional;
    op_positional.add("in", 1);
    // op_desc_all will aggregate all visible and hidden options for parsing.
    // when we call 'usage()' we just pass 'op_desc', as that's the description
    // holding the visible options.
    po::options_description op_desc_all;
    op_desc_all.add(op_desc).add(hidden_op_desc);
    po::variables_map op_vm;
    try {
      po::parsed_options op_parsed = po::command_line_parser(subcmds).
        options(op_desc_all).positional(op_positional).run();
      po::store(op_parsed, op_vm);
      po::notify(op_vm);
    } catch (po::error &e) {
      std::cerr << "error: " << e.what() << std::endl;
      return EINVAL;
    }
    if (op_vm.count("help")) {
      usage(argv[0], op_desc);
      return 0;
    }
    if (inpath.empty()) {
      usage(argv[0], op_desc);
      return EINVAL;
    }
    unsigned num = 0;
    for (unsigned i = 0; i < num_replays; ++i) {
      TraceIter iter(inpath.c_str());
      iter.init();
      while (true) {
        if (!iter.valid())
          break;
        std::cerr << "Replaying trans num " << num << std::endl;
        st.apply_transaction(iter.cur());
        iter.next();
        ++num;
      }
      std::cerr << "Read up to transaction " << iter.num() << std::endl;
    }
  } else if (cmd == "random-gen") {
    // fill the store with random transactions (stress/soak testing)
    unsigned tsize = 200;
    unsigned tvalsize = 1024;
    unsigned ntrans = 100;
    po::options_description op_desc("Allowed 'random-gen' options");
    op_desc.add_options()
      ("help,h", "produce this help message")
      ("num-keys,k", po::value<unsigned>(&tsize),
       "keys to write in each transaction (default: 200)")
      ("size,s", po::value<unsigned>(&tvalsize),
       "size (in bytes) of the value to write in each key (default: 1024)")
      ("ntrans,n", po::value<unsigned>(&ntrans),
       "number of transactions to run (default: 100)")
      ;
    po::variables_map op_vm;
    try {
      po::parsed_options op_parsed = po::command_line_parser(subcmds).
        options(op_desc).run();
      po::store(op_parsed, op_vm);
      po::notify(op_vm);
    } catch (po::error &e) {
      std::cerr << "error: " << e.what() << std::endl;
      return EINVAL;
    }
    if (op_vm.count("help")) {
      usage(argv[0], op_desc);
      return 0;
    }
    unsigned num = 0;
    for (unsigned i = 0; i < ntrans; ++i) {
      std::cerr << "Applying trans " << i << std::endl;
      auto t(std::make_shared<MonitorDBStore::Transaction>());
      // cycle the prefix through 'a'..'z'
      string prefix;
      prefix.push_back((i%26)+'a');
      for (unsigned j = 0; j < tsize; ++j) {
        stringstream os;
        os << num;
        bufferlist bl;
        for (unsigned k = 0; k < tvalsize; ++k) bl.append(rand());
        t->put(prefix, os.str(), bl);
        ++num;
      }
      t->compact_prefix(prefix);
      st.apply_transaction(t);
    }
  } else if (cmd == "store-copy") {
    // copy the whole store, 128 keys per transaction, to a new store
    if (subcmds.size() < 1 || subcmds[0].empty()) {
      usage(argv[0], desc);
      return EINVAL;
    }
    string out_path = subcmds[0];
    MonitorDBStore out_store(out_path);
    {
      stringstream ss;
      int r = out_store.create_and_open(ss);
      if (r < 0) {
        std::cerr << ss.str() << std::endl;
        // NOTE(review): 'err' is still 0 here, so this reports success on
        // failure -- should presumably return EINVAL (or r).
        return err;
      }
    }
    KeyValueDB::WholeSpaceIterator it = st.get_iterator();
    uint64_t total_keys = 0;
    uint64_t total_size = 0;
    uint64_t total_tx = 0;
    do {
      uint64_t num_keys = 0;
      auto tx(std::make_shared<MonitorDBStore::Transaction>());
      while (it->valid() && num_keys < 128) {
        pair<string,string> k = it->raw_key();
        bufferlist v = it->value();
        tx->put(k.first, k.second, v);
        num_keys ++;
        total_tx ++;
        total_size += v.length();
        it->next();
      }
      total_keys += num_keys;
      if (!tx->empty())
        out_store.apply_transaction(tx);
      std::cout << "copied " << total_keys << " keys so far ("
                << stringify(byte_u_t(total_size)) << ")" << std::endl;
    } while (it->valid());
    out_store.close();
    std::cout << "summary: copied " << total_keys << " keys, using "
              << total_tx << " transactions, totalling "
              << stringify(byte_u_t(total_size)) << std::endl;
    std::cout << "from '" << store_path << "' to '" << out_path << "'"
              << std::endl;
  } else if (cmd == "rewrite-crush") {
    err = rewrite_crush(argv[0], subcmds, st);
  } else if (cmd == "rebuild") {
    err = rebuild_monstore(argv[0], subcmds, st);
  } else {
    std::cerr << "Unrecognized command: " << cmd << std::endl;
    usage(argv[0], desc);
    // NOTE(review): 'err' is still 0 here, so an unrecognized command
    // exits with success -- likely should be a non-zero status.
    return err;
  }
  // NOTE(review): falling off the end of main() returns 0, so errors
  // recorded in 'err' by rewrite-crush/rebuild are swallowed; consider
  // 'return err ? 1 : 0;' (or similar) here.
}
| 42,154 | 30.935606 | 87 | cc |
null | ceph-main/src/tools/ceph_objectstore_tool.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 Inktank
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <boost/program_options/variables_map.hpp>
#include <boost/program_options/parsers.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/scoped_ptr.hpp>
#include <boost/optional.hpp>
#include <fstream>
#include <stdlib.h>
#include "common/Formatter.h"
#include "common/errno.h"
#include "common/ceph_argparse.h"
#include "common/url_escape.h"
#include "global/global_init.h"
#include "os/ObjectStore.h"
#ifdef HAVE_LIBFUSE
#include "os/FuseStore.h"
#endif
#include "osd/PGLog.h"
#include "osd/OSD.h"
#include "osd/PG.h"
#include "osd/ECUtil.h"
#include "json_spirit/json_spirit_value.h"
#include "json_spirit/json_spirit_reader.h"
#include "rebuild_mondb.h"
#include "ceph_objectstore_tool.h"
#include "include/compat.h"
#include "include/util.h"
using namespace std;
namespace po = boost::program_options;
#ifdef INTERNAL_TEST
// Build a CompatSet that mimics a real OSD's on-disk incompat feature set.
// Only compiled for internal testing (INTERNAL_TEST); INTERNAL_TEST2 adds
// the SNAPMAPPER and SHARDS incompat bits on top.
CompatSet get_test_compat_set() {
  CompatSet::FeatureSet ceph_osd_feature_compat;
  CompatSet::FeatureSet ceph_osd_feature_ro_compat;
  CompatSet::FeatureSet ceph_osd_feature_incompat;
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_BASE);
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_PGINFO);
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_OLOC);
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_LEC);
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_CATEGORIES);
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_HOBJECTPOOL);
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_BIGINFO);
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_LEVELDBINFO);
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_LEVELDBLOG);
#ifdef INTERNAL_TEST2
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_SNAPMAPPER);
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_SHARDS);
#endif
  return CompatSet(ceph_osd_feature_compat, ceph_osd_feature_ro_compat,
		   ceph_osd_feature_incompat);
}
#endif
// Objects are read/copied in chunks of at most 1 MiB.
const ssize_t max_read = 1024 * 1024;
// Sentinel for "no file descriptor open" (valid descriptors are >= 0).
const int fd_none = INT_MIN;
bool outistty;  // whether stdout is a tty; NOTE(review): set elsewhere in the tool
bool dry_run;   // when true, mutating store operations are skipped

// Per-object callback interface used by the action_on_all_objects* helpers:
// call() is invoked once for each object discovered during a scan.
struct action_on_object_t {
  virtual ~action_on_object_t() {}
  virtual void call(ObjectStore *store, coll_t coll, ghobject_t &ghobj, object_info_t &oi) = 0;
};
// Invoke |action| on every object in collection |coll|, paginating the
// listing LIST_AT_A_TIME entries at a time.  For non-meta collections the
// object_info_t is decoded from OI_ATTR; on getattr/decode failure an error
// is printed but the action is still called with a default-constructed
// object_info_t (best effort).  Returns 0, or a negative errno from the
// collection listing.  NOTE(review): the |debug| parameter is unused here.
int _action_on_all_objects_in_pg(ObjectStore *store, coll_t coll, action_on_object_t &action, bool debug)
{
  auto ch = store->open_collection(coll);
  unsigned LIST_AT_A_TIME = 100;
  ghobject_t next;
  // collection_list() sets |next| to ghobject_t::get_max() once exhausted.
  while (!next.is_max()) {
    vector<ghobject_t> list;
    int r = store->collection_list(ch,
				   next,
				   ghobject_t::get_max(),
				   LIST_AT_A_TIME,
				   &list,
				   &next);
    if (r < 0) {
      cerr << "Error listing collection: " << coll << ", "
	   << cpp_strerror(r) << std::endl;
      return r;
    }
    for (vector<ghobject_t>::iterator obj = list.begin();
	 obj != list.end();
	 ++obj) {
      object_info_t oi;
      if (coll != coll_t::meta()) {
	bufferlist attr;
	r = store->getattr(ch, *obj, OI_ATTR, attr);
	if (r < 0) {
	  cerr << "Error getting attr on : " << make_pair(coll, *obj) << ", "
	       << cpp_strerror(r) << std::endl;
	} else {
	  auto bp = attr.cbegin();
	  try {
	    decode(oi, bp);
	  } catch (...) {
	    r = -EINVAL;
	    cerr << "Error decoding attr on : " << make_pair(coll, *obj) << ", "
		 << cpp_strerror(r) << std::endl;
	  }
	}
      }
      // Called even when oi could not be decoded (oi stays default).
      action.call(store, coll, *obj, oi);
    }
  }
  return 0;
}
// Invoke |action| on every object in the pg named by |pgidstr| (or the meta
// collection when |pgidstr| == "meta").  When the pgid carries no shard, all
// shards of that pg are scanned (EC pools).  Returns 0 on success or a
// negative errno.
int action_on_all_objects_in_pg(ObjectStore *store, string pgidstr, action_on_object_t &action, bool debug)
{
  spg_t pgid;
  // BUGFIX: the return value of parse() was previously ignored.  With an
  // unparseable pgid string, |pgid| stayed default-constructed and could
  // spuriously match pg 0.0 below; remember whether parsing succeeded.
  const bool pgid_valid = pgid.parse(pgidstr.c_str());
  // Scan collections in case this is an ec pool but no shard specified
  unsigned scanned = 0;
  int r = 0;
  vector<coll_t> colls_to_check;
  vector<coll_t> candidates;
  r = store->list_collections(candidates);
  if (r < 0) {
    cerr << "Error listing collections: " << cpp_strerror(r) << std::endl;
    return r;
  }
  for (vector<coll_t>::iterator i = candidates.begin();
       i != candidates.end();
       ++i) {
    spg_t cand_pgid;
    if (i->is_meta() && pgidstr == "meta") {
      colls_to_check.push_back(*i);
      continue;
    }
    if (!pgid_valid || !i->is_pg(&cand_pgid))
      continue;
    // If an exact match or treat no shard as any shard
    if (cand_pgid == pgid ||
	(pgid.is_no_shard() && pgid.pgid == cand_pgid.pgid)) {
      colls_to_check.push_back(*i);
    }
  }
  if (debug)
    cerr << colls_to_check.size() << " pgs to scan" << std::endl;
  for (vector<coll_t>::iterator i = colls_to_check.begin();
       i != colls_to_check.end();
       ++i, ++scanned) {
    if (debug)
      cerr << "Scanning " << *i << ", " << scanned << "/"
	   << colls_to_check.size() << " completed" << std::endl;
    r = _action_on_all_objects_in_pg(store, *i, action, debug);
    if (r < 0)
      break;
  }
  return r;
}
// Run |action| over exactly one collection (no shard expansion); thin
// pass-through to _action_on_all_objects_in_pg.
int action_on_all_objects_in_exact_pg(ObjectStore *store, coll_t coll, action_on_object_t &action, bool debug)
{
  return _action_on_all_objects_in_pg(store, coll, action, debug);
}
// Invoke |action| on every object of every pg collection in the store
// (temp and meta collections are skipped by the is_pg() filter).  Returns 0
// on success, or the first negative errno encountered.
int _action_on_all_objects(ObjectStore *store, action_on_object_t &action, bool debug)
{
  unsigned scanned = 0;
  int r = 0;
  vector<coll_t> colls_to_check;
  vector<coll_t> candidates;
  r = store->list_collections(candidates);
  if (r < 0) {
    cerr << "Error listing collections: " << cpp_strerror(r) << std::endl;
    return r;
  }
  // Keep only pg collections.
  for (vector<coll_t>::iterator i = candidates.begin();
       i != candidates.end();
       ++i) {
    if (i->is_pg()) {
      colls_to_check.push_back(*i);
    }
  }
  if (debug)
    cerr << colls_to_check.size() << " pgs to scan" << std::endl;
  for (vector<coll_t>::iterator i = colls_to_check.begin();
       i != colls_to_check.end();
       ++i, ++scanned) {
    if (debug)
      cerr << "Scanning " << *i << ", " << scanned << "/"
	   << colls_to_check.size() << " completed" << std::endl;
    r = _action_on_all_objects_in_pg(store, *i, action, debug);
    if (r < 0)
      return r;
  }
  return 0;
}
// Public wrapper over _action_on_all_objects; returns its result directly.
int action_on_all_objects(ObjectStore *store, action_on_object_t &action, bool debug)
{
  return _action_on_all_objects(store, action, debug);
}
// Accumulates (collection, object) pairs found during a scan and dumps them
// either as one JSON array or, in human-readable mode, one flushed record
// per object.
struct pgid_object_list {
  list<pair<coll_t, ghobject_t> > _objects;

  // Record one object hit.
  void insert(coll_t coll, ghobject_t &ghobj) {
    _objects.push_back(make_pair(coll, ghobj));
  }

  // Emit the collected list.  In human-readable mode the raw coll string is
  // suppressed when the collection parses as a pg, and output is flushed
  // after every entry.
  void dump(Formatter *f, bool human_readable) const {
    if (!human_readable)
      f->open_array_section("pgid_objects");
    for (list<pair<coll_t, ghobject_t> >::const_iterator i = _objects.begin();
	 i != _objects.end();
	 ++i) {
      f->open_array_section("pgid_object");
      spg_t pgid;
      bool is_pg = i->first.is_pg(&pgid);
      if (is_pg)
	f->dump_string("pgid", stringify(pgid));
      if (!is_pg || !human_readable)
	f->dump_string("coll", i->first.to_str());
      f->open_object_section("ghobject");
      i->second.dump(f);
      f->close_section();
      f->close_section();
      if (human_readable) {
	f->flush(cout);
	cout << std::endl;
      }
    }
    if (!human_readable) {
      f->close_section();
      f->flush(cout);
      cout << std::endl;
    }
  }
};
// Scan action that collects objects matching a name and (optionally) a
// namespace.  An empty |_name| matches every object; with |_need_snapset|
// only head/snapdir objects carrying a snapset are collected.
struct lookup_ghobject : public action_on_object_t {
  pgid_object_list _objects;
  const string _name;
  const boost::optional<std::string> _namespace;
  bool _need_snapset;

  lookup_ghobject(const string& name, const boost::optional<std::string>& nspace, bool need_snapset = false) : _name(name),
	  _namespace(nspace), _need_snapset(need_snapset) { }

  // Record the object if it satisfies the configured filters.
  void call(ObjectStore *store, coll_t coll, ghobject_t &ghobj, object_info_t &oi) override {
    if (_need_snapset && !ghobj.hobj.has_snapset())
      return;
    if ((_name.length() == 0 || ghobj.hobj.oid.name == _name) &&
	(!_namespace || ghobj.hobj.nspace == _namespace))
      _objects.insert(coll, ghobj);
    return;
  }

  // Number of matches collected so far.
  int size() const {
    return _objects._objects.size();
  }

  // Remove and return the oldest match (caller must ensure size() > 0).
  pair<coll_t, ghobject_t> pop() {
     pair<coll_t, ghobject_t> front = _objects._objects.front();
     _objects._objects.pop_front();
     return front;
  }

  void dump(Formatter *f, bool human_readable) const {
    _objects.dump(f, human_readable);
  }
};
// Scan action that walks each object's full omap and records objects whose
// total iteration time exceeds |threshold| seconds, together with the time
// of the first seek, the time of the last next() step, and the iterator's
// tail key.  Used to locate objects with pathologically slow omap access.
struct lookup_slow_ghobject : public action_on_object_t {
  list<tuple<
	coll_t,
	ghobject_t,
	ceph::signedspan,
	ceph::signedspan,
	ceph::signedspan,
	string> > _objects;
  const string _name;      // NOTE(review): stored but not used as a filter here
  double threshold;        // seconds; compared via make_timespan()
  coll_t last_coll;        // last collection announced on stderr (progress)

  lookup_slow_ghobject(const string& name, double _threshold) :
    _name(name), threshold(_threshold) { }

  // Time a full omap walk of |ghobj| and record it if it is slow.
  void call(ObjectStore *store, coll_t coll, ghobject_t &ghobj, object_info_t &oi) override {
    ObjectMap::ObjectMapIterator iter;
    auto start1 = mono_clock::now();
    // Zero-length spans to start with (same clock arithmetic as below).
    ceph::signedspan first_seek_time = start1 - start1;
    ceph::signedspan last_seek_time = first_seek_time;
    ceph::signedspan total_time = first_seek_time;
    {
      auto ch = store->open_collection(coll);
      iter = store->get_omap_iterator(ch, ghobj);
      if (!iter) {
	cerr << "omap_get_iterator: " << cpp_strerror(ENOENT)
	     << " obj:" << ghobj
	     << std::endl;
	return;
      }
      auto start = mono_clock::now();
      iter->seek_to_first();
      first_seek_time = mono_clock::now() - start;

      // last_seek_time is overwritten each step, so after the loop it holds
      // the duration of the final next() call.
      while(iter->valid()) {
	start = mono_clock::now();
	iter->next();
	last_seek_time = mono_clock::now() - start;
      }
    }

    if (coll != last_coll) {
      cerr << ">>> inspecting coll" << coll << std::endl;
      last_coll = coll;
    }

    total_time = mono_clock::now() - start1;
    if ( total_time >= make_timespan(threshold)) {
      _objects.emplace_back(coll, ghobj,
	first_seek_time, last_seek_time, total_time,
	url_escape(iter->tail_key()));
      cerr << ">>>>>  found obj " << ghobj
	   << " first_seek_time "
	   << std::chrono::duration_cast<std::chrono::seconds>(first_seek_time).count()
	   << " last_seek_time "
	   << std::chrono::duration_cast<std::chrono::seconds>(last_seek_time).count()
	   << " total_time "
	   << std::chrono::duration_cast<std::chrono::seconds>(total_time).count()
	   << " tail key: " << url_escape(iter->tail_key())
	   << std::endl;
    }
    return;
  }

  int size() const {
    return _objects.size();
  }

  // Dump the slow-object records; mirrors pgid_object_list::dump's
  // human-readable flushing behavior.
  void dump(Formatter *f, bool human_readable) const {
    if (!human_readable)
      f->open_array_section("objects");
    for (auto i = _objects.begin();
	 i != _objects.end();
	 ++i) {
      f->open_array_section("object");
      coll_t coll;
      ghobject_t ghobj;
      ceph::signedspan first_seek_time;
      ceph::signedspan last_seek_time;
      ceph::signedspan total_time;
      string tail_key;
      std::tie(coll, ghobj, first_seek_time, last_seek_time, total_time, tail_key) = *i;

      spg_t pgid;
      bool is_pg = coll.is_pg(&pgid);
      if (is_pg)
	f->dump_string("pgid", stringify(pgid));
      if (!is_pg || !human_readable)
	f->dump_string("coll", coll.to_str());
      f->dump_object("ghobject", ghobj);
      f->open_object_section("times");
      f->dump_int("first_seek_time",
	std::chrono::duration_cast<std::chrono::seconds>(first_seek_time).count());
      f->dump_int("last_seek_time",
	std::chrono::duration_cast<std::chrono::seconds>
	  (last_seek_time).count());
      f->dump_int("total_time",
	std::chrono::duration_cast<std::chrono::seconds>(total_time).count());
      f->dump_string("tail_key", tail_key);
      f->close_section();

      f->close_section();
      if (human_readable) {
	f->flush(cout);
	cout << std::endl;
      }
    }
    if (!human_readable) {
      f->close_section();
      f->flush(cout);
      cout << std::endl;
    }
  }
};
// Descriptor used for import/export streams; fd_none until opened.
int file_fd = fd_none;
bool debug;              // verbose diagnostics on stderr
bool force = false;      // override safety checks where supported
bool no_superblock = false;  // skip superblock handling on import/export
super_header sh;         // export file super header (read/written globally)
static int get_fd_data(int fd, bufferlist &bl)
{
uint64_t total = 0;
do {
ssize_t bytes = bl.read_fd(fd, max_read);
if (bytes < 0) {
cerr << "read_fd error " << cpp_strerror(bytes) << std::endl;
return bytes;
}
if (bytes == 0)
break;
total += bytes;
} while(true);
ceph_assert(bl.length() == total);
return 0;
}
// Load the pg log and missing set for |pgid| from its pgmeta object.
// Returns 0 on success, -ENOENT if the pg collection cannot be opened, or
// -EFAULT if decoding throws.
int get_log(CephContext *cct, ObjectStore *fs, __u8 struct_ver,
	    spg_t pgid, const pg_info_t &info,
	    PGLog::IndexedLog &log, pg_missing_t &missing)
{
  try {
    auto ch = fs->open_collection(coll_t(pgid));
    if (!ch) {
      return -ENOENT;
    }
    ostringstream oss;
    ceph_assert(struct_ver > 0);
    PGLog::read_log_and_missing(
      cct, fs, ch,
      pgid.make_pgmeta_oid(),
      info, log, missing,
      oss,
      g_ceph_context->_conf->osd_ignore_stale_divergent_priors);
    // Warnings from read_log_and_missing are only surfaced in debug mode.
    if (debug && oss.str().size())
      cerr << oss.str() << std::endl;
  }
  catch (const buffer::error &e) {
    cerr << "read_log_and_missing threw exception error " << e.what() << std::endl;
    return -EFAULT;
  }
  return 0;
}
// Dump |log| and |missing| as labeled JSON objects inside one "op_log"
// wrapper, flushing to |out| as each inner section completes.
void dump_log(Formatter *formatter, ostream &out, pg_log_t &log,
	      pg_missing_t &missing)
{
  auto emit = [&](const char *label, auto &obj) {
    formatter->open_object_section(label);
    obj.dump(formatter);
    formatter->close_section();
    formatter->flush(out);
  };
  formatter->open_object_section("op_log");
  emit("pg_log_t", log);
  emit("pg_missing_t", missing);
  formatter->close_section();
  formatter->flush(out);
}
//Based on part of OSD::load_pgs()
// Sweep the store and physically delete every temp collection and every pg
// collection whose pgmeta carries the removal flag (set by
// mark_pg_for_removal).  Returns 0 or a negative errno from listing.
int finish_remove_pgs(ObjectStore *store)
{
  vector<coll_t> ls;
  int r = store->list_collections(ls);
  if (r < 0) {
    cerr << "finish_remove_pgs: failed to list pgs: " << cpp_strerror(r)
      << std::endl;
    return r;
  }

  for (vector<coll_t>::iterator it = ls.begin();
       it != ls.end();
       ++it) {
    spg_t pgid;

    if (it->is_temp(&pgid) ||
	(it->is_pg(&pgid) && PG::_has_removal_flag(store, pgid))) {
      cout << "finish_remove_pgs " << *it << " removing " << pgid << std::endl;
      OSD::recursive_remove_collection(g_ceph_context, store, pgid, *it);
      continue;
    }

    //cout << "finish_remove_pgs ignoring unrecognized " << *it << std::endl;
  }
  return 0;
}
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
// Queue (into |t|) the omap update that flags pg |pgid| for removal; the
// actual deletion happens later in finish_remove_pgs().  Returns 0 on
// success or a negative errno from read_info.
int mark_pg_for_removal(ObjectStore *fs, spg_t pgid, ObjectStore::Transaction *t)
{
  pg_info_t info(pgid);
  coll_t coll(pgid);
  ghobject_t pgmeta_oid(info.pgid.make_pgmeta_oid());

  epoch_t map_epoch = 0;
  int r = PG::peek_map_epoch(fs, pgid, &map_epoch);
  if (r < 0)
    cerr << __func__ << " warning: peek_map_epoch reported error" << std::endl;
  PastIntervals past_intervals;
  __u8 struct_v;
  r = PG::read_info(fs, pgid, coll, info, past_intervals, struct_v);
  if (r < 0) {
    cerr << __func__ << " error on read_info " << cpp_strerror(r) << std::endl;
    return r;
  }
  // Only modern (v8+) pg metadata layouts are supported.
  ceph_assert(struct_v >= 8);
  // new omap key
  cout << "setting '_remove' omap key" << std::endl;
  map<string,bufferlist> values;
  encode((char)1, values["_remove"]);
  t->omap_setkeys(coll, pgmeta_oid, values);
  return 0;
}
#pragma GCC diagnostic pop
#pragma GCC diagnostic warning "-Wpragmas"
// Register an on-complete callback on |txn|, run |func| (which is expected
// to queue the transaction), then block until the transaction's completion
// callback fires.
template<typename Func>
void wait_until_done(ObjectStore::Transaction* txn, Func&& func)
{
  bool finished = false;
  std::condition_variable cond;
  std::mutex m;
  txn->register_on_complete(make_lambda_context([&](int) {
    std::unique_lock lock{m};
    finished = true;
    cond.notify_one();
  }));
  std::move(func)();
  std::unique_lock lock{m};
  // Guard against spurious wakeups and the callback firing before we wait.
  cond.wait(lock, [&] {return finished;});
}
// Mark pg |r_pgid| for removal and immediately run the removal sweep.
// Returns -ENOENT if the pg collection does not exist, 0 in dry-run mode,
// otherwise the result of mark_pg_for_removal.
int initiate_new_remove_pg(ObjectStore *store, spg_t r_pgid)
{
  // First clean out any pgs already flagged from a previous run.
  if (!dry_run)
    finish_remove_pgs(store);
  if (!store->collection_exists(coll_t(r_pgid)))
    return -ENOENT;

  cout << " marking collection for removal" << std::endl;
  if (dry_run)
    return 0;
  ObjectStore::Transaction rmt;
  int r = mark_pg_for_removal(store, r_pgid, &rmt);
  if (r < 0) {
    return r;
  }
  ObjectStore::CollectionHandle ch = store->open_collection(coll_t(r_pgid));
  store->queue_transaction(ch, std::move(rmt));
  finish_remove_pgs(store);
  return r;
}
// Queue (into |t|) the pgmeta omap updates that persist |info| and
// |past_intervals| at |epoch|.  Returns prepare_info_keymap's result.
// NOTE(review): on a non-zero ret the keys gathered so far are still queued.
int write_info(ObjectStore::Transaction &t, epoch_t epoch, pg_info_t &info,
	       PastIntervals &past_intervals)
{
  //Empty for this
  coll_t coll(info.pgid);
  ghobject_t pgmeta_oid(info.pgid.make_pgmeta_oid());
  map<string,bufferlist> km;
  string key_to_remove;
  pg_info_t last_written_info;
  int ret = prepare_info_keymap(
    g_ceph_context,
    &km, &key_to_remove,
    epoch,
    info,
    last_written_info,
    past_intervals,
    true, true, false);
  if (ret) cerr << "Failed to write info" << std::endl;
  t.omap_setkeys(coll, pgmeta_oid, km);
  if (!key_to_remove.empty()) {
    t.omap_rmkey(coll, pgmeta_oid, key_to_remove);
  }
  return ret;
}
// Map of divergent prior entries keyed by version, as stored in pgmeta.
typedef map<eversion_t, hobject_t> divergent_priors_t;

// Persist pg info, log, and missing/divergent state into |t|.  A non-empty
// |divergent| set is mutually exclusive with a non-empty |missing| set (the
// two use different on-disk encodings).  Returns write_info's error, else 0.
int write_pg(ObjectStore::Transaction &t, epoch_t epoch, pg_info_t &info,
	     pg_log_t &log, PastIntervals &past_intervals,
	     divergent_priors_t &divergent,
	     pg_missing_t &missing)
{
  cout << __func__ << " epoch " << epoch << " info " << info << std::endl;
  int ret = write_info(t, epoch, info, past_intervals);
  if (ret)
    return ret;
  coll_t coll(info.pgid);
  map<string,bufferlist> km;

  // EC shards need rollback info; replicated pgs (no shard) do not.
  const bool require_rollback = !info.pgid.is_no_shard();
  if (!divergent.empty()) {
    ceph_assert(missing.get_items().empty());
    PGLog::write_log_and_missing_wo_missing(
      t, &km, log, coll, info.pgid.make_pgmeta_oid(), divergent,
      require_rollback);
  } else {
    pg_missing_tracker_t tmissing(missing);
    bool rebuilt_missing_set_with_deletes = missing.may_include_deletes;
    PGLog::write_log_and_missing(
      t, &km, log, coll, info.pgid.make_pgmeta_oid(), tmissing,
      require_rollback,
      &rebuilt_missing_set_with_deletes);
  }
  t.omap_setkeys(coll, info.pgid.make_pgmeta_oid(), km);
  return 0;
}
// Trim the pg log of |pgid| down to osd_max_pg_log_entries, deleting log
// entry keys from the pgmeta omap in batches of osd_pg_log_trim_max, then
// update info.log_tail and compact the store.  Non-log keys (missing, dups,
// rollback markers, '_'-prefixed) are skipped.  Returns 0 or a write_info
// error.
int do_trim_pg_log(ObjectStore *store, const coll_t &coll,
		   pg_info_t &info, const spg_t &pgid,
		   epoch_t map_epoch,
		   PastIntervals &past_intervals)
{
  ghobject_t oid = pgid.make_pgmeta_oid();
  struct stat st;
  auto ch = store->open_collection(coll);
  int r = store->stat(ch, oid, &st);
  ceph_assert(r == 0);
  // pgmeta objects carry all state in omap; zero byte payload expected.
  ceph_assert(st.st_size == 0);
  cerr << "Log bounds are: " << "(" << info.log_tail << ","
       << info.last_update << "]" << std::endl;

  uint64_t max_entries = g_ceph_context->_conf->osd_max_pg_log_entries;
  if (info.last_update.version - info.log_tail.version <= max_entries) {
    cerr << "Log not larger than osd_max_pg_log_entries " << max_entries << std::endl;
    return 0;
  }

  ceph_assert(info.last_update.version > max_entries);
  version_t trim_to = info.last_update.version - max_entries;
  size_t trim_at_once = g_ceph_context->_conf->osd_pg_log_trim_max;
  eversion_t new_tail;
  bool done = false;

  while (!done) {
    // gather keys so we can delete them in a batch without
    // affecting the iterator
    set<string> keys_to_trim;
    {
    ObjectMap::ObjectMapIterator p = store->get_omap_iterator(ch, oid);
    if (!p)
      break;
    for (p->seek_to_first(); p->valid(); p->next()) {
      // Skip everything that is not a plain log entry key.
      if (p->key()[0] == '_')
	continue;
      if (p->key() == "can_rollback_to")
	continue;
      if (p->key() == "divergent_priors")
	continue;
      if (p->key() == "rollback_info_trimmed_to")
	continue;
      if (p->key() == "may_include_deletes_in_missing")
	continue;
      if (p->key().substr(0, 7) == string("missing"))
	continue;
      if (p->key().substr(0, 4) == string("dup_"))
	continue;

      bufferlist bl = p->value();
      auto bp = bl.cbegin();
      pg_log_entry_t e;
      try {
	e.decode_with_checksum(bp);
      } catch (const buffer::error &e) {
	cerr << "Error reading pg log entry: " << e.what() << std::endl;
      }
      if (debug) {
	cerr << "read entry " << e << std::endl;
      }
      if (e.version.version > trim_to) {
	done = true;
	break;
      }
      keys_to_trim.insert(p->key());
      new_tail = e.version;
      if (keys_to_trim.size() >= trim_at_once)
	break;
    }

    if (!p->valid())
      done = true;
    } // deconstruct ObjectMapIterator

    // delete the keys
    if (!dry_run && !keys_to_trim.empty()) {
      cout << "Removing keys " << *keys_to_trim.begin() << " - " << *keys_to_trim.rbegin() << std::endl;
      ObjectStore::Transaction t;
      t.omap_rmkeys(coll, oid, keys_to_trim);
      store->queue_transaction(ch, std::move(t));
      ch->flush();
    }
  }

  // update pg info with new tail
  if (!dry_run && new_tail != eversion_t()) {
    info.log_tail = new_tail;
    ObjectStore::Transaction t;
    int ret = write_info(t, map_epoch, info, past_intervals);
    if (ret)
      return ret;
    store->queue_transaction(ch, std::move(t));
    ch->flush();
  }

  // compact the db since we just removed a bunch of data
  cerr << "Finished trimming, now compacting..." << std::endl;
  if (!dry_run)
    store->compact();
  return 0;
}
// Trim pg log dup entries ("dup_" keys) down to osd_pg_log_dups_tracked,
// removing the oldest keys in chunks of osd_pg_log_trim_max, then compact.
// In dry-run mode nothing is removed, so a run needing multiple chunks
// would loop forever (warned below).  Returns 0.
int do_trim_pg_log_dups(ObjectStore *store, const coll_t &coll,
		   pg_info_t &info, const spg_t &pgid,
		   epoch_t map_epoch,
		   PastIntervals &past_intervals)
{
  ghobject_t oid = pgid.make_pgmeta_oid();
  struct stat st;
  auto ch = store->open_collection(coll);
  int r = store->stat(ch, oid, &st);
  ceph_assert(r == 0);
  // pgmeta objects keep all state in omap; no byte payload expected.
  ceph_assert(st.st_size == 0);

  const size_t max_dup_entries = g_ceph_context->_conf->osd_pg_log_dups_tracked;
  ceph_assert(max_dup_entries > 0);
  const size_t max_chunk_size = g_ceph_context->_conf->osd_pg_log_trim_max;
  ceph_assert(max_chunk_size > 0);

  cout << "max_dup_entries=" << max_dup_entries
       << " max_chunk_size=" << max_chunk_size << std::endl;
  if (dry_run) {
    cout << "Dry run enabled, so when many chunks are needed,"
	 << " the trimming will never stop!" << std::endl;
  }

  set<string> keys_to_keep;
  size_t num_removed = 0;
  do {
    set<string> keys_to_trim;
    {
    ObjectMap::ObjectMapIterator p = store->get_omap_iterator(ch, oid);
    if (!p)
      break;
    for (p->seek_to_first(); p->valid(); p->next()) {
      // Only "dup_" keys are candidates; skip everything else.
      if (p->key()[0] == '_')
	continue;
      if (p->key() == "can_rollback_to")
	continue;
      if (p->key() == "divergent_priors")
	continue;
      if (p->key() == "rollback_info_trimmed_to")
	continue;
      if (p->key() == "may_include_deletes_in_missing")
	continue;
      if (p->key().substr(0, 7) == string("missing"))
	continue;
      if (p->key().substr(0, 4) != string("dup_"))
	continue;

      // Keep a sliding window of the newest max_dup_entries keys; once it
      // overflows, the oldest key falls out into the trim set.
      keys_to_keep.insert(p->key());
      if (keys_to_keep.size() > max_dup_entries) {
	auto oldest_to_keep = keys_to_keep.begin();
	keys_to_trim.emplace(*oldest_to_keep);
	keys_to_keep.erase(oldest_to_keep);
      }
      if (keys_to_trim.size() >= max_chunk_size) {
	break;
      }
    }
    } // deconstruct ObjectMapIterator
    // delete the keys
    num_removed = keys_to_trim.size();
    if (!dry_run && !keys_to_trim.empty()) {
      cout << "Removing keys " << *keys_to_trim.begin() << " - " << *keys_to_trim.rbegin() << std::endl;
      ObjectStore::Transaction t;
      t.omap_rmkeys(coll, oid, keys_to_trim);
      store->queue_transaction(ch, std::move(t));
      ch->flush();
    }
  } while (num_removed == max_chunk_size);  // a short chunk means we're done

  // compact the db since we just removed a bunch of data
  cerr << "Finished trimming, now compacting..." << std::endl;
  if (!dry_run)
    store->compact();
  return 0;
}
const int OMAP_BATCH_SIZE = 25;

// Refill |oset| with up to OMAP_BATCH_SIZE key/value pairs from |iter|,
// advancing the iterator past the entries consumed.  An empty |oset| after
// the call means the iterator was exhausted.
void get_omap_batch(ObjectMap::ObjectMapIterator &iter, map<string, bufferlist> &oset)
{
  oset.clear();
  int remaining = OMAP_BATCH_SIZE;
  while (remaining > 0 && iter->valid()) {
    oset.emplace(iter->key(), iter->value());
    --remaining;
    iter->next();
  }
}
// Serialize one object into the export stream (file_fd): OBJECT_BEGIN with
// its object_info_t, DATA sections in max_read chunks, ATTRS, OMAP_HDR,
// OMAP batches, and OBJECT_END.  Returns 0 or a negative errno from the
// first failing store/write operation.
int ObjectStoreTool::export_file(ObjectStore *store, coll_t cid, ghobject_t &obj)
{
  struct stat st;
  mysize_t total;
  footer ft;

  auto ch = store->open_collection(cid);
  int ret = store->stat(ch, obj, &st);
  if (ret < 0)
    return ret;

  cerr << "Read " << obj << std::endl;

  total = st.st_size;
  if (debug)
    cerr << "size=" << total << std::endl;

  object_begin objb(obj);

  {
    bufferptr bp;
    bufferlist bl;
    ret = store->getattr(ch, obj, OI_ATTR, bp);
    if (ret < 0) {
      cerr << "getattr failure object_info " << ret << std::endl;
      return ret;
    }
    bl.push_back(bp);
    decode(objb.oi, bl);
    if (debug)
      cerr << "object_info: " << objb.oi << std::endl;
  }

  // NOTE: we include whiteouts, lost, etc.

  ret = write_section(TYPE_OBJECT_BEGIN, objb, file_fd);
  if (ret < 0)
    return ret;

  uint64_t offset = 0;
  bufferlist rawdatabl;
  while(total > 0) {
    rawdatabl.clear();
    mysize_t len = max_read;
    if (len > total)
      len = total;

    ret = store->read(ch, obj, offset, len, rawdatabl);
    if (ret < 0)
      return ret;
    // A zero-byte read before |total| is consumed means the object shrank
    // under us or the size was wrong; treat as corruption.
    if (ret == 0)
      return -EINVAL;

    data_section dblock(offset, len, rawdatabl);
    if (debug)
      cerr << "data section offset=" << offset << " len=" << len << std::endl;

    total -= ret;
    offset += ret;

    ret = write_section(TYPE_DATA, dblock, file_fd);
    if (ret) return ret;
  }

  //Handle attrs for this object
  map<string,bufferptr,less<>> aset;
  ret = store->getattrs(ch, obj, aset);
  if (ret) return ret;
  attr_section as(aset);
  ret = write_section(TYPE_ATTRS, as, file_fd);
  if (ret)
    return ret;

  if (debug) {
    cerr << "attrs size " << aset.size() << std::endl;
  }

  //Handle omap information
  bufferlist hdrbuf;
  ret = store->omap_get_header(ch, obj, &hdrbuf, true);
  if (ret < 0) {
    cerr << "omap_get_header: " << cpp_strerror(ret) << std::endl;
    return ret;
  }

  omap_hdr_section ohs(hdrbuf);
  ret = write_section(TYPE_OMAP_HDR, ohs, file_fd);
  if (ret)
    return ret;

  ObjectMap::ObjectMapIterator iter = store->get_omap_iterator(ch, obj);
  if (!iter) {
    ret = -ENOENT;
    cerr << "omap_get_iterator: " << cpp_strerror(ret) << std::endl;
    return ret;
  }
  iter->seek_to_first();
  int mapcount = 0;
  map<string, bufferlist> out;
  while(iter->valid()) {
    get_omap_batch(iter, out);

    if (out.empty()) break;

    mapcount += out.size();
    omap_section oms(out);
    ret = write_section(TYPE_OMAP, oms, file_fd);
    if (ret)
      return ret;
  }
  if (debug)
    cerr << "omap map size " << mapcount << std::endl;

  ret = write_simple(TYPE_OBJECT_END, file_fd);
  if (ret)
    return ret;

  return 0;
}
// Export every user object in |coll|, paginating the listing 300 at a time.
// pgmeta, temp, and generation (rollback) objects are skipped.  Returns 0
// or the first negative errno.
int ObjectStoreTool::export_files(ObjectStore *store, coll_t coll)
{
  ghobject_t next;
  auto ch = store->open_collection(coll);
  while (!next.is_max()) {
    vector<ghobject_t> objects;
    int r = store->collection_list(ch, next, ghobject_t::get_max(), 300,
      &objects, &next);
    if (r < 0)
      return r;
    for (vector<ghobject_t>::iterator i = objects.begin();
	 i != objects.end();
	 ++i) {
      ceph_assert(!i->hobj.is_meta());
      if (i->is_pgmeta() || i->hobj.is_temp() || !i->is_no_gen()) {
	continue;
      }
      r = export_file(store, coll, *i);
      if (r < 0)
	return r;
    }
  }
  return 0;
}
// Overwrite the stored incremental osdmap for epoch |e| with |bl|.  With
// e == 0 the epoch is taken from the decoded incremental; a mismatch or a
// missing existing object is fatal unless |force| is set.  Returns 0 on
// success (or dry-run), -EINVAL / -ENOENT otherwise.
int set_inc_osdmap(ObjectStore *store, epoch_t e, bufferlist& bl, bool force) {
  OSDMap::Incremental inc;
  auto it = bl.cbegin();
  inc.decode(it);
  if (e == 0) {
    e = inc.epoch;
  } else if (e != inc.epoch) {
    cerr << "incremental.epoch mismatch: "
	 << inc.epoch << " != " << e << std::endl;
    if (force) {
      cerr << "But will continue anyway." << std::endl;
    } else {
      return -EINVAL;
    }
  }
  auto ch = store->open_collection(coll_t::meta());
  const ghobject_t inc_oid = OSD::get_inc_osdmap_pobject_name(e);
  if (!store->exists(ch, inc_oid)) {
    cerr << "inc-osdmap (" << inc_oid << ") does not exist." << std::endl;
    if (!force) {
      return -ENOENT;
    }
    cout << "Creating a new epoch." << std::endl;
  }
  if (dry_run)
    return 0;
  ObjectStore::Transaction t;
  t.write(coll_t::meta(), inc_oid, 0, bl.length(), bl);
  // Truncate in case the previous blob was longer than the new one.
  t.truncate(coll_t::meta(), inc_oid, bl.length());
  store->queue_transaction(ch, std::move(t));
  return 0;
}
// Read the incremental osdmap blob for epoch |e| from the meta collection
// into |bl|.  Returns 0 on success, -ENOENT if the read fails.
int get_inc_osdmap(ObjectStore *store, epoch_t e, bufferlist& bl)
{
  auto ch = store->open_collection(coll_t::meta());
  const int r = store->read(ch,
			    OSD::get_inc_osdmap_pobject_name(e),
			    0, 0, bl);
  return r < 0 ? -ENOENT : 0;
}
// Overwrite the stored full osdmap for epoch |e| with |bl|.  With e == 0
// the epoch comes from the decoded map; mismatch or missing target object
// is fatal unless |force| is set.  Mirrors set_inc_osdmap above.  Returns
// 0 on success (or dry-run), -EINVAL / -ENOENT otherwise.
int set_osdmap(ObjectStore *store, epoch_t e, bufferlist& bl, bool force) {
  OSDMap osdmap;
  osdmap.decode(bl);
  if (e == 0) {
    e = osdmap.get_epoch();
  } else if (e != osdmap.get_epoch()) {
    cerr << "osdmap.epoch mismatch: "
	 << e << " != " << osdmap.get_epoch() << std::endl;
    if (force) {
      cerr << "But will continue anyway." << std::endl;
    } else {
      return -EINVAL;
    }
  }
  auto ch = store->open_collection(coll_t::meta());
  const ghobject_t full_oid = OSD::get_osdmap_pobject_name(e);
  if (!store->exists(ch, full_oid)) {
    cerr << "osdmap (" << full_oid << ") does not exist." << std::endl;
    if (!force) {
      return -ENOENT;
    }
    cout << "Creating a new epoch." << std::endl;
  }
  if (dry_run)
    return 0;
  ObjectStore::Transaction t;
  t.write(coll_t::meta(), full_oid, 0, bl.length(), bl);
  // Truncate in case the previous map was longer than the new one.
  t.truncate(coll_t::meta(), full_oid, bl.length());
  store->queue_transaction(ch, std::move(t));
  return 0;
}
// Read and decode the full osdmap for epoch |e|; the raw blob is left in
// |bl| and the decoded map in |osdmap|.  Returns 0 or -ENOENT.
int get_osdmap(ObjectStore *store, epoch_t e, OSDMap &osdmap, bufferlist& bl)
{
  auto ch = store->open_collection(coll_t::meta());
  if (store->read(ch, OSD::get_osdmap_pobject_name(e), 0, 0, bl) < 0) {
    cerr << "Can't find OSDMap for pg epoch " << e << std::endl;
    return -ENOENT;
  }
  osdmap.decode(bl);
  if (debug)
    cerr << osdmap << std::endl;
  return 0;
}
// Best-effort load of the pool pg_num history object into |*h|; a missing
// or empty object leaves |*h| untouched and is not an error.  Always
// returns 0 and prints the (possibly unchanged) history.
int get_pg_num_history(ObjectStore *store, pool_pg_num_history_t *h)
{
  ObjectStore::CollectionHandle ch = store->open_collection(coll_t::meta());
  bufferlist bl;
  auto pghist = OSD::make_pg_num_history_oid();
  int r = store->read(ch, pghist, 0, 0, bl, 0);
  // Read/decode failures are deliberately swallowed: older stores may not
  // have this object at all.
  if (r >= 0 && bl.length() > 0) {
    auto p = bl.cbegin();
    decode(*h, p);
  }
  cout << __func__ << " pg_num_history " << *h << std::endl;
  return 0;
}
// Populate ms.osdmap / ms.osdmap_bl from the store for the section's epoch.
int add_osdmap(ObjectStore *store, metadata_section &ms)
{
  int ret = get_osdmap(store, ms.map_epoch, ms.osdmap, ms.osdmap_bl);
  return ret;
}
// Write a complete pg export to file_fd: super header, PG_BEGIN, metadata
// section (info/log/past_intervals/missing + osdmap), all objects, PG_END.
// Returns 0 on success or a negative errno.
int ObjectStoreTool::do_export(
    CephContext *cct, ObjectStore *fs, coll_t coll, spg_t pgid,
    pg_info_t &info, epoch_t map_epoch, __u8 struct_ver,
    const OSDSuperblock& superblock,
    PastIntervals &past_intervals)
{
  PGLog::IndexedLog log;
  pg_missing_t missing;

  cerr << "Exporting " << pgid << " info " << info << std::endl;

  int ret = get_log(cct, fs, struct_ver, pgid, info, log, missing);
  // BUGFIX: get_log() returns 0 or a negative errno, so the previous
  // `ret > 0` check could never fire and log-read failures were silently
  // ignored, producing a possibly-corrupt export.  Abort on any error.
  if (ret)
    return ret;

  if (debug) {
    Formatter *formatter = Formatter::create("json-pretty");
    ceph_assert(formatter);
    dump_log(formatter, cerr, log, missing);
    delete formatter;
  }
  write_super();

  pg_begin pgb(pgid, superblock);
  // Special case: If replicated pg don't require the importing OSD to have shard feature
  if (pgid.is_no_shard()) {
    pgb.superblock.compat_features.incompat.remove(CEPH_OSD_FEATURE_INCOMPAT_SHARDS);
  }
  ret = write_section(TYPE_PG_BEGIN, pgb, file_fd);
  if (ret)
    return ret;
  // The metadata_section is now before files, so import can detect
  // errors and abort without wasting time.
  metadata_section ms(
    struct_ver,
    map_epoch,
    info,
    log,
    past_intervals,
    missing);
  ret = add_osdmap(fs, ms);
  if (ret)
    return ret;
  ret = write_section(TYPE_PG_METADATA, ms, file_fd);
  if (ret)
    return ret;

  ret = export_files(fs, coll);
  if (ret) {
    cerr << "export_files error " << ret << std::endl;
    return ret;
  }

  ret = write_simple(TYPE_PG_END, file_fd);
  if (ret)
    return ret;

  return 0;
}
// Decode a TYPE_DATA section from |bl| and print its offset/len; when
// |dump_data_path| is non-empty, additionally write the payload into that
// file at the section's offset (the file is opened without O_TRUNC so
// successive sections fill in the object image).  Always returns 0; write
// failures are only reported on stderr.
int dump_data(Formatter *formatter, bufferlist &bl,
	      const std::string &dump_data_path)
{
  auto ebliter = bl.cbegin();
  data_section ds;
  ds.decode(ebliter);

  formatter->open_object_section("data_block");
  formatter->dump_unsigned("offset", ds.offset);
  formatter->dump_unsigned("len", ds.len);
  if (!dump_data_path.empty()) {
    int fd = open(dump_data_path.c_str(), O_WRONLY|O_CREAT|O_LARGEFILE, 0666);
    if (fd == -1) {
      std::cerr << "open " << dump_data_path << " failed: "
	        << cpp_strerror(errno) << std::endl;
    } else {
      int ret = ds.databl.write_fd(fd, ds.offset);
      if (ret < 0) {
	std::cerr << "write " << dump_data_path << " failed: "
		  << cpp_strerror(ret) << std::endl;
      } else {
	formatter->dump_string("file", dump_data_path);
      }
      close(fd);
    }
  }
  formatter->close_section();
  formatter->flush(cout);
  return 0;
}
// Decode a TYPE_DATA section from |bl| and queue the corresponding object
// write into |t|.  Always returns 0.
int get_data(ObjectStore *store, coll_t coll, ghobject_t hoid,
	     ObjectStore::Transaction *t, bufferlist &bl)
{
  data_section ds;
  auto p = bl.cbegin();
  ds.decode(p);

  if (debug)
    cerr << "\tdata: offset " << ds.offset << " len " << ds.len << std::endl;
  t->write(coll, hoid, ds.offset, ds.len, ds.databl);
  return 0;
}
// Decode a TYPE_ATTRS section from |bl| and print its attributes: the
// decoded snapset (for head objects), user attrs ('_'-prefixed, value
// printed, base64-flagged when binary), and system attr names.  Always
// returns 0.
int dump_attrs(
  Formatter *formatter, ghobject_t hoid,
  bufferlist &bl)
{
  auto ebliter = bl.cbegin();
  attr_section as;
  as.decode(ebliter);

  // This could have been handled in the caller if we didn't need to
  // support exports that didn't include object_info_t in object_begin.
  if (hoid.generation == ghobject_t::NO_GEN &&
      hoid.hobj.is_head()) {
    map<string,bufferlist>::iterator mi = as.data.find(SS_ATTR);
    if (mi != as.data.end()) {
      SnapSet snapset;
      auto p = mi->second.cbegin();
      snapset.decode(p);
      formatter->open_object_section("snapset");
      snapset.dump(formatter);
      formatter->close_section();
    } else {
      formatter->open_object_section("snapset");
      formatter->dump_string("error", "missing SS_ATTR");
      formatter->close_section();
    }
  }

  formatter->open_object_section("attrs");
  formatter->open_array_section("user");
  // PERF: iterate by const reference — copying the pair would copy the
  // attribute name and its bufferlist on every iteration.
  for (const auto& kv : as.data) {
    // Skip system attributes
    if (('_' != kv.first.at(0)) || kv.first.size() == 1)
      continue;
    formatter->open_object_section("user_attr");
    // User attr names are stored with a leading '_', stripped for display.
    formatter->dump_string("name", kv.first.substr(1));
    bool b64;
    formatter->dump_string("value", cleanbin(kv.second, b64));
    formatter->dump_bool("Base64", b64);
    formatter->close_section();
  }
  formatter->close_section();
  formatter->open_array_section("system");
  for (const auto& kv : as.data) {
    // Skip user attributes
    if (('_' == kv.first.at(0)) && kv.first.size() != 1)
      continue;
    formatter->open_object_section("sys_attr");
    formatter->dump_string("name", kv.first);
    formatter->close_section();
  }
  formatter->close_section();
  formatter->close_section();
  formatter->flush(cout);

  return 0;
}
// Decode a TYPE_ATTRS section from |bl|, queue the setattrs into |t|, and
// for head objects replay the snapset's clone->snaps mapping into the
// SnapMapper (skipping clones missing from the store, e.g. cache pools —
// clones precede head in the export archive, so they exist by now if they
// were exported).  Always returns 0.
int get_attrs(
  ObjectStore *store, coll_t coll, ghobject_t hoid,
  ObjectStore::Transaction *t, bufferlist &bl,
  OSDriver &driver, SnapMapper &snap_mapper)
{
  auto ebliter = bl.cbegin();
  attr_section as;
  as.decode(ebliter);

  auto ch = store->open_collection(coll);
  if (debug)
    cerr << "\tattrs: len " << as.data.size() << std::endl;
  t->setattrs(coll, hoid, as.data);

  // This could have been handled in the caller if we didn't need to
  // support exports that didn't include object_info_t in object_begin.
  if (hoid.generation == ghobject_t::NO_GEN &&
      hoid.hobj.is_head()) {
    map<string,bufferlist>::iterator mi = as.data.find(SS_ATTR);
    if (mi != as.data.end()) {
      SnapSet snapset;
      auto p = mi->second.cbegin();
      snapset.decode(p);
      cout << "snapset " << snapset << std::endl;
      for (auto& p : snapset.clone_snaps) {
	ghobject_t clone = hoid;
	clone.hobj.snap = p.first;
	set<snapid_t> snaps(p.second.begin(), p.second.end());
	if (!store->exists(ch, clone)) {
	  // no clone, skip.  this is probably a cache pool.  this works
	  // because we use a separate transaction per object and clones
	  // come before head in the archive.
	  if (debug)
	    cerr << "\tskipping missing " << clone << " (snaps "
		 << snaps << ")" << std::endl;
	  continue;
	}
	if (debug)
	  cerr << "\tsetting " << clone.hobj << " snaps " << snaps
	       << std::endl;
	OSDriver::OSTransaction _t(driver.get_transaction(t));
	ceph_assert(!snaps.empty());
	snap_mapper.add_oid(clone.hobj, snaps, &_t);
      }
    } else {
      cerr << "missing SS_ATTR on " << hoid << std::endl;
    }
  }
  return 0;
}
// Print the omap header section of an export stream as JSON.
int dump_omap_hdr(Formatter *formatter, bufferlist &bl)
{
  omap_hdr_section section;
  auto it = bl.cbegin();
  section.decode(it);
  formatter->open_object_section("omap_header");
  formatter->dump_string("value",
                         string(section.hdr.c_str(), section.hdr.length()));
  formatter->close_section();
  formatter->flush(cout);
  return 0;
}
// Replay an omap header section from an export onto the target object.
int get_omap_hdr(ObjectStore *store, coll_t coll, ghobject_t hoid,
                 ObjectStore::Transaction *t, bufferlist &bl)
{
  omap_hdr_section section;
  auto it = bl.cbegin();
  section.decode(it);
  if (debug)
    cerr << "\tomap header: "
         << string(section.hdr.c_str(), section.hdr.length())
         << std::endl;
  t->omap_setheader(coll, hoid, section.hdr);
  return 0;
}
// Print an omap data section as JSON: total entry count plus each
// key/value pair.  Values are filtered through cleanbin(); the "Base64"
// field records whether the value had to be base64-encoded.
int dump_omap(Formatter *formatter, bufferlist &bl)
{
  auto ebliter = bl.cbegin();
  omap_section os;
  os.decode(ebliter);
  formatter->open_object_section("omaps");
  formatter->dump_unsigned("count", os.omap.size());
  formatter->open_array_section("data");
  // Iterate by reference: the original copied every (string, bufferlist)
  // pair on each iteration.  Non-const because cleanbin() may need a
  // mutable bufferlist (c_str() can rebuild the buffer).
  for (auto& o : os.omap) {
    formatter->open_object_section("omap");
    formatter->dump_string("name", o.first);
    bool b64;
    formatter->dump_string("value", cleanbin(o.second, b64));
    formatter->dump_bool("Base64", b64);
    formatter->close_section();
  }
  formatter->close_section();
  formatter->close_section();
  formatter->flush(cout);
  return 0;
}
// Replay an omap data section from an export onto the target object.
int get_omap(ObjectStore *store, coll_t coll, ghobject_t hoid,
             ObjectStore::Transaction *t, bufferlist &bl)
{
  omap_section section;
  auto it = bl.cbegin();
  section.decode(it);
  if (debug)
    cerr << "\tomap: size " << section.omap.size() << std::endl;
  t->omap_setkeys(coll, hoid, section.omap);
  return 0;
}
// Dump a single object from an export stream as JSON: the object_begin
// header (oid + object_info_t), then each per-object section (data,
// attrs, omap header, omap) until TYPE_OBJECT_END.  When dump_data_dir
// is non-empty, raw data is written to a file there instead of inline.
// Returns 0 on success, negative errno on stream/format errors.
int ObjectStoreTool::dump_object(Formatter *formatter,
                                 bufferlist &bl,
                                 const std::string &dump_data_dir)
{
  auto ebliter = bl.cbegin();
  object_begin ob;
  ob.decode(ebliter);
  // Temporary objects must never appear in an export.
  if (ob.hoid.hobj.is_temp()) {
    cerr << "ERROR: Export contains temporary object '" << ob.hoid << "'" << std::endl;
    return -EFAULT;
  }
  formatter->open_object_section("object");
  formatter->open_object_section("oid");
  ob.hoid.dump(formatter);
  formatter->close_section();
  formatter->open_object_section("object_info");
  ob.oi.dump(formatter);
  formatter->close_section();
  bufferlist ebl;
  bool done = false;
  // Consume sections until the object terminator; unknown-but-typed
  // sections are skipped for forward compatibility.
  while(!done) {
    sectiontype_t type;
    int ret = read_section(&type, &ebl);
    if (ret)
      return ret;
    //cout << "\tdo_object: Section type " << hex << type << dec << std::endl;
    //cout << "\t\tsection size " << ebl.length() << std::endl;
    if (type >= END_OF_TYPES) {
      cout << "Skipping unknown object section type" << std::endl;
      continue;
    }
    switch(type) {
    case TYPE_DATA:
      if (dry_run) break;
      ret = dump_data(formatter, ebl,
                      dump_data_dir.empty() ?
                      "" : dump_data_dir + "/" + stringify(ob.hoid.hobj));
      if (ret) return ret;
      break;
    case TYPE_ATTRS:
      if (dry_run) break;
      ret = dump_attrs(formatter, ob.hoid, ebl);
      if (ret) return ret;
      break;
    case TYPE_OMAP_HDR:
      if (dry_run) break;
      ret = dump_omap_hdr(formatter, ebl);
      if (ret) return ret;
      break;
    case TYPE_OMAP:
      if (dry_run) break;
      ret = dump_omap(formatter, ebl);
      if (ret) return ret;
      break;
    case TYPE_OBJECT_END:
      done = true;
      break;
    default:
      cerr << "Unknown section type " << type << std::endl;
      return -EFAULT;
    }
  }
  formatter->close_section();
  return 0;
}
// Import a single object from an export stream into collection coll.
// Rejects temporary objects, and (for non-hit-set objects) skips any
// object that no longer maps to this PG under the export's OSDMap —
// setting *skipped_objects so the caller can invalidate stats.  All of
// the object's sections are replayed into one transaction which is
// queued and flushed at the end.  Returns 0 on success or skip,
// negative errno on error.
int ObjectStoreTool::get_object(ObjectStore *store,
                                OSDriver& driver,
                                SnapMapper& mapper,
                                coll_t coll,
                                bufferlist &bl, OSDMap &origmap,
                                bool *skipped_objects)
{
  ObjectStore::Transaction tran;
  ObjectStore::Transaction *t = &tran;
  auto ebliter = bl.cbegin();
  object_begin ob;
  ob.decode(ebliter);
  if (ob.hoid.hobj.is_temp()) {
    cerr << "ERROR: Export contains temporary object '" << ob.hoid << "'" << std::endl;
    return -EFAULT;
  }
  ceph_assert(g_ceph_context);
  auto ch = store->open_collection(coll);
  // Hit-set objects are internal and bypass the placement check.
  if (ob.hoid.hobj.nspace != g_ceph_context->_conf->osd_hit_set_namespace) {
    object_t oid = ob.hoid.hobj.oid;
    object_locator_t loc(ob.hoid.hobj);
    pg_t raw_pgid = origmap.object_locator_to_pg(oid, loc);
    pg_t pgid = origmap.raw_pg_to_pg(raw_pgid);
    spg_t coll_pgid;
    if (coll.is_pg(&coll_pgid) == false) {
      cerr << "INTERNAL ERROR: Bad collection during import" << std::endl;
      return -EFAULT;
    }
    if (coll_pgid.shard != ob.hoid.shard_id) {
      cerr << "INTERNAL ERROR: Importing shard " << coll_pgid.shard
           << " but object shard is " << ob.hoid.shard_id << std::endl;
      return -EFAULT;
    }
    // Object maps to a different PG (e.g. pre-split export): skip it but
    // still consume its sections from the stream.
    if (coll_pgid.pgid != pgid) {
      cerr << "Skipping object '" << ob.hoid << "' which belongs in pg " << pgid << std::endl;
      *skipped_objects = true;
      skip_object(bl);
      return 0;
    }
  }
  if (!dry_run)
    t->touch(coll, ob.hoid);
  cout << "Write " << ob.hoid << std::endl;
  bufferlist ebl;
  bool done = false;
  // Replay each per-object section until TYPE_OBJECT_END.
  while(!done) {
    sectiontype_t type;
    int ret = read_section(&type, &ebl);
    if (ret)
      return ret;
    //cout << "\tdo_object: Section type " << hex << type << dec << std::endl;
    //cout << "\t\tsection size " << ebl.length() << std::endl;
    if (type >= END_OF_TYPES) {
      cout << "Skipping unknown object section type" << std::endl;
      continue;
    }
    switch(type) {
    case TYPE_DATA:
      if (dry_run) break;
      ret = get_data(store, coll, ob.hoid, t, ebl);
      if (ret) return ret;
      break;
    case TYPE_ATTRS:
      if (dry_run) break;
      ret = get_attrs(store, coll, ob.hoid, t, ebl, driver, mapper);
      if (ret) return ret;
      break;
    case TYPE_OMAP_HDR:
      if (dry_run) break;
      ret = get_omap_hdr(store, coll, ob.hoid, t, ebl);
      if (ret) return ret;
      break;
    case TYPE_OMAP:
      if (dry_run) break;
      ret = get_omap(store, coll, ob.hoid, t, ebl);
      if (ret) return ret;
      break;
    case TYPE_OBJECT_END:
      done = true;
      break;
    default:
      cerr << "Unknown section type " << type << std::endl;
      return -EFAULT;
    }
  }
  if (!dry_run) {
    wait_until_done(t, [&] {
      store->queue_transaction(ch, std::move(*t));
      ch->flush();
    });
  }
  return 0;
}
// Dump the metadata_section of an export as JSON (struct version, map
// epoch, embedded OSDMap, pg_info_t, log and missing set) and validate
// that the embedded OSDMap's epoch matches the recorded map_epoch.
// ms is decoded in place and left populated for the caller.
int dump_pg_metadata(Formatter *formatter, bufferlist &bl, metadata_section &ms)
{
  auto ebliter = bl.cbegin();
  ms.decode(ebliter);
  formatter->open_object_section("metadata_section");
  formatter->dump_unsigned("pg_disk_version", (int)ms.struct_ver);
  formatter->dump_unsigned("map_epoch", ms.map_epoch);
  formatter->open_object_section("OSDMap");
  ms.osdmap.dump(formatter);
  formatter->close_section();
  formatter->flush(cout);
  cout << std::endl;
  formatter->open_object_section("info");
  ms.info.dump(formatter);
  formatter->close_section();
  formatter->flush(cout);
  formatter->open_object_section("log");
  ms.log.dump(formatter);
  formatter->close_section();
  formatter->flush(cout);
  formatter->open_object_section("pg_missing_t");
  ms.missing.dump(formatter);
  formatter->close_section();
  // XXX: ms.past_intervals?
  formatter->close_section();
  formatter->flush(cout);
  // An epoch of 0 means an old export without an embedded OSDMap;
  // otherwise the two epochs must agree.
  if (ms.osdmap.get_epoch() != 0 && ms.map_epoch != ms.osdmap.get_epoch()) {
    cerr << "FATAL: Invalid OSDMap epoch in export data" << std::endl;
    return -EFAULT;
  }
  return 0;
}
// Decode the metadata_section for an import, retarget it to pgid, and
// validate it against the local OSD superblock:
//  - embedded OSDMap epoch must match the recorded map_epoch
//  - the export may not be newer than the OSD's current epoch
//  - ancient exports without an OSDMap are rejected
//  - exports older than the OSD's oldest_map require --force
// ms is left populated for the caller.  Returns 0 or negative errno.
int get_pg_metadata(ObjectStore *store, bufferlist &bl, metadata_section &ms,
                    const OSDSuperblock& sb, spg_t pgid)
{
  auto ebliter = bl.cbegin();
  ms.decode(ebliter);
  spg_t old_pgid = ms.info.pgid;
  // Retarget the info to the destination pgid (may differ from the
  // pgid the export was taken from).
  ms.info.pgid = pgid;
  if (debug) {
    cout << "export pgid " << old_pgid << std::endl;
    cout << "struct_v " << (int)ms.struct_ver << std::endl;
    cout << "map epoch " << ms.map_epoch << std::endl;
#ifdef DIAGNOSTIC
    Formatter *formatter = new JSONFormatter(true);
    formatter->open_object_section("stuff");
    formatter->open_object_section("importing OSDMap");
    ms.osdmap.dump(formatter);
    formatter->close_section();
    formatter->flush(cout);
    cout << std::endl;
    cout << "osd current epoch " << sb.current_epoch << std::endl;
    formatter->open_object_section("info");
    ms.info.dump(formatter);
    formatter->close_section();
    formatter->flush(cout);
    cout << std::endl;
    formatter->open_object_section("log");
    ms.log.dump(formatter);
    formatter->close_section();
    formatter->flush(cout);
    cout << std::endl;
    formatter->close_section();
    formatter->flush(cout);
    cout << std::endl;
#endif
  }
  if (ms.osdmap.get_epoch() != 0 && ms.map_epoch != ms.osdmap.get_epoch()) {
    cerr << "FATAL: Invalid OSDMap epoch in export data" << std::endl;
    return -EFAULT;
  }
  if (ms.map_epoch > sb.current_epoch) {
    cerr << "ERROR: Export PG's map_epoch " << ms.map_epoch << " > OSD's epoch " << sb.current_epoch << std::endl;
    cerr << "The OSD you are using is older than the exported PG" << std::endl;
    cerr << "Either use another OSD or join selected OSD to cluster to update it first" << std::endl;
    return -EINVAL;
  }
  // Old exports didn't include OSDMap
  if (ms.osdmap.get_epoch() == 0) {
    cerr << "WARNING: No OSDMap in old export, this is an ancient export."
      " Not supported." << std::endl;
    return -EINVAL;
  }
  // An export map older than oldest_map means we can't reconstruct
  // complete PastIntervals; only proceed with --force.
  if (ms.osdmap.get_epoch() < sb.oldest_map) {
    cerr << "PG export's map " << ms.osdmap.get_epoch()
         << " is older than OSD's oldest_map " << sb.oldest_map << std::endl;
    if (!force) {
      cerr << " pass --force to proceed anyway (with incomplete PastIntervals)"
           << std::endl;
      return -EINVAL;
    }
  }
  if (debug) {
    cerr << "Import pgid " << ms.info.pgid << std::endl;
    cerr << "Previous past_intervals " << ms.past_intervals << std::endl;
    cerr << "history.same_interval_since "
         << ms.info.history.same_interval_since << std::endl;
  }
  return 0;
}
// out: divergent priors from "in" that still map to import_pgid under
//      curmap (hit-set-namespace entries are always kept).
// reject: entries dropped from "in" (temporary objects, or objects that
//         now map to a different pg).  Only the sets themselves are set.
void filter_divergent_priors(spg_t import_pgid, const OSDMap &curmap,
    const string &hit_set_namespace, const divergent_priors_t &in,
    divergent_priors_t &out, divergent_priors_t &reject)
{
  out.clear();
  reject.clear();
  for (const auto& prior : in) {
    const hobject_t& obj = prior.second;
    // Divergent priors for temporary objects are never kept.
    if (obj.is_temp()) {
      reject.insert(prior);
      continue;
    }
    // Internal hit-set objects bypass the placement check.
    if (obj.nspace == hit_set_namespace) {
      out.insert(prior);
      continue;
    }
    object_t oid = obj.oid;
    object_locator_t loc(obj);
    pg_t raw_pgid = curmap.object_locator_to_pg(oid, loc);
    pg_t pgid = curmap.raw_pg_to_pg(raw_pgid);
    (import_pgid.pgid == pgid ? out : reject).insert(prior);
  }
}
// Dump a whole export stream as JSON without importing anything:
// validates the super header, requires TYPE_PG_BEGIN first (pool
// exports are not supported), then dumps objects and the metadata
// section until TYPE_PG_END.  When dump_data_dir is non-empty, object
// data goes to files there.  Returns 0 or negative errno.
int ObjectStoreTool::dump_export(Formatter *formatter,
                                 const std::string &dump_data_dir)
{
  bufferlist ebl;
  pg_info_t info;
  PGLog::IndexedLog log;
  //bool skipped_objects = false;
  int ret = read_super();
  if (ret)
    return ret;
  if (sh.magic != super_header::super_magic) {
    cerr << "Invalid magic number" << std::endl;
    return -EFAULT;
  }
  if (sh.version > super_header::super_ver) {
    cerr << "Can't handle export format version=" << sh.version << std::endl;
    return -EINVAL;
  }
  formatter->open_object_section("Export");
  //First section must be TYPE_PG_BEGIN
  sectiontype_t type;
  ret = read_section(&type, &ebl);
  if (ret)
    return ret;
  if (type == TYPE_POOL_BEGIN) {
    cerr << "Dump of pool exports not supported" << std::endl;
    return -EINVAL;
  } else if (type != TYPE_PG_BEGIN) {
    cerr << "Invalid first section type " << std::to_string(type) << std::endl;
    return -EFAULT;
  }
  auto ebliter = ebl.cbegin();
  pg_begin pgb;
  pgb.decode(ebliter);
  spg_t pgid = pgb.pgid;
  formatter->dump_string("pgid", stringify(pgid));
  formatter->dump_string("cluster_fsid", stringify(pgb.superblock.cluster_fsid));
  formatter->dump_string("features", stringify(pgb.superblock.compat_features));
  bool done = false;
  bool found_metadata = false;
  metadata_section ms;
  // The "objects" array is opened lazily on the first object so a
  // metadata-only export doesn't emit an empty array.
  bool objects_started = false;
  while(!done) {
    ret = read_section(&type, &ebl);
    if (ret)
      return ret;
    if (debug) {
      cerr << "dump_export: Section type " << std::to_string(type) << std::endl;
    }
    if (type >= END_OF_TYPES) {
      cerr << "Skipping unknown section type" << std::endl;
      continue;
    }
    switch(type) {
    case TYPE_OBJECT_BEGIN:
      if (!objects_started) {
        formatter->open_array_section("objects");
        objects_started = true;
      }
      ret = dump_object(formatter, ebl, dump_data_dir);
      if (ret) return ret;
      break;
    case TYPE_PG_METADATA:
      // Metadata is expected before any objects; warn but continue.
      if (objects_started)
        cerr << "WARNING: metadata_section out of order" << std::endl;
      ret = dump_pg_metadata(formatter, ebl, ms);
      if (ret) return ret;
      found_metadata = true;
      break;
    case TYPE_PG_END:
      if (objects_started) {
        formatter->close_section();
      }
      done = true;
      break;
    default:
      cerr << "Unknown section type " << std::to_string(type) << std::endl;
      return -EFAULT;
    }
  }
  if (!found_metadata) {
    cerr << "Missing metadata section" << std::endl;
    return -EFAULT;
  }
  formatter->close_section();
  formatter->flush(cout);
  return 0;
}
// Import a previously exported PG into this OSD's objectstore.
// Validates the export header, optional user-specified pgid, cluster
// fsid and compat features, checks the pool still exists and that no
// recorded pg_num change would make this PG collide with an existing
// split child or merge target, then replays the metadata and object
// sections.  The new collection is flagged for removal (a "_remove"
// omap key on the pgmeta object) until the import completes, so a
// partial import is cleaned up by finish_remove_pgs() next run.
// Returns 0 on success, negative errno on error, or a positive exit
// status (10/11/12) used by test code for specific failure modes.
int ObjectStoreTool::do_import(ObjectStore *store, OSDSuperblock& sb,
                               bool force, std::string pgidstr)
{
  bufferlist ebl;
  pg_info_t info;
  PGLog::IndexedLog log;
  bool skipped_objects = false;
  // Clean up any collections left flagged for removal by an earlier
  // interrupted import.
  if (!dry_run)
    finish_remove_pgs(store);
  int ret = read_super();
  if (ret)
    return ret;
  if (sh.magic != super_header::super_magic) {
    cerr << "Invalid magic number" << std::endl;
    return -EFAULT;
  }
  if (sh.version > super_header::super_ver) {
    cerr << "Can't handle export format version=" << sh.version << std::endl;
    return -EINVAL;
  }
  //First section must be TYPE_PG_BEGIN
  sectiontype_t type;
  ret = read_section(&type, &ebl);
  if (ret)
    return ret;
  if (type == TYPE_POOL_BEGIN) {
    cerr << "Pool exports cannot be imported into a PG" << std::endl;
    return -EINVAL;
  } else if (type != TYPE_PG_BEGIN) {
    cerr << "Invalid first section type " << std::to_string(type) << std::endl;
    return -EFAULT;
  }
  auto ebliter = ebl.cbegin();
  pg_begin pgb;
  pgb.decode(ebliter);
  spg_t pgid = pgb.pgid;
  // If the user named a pgid on the command line it must match the one
  // recorded in the export.
  if (pgidstr.length()) {
    spg_t user_pgid;
    bool ok = user_pgid.parse(pgidstr.c_str());
    // This succeeded in main() already
    ceph_assert(ok);
    if (pgid != user_pgid) {
      cerr << "specified pgid " << user_pgid
           << " does not match actual pgid " << pgid << std::endl;
      return -EINVAL;
    }
  }
  // Cross-cluster imports are refused unless --force is given.
  if (!pgb.superblock.cluster_fsid.is_zero()
      && pgb.superblock.cluster_fsid != sb.cluster_fsid) {
    cerr << "Export came from different cluster with fsid "
         << pgb.superblock.cluster_fsid << std::endl;
    if (force) {
      cerr << "Ignoring this problem due to --force" << std::endl;
    } else {
      return -EINVAL;
    }
  }
  if (debug) {
    cerr << "Exported features: " << pgb.superblock.compat_features << std::endl;
  }
  // Special case: Old export has SHARDS incompat feature on replicated pg, remove it
  if (pgid.is_no_shard())
    pgb.superblock.compat_features.incompat.remove(CEPH_OSD_FEATURE_INCOMPAT_SHARDS);
  if (sb.compat_features.compare(pgb.superblock.compat_features) == -1) {
    CompatSet unsupported = sb.compat_features.unsupported(pgb.superblock.compat_features);
    cerr << "Export has incompatible features set " << unsupported << std::endl;
    // Let them import if they specify the --force option
    if (!force)
        return 11;  // Positive return means exit status
  }
  // we need the latest OSDMap to check for collisions
  OSDMap curmap;
  bufferlist bl;
  ret = get_osdmap(store, sb.current_epoch, curmap, bl);
  if (ret) {
    cerr << "Can't find latest local OSDMap " << sb.current_epoch << std::endl;
    return ret;
  }
  if (!curmap.have_pg_pool(pgid.pgid.m_pool)) {
    cerr << "Pool " << pgid.pgid.m_pool << " no longer exists" << std::endl;
    // Special exit code for this error, used by test code
    return 10;  // Positive return means exit status
  }
  pool_pg_num_history_t pg_num_history;
  get_pg_num_history(store, &pg_num_history);
  ghobject_t pgmeta_oid = pgid.make_pgmeta_oid();
  // Check for PG already present.
  coll_t coll(pgid);
  if (store->collection_exists(coll)) {
    cerr << "pgid " << pgid << " already exists" << std::endl;
    return -EEXIST;
  }
  ObjectStore::CollectionHandle ch;
  // Snap mapper entries are replayed as objects are imported.
  OSDriver driver(
    store,
    coll_t(),
    OSD::make_snapmapper_oid());
  SnapMapper mapper(g_ceph_context, &driver, 0, 0, 0, pgid.shard);
  cout << "Importing pgid " << pgid;
  cout << std::endl;
  bool done = false;
  bool found_metadata = false;
  metadata_section ms;
  while(!done) {
    ret = read_section(&type, &ebl);
    if (ret)
      return ret;
    if (debug) {
      cout << __func__ << ": Section type " << std::to_string(type) << std::endl;
    }
    if (type >= END_OF_TYPES) {
      cout << "Skipping unknown section type" << std::endl;
      continue;
    }
    switch(type) {
    case TYPE_OBJECT_BEGIN:
      // Objects must come after the metadata section (we need ms.osdmap
      // for the placement check in get_object()).
      ceph_assert(found_metadata);
      ret = get_object(store, driver, mapper, coll, ebl, ms.osdmap,
                       &skipped_objects);
      if (ret) return ret;
      break;
    case TYPE_PG_METADATA:
      ret = get_pg_metadata(store, ebl, ms, sb, pgid);
      if (ret) return ret;
      found_metadata = true;

      if (pgid != ms.info.pgid) {
        cerr << "specified pgid " << pgid << " does not match import file pgid "
             << ms.info.pgid << std::endl;
        return -EINVAL;
      }

      // make sure there are no conflicting splits or merges
      if (ms.osdmap.have_pg_pool(pgid.pgid.pool())) {
        auto p = pg_num_history.pg_nums.find(pgid.pgid.m_pool);
        if (p != pg_num_history.pg_nums.end() &&
            !p->second.empty()) {
          unsigned start_pg_num = ms.osdmap.get_pg_num(pgid.pgid.pool());
          unsigned pg_num = start_pg_num;
          // Walk every recorded pg_num change since the export's epoch
          // and verify none of the resulting split children or merge
          // targets already exist on this OSD.
          for (auto q = p->second.lower_bound(ms.map_epoch);
               q != p->second.end();
               ++q) {
            unsigned new_pg_num = q->second;
            cout << "pool " << pgid.pgid.pool() << " pg_num " << pg_num
                 << " -> " << new_pg_num << std::endl;

            // check for merge target
            spg_t target;
            if (pgid.is_merge_source(pg_num, new_pg_num, &target)) {
              // FIXME: this checks assumes the OSD's PG is at the OSD's
              // map epoch; it could be, say, at *our* epoch, pre-merge.
              coll_t coll(target);
              if (store->collection_exists(coll)) {
                cerr << "pgid " << pgid << " merges to target " << target
                     << " which already exists" << std::endl;
                return 12;
              }
            }

            // check for split children
            set<spg_t> children;
            if (pgid.is_split(start_pg_num, new_pg_num, &children)) {
              cerr << " children are " << children << std::endl;
              for (auto child : children) {
                coll_t coll(child);
                if (store->collection_exists(coll)) {
                  cerr << "pgid " << pgid << " splits to " << children
                       << " and " << child << " exists" << std::endl;
                  return 12;
                }
              }
            }
            pg_num = new_pg_num;
          }
        }
      } else {
        cout << "pool " << pgid.pgid.pool() << " doesn't existing, not checking"
             << " for splits or mergers" << std::endl;
      }

      if (!dry_run) {
        ObjectStore::Transaction t;
        ch = store->create_new_collection(coll);
        create_pg_collection(
          t, pgid,
          pgid.get_split_bits(ms.osdmap.get_pg_pool(pgid.pool())->get_pg_num()));
        init_pg_ondisk(t, pgid, NULL);

        // mark this coll for removal until we're done
        map<string,bufferlist> values;
        encode((char)1, values["_remove"]);
        t.omap_setkeys(coll, pgid.make_pgmeta_oid(), values);

        store->queue_transaction(ch, std::move(t));
      }
      break;
    case TYPE_PG_END:
      ceph_assert(found_metadata);
      done = true;
      break;
    default:
      cerr << "Unknown section type " << std::to_string(type) << std::endl;
      return -EFAULT;
    }
  }
  if (!found_metadata) {
    cerr << "Missing metadata section" << std::endl;
    return -EFAULT;
  }
  ObjectStore::Transaction t;
  if (!dry_run) {
    // Drop log entries, divergent priors and missing-set entries for
    // objects that no longer map to this PG (mirrors the per-object
    // skip logic in get_object()).
    pg_log_t newlog, reject;
    pg_log_t::filter_log(pgid, ms.osdmap, g_ceph_context->_conf->osd_hit_set_namespace,
      ms.log, newlog, reject);
    if (debug) {
      for (list<pg_log_entry_t>::iterator i = newlog.log.begin();
           i != newlog.log.end(); ++i)
        cerr << "Keeping log entry " << *i << std::endl;
      for (list<pg_log_entry_t>::iterator i = reject.log.begin();
           i != reject.log.end(); ++i)
        cerr << "Skipping log entry " << *i << std::endl;
    }
    divergent_priors_t newdp, rejectdp;
    filter_divergent_priors(pgid, ms.osdmap, g_ceph_context->_conf->osd_hit_set_namespace,
      ms.divergent_priors, newdp, rejectdp);
    ms.divergent_priors = newdp;
    if (debug) {
      for (divergent_priors_t::iterator i = newdp.begin();
           i != newdp.end(); ++i)
        cerr << "Keeping divergent_prior " << *i << std::endl;
      for (divergent_priors_t::iterator i = rejectdp.begin();
           i != rejectdp.end(); ++i)
        cerr << "Skipping divergent_prior " << *i << std::endl;
    }
    ms.missing.filter_objects([&](const hobject_t &obj) {
      if (obj.nspace == g_ceph_context->_conf->osd_hit_set_namespace)
        return false;
      ceph_assert(!obj.is_temp());
      object_t oid = obj.oid;
      object_locator_t loc(obj);
      pg_t raw_pgid = ms.osdmap.object_locator_to_pg(oid, loc);
      pg_t _pgid = ms.osdmap.raw_pg_to_pg(raw_pgid);
      return pgid.pgid != _pgid;
    });
    if (debug) {
      pg_missing_t missing;
      Formatter *formatter = Formatter::create("json-pretty");
      dump_log(formatter, cerr, newlog, ms.missing);
      delete formatter;
    }
    // Just like a split invalidate stats since the object count is changed
    if (skipped_objects)
      ms.info.stats.stats_invalid = true;
    ret = write_pg(
      t,
      ms.map_epoch,
      ms.info,
      newlog,
      ms.past_intervals,
      ms.divergent_priors,
      ms.missing);
    if (ret) return ret;
  }
  // done, clear removal flag
  if (debug)
    cerr << "done, clearing removal flag" << std::endl;
  if (!dry_run) {
    t.omap_rmkey(coll, pgid.make_pgmeta_oid(), "_remove");
    wait_until_done(&t, [&] {
      store->queue_transaction(ch, std::move(t));
      // make sure we flush onreadable items before mapper/driver are destroyed.
      ch->flush();
    });
  }
  return 0;
}
// List objects matching "object" (name or json) in the optional
// namespace, across one pg or the whole store; head restricts the
// listing to head/snapdir objects.
int do_list(ObjectStore *store, string pgidstr, string object, boost::optional<std::string> nspace,
            Formatter *formatter, bool debug, bool human_readable, bool head)
{
  lookup_ghobject lookup(object, nspace, head);
  const int r = pgidstr.empty()
    ? action_on_all_objects(store, lookup, debug)
    : action_on_all_objects_in_pg(store, pgidstr, lookup, debug);
  if (r)
    return r;
  lookup.dump(formatter, human_readable);
  formatter->flush(cout);
  return 0;
}
// List objects whose per-object scan time exceeds "threshold" seconds,
// across one pg or the whole store.
int do_list_slow(ObjectStore *store, string pgidstr, string object,
                 double threshold, Formatter *formatter, bool debug, bool human_readable)
{
  lookup_slow_ghobject lookup(object, threshold);
  const int r = pgidstr.empty()
    ? action_on_all_objects(store, lookup, debug)
    : action_on_all_objects_in_pg(store, pgidstr, lookup, debug);
  if (r)
    return r;
  lookup.dump(formatter, human_readable);
  formatter->flush(cout);
  return 0;
}
// List matching objects in the meta collection only.
int do_meta(ObjectStore *store, string object, Formatter *formatter, bool debug, bool human_readable)
{
  boost::optional<std::string> nspace; // Not specified
  lookup_ghobject lookup(object, nspace);
  const int r =
    action_on_all_objects_in_exact_pg(store, coll_t::meta(), lookup, debug);
  if (r)
    return r;
  lookup.dump(formatter, human_readable);
  formatter->flush(cout);
  return 0;
}
// What remove_object() should delete: both the snap-mapper entry and
// the on-disk object, only the snap-mapper entry, or only the object.
enum rmtype {
  BOTH,
  SNAPMAP,
  NOSNAPMAP
};
// Remove ghobj's snap-mapper entry and/or the object itself, according
// to "type".  A missing snap-mapper entry (-ENOENT) is not an error.
int remove_object(coll_t coll, ghobject_t &ghobj,
      SnapMapper &mapper,
      MapCacher::Transaction<std::string, bufferlist> *_t,
      ObjectStore::Transaction *t,
      enum rmtype type)
{
  const bool unmap_snaps = (type == BOTH || type == SNAPMAP);
  const bool unlink_obj = (type == BOTH || type == NOSNAPMAP);

  if (unmap_snaps) {
    int r = mapper.remove_oid(ghobj.hobj, _t);
    if (r < 0 && r != -ENOENT) {
      cerr << "remove_oid returned " << cpp_strerror(r) << std::endl;
      return r;
    }
  }
  if (unlink_obj)
    t->remove(coll, ghobj);
  return 0;
}
int get_snapset(ObjectStore *store, coll_t coll, ghobject_t &ghobj, SnapSet &ss, bool silent);
// Remove an object from coll.  For head/snapdir objects the SnapSet is
// consulted: with "all", every clone listed there is removed as well;
// without it, removal is refused if clones exist unless --force is
// given (which removes only the head and ignores the clones).  "type"
// selects whether the on-disk object, the snap-mapper entry, or both
// are deleted.  Returns 0 or negative errno.
int do_remove_object(ObjectStore *store, coll_t coll,
                     ghobject_t &ghobj, bool all, bool force, enum rmtype type)
{
  auto ch = store->open_collection(coll);
  spg_t pg;
  coll.is_pg_prefix(&pg);
  OSDriver driver(
    store,
    coll_t(),
    OSD::make_snapmapper_oid());
  SnapMapper mapper(g_ceph_context, &driver, 0, 0, 0, pg.shard);
  struct stat st;

  int r = store->stat(ch, ghobj, &st);
  if (r < 0) {
    cerr << "remove: " << cpp_strerror(r) << std::endl;
    return r;
  }

  SnapSet ss;
  if (ghobj.hobj.has_snapset()) {
    r = get_snapset(store, coll, ghobj, ss, false);
    if (r < 0) {
      cerr << "Can't get snapset error " << cpp_strerror(r) << std::endl;
      // If --force and bad snapset let them remove the head
      if (!(force && !all))
        return r;
    }
    // cout << "snapset " << ss << std::endl;
    if (!ss.clone_snaps.empty() && !all) {
      if (force) {
        cout << "WARNING: only removing "
             << (ghobj.hobj.is_head() ? "head" : "snapdir")
             << " with clones present" << std::endl;
        // Clearing clone_snaps makes the removal loop below a no-op.
        ss.clone_snaps.clear();
      } else {
        cerr << "Clones are present, use removeall to delete everything"
             << std::endl;
        return -EINVAL;
      }
    }
  }

  ObjectStore::Transaction t;
  OSDriver::OSTransaction _t(driver.get_transaction(&t));

  // Remove each clone named by the SnapSet before the head itself.
  ghobject_t snapobj = ghobj;
  for (auto& p : ss.clone_snaps) {
    snapobj.hobj.snap = p.first;
    cout << "remove clone " << snapobj << std::endl;
    if (!dry_run) {
      r = remove_object(coll, snapobj, mapper, &_t, &t, type);
      if (r < 0)
        return r;
    }
  }

  cout << "remove " << ghobj << std::endl;

  if (!dry_run) {
    r = remove_object(coll, ghobj, mapper, &_t, &t, type);
    if (r < 0)
      return r;
  }

  if (!dry_run) {
    wait_until_done(&t, [&] {
      store->queue_transaction(ch, std::move(t));
      ch->flush();
    });
  }
  return 0;
}
// Print the name of every xattr on ghobj, one per line; binary names
// are sanitized via cleanbin() when stdout is a tty.
int do_list_attrs(ObjectStore *store, coll_t coll, ghobject_t &ghobj)
{
  auto ch = store->open_collection(coll);
  map<string,bufferptr,less<>> aset;
  int r = store->getattrs(ch, ghobj, aset);
  if (r < 0) {
    cerr << "getattrs: " << cpp_strerror(r) << std::endl;
    return r;
  }

  for (const auto& entry : aset) {
    string key = entry.first;
    if (outistty)
      key = cleanbin(key);
    cout << key << std::endl;
  }
  return 0;
}
// Print the name of every omap key on ghobj, one per line, fetching
// keys in batches through the store's omap iterator; binary names are
// sanitized via cleanbin() when stdout is a tty.
int do_list_omap(ObjectStore *store, coll_t coll, ghobject_t &ghobj)
{
  auto ch = store->open_collection(coll);
  ObjectMap::ObjectMapIterator iter = store->get_omap_iterator(ch, ghobj);
  if (!iter) {
    cerr << "omap_get_iterator: " << cpp_strerror(ENOENT) << std::endl;
    return -ENOENT;
  }
  iter->seek_to_first();
  map<string, bufferlist> oset;
  while(iter->valid()) {
    // NOTE(review): assumes get_omap_batch() replaces oset's contents
    // and advances iter past the batch; if it only appended, keys would
    // repeat and the loop would not terminate — confirm against its
    // definition.
    get_omap_batch(iter, oset);

    for (map<string,bufferlist>::iterator i = oset.begin();i != oset.end(); ++i) {
      string key(i->first);
      if (outistty)
        key = cleanbin(key);
      cout << key << std::endl;
    }
  }
  return 0;
}
// Stream an object's data to fd in max_read-sized chunks.
// Returns 0 on success, negative errno on failure; a zero-length read
// before the expected size is reached is reported as -EINVAL.
int do_get_bytes(ObjectStore *store, coll_t coll, ghobject_t &ghobj, int fd)
{
  auto ch = store->open_collection(coll);
  struct stat st;
  mysize_t total;

  int ret = store->stat(ch, ghobj, &st);
  if (ret < 0) {
    cerr << "get-bytes: " << cpp_strerror(ret) << std::endl;
    return ret;
  }

  total = st.st_size;
  if (debug)
    cerr << "size=" << total << std::endl;

  uint64_t offset = 0;
  bufferlist rawdatabl;
  while(total > 0) {
    rawdatabl.clear();
    mysize_t len = max_read;
    if (len > total)
      len = total;

    ret = store->read(ch, ghobj, offset, len, rawdatabl);
    if (ret < 0)
      return ret;
    if (ret == 0)
      return -EINVAL;

    if (debug)
      cerr << "data section offset=" << offset << " len=" << len << std::endl;

    total -= ret;
    offset += ret;

    // write(2) may write fewer bytes than requested (e.g. to a pipe, or
    // when interrupted by a signal).  The original only checked for -1,
    // silently truncating output on a short write; loop until the whole
    // chunk has been flushed, retrying on EINTR.
    const char *buf = rawdatabl.c_str();
    ssize_t remaining = ret;
    while (remaining > 0) {
      ssize_t written = write(fd, buf, remaining);
      if (written == -1) {
        if (errno == EINTR)
          continue;
        perror("write");
        return -errno;
      }
      buf += written;
      remaining -= written;
    }
  }
  return 0;
}
// Replace an object's data with the contents of fd: the object is
// touched and truncated to zero, then written in max_read-sized chunks
// until EOF, all in a single transaction.
int do_set_bytes(ObjectStore *store, coll_t coll,
                 ghobject_t &ghobj, int fd)
{
  ObjectStore::Transaction tran;
  ObjectStore::Transaction *t = &tran;

  if (debug)
    cerr << "Write " << ghobj << std::endl;

  if (!dry_run) {
    t->touch(coll, ghobj);
    t->truncate(coll, ghobj, 0);
  }

  uint64_t offset = 0;
  bufferlist chunk;
  for (;;) {
    chunk.clear();
    ssize_t bytes = chunk.read_fd(fd, max_read);
    if (bytes < 0) {
      cerr << "read_fd error " << cpp_strerror(bytes) << std::endl;
      return bytes;
    }
    if (bytes == 0)
      break;  // EOF

    if (debug)
      cerr << "\tdata: offset " << offset << " bytes " << bytes << std::endl;
    if (!dry_run)
      t->write(coll, ghobj, offset, bytes, chunk);
    offset += bytes;
    // XXX: Should we queue_transaction() every once in a while for very large files
  }

  auto ch = store->open_collection(coll);
  if (!dry_run)
    store->queue_transaction(ch, std::move(*t));
  return 0;
}
// Print the value of one xattr on ghobj to stdout; sanitized (and
// newline-terminated) when stdout is a tty.
int do_get_attr(ObjectStore *store, coll_t coll, ghobject_t &ghobj, string key)
{
  auto ch = store->open_collection(coll);
  bufferptr ptr;
  int r = store->getattr(ch, ghobj, key.c_str(), ptr);
  if (r < 0) {
    cerr << "getattr: " << cpp_strerror(r) << std::endl;
    return r;
  }

  string value(ptr.c_str(), ptr.length());
  if (outistty) {
    value = cleanbin(value);
    value += '\n';
  }
  cout << value;
  return 0;
}
// Set one xattr on ghobj to the full contents of fd.
int do_set_attr(ObjectStore *store, coll_t coll,
                ghobject_t &ghobj, string key, int fd)
{
  ObjectStore::Transaction tran;
  ObjectStore::Transaction *t = &tran;
  bufferlist value;

  if (debug)
    cerr << "Setattr " << ghobj << std::endl;

  int ret = get_fd_data(fd, value);
  if (ret < 0)
    return ret;
  if (dry_run)
    return 0;

  t->touch(coll, ghobj);
  t->setattr(coll, ghobj, key, value);

  auto ch = store->open_collection(coll);
  store->queue_transaction(ch, std::move(*t));
  return 0;
}
// Remove one xattr from ghobj.
int do_rm_attr(ObjectStore *store, coll_t coll,
               ghobject_t &ghobj, string key)
{
  ObjectStore::Transaction tran;
  ObjectStore::Transaction *t = &tran;

  if (debug)
    cerr << "Rmattr " << ghobj << std::endl;
  if (dry_run)
    return 0;

  t->rmattr(coll, ghobj, key);

  auto ch = store->open_collection(coll);
  store->queue_transaction(ch, std::move(*t));
  return 0;
}
// Print the value of one omap key on ghobj to stdout; sanitized (and
// newline-terminated) when stdout is a tty.
int do_get_omap(ObjectStore *store, coll_t coll, ghobject_t &ghobj, string key)
{
  auto ch = store->open_collection(coll);
  set<string> keys{key};
  map<string, bufferlist> out;

  int r = store->omap_get_values(ch, ghobj, keys, &out);
  if (r < 0) {
    cerr << "omap_get_values: " << cpp_strerror(r) << std::endl;
    return r;
  }
  if (out.empty()) {
    cerr << "Key not found" << std::endl;
    return -ENOENT;
  }
  ceph_assert(out.size() == 1);

  bufferlist& bl = out.begin()->second;
  string value(bl.c_str(), bl.length());
  if (outistty) {
    value = cleanbin(value);
    value += '\n';
  }
  cout << value;
  return 0;
}
// Set one omap key on ghobj to the full contents of fd.
int do_set_omap(ObjectStore *store, coll_t coll,
                ghobject_t &ghobj, string key, int fd)
{
  ObjectStore::Transaction tran;
  ObjectStore::Transaction *t = &tran;
  bufferlist valbl;

  if (debug)
    cerr << "Set_omap " << ghobj << std::endl;

  int ret = get_fd_data(fd, valbl);
  if (ret < 0)
    return ret;

  map<string, bufferlist> attrset;
  attrset.emplace(key, valbl);

  if (dry_run)
    return 0;

  t->touch(coll, ghobj);
  t->omap_setkeys(coll, ghobj, attrset);

  auto ch = store->open_collection(coll);
  store->queue_transaction(ch, std::move(*t));
  return 0;
}
// Remove one omap key from ghobj.
int do_rm_omap(ObjectStore *store, coll_t coll,
               ghobject_t &ghobj, string key)
{
  ObjectStore::Transaction tran;
  ObjectStore::Transaction *t = &tran;

  if (debug)
    cerr << "Rm_omap " << ghobj << std::endl;
  if (dry_run)
    return 0;

  t->omap_rmkey(coll, ghobj, key);

  auto ch = store->open_collection(coll);
  store->queue_transaction(ch, std::move(*t));
  return 0;
}
// Print ghobj's omap header to stdout; sanitized (and
// newline-terminated) when stdout is a tty.
int do_get_omaphdr(ObjectStore *store, coll_t coll, ghobject_t &ghobj)
{
  auto ch = store->open_collection(coll);
  bufferlist hdrbl;
  int r = store->omap_get_header(ch, ghobj, &hdrbl, true);
  if (r < 0) {
    cerr << "omap_get_header: " << cpp_strerror(r) << std::endl;
    return r;
  }

  string header(hdrbl.c_str(), hdrbl.length());
  if (outistty) {
    header = cleanbin(header);
    header += '\n';
  }
  cout << header;
  return 0;
}
// Set ghobj's omap header to the full contents of fd.
int do_set_omaphdr(ObjectStore *store, coll_t coll,
                   ghobject_t &ghobj, int fd)
{
  ObjectStore::Transaction tran;
  ObjectStore::Transaction *t = &tran;
  bufferlist hdrbl;

  if (debug)
    cerr << "Omap_setheader " << ghobj << std::endl;

  int ret = get_fd_data(fd, hdrbl);
  if (ret)
    return ret;
  if (dry_run)
    return 0;

  t->touch(coll, ghobj);
  t->omap_setheader(coll, ghobj, hdrbl);

  auto ch = store->open_collection(coll);
  store->queue_transaction(ch, std::move(*t));
  return 0;
}
// Callback for action_on_all_objects*(): clears the FLAG_LOST bit on
// any object whose object_info_t is marked lost, rewriting OI_ATTR in
// place (one transaction per fixed object).  Honors dry_run.
struct do_fix_lost : public action_on_object_t {
  void call(ObjectStore *store, coll_t coll,
                   ghobject_t &ghobj, object_info_t &oi) override {
    if (oi.is_lost()) {
      cout << coll << "/" << ghobj << " is lost";
      if (!dry_run)
        cout << ", fixing";
      cout << std::endl;
      if (dry_run)
        return;
      oi.clear_flag(object_info_t::FLAG_LOST);
      bufferlist bl;
      encode(oi, bl, -1); /* fixme: using full features */
      ObjectStore::Transaction t;
      t.setattr(coll, ghobj, OI_ATTR, bl);
      auto ch = store->open_collection(coll);
      store->queue_transaction(ch, std::move(t));
    }
    return;
  }
};
// Fetch and decode ghobj's SnapSet from its SS_ATTR xattr into ss.
// With silent=true, a missing attribute is not reported on stderr
// (decode failures are always reported).  Returns 0 or negative errno.
int get_snapset(ObjectStore *store, coll_t coll, ghobject_t &ghobj, SnapSet &ss, bool silent = false)
{
  auto ch = store->open_collection(coll);
  bufferlist ssbl;
  int r = store->getattr(ch, ghobj, SS_ATTR, ssbl);
  if (r < 0) {
    if (!silent)
      cerr << "Error getting snapset on : " << make_pair(coll, ghobj) << ", "
           << cpp_strerror(r) << std::endl;
    return r;
  }
  try {
    auto it = ssbl.cbegin();
    decode(ss, it);
  } catch (...) {
    r = -EINVAL;
    cerr << "Error decoding snapset on : " << make_pair(coll, ghobj) << ", "
         << cpp_strerror(r) << std::endl;
    return r;
  }
  return 0;
}
// Dump everything known about one object as JSON: its id, decoded
// object_info_t, stat() results, SnapSet (head/snapdir only), EC hash
// info if present, and the store's onode dump.  Best-effort: each probe
// that fails is reported on stderr and the remaining sections are still
// emitted; the return value is the error from the last failing probe
// (0 if all succeeded).
int print_obj_info(ObjectStore *store, coll_t coll, ghobject_t &ghobj, Formatter* formatter)
{
  auto ch = store->open_collection(coll);
  int r = 0;
  formatter->open_object_section("obj");
  formatter->open_object_section("id");
  ghobj.dump(formatter);
  formatter->close_section();

  bufferlist attr;
  int gr = store->getattr(ch, ghobj, OI_ATTR, attr);
  if (gr < 0) {
    r = gr;
    cerr << "Error getting attr on : " << make_pair(coll, ghobj) << ", "
         << cpp_strerror(r) << std::endl;
  } else {
    object_info_t oi;
    auto bp = attr.cbegin();
    try {
      decode(oi, bp);
      formatter->open_object_section("info");
      oi.dump(formatter);
      formatter->close_section();
    } catch (...) {
      r = -EINVAL;
      cerr << "Error decoding attr on : " << make_pair(coll, ghobj) << ", "
           << cpp_strerror(r) << std::endl;
    }
  }
  struct stat st;
  int sr =  store->stat(ch, ghobj, &st, true);
  if (sr < 0) {
    r = sr;
    cerr << "Error stat on : " << make_pair(coll, ghobj) << ", "
         << cpp_strerror(r) << std::endl;
  } else {
    formatter->open_object_section("stat");
    formatter->dump_int("size", st.st_size);
    formatter->dump_int("blksize", st.st_blksize);
    formatter->dump_int("blocks", st.st_blocks);
    formatter->dump_int("nlink", st.st_nlink);
    formatter->close_section();
  }

  if (ghobj.hobj.has_snapset()) {
    SnapSet ss;
    int snr = get_snapset(store, coll, ghobj, ss);
    if (snr < 0) {
      r = snr;
    } else {
      formatter->open_object_section("SnapSet");
      ss.dump(formatter);
      formatter->close_section();
    }
  }
  // Erasure-coded objects carry a hash-info xattr; absence is normal
  // for replicated pools, so a getattr failure here is not an error.
  bufferlist hattr;
  gr = store->getattr(ch, ghobj, ECUtil::get_hinfo_key(), hattr);
  if (gr == 0) {
    ECUtil::HashInfo hinfo;
    auto hp = hattr.cbegin();
    try {
      decode(hinfo, hp);
      formatter->open_object_section("hinfo");
      hinfo.dump(formatter);
      formatter->close_section();
    } catch (...) {
      r = -EINVAL;
      cerr << "Error decoding hinfo on : " << make_pair(coll, ghobj) << ", "
           << cpp_strerror(r) << std::endl;
    }
  }
  gr = store->dump_onode(ch, ghobj, "onode", formatter);

  formatter->close_section();
  formatter->flush(cout);
  cout << std::endl;
  return r;
}
// Deliberately corrupt an object's object_info_t (testing aid): bumps
// alloc_hint_flags by 0xff and rewrites the OI_ATTR xattr.
// Returns 0 on success (or in dry_run mode) or a negative errno.
int corrupt_info(ObjectStore *store, coll_t coll, ghobject_t &ghobj, Formatter* formatter)
{
  auto ch = store->open_collection(coll);
  bufferlist attr;
  int r = store->getattr(ch, ghobj, OI_ATTR, attr);
  if (r < 0) {
    cerr << "Error getting attr on : " << make_pair(coll, ghobj) << ", "
	 << cpp_strerror(r) << std::endl;
    return r;
  }
  object_info_t oi;
  auto bp = attr.cbegin();
  try {
    decode(oi, bp);
  } catch (...) {
    r = -EINVAL;
    // Was "Error getting attr" — this path is a decode failure, not a read failure.
    cerr << "Error decoding attr on : " << make_pair(coll, ghobj) << ", "
	 << cpp_strerror(r) << std::endl;
    return r;
  }
  if (!dry_run) {
    attr.clear();
    oi.alloc_hint_flags += 0xff;  // arbitrary perturbation of the hint flags
    ObjectStore::Transaction t;
    encode(oi, attr, -1); /* fixme: using full features */
    t.setattr(coll, ghobj, OI_ATTR, attr);
    // Reuse the handle opened above instead of re-opening the collection.
    r = store->queue_transaction(ch, std::move(t));
    if (r < 0) {
      cerr << "Error writing object info: " << make_pair(coll, ghobj) << ", "
	   << cpp_strerror(r) << std::endl;
      return r;
    }
  }
  return 0;
}
// Set the logical size of an object: truncates the on-disk data (unless
// 'corrupt' is set, in which case only metadata is changed), updates the
// size in the object_info_t and, for a clone, the clone_size entry in
// the head/snapdir SnapSet. Returns 0 on success or a negative errno.
int set_size(
  ObjectStore *store, coll_t coll, ghobject_t &ghobj, uint64_t setsize, Formatter* formatter,
  bool corrupt)
{
  auto ch = store->open_collection(coll);
  if (ghobj.hobj.is_snapdir()) {
    cerr << "Can't set the size of a snapdir" << std::endl;
    return -EINVAL;
  }
  bufferlist attr;
  int r = store->getattr(ch, ghobj, OI_ATTR, attr);
  if (r < 0) {
    cerr << "Error getting attr on : " << make_pair(coll, ghobj) << ", "
	 << cpp_strerror(r) << std::endl;
    return r;
  }
  object_info_t oi;
  auto bp = attr.cbegin();
  try {
    decode(oi, bp);
  } catch (...) {
    r = -EINVAL;
    // Was "Error getting attr" — this path is a decode failure.
    cerr << "Error decoding attr on : " << make_pair(coll, ghobj) << ", "
	 << cpp_strerror(r) << std::endl;
    return r;
  }
  // Zero-initialize: on stat() failure we print a diagnostic but keep
  // going, and st.st_size is read below — it must not be uninitialized.
  struct stat st = {};
  r = store->stat(ch, ghobj, &st, true);
  if (r < 0) {
    cerr << "Error stat on : " << make_pair(coll, ghobj) << ", "
	 << cpp_strerror(r) << std::endl;
  }
  ghobject_t head(ghobj);
  SnapSet ss;
  bool found_head = true;
  map<snapid_t, uint64_t>::iterator csi;
  bool is_snap = ghobj.hobj.is_snap();
  if (is_snap) {
    // A clone's size also lives in the SnapSet of its head object (or
    // legacy snapdir); locate that entry so it can be updated too.
    head.hobj = head.hobj.get_head();
    r = get_snapset(store, coll, head, ss, true);
    if (r < 0 && r != -ENOENT) {
      // Requested get_snapset() silent, so if not -ENOENT show error
      cerr << "Error getting snapset on : " << make_pair(coll, head) << ", "
	   << cpp_strerror(r) << std::endl;
      return r;
    }
    if (r == -ENOENT) {
      head.hobj = head.hobj.get_snapdir();
      r = get_snapset(store, coll, head, ss);
      if (r < 0)
	return r;
      found_head = false;
    } else {
      found_head = true;
    }
    csi = ss.clone_size.find(ghobj.hobj.snap);
    if (csi == ss.clone_size.end()) {
      cerr << "SnapSet is missing clone_size for snap " << ghobj.hobj.snap << std::endl;
      return -EINVAL;
    }
  }
  if ((uint64_t)st.st_size == setsize && oi.size == setsize
       && (!is_snap || csi->second == setsize)) {
    cout << "Size of object is already " << setsize << std::endl;
    return 0;
  }
  cout << "Setting size to " << setsize << ", stat size " << st.st_size
       << ", obj info size " << oi.size;
  if (is_snap) {
    cout << ", " << (found_head ? "head" : "snapdir")
	 << " clone_size " << csi->second;
    csi->second = setsize;
  }
  cout << std::endl;
  if (!dry_run) {
    attr.clear();
    oi.size = setsize;
    ObjectStore::Transaction t;
    // Only truncate the data when not corrupting metadata on purpose.
    if (!corrupt && (uint64_t)st.st_size != setsize) {
      t.truncate(coll, ghobj, setsize);
      // Changing objectstore size will invalidate data_digest, so clear it.
      oi.clear_data_digest();
    }
    encode(oi, attr, -1); /* fixme: using full features */
    t.setattr(coll, ghobj, OI_ATTR, attr);
    if (is_snap) {
      bufferlist snapattr;
      encode(ss, snapattr);
      t.setattr(coll, head, SS_ATTR, snapattr);
    }
    // Reuse the collection handle opened at the top of the function.
    r = store->queue_transaction(ch, std::move(t));
    if (r < 0) {
      cerr << "Error writing object info: " << make_pair(coll, ghobj) << ", "
	   << cpp_strerror(r) << std::endl;
      return r;
    }
  }
  return 0;
}
// Clear the data digest recorded in an object's object_info_t so a
// later deep scrub recomputes it. Returns 0 on success (or in dry_run
// mode) or a negative errno.
int clear_data_digest(ObjectStore *store, coll_t coll, ghobject_t &ghobj) {
  auto ch = store->open_collection(coll);
  bufferlist attr;
  int r = store->getattr(ch, ghobj, OI_ATTR, attr);
  if (r < 0) {
    cerr << "Error getting attr on : " << make_pair(coll, ghobj) << ", "
	 << cpp_strerror(r) << std::endl;
    return r;
  }
  object_info_t oi;
  auto bp = attr.cbegin();
  try {
    decode(oi, bp);
  } catch (...) {
    r = -EINVAL;
    // Was "Error getting attr" — this path is a decode failure.
    cerr << "Error decoding attr on : " << make_pair(coll, ghobj) << ", "
	 << cpp_strerror(r) << std::endl;
    return r;
  }
  if (!dry_run) {
    attr.clear();
    oi.clear_data_digest();
    encode(oi, attr, -1); /* fixme: using full features */
    ObjectStore::Transaction t;
    t.setattr(coll, ghobj, OI_ATTR, attr);
    // Reuse the handle opened above instead of re-opening the collection.
    r = store->queue_transaction(ch, std::move(t));
    if (r < 0) {
      cerr << "Error writing object info: " << make_pair(coll, ghobj) << ", "
	   << cpp_strerror(r) << std::endl;
      return r;
    }
  }
  return 0;
}
// Clear or corrupt selected parts of an object's SnapSet (testing aid).
// arg selects what to damage: "corrupt" (everything), "seq", "snaps",
// "clones", "clone_overlap", "clone_size", "size" (skew sizes by one),
// or "" for the default set (clones, clone_overlap, clone_size).
int clear_snapset(ObjectStore *store, coll_t coll, ghobject_t &ghobj,
		  string arg)
{
  SnapSet ss;
  int ret = get_snapset(store, coll, ghobj, ss);
  if (ret < 0)
    return ret;
  const bool corrupt_all = (arg == "corrupt");
  // "corrupt"/"seq": zero the snap sequence number.
  if (corrupt_all || arg == "seq")
    ss.seq = 0;
  // "corrupt"/"snaps": drop the clone -> snaps mapping.
  if (corrupt_all || arg == "snaps")
    ss.clone_snaps.clear();
  // "corrupt" additionally implies the default clearing below.
  if (corrupt_all)
    arg = "";
  if (arg == "" || arg == "clones")
    ss.clones.clear();
  if (arg == "" || arg == "clone_overlap")
    ss.clone_overlap.clear();
  if (arg == "" || arg == "clone_size")
    ss.clone_size.clear();
  // "size": make every clone size wrong by adding one.
  if (arg == "size") {
    for (auto& [snap, sz] : ss.clone_size)
      ++sz;
  }
  if (dry_run)
    return 0;
  bufferlist bl;
  encode(ss, bl);
  ObjectStore::Transaction t;
  t.setattr(coll, ghobj, SS_ATTR, bl);
  auto ch = store->open_collection(coll);
  int r = store->queue_transaction(ch, std::move(t));
  if (r < 0) {
    cerr << "Error setting snapset on : " << make_pair(coll, ghobj) << ", "
	 << cpp_strerror(r) << std::endl;
    return r;
  }
  return 0;
}
// Lookup helper so remove_from() can treat a vector like the maps:
// linear scan for the clone id.
vector<snapid_t>::iterator find(vector<snapid_t> &v, snapid_t clid)
{
  return std::find(std::begin(v), std::end(v), clid);
}
// Lookup helper for the clone_overlap map: use the map's own
// logarithmic find.
map<snapid_t, interval_set<uint64_t> >::iterator
find(map<snapid_t, interval_set<uint64_t> > &m, snapid_t clid)
{
  auto it = m.find(clid);
  return it;
}
// Lookup helper for the clone_size map: use the map's own
// logarithmic find.
map<snapid_t, uint64_t>::iterator find(map<snapid_t, uint64_t> &m,
				       snapid_t clid)
{
  auto it = m.find(clid);
  return it;
}
// Erase clone 'cloneid' from container 'mv' (located via the find()
// overloads above). 'name' is used only in the diagnostic. A missing
// entry is an error (-EINVAL) unless 'force' is set, in which case it
// is reported and ignored.
template<class T>
int remove_from(T &mv, string name, snapid_t cloneid, bool force)
{
  auto pos = find(mv, cloneid);
  if (pos == mv.end()) {
    cerr << "Clone " << cloneid << " doesn't exist in " << name;
    if (!force) {
      cerr << std::endl;
      return -EINVAL;
    }
    cerr << " (ignored)" << std::endl;
    return 0;
  }
  mv.erase(pos);
  return 0;
}
// Remove the metadata for clone 'cloneid' from the object's SnapSet:
// its entry in clones, clone_overlap and clone_size. The clone's data
// is not touched, and stats are left stale (hence the final hint to run
// pg repair). With 'force', entries already missing are ignored rather
// than treated as an error. Returns 0 or a negative errno.
int remove_clone(
  ObjectStore *store, coll_t coll, ghobject_t &ghobj, snapid_t cloneid, bool force)
{
  // XXX: Don't allow this if in a cache tier or former cache tier
  // bool allow_incomplete_clones() const {
  //   return cache_mode != CACHEMODE_NONE || has_flag(FLAG_INCOMPLETE_CLONES);
  SnapSet snapset;
  int ret = get_snapset(store, coll, ghobj, snapset);
  if (ret < 0)
    return ret;

  // Derived from trim_object()
  // ...from snapset
  vector<snapid_t>::iterator p;
  for (p = snapset.clones.begin(); p != snapset.clones.end(); ++p)
    if (*p == cloneid)
      break;
  if (p == snapset.clones.end()) {
    cerr << "Clone " << cloneid << " not present";
    return -ENOENT;
  }
  if (p != snapset.clones.begin()) {
    // not the oldest... merge overlap into next older clone
    vector<snapid_t>::iterator n = p - 1;
    hobject_t prev_coid = ghobj.hobj;
    prev_coid.snap = *n;
    //bool adjust_prev_bytes = is_present_clone(prev_coid);

    //if (adjust_prev_bytes)
    //  ctx->delta_stats.num_bytes -= snapset.get_clone_bytes(*n);

    snapset.clone_overlap[*n].intersection_of(
      snapset.clone_overlap[*p]);

    //if (adjust_prev_bytes)
    //  ctx->delta_stats.num_bytes += snapset.get_clone_bytes(*n);
  }

  // Drop the clone from all three SnapSet structures.
  ret = remove_from(snapset.clones, "clones", cloneid, force);
  if (ret) return ret;

  ret = remove_from(snapset.clone_overlap, "clone_overlap", cloneid, force);
  if (ret) return ret;

  ret = remove_from(snapset.clone_size, "clone_size", cloneid, force);
  if (ret) return ret;

  if (dry_run)
    return 0;

  // Persist the updated SnapSet.
  bufferlist bl;
  encode(snapset, bl);
  ObjectStore::Transaction t;
  t.setattr(coll, ghobj, SS_ATTR, bl);
  auto ch = store->open_collection(coll);
  int r = store->queue_transaction(ch, std::move(t));
  if (r < 0) {
    cerr << "Error setting snapset on : " << make_pair(coll, ghobj) << ", "
	 << cpp_strerror(r) << std::endl;
    return r;
  }
  cout << "Removal of clone " << cloneid << " complete" << std::endl;
  cout << "Use pg repair after OSD restarted to correct stat information" << std::endl;
  return 0;
}
// Copy the entire contents of one objectstore into another (empty) one:
// every collection with its objects (data, xattrs, omap header/keys),
// the keyring file, and the basic OSD metadata files. Both stores must
// share the same fsid. Returns 0 on success or a negative errno; uses
// gotos so both stores are unmounted on every exit path.
int dup(string srcpath, ObjectStore *src, string dstpath, ObjectStore *dst)
{
  cout << "dup from " << src->get_type() << ": " << srcpath << "\n"
       << "      to " << dst->get_type() << ": " << dstpath
       << std::endl;
  int num, i;
  vector<coll_t> collections;
  int r;

  r = src->mount();
  if (r < 0) {
    cerr << "failed to mount src: " << cpp_strerror(r) << std::endl;
    return r;
  }
  r = dst->mount();
  if (r < 0) {
    cerr << "failed to mount dst: " << cpp_strerror(r) << std::endl;
    goto out_src;
  }

  // Refuse to mix stores from different OSDs.
  if (src->get_fsid() != dst->get_fsid()) {
    cerr << "src fsid " << src->get_fsid() << " != dest " << dst->get_fsid()
	 << std::endl;
    goto out;
  }
  cout << "fsid " << src->get_fsid() << std::endl;

  // make sure dst is empty
  r = dst->list_collections(collections);
  if (r < 0) {
    cerr << "error listing collections on dst: " << cpp_strerror(r) << std::endl;
    goto out;
  }
  if (!collections.empty()) {
    cerr << "destination store is not empty" << std::endl;
    goto out;
  }

  r = src->list_collections(collections);
  if (r < 0) {
    cerr << "error listing collections on src: " << cpp_strerror(r) << std::endl;
    goto out;
  }

  num = collections.size();
  cout << num << " collections" << std::endl;
  i = 1;
  for (auto cid : collections) {
    cout << i++ << "/" << num << " " << cid << std::endl;
    auto ch = src->open_collection(cid);
    auto dch = dst->create_new_collection(cid);
    {
      // Recreate the collection with the same split bit count.
      ObjectStore::Transaction t;
      int bits = src->collection_bits(ch);
      if (bits < 0) {
	// filestore meta collection predates collection_bits; treat as 0.
        if (src->get_type() == "filestore" && cid.is_meta()) {
          bits = 0;
        } else {
          cerr << "cannot get bit count for collection " << cid << ": "
               << cpp_strerror(bits) << std::endl;
          goto out;
        }
      }
      t.create_collection(cid, bits);
      dst->queue_transaction(dch, std::move(t));
    }

    // Page through the collection 1000 objects at a time, copying data,
    // xattrs, omap header and omap keys for each object.
    ghobject_t pos;
    uint64_t n = 0;
    uint64_t bytes = 0, keys = 0;
    while (true) {
      vector<ghobject_t> ls;
      r = src->collection_list(ch, pos, ghobject_t::get_max(), 1000, &ls, &pos);
      if (r < 0) {
	cerr << "collection_list on " << cid << " from " << pos << " got: "
	     << cpp_strerror(r) << std::endl;
	goto out;
      }
      if (ls.empty()) {
	break;
      }
      
      for (auto& oid : ls) {
	//cout << "  " << cid << " " << oid << std::endl;
	if (n % 100 == 0) {
	  // Progress line, rewritten in place via '\r'.
	  cout << "  " << std::setw(16) << n << " objects, "
	       << std::setw(16) << bytes << " bytes, "
	       << std::setw(16) << keys << " keys"
	       << std::setw(1) << "\r" << std::flush;
	}
	n++;

	ObjectStore::Transaction t;
	t.touch(cid, oid);

	map<string,bufferptr,less<>> attrs;
	src->getattrs(ch, oid, attrs);
	if (!attrs.empty()) {
	  t.setattrs(cid, oid, attrs);
	}

	bufferlist bl;
	src->read(ch, oid, 0, 0, bl);  // length 0 == read the whole object
	if (bl.length()) {
	  t.write(cid, oid, 0, bl.length(), bl);
	  bytes += bl.length();
	}

	bufferlist header;
	map<string,bufferlist> omap;
	src->omap_get(ch, oid, &header, &omap);
	if (header.length()) {
	  t.omap_setheader(cid, oid, header);
	  ++keys;
	}
	if (!omap.empty()) {
	  keys += omap.size();
	  t.omap_setkeys(cid, oid, omap);
	}

	dst->queue_transaction(dch, std::move(t));
      }
    }
    cout << "  " << std::setw(16) << n << " objects, "
	 << std::setw(16) << bytes << " bytes, "
	 << std::setw(16) << keys << " keys"
	 << std::setw(1) << std::endl;
  }

  // keyring
  cout << "keyring" << std::endl;
  {
    bufferlist bl;
    string s = srcpath + "/keyring";
    string err;
    r = bl.read_file(s.c_str(), &err);
    if (r < 0) {
      cerr << "failed to copy " << s << ": " << err << std::endl;
    } else {
      string d = dstpath + "/keyring";
      bl.write_file(d.c_str(), 0600);
    }
  }

  // osd metadata
  cout << "duping osd metadata" << std::endl;
  {
    for (auto k : {"magic", "whoami", "ceph_fsid", "fsid"}) {
      string val;
      src->read_meta(k, &val);
      dst->write_meta(k, val);
    }
  }

  dst->write_meta("ready", "ready");

  cout << "done." << std::endl;
  r = 0;
 out:
  dst->umount();
 out_src:
  src->umount();
  return r;
}
const int ceph_entity_name_type(const string name)
{
if (name == "mds") return CEPH_ENTITY_TYPE_MDS;
if (name == "osd") return CEPH_ENTITY_TYPE_OSD;
if (name == "mon") return CEPH_ENTITY_TYPE_MON;
if (name == "client") return CEPH_ENTITY_TYPE_CLIENT;
if (name == "mgr") return CEPH_ENTITY_TYPE_MGR;
if (name == "auth") return CEPH_ENTITY_TYPE_AUTH;
return -1;
}
// Parse an eversion string of the form "<epoch>'<version>" (the format
// produced by eversion_t's stream operator). Returns a default (zero)
// eversion_t on malformed input, after printing a diagnostic.
eversion_t get_eversion_from_str(const string& s) {
  eversion_t e;
  vector<string> result;
  boost::split(result, s, boost::is_any_of("'"));
  if (result.size() != 2) {
    cerr << "eversion_t: invalid format: '" << s << "'" << std::endl;
    return e;
  }
  // Parse with 64-bit range: atoi would silently truncate versions
  // above INT_MAX.
  e.epoch = strtoul(result[0].c_str(), nullptr, 10);
  e.version = strtoull(result[1].c_str(), nullptr, 10);
  return e;
}
// Parse an osd_reqid string of the form "<entity>.<num>.<inc>:<tid>"
// (e.g. "client.4242.0:12345"). Returns a default osd_reqid_t on
// malformed input, after printing a diagnostic.
osd_reqid_t get_reqid_from_str(const string& s) {
  osd_reqid_t reqid;

  vector<string> result;
  boost::split(result, s, boost::is_any_of(".:"));
  if (result.size() != 4) {
    cerr << "reqid: invalid format " << s << std::endl;
    return osd_reqid_t();
  }
  reqid.name._type = ceph_entity_name_type(result[0]);
  // Entity number and tid are 64-bit; atoi would truncate large values.
  reqid.name._num = strtoll(result[1].c_str(), nullptr, 10);

  reqid.inc = atoi(result[2].c_str());
  reqid.tid = strtoull(result[3].c_str(), nullptr, 10);
  return reqid;
}
// Flush the staged dup entries into the PG's pgmeta object via a single
// omap transaction, then empty the staging map for reuse.
// (Name keeps the historical "transction" spelling for its callers.)
void do_dups_inject_transction(ObjectStore *store, spg_t r_pgid, map<string,bufferlist> *new_dups)
{
  coll_t coll(r_pgid);
  cerr << "injecting dups into pgid:" << r_pgid << " num of dups:" << new_dups->size() << std::endl;
  ObjectStore::Transaction txn;
  txn.omap_setkeys(coll, r_pgid.make_pgmeta_oid(), (*new_dups));
  store->queue_transaction(store->open_collection(coll), std::move(txn));
  new_dups->clear();
}
// Build pg_log_dup_t entries from one JSON object and stage them in
// *new_dups, flushing to the store whenever the staging map exceeds
// 50000 entries. The JSON must supply "reqid", "version",
// "user_version" and "return_code"; an optional "generate" count
// synthesizes that many sequential versions instead of a single entry.
// Returns 1 when a required field is missing, 0 otherwise.
int do_dups_inject_object(ObjectStore *store, spg_t r_pgid, json_spirit::mObject &in_json_obj,
                          map<string,bufferlist> *new_dups, bool debug) {
  // Fetch a string field; false when absent.
  auto lookup = [&in_json_obj](const char *key, std::string *out) -> bool {
    auto it = in_json_obj.find(key);
    if (it == in_json_obj.end())
      return false;
    *out = it->second.get_str();
    return true;
  };

  std::string field;
  int32_t generate = 0;
  if (lookup("generate", &field)) {
    generate = atoi(field.c_str());
  }
  if (!lookup("reqid", &field)) {
    return 1;
  }
  osd_reqid_t reqid(get_reqid_from_str(field));
  if (!lookup("version", &field)) {
    return 1;
  }
  eversion_t version(get_eversion_from_str(field));
  if (!lookup("user_version", &field)) {
    return 1;
  }
  version_t user_version = atoi(field.c_str());
  if (!lookup("return_code", &field)) {
    return 1;
  }
  int32_t return_code = atoi(field.c_str());

  if (!generate) {
    // Single entry, exactly as specified.
    pg_log_dup_t dup(version, user_version, reqid, return_code);
    if (debug) {
      cout << "adding dup: " << dup << "into key:" << dup.get_key_name() << std::endl;
    }
    bufferlist bl;
    encode(dup, bl);
    (*new_dups)[dup.get_key_name()] = std::move(bl);
    return 0;
  }

  // Synthesize 'generate' entries with consecutive versions.
  for (auto i = 0; i < generate; ++i) {
    version.version++;
    if (debug) {
      cout << "generate dups reqid " << reqid << " v=" << version << std::endl;
    }
    pg_log_dup_t dup(version, user_version, reqid, return_code);
    bufferlist bl;
    encode(dup, bl);
    (*new_dups)[dup.get_key_name()] = std::move(bl);
    if ( new_dups->size() > 50000 ) {
      do_dups_inject_transction(store, r_pgid, new_dups);
      cout << "inject of " << i << " dups into pgid:" << r_pgid << " done..." << std::endl;
    }
  }
  return 0;
}
// Inject pg_log dup entries described by a JSON array of objects (see
// do_dups_inject_object for the per-object schema). Throws
// std::runtime_error when an array element is not a JSON object.
void do_dups_inject_from_json(ObjectStore *store, spg_t r_pgid, json_spirit::mValue &inJson, bool debug)
{
  map<string,bufferlist> new_dups;
  const vector<json_spirit::mValue>& o = inJson.get_array();
  for (const auto& obj : o) {
    if (obj.type() != json_spirit::obj_type) {
      // (removed an unreachable 'return' that followed this throw)
      throw std::runtime_error("JSON array/object not allowed type:" + std::to_string(obj.type()));
    }
    json_spirit::mObject Mobj = obj.get_obj();
    do_dups_inject_object(store, r_pgid, Mobj, &new_dups, debug);
  }
  // Flush whatever the per-object pass left staged.
  if (!new_dups.empty()) {
    do_dups_inject_transction(store, r_pgid, &new_dups);
  }
}
// Print the boost::program_options summary followed by the help text
// for the positional object commands, all to stderr.
void usage(po::options_description &desc)
{
  cerr << std::endl;
  cerr << desc << std::endl;
  cerr << std::endl;
  static const char *const positional_help[] = {
    "Positional syntax:",
    "",
    "ceph-objectstore-tool ... <object> (get|set)-bytes [file]",
    "ceph-objectstore-tool ... <object> set-(attr|omap) <key> [file]",
    "ceph-objectstore-tool ... <object> (get|rm)-(attr|omap) <key>",
    "ceph-objectstore-tool ... <object> get-omaphdr",
    "ceph-objectstore-tool ... <object> set-omaphdr [file]",
    "ceph-objectstore-tool ... <object> list-attrs",
    "ceph-objectstore-tool ... <object> list-omap",
    "ceph-objectstore-tool ... <object> remove|removeall",
    "ceph-objectstore-tool ... <object> dump",
    "ceph-objectstore-tool ... <object> set-size",
    "ceph-objectstore-tool ... <object> clear-data-digest",
    "ceph-objectstore-tool ... <object> remove-clone-metadata <cloneid>",
    "",
    "<object> can be a JSON object description as displayed",
    "by --op list.",
    "<object> can be an object name which will be looked up in all",
    "the OSD's PGs.",
    "<object> can be the empty string ('') which with a provided pgid ",
    "specifies the pgmeta object",
    "",
    "The optional [file] argument will read stdin or write stdout",
    "if not specified or if '-' specified.",
  };
  for (const char *line : positional_help)
    cerr << line << std::endl;
}
// True when 'check' ends with the suffix 'ending'; an empty suffix
// always matches.
bool ends_with(const string& check, const string& ending)
{
  if (check.size() < ending.size())
    return false;
  return check.compare(check.size() - ending.size(), ending.size(), ending) == 0;
}
int main(int argc, char **argv)
{
string dpath, jpath, pgidstr, op, file, mountpoint, mon_store_path, object;
string target_data_path, fsid;
string objcmd, arg1, arg2, type, format, argnspace, pool, rmtypestr, dump_data_dir;
boost::optional<std::string> nspace;
spg_t pgid;
unsigned epoch = 0;
unsigned slow_threshold = 16;
ghobject_t ghobj;
bool human_readable;
Formatter *formatter;
bool head, tty;
po::options_description desc("Allowed options");
desc.add_options()
("help", "produce help message")
("type", po::value<string>(&type),
"Arg is one of [bluestore (default), memstore]")
("data-path", po::value<string>(&dpath),
"path to object store, mandatory")
("journal-path", po::value<string>(&jpath),
"path to journal, use if tool can't find it")
("pgid", po::value<string>(&pgidstr),
"PG id, mandatory for info, log, remove, export, export-remove, mark-complete, trim-pg-log, trim-pg-log-dups")
("pool", po::value<string>(&pool),
"Pool name")
("op", po::value<string>(&op),
"Arg is one of [info, log, remove, mkfs, fsck, repair, fuse, dup, export, export-remove, import, list, list-slow-omap, fix-lost, list-pgs, dump-super, meta-list, "
"get-osdmap, set-osdmap, get-inc-osdmap, set-inc-osdmap, mark-complete, reset-last-complete, update-mon-db, dump-export, trim-pg-log, trim-pg-log-dups statfs]")
("epoch", po::value<unsigned>(&epoch),
"epoch# for get-osdmap and get-inc-osdmap, the current epoch in use if not specified")
("file", po::value<string>(&file),
"path of file to export, export-remove, import, get-osdmap, set-osdmap, get-inc-osdmap or set-inc-osdmap")
("mon-store-path", po::value<string>(&mon_store_path),
"path of monstore to update-mon-db")
("fsid", po::value<string>(&fsid),
"fsid for new store created by mkfs")
("target-data-path", po::value<string>(&target_data_path),
"path of target object store (for --op dup)")
("mountpoint", po::value<string>(&mountpoint),
"fuse mountpoint")
("format", po::value<string>(&format)->default_value("json-pretty"),
"Output format which may be json, json-pretty, xml, xml-pretty")
("debug", "Enable diagnostic output to stderr")
("no-mon-config", "Do not contact mons for config")
("no-superblock", "Do not read superblock")
("force", "Ignore some types of errors and proceed with operation - USE WITH CAUTION: CORRUPTION POSSIBLE NOW OR IN THE FUTURE")
("skip-journal-replay", "Disable journal replay")
("skip-mount-omap", "Disable mounting of omap")
("head", "Find head/snapdir when searching for objects by name")
("dry-run", "Don't modify the objectstore")
("tty", "Treat stdout as a tty (no binary data)")
("namespace", po::value<string>(&argnspace), "Specify namespace when searching for objects")
("rmtype", po::value<string>(&rmtypestr), "Specify corrupting object removal 'snapmap' or 'nosnapmap' - TESTING USE ONLY")
("slow-omap-threshold", po::value<unsigned>(&slow_threshold),
"Threshold (in seconds) to consider omap listing slow (for op=list-slow-omap)")
("dump-data-dir", po::value<string>(&dump_data_dir),
"Directory to dump object data (for op=dump-export)")
;
po::options_description positional("Positional options");
positional.add_options()
("object", po::value<string>(&object), "'' for pgmeta_oid, object name or ghobject in json")
("objcmd", po::value<string>(&objcmd), "command [(get|set)-bytes, (get|set|rm)-(attr|omap), (get|set)-omaphdr, list-attrs, list-omap, remove]")
("arg1", po::value<string>(&arg1), "arg1 based on cmd")
("arg2", po::value<string>(&arg2), "arg2 based on cmd")
;
po::options_description all;
all.add(desc).add(positional);
po::positional_options_description pd;
pd.add("object", 1).add("objcmd", 1).add("arg1", 1).add("arg2", 1);
vector<string> ceph_option_strings;
po::variables_map vm;
try {
po::parsed_options parsed =
po::command_line_parser(argc, argv).options(all).allow_unregistered().positional(pd).run();
po::store( parsed, vm);
po::notify(vm);
ceph_option_strings = po::collect_unrecognized(parsed.options,
po::include_positional);
} catch(po::error &e) {
std::cerr << e.what() << std::endl;
return 1;
}
if (vm.count("help")) {
usage(desc);
return 1;
}
// Compatibility with previous option name
if (op == "dump-import")
op = "dump-export";
debug = (vm.count("debug") > 0);
force = (vm.count("force") > 0);
no_superblock = (vm.count("no-superblock") > 0);
if (vm.count("namespace"))
nspace = argnspace;
dry_run = (vm.count("dry-run") > 0);
tty = (vm.count("tty") > 0);
osflagbits_t flags = 0;
if (dry_run || vm.count("skip-journal-replay"))
flags |= SKIP_JOURNAL_REPLAY;
if (vm.count("skip-mount-omap"))
flags |= SKIP_MOUNT_OMAP;
if (op == "update-mon-db")
flags |= SKIP_JOURNAL_REPLAY;
head = (vm.count("head") > 0);
// infer osd id so we can authenticate
char fn[PATH_MAX];
snprintf(fn, sizeof(fn), "%s/whoami", dpath.c_str());
int fd = ::open(fn, O_RDONLY);
if (fd >= 0) {
bufferlist bl;
bl.read_fd(fd, 64);
string s(bl.c_str(), bl.length());
int whoami = atoi(s.c_str());
vector<string> tmp;
// identify ourselves as this osd so we can auth and fetch our configs
tmp.push_back("-n");
tmp.push_back(string("osd.") + stringify(whoami));
// populate osd_data so that the default keyring location works
tmp.push_back("--osd-data");
tmp.push_back(dpath);
tmp.insert(tmp.end(), ceph_option_strings.begin(),
ceph_option_strings.end());
tmp.swap(ceph_option_strings);
}
vector<const char *> ceph_options;
ceph_options.reserve(ceph_options.size() + ceph_option_strings.size());
for (vector<string>::iterator i = ceph_option_strings.begin();
i != ceph_option_strings.end();
++i) {
ceph_options.push_back(i->c_str());
}
snprintf(fn, sizeof(fn), "%s/type", dpath.c_str());
fd = ::open(fn, O_RDONLY);
if (fd >= 0) {
bufferlist bl;
bl.read_fd(fd, 64);
if (bl.length()) {
string dp_type = string(bl.c_str(), bl.length() - 1); // drop \n
if (vm.count("type") && dp_type != "" && type != dp_type)
cerr << "WARNING: Ignoring type \"" << type << "\" - found data-path type \""
<< dp_type << "\"" << std::endl;
type = dp_type;
//cout << "object store type is " << type << std::endl;
}
::close(fd);
}
if (!vm.count("type") && type == "") {
type = "bluestore";
}
if (!vm.count("data-path") &&
op != "dump-export") {
cerr << "Must provide --data-path" << std::endl;
usage(desc);
return 1;
}
if (!vm.count("op") && !vm.count("object")) {
cerr << "Must provide --op or object command..." << std::endl;
usage(desc);
return 1;
}
if (op == "fuse" && mountpoint.length() == 0) {
cerr << "Missing fuse mountpoint" << std::endl;
usage(desc);
return 1;
}
outistty = isatty(STDOUT_FILENO) || tty;
file_fd = fd_none;
if ((op == "export" || op == "export-remove" || op == "get-osdmap" || op == "get-inc-osdmap") && !dry_run) {
if (!vm.count("file") || file == "-") {
if (outistty) {
cerr << "stdout is a tty and no --file filename specified" << std::endl;
return 1;
}
file_fd = STDOUT_FILENO;
} else {
file_fd = open(file.c_str(), O_WRONLY|O_CREAT|O_TRUNC, 0666);
}
} else if (op == "import" || op == "dump-export" || op == "set-osdmap" || op == "set-inc-osdmap" || op == "pg-log-inject-dups") {
if (!vm.count("file") || file == "-") {
if (isatty(STDIN_FILENO)) {
cerr << "stdin is a tty and no --file filename specified" << std::endl;
return 1;
}
file_fd = STDIN_FILENO;
} else {
file_fd = open(file.c_str(), O_RDONLY);
}
}
ObjectStoreTool tool = ObjectStoreTool(file_fd, dry_run);
if (vm.count("file") && file_fd == fd_none && !dry_run) {
cerr << "--file option only applies to import, dump-export, export, export-remove, "
<< "get-osdmap, set-osdmap, get-inc-osdmap or set-inc-osdmap" << std::endl;
return 1;
}
if (file_fd != fd_none && file_fd < 0) {
string err = string("file: ") + file;
perror(err.c_str());
return 1;
}
int init_flags = 0;
if (vm.count("no-mon-config") > 0) {
init_flags |= CINIT_FLAG_NO_MON_CONFIG;
}
auto cct = global_init(
NULL, ceph_options,
CEPH_ENTITY_TYPE_OSD,
CODE_ENVIRONMENT_UTILITY_NODOUT,
init_flags);
common_init_finish(g_ceph_context);
if (debug) {
g_conf().set_val_or_die("log_to_stderr", "true");
g_conf().set_val_or_die("err_to_stderr", "true");
}
g_conf().apply_changes(nullptr);
// Special list handling. Treating pretty_format as human readable,
// with one object per line and not an enclosing array.
human_readable = ends_with(format, "-pretty");
if ((op == "list" || op == "meta-list") && human_readable) {
// Remove -pretty from end of format which we know is there
format = format.substr(0, format.size() - strlen("-pretty"));
}
formatter = Formatter::create(format);
if (formatter == NULL) {
cerr << "unrecognized format: " << format << std::endl;
return 1;
}
if (op == "dump-export") {
int ret = tool.dump_export(formatter, dump_data_dir);
if (ret < 0) {
cerr << "dump-export: "
<< cpp_strerror(ret) << std::endl;
return 1;
}
return 0;
}
//Verify that data-path really exists
struct stat st;
if (::stat(dpath.c_str(), &st) == -1) {
string err = string("data-path: ") + dpath;
perror(err.c_str());
return 1;
}
if (pgidstr.length() && pgidstr != "meta" && !pgid.parse(pgidstr.c_str())) {
cerr << "Invalid pgid '" << pgidstr << "' specified" << std::endl;
return 1;
}
std::unique_ptr<ObjectStore> fs = ObjectStore::create(g_ceph_context, type, dpath, jpath, flags);
if (!fs) {
cerr << "Unable to create store of type " << type << std::endl;
return 1;
}
if (op == "fsck" || op == "fsck-deep") {
int r = fs->fsck(op == "fsck-deep");
if (r < 0) {
cerr << "fsck failed: " << cpp_strerror(r) << std::endl;
return 1;
}
if (r > 0) {
cerr << "fsck status: " << r << " remaining error(s) and warning(s)" << std::endl;
return 1;
}
cout << "fsck success" << std::endl;
return 0;
}
if (op == "repair" || op == "repair-deep") {
int r = fs->repair(op == "repair-deep");
if (r < 0) {
cerr << "repair failed: " << cpp_strerror(r) << std::endl;
return 1;
}
if (r > 0) {
cerr << "repair status: " << r << " remaining error(s) and warning(s)" << std::endl;
return 1;
}
cout << "repair success" << std::endl;
return 0;
}
if (op == "mkfs") {
if (fsid.length()) {
uuid_d f;
bool r = f.parse(fsid.c_str());
if (!r) {
cerr << "failed to parse uuid '" << fsid << "'" << std::endl;
return 1;
}
fs->set_fsid(f);
}
int r = fs->mkfs();
if (r < 0) {
cerr << "mkfs failed: " << cpp_strerror(r) << std::endl;
return 1;
}
return 0;
}
if (op == "dup") {
string target_type;
char fn[PATH_MAX];
snprintf(fn, sizeof(fn), "%s/type", target_data_path.c_str());
int fd = ::open(fn, O_RDONLY);
if (fd < 0) {
cerr << "Unable to open " << target_data_path << "/type" << std::endl;
exit(1);
}
bufferlist bl;
bl.read_fd(fd, 64);
if (bl.length()) {
target_type = string(bl.c_str(), bl.length() - 1); // drop \n
}
::close(fd);
unique_ptr<ObjectStore> targetfs = ObjectStore::create(
g_ceph_context, target_type,
target_data_path, "", 0);
if (!targetfs) {
cerr << "Unable to open store of type " << target_type << std::endl;
return 1;
}
int r = dup(dpath, fs.get(), target_data_path, targetfs.get());
if (r < 0) {
cerr << "dup failed: " << cpp_strerror(r) << std::endl;
return 1;
}
return 0;
}
int ret = fs->mount();
if (ret < 0) {
if (ret == -EBUSY) {
cerr << "OSD has the store locked" << std::endl;
} else {
cerr << "Mount failed with '" << cpp_strerror(ret) << "'" << std::endl;
}
return 1;
}
if (op == "fuse") {
#ifdef HAVE_LIBFUSE
FuseStore fuse(fs.get(), mountpoint);
cout << "mounting fuse at " << mountpoint << " ..." << std::endl;
int r = fuse.main();
fs->umount();
if (r < 0) {
cerr << "failed to mount fuse: " << cpp_strerror(r) << std::endl;
return 1;
}
#else
cerr << "fuse support not enabled" << std::endl;
#endif
return 0;
}
vector<coll_t> ls;
vector<coll_t>::iterator it;
CompatSet supported;
#ifdef INTERNAL_TEST
supported = get_test_compat_set();
#else
supported = OSD::get_osd_compat_set();
#endif
bufferlist bl;
auto ch = fs->open_collection(coll_t::meta());
std::unique_ptr<OSDSuperblock> superblock;
if (!no_superblock) {
superblock.reset(new OSDSuperblock);
bufferlist::const_iterator p;
ret = fs->read(ch, OSD_SUPERBLOCK_GOBJECT, 0, 0, bl);
if (ret < 0) {
cerr << "Failure to read OSD superblock: " << cpp_strerror(ret) << std::endl;
goto out;
}
p = bl.cbegin();
decode(*superblock, p);
if (debug) {
cerr << "Cluster fsid=" << superblock->cluster_fsid << std::endl;
}
if (debug) {
cerr << "Supported features: " << supported << std::endl;
cerr << "On-disk features: " << superblock->compat_features << std::endl;
}
if (supported.compare(superblock->compat_features) == -1) {
CompatSet unsupported = supported.unsupported(superblock->compat_features);
cerr << "On-disk OSD incompatible features set "
<< unsupported << std::endl;
ret = -EINVAL;
goto out;
}
}
if (op != "list" && vm.count("object")) {
// Special case: Create pgmeta_oid if empty string specified
// This can't conflict with any actual object names.
if (object == "") {
ghobj = pgid.make_pgmeta_oid();
} else {
json_spirit::Value v;
try {
if (!json_spirit::read(object, v) ||
(v.type() != json_spirit::array_type && v.type() != json_spirit::obj_type)) {
// Special: Need head/snapdir so set even if user didn't specify
if (vm.count("objcmd") && (objcmd == "remove-clone-metadata"))
head = true;
lookup_ghobject lookup(object, nspace, head);
if (pgidstr == "meta")
ret = action_on_all_objects_in_exact_pg(fs.get(), coll_t::meta(), lookup, debug);
else if (pgidstr.length())
ret = action_on_all_objects_in_exact_pg(fs.get(), coll_t(pgid), lookup, debug);
else
ret = action_on_all_objects(fs.get(), lookup, debug);
if (ret) {
throw std::runtime_error("Internal error");
} else {
if (lookup.size() != 1) {
stringstream ss;
if (lookup.size() == 0)
ss << "No object id '" << object << "' found or invalid JSON specified";
else
ss << "Found " << lookup.size() << " objects with id '" << object
<< "', please use a JSON spec from --op list instead";
throw std::runtime_error(ss.str());
}
pair<coll_t, ghobject_t> found = lookup.pop();
pgidstr = found.first.to_str();
pgid.parse(pgidstr.c_str());
ghobj = found.second;
}
} else {
stringstream ss;
if (pgidstr.length() == 0 && v.type() != json_spirit::array_type) {
ss << "Without --pgid the object '" << object
<< "' must be a JSON array";
throw std::runtime_error(ss.str());
}
if (v.type() == json_spirit::array_type) {
json_spirit::Array array = v.get_array();
if (array.size() != 2) {
ss << "Object '" << object
<< "' must be a JSON array with 2 elements";
throw std::runtime_error(ss.str());
}
vector<json_spirit::Value>::iterator i = array.begin();
ceph_assert(i != array.end());
if (i->type() != json_spirit::str_type) {
ss << "Object '" << object
<< "' must be a JSON array with the first element a string";
throw std::runtime_error(ss.str());
}
string object_pgidstr = i->get_str();
if (object_pgidstr != "meta") {
spg_t object_pgid;
object_pgid.parse(object_pgidstr.c_str());
if (pgidstr.length() > 0) {
if (object_pgid != pgid) {
ss << "object '" << object
<< "' has a pgid different from the --pgid="
<< pgidstr << " option";
throw std::runtime_error(ss.str());
}
} else {
pgidstr = object_pgidstr;
pgid = object_pgid;
}
} else {
pgidstr = object_pgidstr;
}
++i;
v = *i;
}
try {
ghobj.decode(v);
} catch (std::runtime_error& e) {
ss << "Decode object JSON error: " << e.what();
throw std::runtime_error(ss.str());
}
if (pgidstr != "meta" && (uint64_t)pgid.pgid.m_pool != (uint64_t)ghobj.hobj.pool) {
cerr << "Object pool and pgid pool don't match" << std::endl;
ret = 1;
goto out;
}
if (pgidstr != "meta") {
auto ch = fs->open_collection(coll_t(pgid));
if (!ghobj.match(fs->collection_bits(ch), pgid.ps())) {
stringstream ss;
ss << "object " << ghobj << " not contained by pg " << pgid;
throw std::runtime_error(ss.str());
}
}
}
} catch (std::runtime_error& e) {
cerr << e.what() << std::endl;
ret = 1;
goto out;
}
}
}
// The ops which require --pgid option are checked here and
// mentioned in the usage for --pgid.
if ((op == "info" || op == "log" || op == "remove" || op == "export"
|| op == "export-remove" || op == "mark-complete"
|| op == "reset-last-complete"
|| op == "trim-pg-log"
|| op == "pg-log-inject-dups") &&
pgidstr.length() == 0) {
cerr << "Must provide pgid" << std::endl;
usage(desc);
ret = 1;
goto out;
}
if (op == "import") {
ceph_assert(superblock != nullptr);
try {
ret = tool.do_import(fs.get(), *superblock, force, pgidstr);
}
catch (const buffer::error &e) {
cerr << "do_import threw exception error " << e.what() << std::endl;
ret = -EFAULT;
}
if (ret == -EFAULT) {
cerr << "Corrupt input for import" << std::endl;
}
if (ret == 0)
cout << "Import successful" << std::endl;
goto out;
} else if (op == "dump-journal-mount") {
// Undocumented feature to dump journal with mounted fs
// This doesn't support the format option, but it uses the
// ObjectStore::dump_journal() and mounts to get replay to run.
ret = fs->dump_journal(cout);
if (ret) {
if (ret == -EOPNOTSUPP) {
cerr << "Object store type \"" << type << "\" doesn't support journal dump" << std::endl;
} else {
cerr << "Journal dump failed with error " << cpp_strerror(ret) << std::endl;
}
}
goto out;
} else if (op == "get-osdmap") {
bufferlist bl;
OSDMap osdmap;
if (epoch == 0) {
ceph_assert(superblock != nullptr);
epoch = superblock->current_epoch;
}
ret = get_osdmap(fs.get(), epoch, osdmap, bl);
if (ret) {
cerr << "Failed to get osdmap#" << epoch << ": "
<< cpp_strerror(ret) << std::endl;
goto out;
}
ret = bl.write_fd(file_fd);
if (ret) {
cerr << "Failed to write to " << file << ": " << cpp_strerror(ret) << std::endl;
} else {
cout << "osdmap#" << epoch << " exported." << std::endl;
}
goto out;
} else if (op == "set-osdmap") {
bufferlist bl;
ret = get_fd_data(file_fd, bl);
if (ret < 0) {
cerr << "Failed to read osdmap " << cpp_strerror(ret) << std::endl;
} else {
ret = set_osdmap(fs.get(), epoch, bl, force);
}
goto out;
} else if (op == "get-inc-osdmap") {
bufferlist bl;
if (epoch == 0) {
ceph_assert(superblock != nullptr);
epoch = superblock->current_epoch;
}
ret = get_inc_osdmap(fs.get(), epoch, bl);
if (ret < 0) {
cerr << "Failed to get incremental osdmap# " << epoch << ": "
<< cpp_strerror(ret) << std::endl;
goto out;
}
ret = bl.write_fd(file_fd);
if (ret) {
cerr << "Failed to write to " << file << ": " << cpp_strerror(ret) << std::endl;
} else {
cout << "inc-osdmap#" << epoch << " exported." << std::endl;
}
goto out;
} else if (op == "set-inc-osdmap") {
bufferlist bl;
ret = get_fd_data(file_fd, bl);
if (ret < 0) {
cerr << "Failed to read incremental osdmap " << cpp_strerror(ret) << std::endl;
goto out;
} else {
ret = set_inc_osdmap(fs.get(), epoch, bl, force);
}
goto out;
} else if (op == "update-mon-db") {
if (!vm.count("mon-store-path")) {
cerr << "Please specify the path to monitor db to update" << std::endl;
ret = -EINVAL;
} else {
ceph_assert(superblock != nullptr);
ret = update_mon_db(*fs, *superblock, dpath + "/keyring", mon_store_path);
}
goto out;
}
if (op == "remove") {
if (!force && !dry_run) {
cerr << "Please use export-remove or you must use --force option" << std::endl;
ret = -EINVAL;
goto out;
}
ret = initiate_new_remove_pg(fs.get(), pgid);
if (ret < 0) {
cerr << "PG '" << pgid << "' not found" << std::endl;
goto out;
}
cout << "Remove successful" << std::endl;
goto out;
}
if (op == "fix-lost") {
boost::scoped_ptr<action_on_object_t> action;
action.reset(new do_fix_lost());
if (pgidstr.length())
ret = action_on_all_objects_in_exact_pg(fs.get(), coll_t(pgid), *action, debug);
else
ret = action_on_all_objects(fs.get(), *action, debug);
goto out;
}
if (op == "list") {
ret = do_list(fs.get(), pgidstr, object, nspace, formatter, debug,
human_readable, head);
if (ret < 0) {
cerr << "do_list failed: " << cpp_strerror(ret) << std::endl;
}
goto out;
}
if (op == "list-slow-omap") {
ret = do_list_slow(fs.get(), pgidstr, object, slow_threshold, formatter, debug,
human_readable);
if (ret < 0) {
cerr << "do_list failed: " << cpp_strerror(ret) << std::endl;
}
goto out;
}
if (op == "dump-super") {
ceph_assert(superblock != nullptr);
formatter->open_object_section("superblock");
superblock->dump(formatter);
formatter->close_section();
formatter->flush(cout);
cout << std::endl;
goto out;
}
if (op == "statfs") {
store_statfs_t statsbuf;
ret = fs->statfs(&statsbuf);
if (ret < 0) {
cerr << "error from statfs: " << cpp_strerror(ret) << std::endl;
goto out;
}
formatter->open_object_section("statfs");
statsbuf.dump(formatter);
formatter->close_section();
formatter->flush(cout);
cout << std::endl;
goto out;
}
if (op == "meta-list") {
ret = do_meta(fs.get(), object, formatter, debug, human_readable);
if (ret < 0) {
cerr << "do_meta failed: " << cpp_strerror(ret) << std::endl;
}
goto out;
}
ret = fs->list_collections(ls);
if (ret < 0) {
cerr << "failed to list pgs: " << cpp_strerror(ret) << std::endl;
goto out;
}
if (debug && op == "list-pgs")
cout << "Performing list-pgs operation" << std::endl;
// Find pg
for (it = ls.begin(); it != ls.end(); ++it) {
spg_t tmppgid;
if (pgidstr == "meta") {
if (it->to_str() == "meta")
break;
else
continue;
}
if (!it->is_pg(&tmppgid)) {
continue;
}
if (it->is_temp(&tmppgid)) {
continue;
}
if (op != "list-pgs" && tmppgid != pgid) {
continue;
}
if (op != "list-pgs") {
//Found!
break;
}
cout << tmppgid << std::endl;
}
if (op == "list-pgs") {
ret = 0;
goto out;
}
// If not an object command nor any of the ops handled below, then output this usage
// before complaining about a bad pgid
if (!vm.count("objcmd") && op != "export" && op != "export-remove" && op != "info" && op != "log" && op != "mark-complete" && op != "trim-pg-log" && op != "trim-pg-log-dups" && op != "pg-log-inject-dups") {
cerr << "Must provide --op (info, log, remove, mkfs, fsck, repair, export, export-remove, import, list, fix-lost, list-pgs, dump-super, meta-list, "
"get-osdmap, set-osdmap, get-inc-osdmap, set-inc-osdmap, mark-complete, reset-last-complete, dump-export, trim-pg-log, trim-pg-log-dups statfs)"
<< std::endl;
usage(desc);
ret = 1;
goto out;
}
epoch_t map_epoch;
// The following code for export, info, log require omap or !skip-mount-omap
if (it != ls.end()) {
coll_t coll = *it;
if (vm.count("objcmd")) {
ret = 0;
if (objcmd == "remove" || objcmd == "removeall") {
bool all = (objcmd == "removeall");
enum rmtype type = BOTH;
if (rmtypestr == "nosnapmap")
type = NOSNAPMAP;
else if (rmtypestr == "snapmap")
type = SNAPMAP;
ret = do_remove_object(fs.get(), coll, ghobj, all, force, type);
goto out;
} else if (objcmd == "list-attrs") {
ret = do_list_attrs(fs.get(), coll, ghobj);
goto out;
} else if (objcmd == "list-omap") {
ret = do_list_omap(fs.get(), coll, ghobj);
goto out;
} else if (objcmd == "get-bytes" || objcmd == "set-bytes") {
if (objcmd == "get-bytes") {
int fd;
if (vm.count("arg1") == 0 || arg1 == "-") {
fd = STDOUT_FILENO;
} else {
fd = open(arg1.c_str(), O_WRONLY|O_TRUNC|O_CREAT|O_EXCL|O_LARGEFILE, 0666);
if (fd == -1) {
cerr << "open " << arg1 << " " << cpp_strerror(errno) << std::endl;
ret = 1;
goto out;
}
}
ret = do_get_bytes(fs.get(), coll, ghobj, fd);
if (fd != STDOUT_FILENO)
close(fd);
} else {
int fd;
if (vm.count("arg1") == 0 || arg1 == "-") {
// Since read_fd() doesn't handle ^D from a tty stdin, don't allow it.
if (isatty(STDIN_FILENO)) {
cerr << "stdin is a tty and no file specified" << std::endl;
ret = 1;
goto out;
}
fd = STDIN_FILENO;
} else {
fd = open(arg1.c_str(), O_RDONLY|O_LARGEFILE, 0666);
if (fd == -1) {
cerr << "open " << arg1 << " " << cpp_strerror(errno) << std::endl;
ret = 1;
goto out;
}
}
ret = do_set_bytes(fs.get(), coll, ghobj, fd);
if (fd != STDIN_FILENO)
close(fd);
}
goto out;
} else if (objcmd == "get-attr") {
if (vm.count("arg1") == 0) {
usage(desc);
ret = 1;
goto out;
}
ret = do_get_attr(fs.get(), coll, ghobj, arg1);
goto out;
} else if (objcmd == "set-attr") {
if (vm.count("arg1") == 0) {
usage(desc);
ret = 1;
}
int fd;
if (vm.count("arg2") == 0 || arg2 == "-") {
// Since read_fd() doesn't handle ^D from a tty stdin, don't allow it.
if (isatty(STDIN_FILENO)) {
cerr << "stdin is a tty and no file specified" << std::endl;
ret = 1;
goto out;
}
fd = STDIN_FILENO;
} else {
fd = open(arg2.c_str(), O_RDONLY|O_LARGEFILE, 0666);
if (fd == -1) {
cerr << "open " << arg2 << " " << cpp_strerror(errno) << std::endl;
ret = 1;
goto out;
}
}
ret = do_set_attr(fs.get(), coll, ghobj, arg1, fd);
if (fd != STDIN_FILENO)
close(fd);
goto out;
} else if (objcmd == "rm-attr") {
if (vm.count("arg1") == 0) {
usage(desc);
ret = 1;
goto out;
}
ret = do_rm_attr(fs.get(), coll, ghobj, arg1);
goto out;
} else if (objcmd == "get-omap") {
if (vm.count("arg1") == 0) {
usage(desc);
ret = 1;
goto out;
}
ret = do_get_omap(fs.get(), coll, ghobj, arg1);
goto out;
} else if (objcmd == "set-omap") {
if (vm.count("arg1") == 0) {
usage(desc);
ret = 1;
goto out;
}
int fd;
if (vm.count("arg2") == 0 || arg2 == "-") {
// Since read_fd() doesn't handle ^D from a tty stdin, don't allow it.
if (isatty(STDIN_FILENO)) {
cerr << "stdin is a tty and no file specified" << std::endl;
ret = 1;
goto out;
}
fd = STDIN_FILENO;
} else {
fd = open(arg2.c_str(), O_RDONLY|O_LARGEFILE, 0666);
if (fd == -1) {
cerr << "open " << arg2 << " " << cpp_strerror(errno) << std::endl;
ret = 1;
goto out;
}
}
ret = do_set_omap(fs.get(), coll, ghobj, arg1, fd);
if (fd != STDIN_FILENO)
close(fd);
goto out;
} else if (objcmd == "rm-omap") {
if (vm.count("arg1") == 0) {
usage(desc);
ret = 1;
goto out;
}
ret = do_rm_omap(fs.get(), coll, ghobj, arg1);
goto out;
} else if (objcmd == "get-omaphdr") {
if (vm.count("arg1")) {
usage(desc);
ret = 1;
goto out;
}
ret = do_get_omaphdr(fs.get(), coll, ghobj);
goto out;
} else if (objcmd == "set-omaphdr") {
// Extra arg
if (vm.count("arg2")) {
usage(desc);
ret = 1;
goto out;
}
int fd;
if (vm.count("arg1") == 0 || arg1 == "-") {
// Since read_fd() doesn't handle ^D from a tty stdin, don't allow it.
if (isatty(STDIN_FILENO)) {
cerr << "stdin is a tty and no file specified" << std::endl;
ret = 1;
goto out;
}
fd = STDIN_FILENO;
} else {
fd = open(arg1.c_str(), O_RDONLY|O_LARGEFILE, 0666);
if (fd == -1) {
cerr << "open " << arg1 << " " << cpp_strerror(errno) << std::endl;
ret = 1;
goto out;
}
}
ret = do_set_omaphdr(fs.get(), coll, ghobj, fd);
if (fd != STDIN_FILENO)
close(fd);
goto out;
} else if (objcmd == "dump") {
// There should not be any other arguments
if (vm.count("arg1") || vm.count("arg2")) {
usage(desc);
ret = 1;
goto out;
}
ret = print_obj_info(fs.get(), coll, ghobj, formatter);
goto out;
} else if (objcmd == "corrupt-info") { // Undocumented testing feature
// There should not be any other arguments
if (vm.count("arg1") || vm.count("arg2")) {
usage(desc);
ret = 1;
goto out;
}
ret = corrupt_info(fs.get(), coll, ghobj, formatter);
goto out;
} else if (objcmd == "set-size" || objcmd == "corrupt-size") {
// Undocumented testing feature
bool corrupt = (objcmd == "corrupt-size");
// Extra arg
if (vm.count("arg1") == 0 || vm.count("arg2")) {
usage(desc);
ret = 1;
goto out;
}
if (arg1.length() == 0 || !isdigit(arg1.c_str()[0])) {
cerr << "Invalid size '" << arg1 << "' specified" << std::endl;
ret = 1;
goto out;
}
uint64_t size = atoll(arg1.c_str());
ret = set_size(fs.get(), coll, ghobj, size, formatter, corrupt);
goto out;
} else if (objcmd == "clear-data-digest") {
ret = clear_data_digest(fs.get(), coll, ghobj);
goto out;
} else if (objcmd == "clear-snapset") {
// UNDOCUMENTED: For testing zap SnapSet
// IGNORE extra args since not in usage anyway
if (!ghobj.hobj.has_snapset()) {
cerr << "'" << objcmd << "' requires a head or snapdir object" << std::endl;
ret = 1;
goto out;
}
ret = clear_snapset(fs.get(), coll, ghobj, arg1);
goto out;
} else if (objcmd == "remove-clone-metadata") {
// Extra arg
if (vm.count("arg1") == 0 || vm.count("arg2")) {
usage(desc);
ret = 1;
goto out;
}
if (!ghobj.hobj.has_snapset()) {
cerr << "'" << objcmd << "' requires a head or snapdir object" << std::endl;
ret = 1;
goto out;
}
if (arg1.length() == 0 || !isdigit(arg1.c_str()[0])) {
cerr << "Invalid cloneid '" << arg1 << "' specified" << std::endl;
ret = 1;
goto out;
}
snapid_t cloneid = atoi(arg1.c_str());
ret = remove_clone(fs.get(), coll, ghobj, cloneid, force);
goto out;
}
cerr << "Unknown object command '" << objcmd << "'" << std::endl;
usage(desc);
ret = 1;
goto out;
}
map_epoch = 0;
ret = PG::peek_map_epoch(fs.get(), pgid, &map_epoch);
if (ret < 0)
cerr << "peek_map_epoch reports error" << std::endl;
if (debug)
cerr << "map_epoch " << map_epoch << std::endl;
pg_info_t info(pgid);
PastIntervals past_intervals;
__u8 struct_ver;
ret = PG::read_info(fs.get(), pgid, coll, info, past_intervals, struct_ver);
if (ret < 0) {
cerr << "read_info error " << cpp_strerror(ret) << std::endl;
goto out;
}
if (struct_ver < PG::get_compat_struct_v()) {
cerr << "PG is too old to upgrade, use older Ceph version" << std::endl;
ret = -EFAULT;
goto out;
}
if (debug)
cerr << "struct_v " << (int)struct_ver << std::endl;
if (op == "export" || op == "export-remove") {
ceph_assert(superblock != nullptr);
ret = tool.do_export(cct.get(), fs.get(), coll, pgid, info, map_epoch, struct_ver, *superblock, past_intervals);
if (ret == 0) {
cerr << "Export successful" << std::endl;
if (op == "export-remove") {
ret = initiate_new_remove_pg(fs.get(), pgid);
// Export succeeded, so pgid is there
ceph_assert(ret == 0);
cerr << "Remove successful" << std::endl;
}
}
} else if (op == "info") {
formatter->open_object_section("info");
info.dump(formatter);
formatter->close_section();
formatter->flush(cout);
cout << std::endl;
} else if (op == "log") {
PGLog::IndexedLog log;
pg_missing_t missing;
ret = get_log(cct.get(), fs.get(), struct_ver, pgid, info, log, missing);
if (ret < 0)
goto out;
dump_log(formatter, cout, log, missing);
} else if (op == "mark-complete") {
ObjectStore::Transaction tran;
ObjectStore::Transaction *t = &tran;
if (struct_ver < PG::get_compat_struct_v()) {
cerr << "Can't mark-complete, version mismatch " << (int)struct_ver
<< " (pg) < compat " << (int)PG::get_compat_struct_v() << " (tool)"
<< std::endl;
ret = 1;
goto out;
}
cout << "Marking complete " << std::endl;
ceph_assert(superblock != nullptr);
info.last_update = eversion_t(superblock->current_epoch, info.last_update.version + 1);
info.last_backfill = hobject_t::get_max();
info.last_epoch_started = superblock->current_epoch;
info.history.last_epoch_started = superblock->current_epoch;
info.history.last_epoch_clean = superblock->current_epoch;
past_intervals.clear();
if (!dry_run) {
ret = write_info(*t, map_epoch, info, past_intervals);
if (ret != 0)
goto out;
auto ch = fs->open_collection(coll_t(pgid));
fs->queue_transaction(ch, std::move(*t));
}
cout << "Marking complete succeeded" << std::endl;
} else if (op == "trim-pg-log") {
ret = do_trim_pg_log(fs.get(), coll, info, pgid,
map_epoch, past_intervals);
if (ret < 0) {
cerr << "Error trimming pg log: " << cpp_strerror(ret) << std::endl;
goto out;
}
cout << "Finished trimming pg log" << std::endl;
goto out;
} else if (op == "trim-pg-log-dups") {
ret = do_trim_pg_log_dups(fs.get(), coll, info, pgid,
map_epoch, past_intervals);
if (ret < 0) {
cerr << "Error trimming pg log dups: " << cpp_strerror(ret) << std::endl;
goto out;
}
cout << "Finished trimming pg log dups" << std::endl;
goto out;
} else if (op == "reset-last-complete") {
if (!force) {
std::cerr << "WARNING: reset-last-complete is extremely dangerous and almost "
<< "certain to lead to permanent data loss unless you know exactly "
<< "what you are doing. Pass --force to proceed anyway."
<< std::endl;
ret = -EINVAL;
goto out;
}
ObjectStore::Transaction tran;
ObjectStore::Transaction *t = &tran;
if (struct_ver < PG::get_compat_struct_v()) {
cerr << "Can't reset-last-complete, version mismatch " << (int)struct_ver
<< " (pg) < compat " << (int)PG::get_compat_struct_v() << " (tool)"
<< std::endl;
ret = 1;
goto out;
}
cout << "Reseting last_complete " << std::endl;
info.last_complete = info.last_update;
if (!dry_run) {
ret = write_info(*t, map_epoch, info, past_intervals);
if (ret != 0)
goto out;
fs->queue_transaction(ch, std::move(*t));
}
cout << "Reseting last_complete succeeded" << std::endl;
} else if (op == "pg-log-inject-dups") {
if (!vm.count("file") || file == "-") {
cerr << "Must provide file containing JSON dups entries" << std::endl;
ret = 1;
goto out;
}
if (debug)
cerr << "opening file " << file << std::endl;
ifstream json_file_stream(file , std::ifstream::in);
if (!json_file_stream.is_open()) {
cerr << "unable to open file " << file << std::endl;
ret = -1;
goto out;
}
json_spirit::mValue result;
try {
if (!json_spirit::read(json_file_stream, result))
throw std::runtime_error("unparseable JSON " + file);
if (result.type() != json_spirit::array_type) {
cerr << "result is not an array_type - type=" << result.type() << std::endl;
throw std::runtime_error("not JSON array_type " + file);
}
do_dups_inject_from_json(fs.get(), pgid, result, debug);
} catch (const std::runtime_error &e) {
cerr << e.what() << std::endl;;
return -1;
}
} else {
ceph_assert(!"Should have already checked for valid --op");
}
} else {
cerr << "PG '" << pgid << "' not found" << std::endl;
ret = -ENOENT;
}
out:
if (debug) {
ostringstream ostr;
Formatter* f = Formatter::create("json-pretty", "json-pretty", "json-pretty");
cct->get_perfcounters_collection()->dump_formatted(f, false, false);
ostr << "ceph-objectstore-tool ";
f->flush(ostr);
delete f;
cout << ostr.str() << std::endl;
}
int r = fs->umount();
if (r < 0) {
cerr << "umount failed: " << cpp_strerror(r) << std::endl;
// If no previous error, then use umount() error
if (ret == 0)
ret = r;
}
if (dry_run) {
// Export output can go to stdout, so put this message on stderr
if (op == "export")
cerr << "dry-run: Nothing changed" << std::endl;
else
cout << "dry-run: Nothing changed" << std::endl;
}
if (ret < 0)
ret = 1;
return ret;
}
| 132,437 | 27.941871 | 208 | cc |
null | ceph-main/src/tools/ceph_objectstore_tool.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 Inktank
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_OBJECTSTORE_TOOL_H_
#define CEPH_OBJECTSTORE_TOOL_H_
#include "RadosDump.h"
/**
 * ObjectStoreTool - the import/export engine behind ceph-objectstore-tool.
 *
 * Extends RadosDump (which supplies the dump-file fd and the dry-run flag)
 * with PG-level operations: exporting a PG from an ObjectStore into the
 * dump-file format, importing such a dump back into a store, and
 * pretty-printing an existing dump.
 *
 * Unless noted otherwise, methods return 0 on success and a negative
 * error code on failure (as checked by the callers in the .cc file).
 */
class ObjectStoreTool : public RadosDump
{
public:
  // Wrap an already-open dump file descriptor; dry_run is handled by the
  // RadosDump base and suppresses mutating operations.
  ObjectStoreTool(int file_fd, bool dry_run)
    : RadosDump(file_fd, dry_run)
  {}

  // Render the export stream on the wrapped fd through *formatter.
  // NOTE(review): dump_data_dir presumably names a directory that receives
  // raw object data — implementation not visible here, confirm before use.
  int dump_export(Formatter *formatter, const std::string &dump_data_dir);

  // Import a previously exported PG into *store.  Observed at the call
  // site: returns 0 on success, -EFAULT on corrupt input, and may throw
  // buffer::error while decoding; `force` and `pgidstr` adjust the import.
  int do_import(ObjectStore *store, OSDSuperblock& sb, bool force,
		std::string pgidstr);

  // Export PG `pgid` (its info, map epoch, struct version and past
  // intervals, plus the superblock context) from `fs` to the wrapped fd;
  // returns 0 on success (callers treat 0 as "Export successful").
  int do_export(CephContext *cct, ObjectStore *fs, coll_t coll, spg_t pgid,
		pg_info_t &info, epoch_t map_epoch, __u8 struct_ver,
		const OSDSuperblock& superblock,
		PastIntervals &past_intervals);

  // Pretty-print one object record held in `bl` through *formatter;
  // presumably one entry of an export stream — implementation not in view.
  int dump_object(Formatter *formatter, bufferlist &bl,
		  const std::string &dump_data_dir = "");

  // Read one object from `bl` into *store, with access to the snap mapper
  // and current OSDMap; *skipped_objects presumably flags records that were
  // skipped — implementation not visible here.
  int get_object(
    ObjectStore *store, OSDriver& driver, SnapMapper& mapper, coll_t coll,
    bufferlist &bl, OSDMap &curmap, bool *skipped_objects);

  // Presumably writes a single object from collection `cid` to the export
  // fd — implementation not visible here.
  int export_file(
    ObjectStore *store, coll_t cid, ghobject_t &obj);

  // Presumably export_file() over every object in `coll`.
  int export_files(ObjectStore *store, coll_t coll);
};
#endif // CEPH_OBJECSTORE_TOOL_H_
| 1,481 | 31.933333 | 77 | h |
null | ceph-main/src/tools/ceph_osdomap_tool.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2012 Inktank, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#include <stdlib.h>

#include <memory>
#include <string>

#include <boost/program_options/variables_map.hpp>
#include <boost/program_options/parsers.hpp>

#include "common/errno.h"
#include "global/global_init.h"
#include "os/DBObjectMap.h"
#include "kv/KeyValueDB.h"
using namespace std;
namespace po = boost::program_options;
int main(int argc, char **argv) {
po::options_description desc("Allowed options");
string store_path, cmd, oid, backend;
bool debug = false;
desc.add_options()
("help", "produce help message")
("omap-path", po::value<string>(&store_path),
"path to omap directory, mandatory (current/omap usually)")
("paranoid", "use paranoid checking")
("debug", "Additional debug output from DBObjectMap")
("oid", po::value<string>(&oid), "Restrict to this object id when dumping objects")
("command", po::value<string>(&cmd),
"command arg is one of [dump-raw-keys, dump-raw-key-vals, dump-objects, dump-objects-with-keys, check, dump-headers, repair, compact], mandatory")
("backend", po::value<string>(&backend),
"DB backend (default rocksdb)")
;
po::positional_options_description p;
p.add("command", 1);
vector<string> ceph_option_strings;
po::variables_map vm;
try {
po::parsed_options parsed =
po::command_line_parser(argc, argv).options(desc).positional(p).allow_unregistered().run();
po::store(
parsed,
vm);
po::notify(vm);
ceph_option_strings = po::collect_unrecognized(parsed.options,
po::include_positional);
} catch(po::error &e) {
std::cerr << e.what() << std::endl;
return 1;
}
vector<const char *> ceph_options;
ceph_options.reserve(ceph_option_strings.size());
for (vector<string>::iterator i = ceph_option_strings.begin();
i != ceph_option_strings.end();
++i) {
ceph_options.push_back(i->c_str());
}
if (vm.count("debug")) debug = true;
if (vm.count("help")) {
std::cerr << desc << std::endl;
return 1;
}
auto cct = global_init(
NULL, ceph_options, CEPH_ENTITY_TYPE_OSD,
CODE_ENVIRONMENT_UTILITY_NODOUT, 0);
common_init_finish(g_ceph_context);
cct->_conf.apply_changes(nullptr);
if (debug) {
g_conf().set_val_or_die("log_to_stderr", "true");
g_conf().set_val_or_die("err_to_stderr", "true");
}
g_conf().apply_changes(nullptr);
if (vm.count("omap-path") == 0) {
std::cerr << "Required argument --omap-path" << std::endl;
return 1;
}
if (vm.count("command") == 0) {
std::cerr << "Required argument --command" << std::endl;
return 1;
}
if (vm.count("backend") == 0) {
backend = "rocksdb";
}
KeyValueDB* store(KeyValueDB::create(g_ceph_context, backend, store_path));
if (store == NULL) {
std::cerr << "Invalid backend '" << backend << "' specified" << std::endl;
return 1;
}
/*if (vm.count("paranoid")) {
std::cerr << "Enabling paranoid checks" << std::endl;
store->options.paranoid_checks = true;
}*/
DBObjectMap omap(cct.get(), store);
stringstream out;
int r = store->open(out);
if (r < 0) {
std::cerr << "Store open got: " << cpp_strerror(r) << std::endl;
std::cerr << "Output: " << out.str() << std::endl;
return r;
}
// We don't call omap.init() here because it will repair
// the DBObjectMap which we might want to examine for diagnostic
// reasons. Instead use --command repair.
omap.get_state();
std::cout << "Version: " << (int)omap.state.v << std::endl;
std::cout << "Seq: " << omap.state.seq << std::endl;
std::cout << "legacy: " << (omap.state.legacy ? "true" : "false") << std::endl;
if (cmd == "dump-raw-keys") {
KeyValueDB::WholeSpaceIterator i = store->get_wholespace_iterator();
for (i->seek_to_first(); i->valid(); i->next()) {
std::cout << i->raw_key() << std::endl;
}
return 0;
} else if (cmd == "dump-raw-key-vals") {
KeyValueDB::WholeSpaceIterator i = store->get_wholespace_iterator();
for (i->seek_to_first(); i->valid(); i->next()) {
std::cout << i->raw_key() << std::endl;
i->value().hexdump(std::cout);
}
return 0;
} else if (cmd == "dump-objects") {
vector<ghobject_t> objects;
r = omap.list_objects(&objects);
if (r < 0) {
std::cerr << "list_objects got: " << cpp_strerror(r) << std::endl;
return r;
}
for (vector<ghobject_t>::iterator i = objects.begin();
i != objects.end();
++i) {
if (vm.count("oid") != 0 && i->hobj.oid.name != oid)
continue;
std::cout << *i << std::endl;
}
return 0;
} else if (cmd == "dump-objects-with-keys") {
vector<ghobject_t> objects;
r = omap.list_objects(&objects);
if (r < 0) {
std::cerr << "list_objects got: " << cpp_strerror(r) << std::endl;
return r;
}
for (vector<ghobject_t>::iterator i = objects.begin();
i != objects.end();
++i) {
if (vm.count("oid") != 0 && i->hobj.oid.name != oid)
continue;
std::cout << "Object: " << *i << std::endl;
ObjectMap::ObjectMapIterator j = omap.get_iterator(ghobject_t(i->hobj));
for (j->seek_to_first(); j->valid(); j->next()) {
std::cout << j->key() << std::endl;
j->value().hexdump(std::cout);
}
}
return 0;
} else if (cmd == "check" || cmd == "repair") {
ostringstream ss;
bool repair = (cmd == "repair");
r = omap.check(ss, repair, true);
if (r) {
std::cerr << ss.str() << std::endl;
if (r > 0) {
std::cerr << "check got " << r << " error(s)" << std::endl;
return 1;
}
}
std::cout << (repair ? "repair" : "check") << " succeeded" << std::endl;
return 0;
} else if (cmd == "dump-headers") {
vector<DBObjectMap::_Header> headers;
r = omap.list_object_headers(&headers);
if (r < 0) {
std::cerr << "list_object_headers got: " << cpp_strerror(r) << std::endl;
return 1;
}
for (auto i : headers)
std::cout << i << std::endl;
return 0;
} else if (cmd == "resetv2") {
omap.state.v = 2;
omap.state.legacy = false;
omap.set_state();
} else if (cmd == "compact") {
omap.compact();
return 0;
} else {
std::cerr << "Did not recognize command " << cmd << std::endl;
return 1;
}
}
| 6,624 | 30.103286 | 151 | cc |
null | ceph-main/src/tools/crushtool.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
* Copyright (C) 2014 Cloudwatt <libre.licensing@cloudwatt.com>
*
* Author: Loic Dachary <loic@dachary.org>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <fstream>
#include <type_traits>
#include "common/debug.h"
#include "common/errno.h"
#include "common/config.h"
#include "common/Formatter.h"
#include "common/ceph_argparse.h"
#include "include/stringify.h"
#include "global/global_context.h"
#include "global/global_init.h"
#include "osd/OSDMap.h"
#include "crush/CrushWrapper.h"
#include "crush/CrushCompiler.h"
#include "crush/CrushTester.h"
#include "include/ceph_assert.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_crush
using std::cerr;
using std::cout;
using std::decay_t;
using std::ifstream;
using std::ios;
using std::is_same_v;
using std::map;
using std::ofstream;
using std::pair;
using std::set;
using std::string;
using std::vector;
const char *infn = "stdin";
static int get_fd_data(int fd, bufferlist &bl)
{
uint64_t total = 0;
do {
ssize_t bytes = bl.read_fd(fd, 1024*1024);
if (bytes < 0) {
cerr << "read_fd error " << cpp_strerror(-bytes) << "\n";
return -1;
}
if (bytes == 0)
break;
total += bytes;
} while(true);
ceph_assert(bl.length() == total);
return 0;
}
////////////////////////////////////////////////////////////////////////////
// Describe, on stdout, each of the data files the CRUSH testing routine
// can emit, together with the record layout used inside every file.
void data_analysis_usage()
{
  std::cout << "data output from testing routine ...\n"
	    << " absolute_weights\n"
	    << " the decimal weight of each OSD\n"
	    << " data layout: ROW MAJOR\n"
	    << " OSD id (int), weight (int)\n"
	    << " batch_device_expected_utilization_all\n"
	    << " the expected number of objects each OSD should receive per placement batch\n"
	    << " which may be a decimal value\n"
	    << " data layout: COLUMN MAJOR\n"
	    << " round (int), objects expected on OSD 0...OSD n (float)\n"
	    << " batch_device_utilization_all\n"
	    << " the number of objects stored on each OSD during each placement round\n"
	    << " data layout: COLUMN MAJOR\n"
	    << " round (int), objects stored on OSD 0...OSD n (int)\n"
	    << " device_utilization_all\n"
	    << " the number of objects stored on each OSD at the end of placements\n"
	    << " data_layout: ROW MAJOR\n"
	    << " OSD id (int), objects stored (int), objects expected (float)\n"
	    << " device_utilization\n"
	    << " the number of objects stored on each OSD marked 'up' at the end of placements\n"
	    << " data_layout: ROW MAJOR\n"
	    << " OSD id (int), objects stored (int), objects expected (float)\n"
	    << " placement_information\n"
	    << " the map of input -> OSD\n"
	    << " data_layout: ROW MAJOR\n"
	    << " input (int), OSD's mapped (int)\n"
	    << " proportional_weights_all\n"
	    << " the proportional weight of each OSD specified in the CRUSH map\n"
	    << " data_layout: ROW MAJOR\n"
	    << " OSD id (int), proportional weight (float)\n"
	    << " proportional_weights\n"
	    << " the proportional weight of each 'up' OSD specified in the CRUSH map\n"
	    << " data_layout: ROW MAJOR\n"
	    << " OSD id (int), proportional weight (float)\n";
}
// Print the crushtool command-line help to stdout.  Options are grouped by
// the five processing stages (input/build, tunables adjustments,
// modifications, display/test, output) that run one after the other.
// Fixes two doubled-word typos in the help text ("for for", "bucket bucket").
void usage()
{
  cout << "usage: crushtool ...\n";
  cout << "\n";
  cout << "Display, modify and test a crush map\n";
  cout << "\n";
  cout << "There are five stages, running one after the other:\n";
  cout << "\n";
  cout << " - input/build\n";
  cout << " - tunables adjustments\n";
  cout << " - modifications\n";
  cout << " - display/test\n";
  cout << " - output\n";
  cout << "\n";
  cout << "Options that are not specific to a stage.\n";
  cout << "\n";
  cout << "   [--infn|-i infile]\n";
  cout << "                         read the crush map from infile\n";
  cout << "\n";
  cout << "Options for the input/build stage\n";
  cout << "\n";
  cout << "   --decompile|-d map    decompile a crush map to source\n";
  cout << "   [--outfn|-o outfile]\n";
  cout << "                         specify output for (de)compilation\n";
  cout << "   --compile|-c map.txt  compile a map from source\n";
  cout << "   --enable-unsafe-tunables\n";
  cout << "                         compile with unsafe tunables\n";
  cout << "   --build --num_osds N layer1 ...\n";
  cout << "                         build a new map, where each 'layer' is\n";
  cout << "                         'name (uniform|straw2|straw|list|tree) size'\n";
  cout << "\n";
  cout << "Options for the tunables adjustments stage\n";
  cout << "\n";
  cout << "   --set-choose-local-tries N\n";
  cout << "                         set choose local retries before re-descent\n";
  cout << "   --set-choose-local-fallback-tries N\n";
  cout << "                         set choose local retries using fallback\n";
  cout << "                         permutation before re-descent\n";
  cout << "   --set-choose-total-tries N\n";
  cout << "                         set choose total descent attempts\n";
  cout << "   --set-chooseleaf-descend-once <0|1>\n";
  cout << "                         set chooseleaf to (not) retry the recursive descent\n";
  cout << "   --set-chooseleaf-vary-r <0|1>\n";
  cout << "                         set chooseleaf to (not) vary r based on parent\n";
  cout << "   --set-chooseleaf-stable <0|1>\n";
  cout << "                         set chooseleaf firstn to (not) return stable results\n";
  cout << "\n";
  cout << "Options for the modifications stage\n";
  cout << "\n";
  cout << "   -i mapfn --add-item id weight name [--loc type name ...]\n";
  cout << "                         insert an item into the hierarchy at the\n";
  cout << "                         given location\n";
  cout << "   -i mapfn --update-item id weight name [--loc type name ...]\n";
  cout << "                         insert or move an item into the hierarchy at the\n";
  cout << "                         given location\n";
  cout << "   -i mapfn --remove-item name\n"
       << "                         remove the given item\n";
  cout << "   -i mapfn --reweight-item name weight\n";
  cout << "                         reweight a given item (and adjust ancestor\n"
       << "                         weights as needed)\n";
  cout << "   -i mapfn --add-bucket name type [--loc type name ...]\n"
       << "                         insert a bucket into the hierarchy at the given\n"
       << "                         location\n";
  cout << "   -i mapfn --move       name --loc type name ...\n"
       << "                         move the given item to specified location\n";
  cout << "   -i mapfn --reweight   recalculate all bucket weights\n";
  cout << "   -i mapfn --rebuild-class-roots\n";
  cout << "                         rebuild the per-class shadow trees (normally a no-op)\n";
  cout << "   -i mapfn --create-simple-rule name root type mode\n"
       << "                         create crush rule <name> to start from <root>,\n"
       << "                         replicate across buckets of type <type>, using\n"
       << "                         a choose mode of <firstn|indep>\n";
  cout << "   -i mapfn --create-replicated-rule name root type\n"
       << "                         create crush rule <name> to start from <root>,\n"
       << "                         replicate across buckets of type <type>\n";
  cout << "   --device-class <class>\n";
  cout << "                         use device class <class> for new rule\n";
  cout << "   -i mapfn --remove-rule name\n"
       << "                         remove the specified crush rule\n";
  cout << "\n";
  cout << "Options for the display/test stage\n";
  cout << "\n";
  cout << "   -f --format           the format of --dump, defaults to json-pretty\n";
  cout << "                         can be one of json, json-pretty, xml, xml-pretty,\n";
  cout << "                         table, table-kv, html, html-pretty\n";
  cout << "   --dump                dump the crush map\n";
  cout << "   --tree                print map summary as a tree\n";
  cout << "   --bucket-tree         print bucket map summary as a tree\n";
  cout << "   --bucket-name         specify bucket name for bucket-tree\n";
  cout << "   --check [max_id]      check if any item is referencing an unknown name/type\n";
  cout << "   -i mapfn --show-location id\n";
  cout << "                         show location for given device id\n";
  cout << "   -i mapfn --test       test a range of inputs on the map\n";
  cout << "      [--min-x x] [--max-x x] [--x x]\n";
  cout << "      [--min-rule r] [--max-rule r] [--rule r]\n";
  cout << "      [--min-rep n] [--max-rep n] [--num-rep n]\n";
  cout << "      [--pool-id n]      specifies pool id\n";
  cout << "      [--batches b]      split the CRUSH mapping into b > 1 rounds\n";
  cout << "      [--weight|-w devno weight]\n";
  cout << "                         where weight is 0 to 1.0\n";
  cout << "      [--simulate]       simulate placements using a random\n";
  cout << "                         number generator in place of the CRUSH\n";
  cout << "                         algorithm\n";
  cout << "   --show-utilization    show OSD usage\n";
  cout << "   --show-utilization-all\n";
  cout << "                         include zero weight items\n";
  cout << "   --show-statistics     show chi squared statistics\n";
  cout << "   --show-mappings       show mappings\n";
  cout << "   --show-bad-mappings   show bad mappings\n";
  cout << "   --show-choose-tries   show choose tries histogram\n";
  cout << "   --output-name name\n";
  cout << "                         prepend the data file(s) generated during the\n";
  cout << "                         testing routine with name\n";
  cout << "   --output-csv\n";
  cout << "                         export select data generated during testing routine\n";
  cout << "                         to CSV files for off-line post-processing\n";
  cout << "                         use --help-output for more information\n";
  cout << "   --reclassify          transform legacy CRUSH map buckets and rules\n";
  cout << "                         by adding classes\n";
  cout << "      --reclassify-bucket <bucket-match> <class> <default-parent>\n";
  cout << "      --reclassify-root <bucket-name> <class>\n";
  cout << "   --set-subtree-class <bucket-name> <class>\n";
  cout << "                         set class for all items beneath bucket-name\n";
  cout << "   --compare <otherfile> compare two maps using --test parameters\n";
  cout << "\n";
  cout << "Options for the output stage\n";
  cout << "\n";
  cout << "   [--outfn|-o outfile]\n";
  cout << "                         specify output for modified crush map\n";
  cout << "\n";
}
// Table mapping bucket-algorithm names (as written in a --build layer
// specification) to their CRUSH_BUCKET_* ids.  Terminated by a {0, 0}
// sentinel entry so it can be scanned with a "while (name)" loop.
struct bucket_types_t {
  const char *name;  // user-visible algorithm name
  int type;          // matching CRUSH_BUCKET_* constant
} bucket_types[] = {
  { "uniform", CRUSH_BUCKET_UNIFORM },
  { "list", CRUSH_BUCKET_LIST },
  { "straw", CRUSH_BUCKET_STRAW },
  { "straw2", CRUSH_BUCKET_STRAW2 },
  { "tree", CRUSH_BUCKET_TREE },
  { 0, 0 },  // sentinel: end of table
};
// One layer of a --build hierarchy, parsed from a
// 'name (uniform|straw2|straw|list|tree) size' triple on the command line.
struct layer_t {
  const char *name;        // bucket-name prefix for this layer
  const char *buckettype;  // algorithm name, resolved via bucket_types[]
  int size;                // items per bucket; 0 means "all remaining items"
};
// Parse a flag that is followed by one positional value per out-pointer in
// `opts` (e.g. "--add-bucket <name> <type>").  Returns false if the current
// argument is not `opt`; otherwise consumes the flag plus its values from
// `args` and returns true.  NOTE: the return value only says the flag
// matched -- parse failures (missing value, bad number) are reported by
// writing to `oss`, which the caller must check afterwards.
template<typename... Args>
bool argparse_withargs(std::vector<const char*> &args,
		       std::vector<const char*>::iterator& i,
		       std::ostream& oss,
		       const char* opt,
		       Args*... opts)
{
  if (!ceph_argparse_flag(args, i, opt, nullptr)) {
    return false;  // not our flag; args left untouched
  }
  // Parse a single positional value into one out-parameter, dispatching on
  // its pointee type: strings are taken verbatim, int/float go through the
  // strict_* parsers, which report problems via `err`.
  auto parse = [&](auto& opt) {
    if (i == args.end()) {
      oss << "expecting additional argument to " << opt;
      return false;
    }
    using opt_t = std::remove_pointer_t<decay_t<decltype(opt)>>;
    string err;
    if constexpr (std::is_same_v<opt_t, string>) {
      opt->assign(*i);
    } else if constexpr (is_same_v<opt_t, int>) {
      *opt = strict_strtol(*i, 10, &err);
    } else if constexpr (is_same_v<opt_t, float>) {
      *opt = strict_strtof(*i, &err);
    }
    // consume the token even when parsing failed, so `i` stays consistent
    i = args.erase(i);
    if (err.empty())
      return true;
    else {
      oss << err;
      return false;
    }
  };
  // Left-to-right && fold: stops consuming values at the first failure.
  // The fold's result is discarded; failure is signalled through `oss`.
  (... && parse(opts));
  return true;
}
// Create a new empty bucket named `add_name` with bucket type `add_type`
// and, if `add_loc` is non-empty, move it to that location in the
// hierarchy.  Returns 0 on success, a negative errno on failure.
int do_add_bucket(CephContext* cct,
		  const char* me,
		  CrushWrapper& crush,
		  const string& add_name,
		  const string& add_type,
		  const map<string,string>& add_loc) {
  if (crush.name_exists(add_name)) {
    cerr << me << " bucket '" << add_name << "' already exists" << std::endl;
    return -EEXIST;
  }
  const int type_id = crush.get_type_id(add_type);
  if (type_id <= 0) {
    cerr << me << " bad bucket type: " << add_type << std::endl;
    return -EINVAL;
  }
  // create an empty bucket (no items, default algorithm/hash)
  int bucket_id;
  int ret = crush.add_bucket(0, 0, CRUSH_HASH_DEFAULT, type_id, 0,
			     nullptr, nullptr, &bucket_id);
  if (ret < 0) {
    cerr << me << " unable to add bucket: " << cpp_strerror(ret) << std::endl;
    return ret;
  }
  ret = crush.set_item_name(bucket_id, add_name);
  if (ret < 0) {
    cerr << me << " bad bucket name: " << add_name << std::endl;
    return ret;
  }
  if (add_loc.empty()) {
    return 0;
  }
  if (crush.check_item_loc(cct, bucket_id, add_loc, (int*)nullptr)) {
    return 0;  // already at the requested location; nothing to move
  }
  ret = crush.move_bucket(cct, bucket_id, add_loc);
  if (ret < 0) {
    cerr << me << " error moving bucket '" << add_name << "' to " << add_loc << std::endl;
    return ret;
  }
  return 0;
}
// Move the named item (device or bucket) to the location given by `loc`.
// return 1 for no change, 0 for successful change, negative on error
int do_move_item(CephContext* cct,
		 const char *me,
		 CrushWrapper& crush,
		 const string& name,
		 const map<string,string>& loc)
{
  if (!crush.name_exists(name)) {
    cerr << me << " item '" << name << "' does not exist" << std::endl;
    return -ENOENT;
  }
  const int id = crush.get_item_id(name);
  if (loc.empty()) {
    cerr << me << " expecting additional --loc argument to --move" << std::endl;
    return -EINVAL;
  }
  if (crush.check_item_loc(cct, id, loc, (int*)nullptr)) {
    // nothing to do: the item is already at the requested location
    cerr << me << " item '" << name << "' already at " << loc << std::endl;
    return 1;
  }
  if (id < 0) {
    // negative ids are buckets; they are moved directly
    return crush.move_bucket(cct, id, loc);
  }
  // Devices: create_or_move_item() returns 0 for "no change" and 1 for
  // "moved", so translate into this function's 1/0 convention.
  const int r = crush.create_or_move_item(cct, id, 0, name, loc);
  if (r == 0)
    return 1;
  if (r == 1)
    return 0;
  return r;
}
int main(int argc, const char **argv)
{
auto args = argv_to_vec(argc, argv);
if (args.empty()) {
cerr << argv[0] << ": -h or --help for usage" << std::endl;
exit(1);
}
if (ceph_argparse_need_usage(args)) {
usage();
exit(0);
}
const char *me = argv[0];
std::string infn, srcfn, outfn, add_name, add_type, remove_name,
reweight_name, bucket_name;
std::string move_name;
bool compile = false;
bool decompile = false;
bool check = false;
int max_id = -1;
bool test = false;
bool display = false;
bool tree = false;
bool bucket_tree = false;
string dump_format = "json-pretty";
bool dump = false;
int full_location = -1;
bool write_to_file = false;
int verbose = 0;
bool unsafe_tunables = false;
bool rebuild_class_roots = false;
bool reweight = false;
int add_item = -1;
bool add_bucket = false;
bool update_item = false;
bool move_item = false;
bool add_rule = false;
std::string rule_name, rule_root, rule_type, rule_mode, rule_device_class;
bool del_rule = false;
float add_weight = 0;
map<string,string> add_loc;
float reweight_weight = 0;
bool adjust = false;
int build = 0;
int num_osds =0;
vector<layer_t> layers;
int choose_local_tries = -1;
int choose_local_fallback_tries = -1;
int choose_total_tries = -1;
int chooseleaf_descend_once = -1;
int chooseleaf_vary_r = -1;
int chooseleaf_stable = -1;
int straw_calc_version = -1;
int allowed_bucket_algs = -1;
bool reclassify = false;
map<string,pair<string,string>> reclassify_bucket; // %suffix or prefix% -> class, default_root
map<string,string> reclassify_root; // bucket -> class
map<string,string> set_subtree_class; // bucket -> class
string compare;
CrushWrapper crush;
CrushTester tester(crush, cout);
// we use -c, don't confuse the generic arg parsing
// only parse arguments from CEPH_ARGS, if in the environment
vector<const char *> empty_args;
auto cct = global_init(NULL, empty_args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
// crushtool times out occasionally when quits. so do not
// release the g_ceph_context.
cct->get();
common_init_finish(g_ceph_context);
int x;
float y;
long long z;
std::string val;
std::ostringstream err;
int tmp;
for (std::vector<const char*>::iterator i = args.begin(); i != args.end(); ) {
if (ceph_argparse_double_dash(args, i)) {
break;
} else if (ceph_argparse_witharg(args, i, &val, "-d", "--decompile", (char*)NULL)) {
infn = val;
decompile = true;
} else if (ceph_argparse_witharg(args, i, &val, "-i", "--infn", (char*)NULL)) {
infn = val;
} else if (ceph_argparse_witharg(args, i, &val, "-o", "--outfn", (char*)NULL)) {
outfn = val;
} else if (ceph_argparse_flag(args, i, "-v", "--verbose", (char*)NULL)) {
verbose += 1;
} else if (ceph_argparse_witharg(args, i, &val, "--compare", (char*)NULL)) {
compare = val;
} else if (ceph_argparse_flag(args, i, "--reclassify", (char*)NULL)) {
reclassify = true;
} else if (ceph_argparse_witharg(args, i, &val, "--reclassify-bucket",
(char*)NULL)) {
if (i == args.end()) {
cerr << "expecting additional argument" << std::endl;
return EXIT_FAILURE;
}
string c = *i;
i = args.erase(i);
if (i == args.end()) {
cerr << "expecting additional argument" << std::endl;
return EXIT_FAILURE;
}
reclassify_bucket[val] = make_pair(c, *i);
i = args.erase(i);
} else if (ceph_argparse_witharg(args, i, &val, "--reclassify-root",
(char*)NULL)) {
if (i == args.end()) {
cerr << "expecting additional argument" << std::endl;
return EXIT_FAILURE;
}
reclassify_root[val] = *i;
i = args.erase(i);
} else if (ceph_argparse_witharg(args, i, &val, "--set-subtree-class",
(char*)NULL)) {
if (i == args.end()) {
cerr << "expecting additional argument" << std::endl;
return EXIT_FAILURE;
}
set_subtree_class[val] = *i;
i = args.erase(i);
} else if (ceph_argparse_flag(args, i, "--tree", (char*)NULL)) {
tree = true;
} else if (ceph_argparse_flag(args, i, "--bucket-tree", (char*)NULL)) {
bucket_tree = true;
} else if (ceph_argparse_witharg(args, i, &val, "-b", "--bucket-name", (char*)NULL)) {
bucket_name = val;
} else if (ceph_argparse_witharg(args, i, &val, "-f", "--format", (char*)NULL)) {
dump_format = val;
} else if (ceph_argparse_flag(args, i, "--dump", (char*)NULL)) {
dump = true;
} else if (ceph_argparse_flag(args, i, "--show_utilization", (char*)NULL)) {
display = true;
tester.set_output_utilization(true);
} else if (ceph_argparse_flag(args, i, "--show_utilization_all", (char*)NULL)) {
display = true;
tester.set_output_utilization_all(true);
} else if (ceph_argparse_flag(args, i, "--show_statistics", (char*)NULL)) {
display = true;
tester.set_output_statistics(true);
} else if (ceph_argparse_flag(args, i, "--show_mappings", (char*)NULL)) {
display = true;
tester.set_output_mappings(true);
} else if (ceph_argparse_flag(args, i, "--show_bad_mappings", (char*)NULL)) {
display = true;
tester.set_output_bad_mappings(true);
} else if (ceph_argparse_flag(args, i, "--show_choose_tries", (char*)NULL)) {
display = true;
tester.set_output_choose_tries(true);
} else if (ceph_argparse_witharg(args, i, &val, "-c", "--compile", (char*)NULL)) {
srcfn = val;
compile = true;
} else if (ceph_argparse_witharg(args, i, &max_id, err, "--check", (char*)NULL)) {
check = true;
} else if (ceph_argparse_flag(args, i, "-t", "--test", (char*)NULL)) {
test = true;
} else if (ceph_argparse_witharg(args, i, &full_location, err, "--show-location", (char*)NULL)) {
} else if (ceph_argparse_flag(args, i, "-s", "--simulate", (char*)NULL)) {
tester.set_random_placement();
} else if (ceph_argparse_flag(args, i, "--enable-unsafe-tunables", (char*)NULL)) {
unsafe_tunables = true;
} else if (ceph_argparse_witharg(args, i, &choose_local_tries, err,
"--set_choose_local_tries", (char*)NULL)) {
adjust = true;
} else if (ceph_argparse_witharg(args, i, &choose_local_fallback_tries, err,
"--set_choose_local_fallback_tries", (char*)NULL)) {
adjust = true;
} else if (ceph_argparse_witharg(args, i, &choose_total_tries, err,
"--set_choose_total_tries", (char*)NULL)) {
adjust = true;
} else if (ceph_argparse_witharg(args, i, &chooseleaf_descend_once, err,
"--set_chooseleaf_descend_once", (char*)NULL)) {
adjust = true;
} else if (ceph_argparse_witharg(args, i, &chooseleaf_vary_r, err,
"--set_chooseleaf_vary_r", (char*)NULL)) {
adjust = true;
} else if (ceph_argparse_witharg(args, i, &chooseleaf_stable, err,
"--set_chooseleaf_stable", (char*)NULL)) {
adjust = true;
} else if (ceph_argparse_witharg(args, i, &straw_calc_version, err,
"--set_straw_calc_version", (char*)NULL)) {
adjust = true;
} else if (ceph_argparse_witharg(args, i, &allowed_bucket_algs, err,
"--set_allowed_bucket_algs", (char*)NULL)) {
adjust = true;
} else if (ceph_argparse_flag(args, i, "--reweight", (char*)NULL)) {
reweight = true;
} else if (ceph_argparse_flag(args, i, "--rebuild-class-roots", (char*)NULL)) {
rebuild_class_roots = true;
} else if (ceph_argparse_witharg(args, i, &add_item, err, "--add_item", (char*)NULL)) {
if (!err.str().empty()) {
cerr << err.str() << std::endl;
return EXIT_FAILURE;
}
if (i == args.end()) {
cerr << "expecting additional argument to --add-item" << std::endl;
return EXIT_FAILURE;
}
add_weight = atof(*i);
i = args.erase(i);
if (i == args.end()) {
cerr << "expecting additional argument to --add-item" << std::endl;
return EXIT_FAILURE;
}
add_name.assign(*i);
i = args.erase(i);
} else if (ceph_argparse_witharg(args, i, &add_item, err, "--update_item", (char*)NULL)) {
update_item = true;
if (!err.str().empty()) {
cerr << err.str() << std::endl;
return EXIT_FAILURE;
}
if (i == args.end()) {
cerr << "expecting additional argument to --update-item" << std::endl;
return EXIT_FAILURE;
}
add_weight = atof(*i);
i = args.erase(i);
if (i == args.end()) {
cerr << "expecting additional argument to --update-item" << std::endl;
return EXIT_FAILURE;
}
add_name.assign(*i);
i = args.erase(i);
} else if (argparse_withargs(args, i, err, "--add-bucket",
&add_name, &add_type)) {
if (!err.str().empty()) {
cerr << err.str() << std::endl;
return EXIT_FAILURE;
}
add_bucket = true;
} else if (argparse_withargs(args, i, err, "--move",
&move_name)) {
if (!err.str().empty()) {
cerr << err.str() << std::endl;
return EXIT_FAILURE;
}
move_item = true;
} else if (ceph_argparse_witharg(args, i, &val, err, "--create-simple-rule", (char*)NULL)) {
rule_name.assign(val);
if (!err.str().empty()) {
cerr << err.str() << std::endl;
return EXIT_FAILURE;
}
if (i == args.end()) {
cerr << "expecting additional argument to --create-simple-rule" << std::endl;
return EXIT_FAILURE;
}
rule_root.assign(*i);
i = args.erase(i);
if (i == args.end()) {
cerr << "expecting additional argument to --create-simple-rule" << std::endl;
return EXIT_FAILURE;
}
rule_type.assign(*i);
i = args.erase(i);
if (i == args.end()) {
cerr << "expecting additional argument to --create-simple-rule" << std::endl;
return EXIT_FAILURE;
}
rule_mode.assign(*i);
i = args.erase(i);
cout << "--create-simple-rule:"
<< " name=" << rule_name
<< " root=" << rule_root
<< " type=" << rule_type
<< " mode=" << rule_mode
<< std::endl;
add_rule = true;
} else if (ceph_argparse_witharg(args, i, &val, err, "--create-replicated-rule", (char*)NULL)) {
rule_name.assign(val);
if (!err.str().empty()) {
cerr << err.str() << std::endl;
return EXIT_FAILURE;
}
if (i == args.end()) {
cerr << "expecting additional argument to --create-replicated-rule" << std::endl;
return EXIT_FAILURE;
}
rule_root.assign(*i);
i = args.erase(i);
if (i == args.end()) {
cerr << "expecting additional argument to --create-replicated-rule" << std::endl;
return EXIT_FAILURE;
}
rule_type.assign(*i);
i = args.erase(i);
rule_mode = "firstn";
cout << "--create-replicated-rule:"
<< " name=" << rule_name
<< " root=" << rule_root
<< " type=" << rule_type
<< std::endl;
add_rule = true;
} else if (ceph_argparse_witharg(args, i, &val, "--device-class", (char*)NULL)) {
rule_device_class.assign(val);
if (!err.str().empty()) {
cerr << err.str() << std::endl;
return EXIT_FAILURE;
}
} else if (ceph_argparse_witharg(args, i, &val, "--remove-rule", (char*)NULL)) {
rule_name.assign(val);
if (!err.str().empty()) {
cerr << err.str() << std::endl;
return EXIT_FAILURE;
}
del_rule = true;
} else if (ceph_argparse_witharg(args, i, &val, "--loc", (char*)NULL)) {
std::string type(val);
if (i == args.end()) {
cerr << "expecting additional argument to --loc" << std::endl;
return EXIT_FAILURE;
}
std::string name(*i);
i = args.erase(i);
add_loc[type] = name;
} else if (ceph_argparse_flag(args, i, "--output-csv", (char*)NULL)) {
write_to_file = true;
tester.set_output_data_file(true);
tester.set_output_csv(true);
} else if (ceph_argparse_flag(args, i, "--help-output", (char*)NULL)) {
data_analysis_usage();
return EXIT_SUCCESS;
} else if (ceph_argparse_witharg(args, i, &val, "--output-name", (char*)NULL)) {
std::string name(val);
if (i == args.end()) {
cerr << "expecting additional argument to --output-name" << std::endl;
return EXIT_FAILURE;
}
else {
tester.set_output_data_file_name(name + "-");
}
} else if (ceph_argparse_witharg(args, i, &val, "--remove_item", (char*)NULL)) {
remove_name = val;
} else if (ceph_argparse_witharg(args, i, &val, "--reweight_item", (char*)NULL)) {
reweight_name = val;
if (i == args.end()) {
cerr << "expecting additional argument to --reweight-item" << std::endl;
return EXIT_FAILURE;
}
reweight_weight = atof(*i);
i = args.erase(i);
} else if (ceph_argparse_flag(args, i, "--build", (char*)NULL)) {
build = true;
} else if (ceph_argparse_witharg(args, i, &num_osds, err, "--num_osds", (char*)NULL)) {
if (!err.str().empty()) {
cerr << err.str() << std::endl;
return EXIT_FAILURE;
}
} else if (ceph_argparse_witharg(args, i, &x, err, "--num_rep", (char*)NULL)) {
if (!err.str().empty()) {
cerr << err.str() << std::endl;
return EXIT_FAILURE;
}
tester.set_num_rep(x);
} else if (ceph_argparse_witharg(args, i, &x, err, "--min_rep", (char*)NULL)) {
if (!err.str().empty()) {
cerr << err.str() << std::endl;
return EXIT_FAILURE;
}
tester.set_min_rep(x);
} else if (ceph_argparse_witharg(args, i, &x, err, "--max_rep", (char*)NULL)) {
if (!err.str().empty()) {
cerr << err.str() << std::endl;
return EXIT_FAILURE;
}
tester.set_max_rep(x);
} else if (ceph_argparse_witharg(args, i, &x, err, "--max_x", (char*)NULL)) {
if (!err.str().empty()) {
cerr << err.str() << std::endl;
return EXIT_FAILURE;
}
tester.set_max_x(x);
} else if (ceph_argparse_witharg(args, i, &x, err, "--min_x", (char*)NULL)) {
if (!err.str().empty()) {
cerr << err.str() << std::endl;
return EXIT_FAILURE;
}
tester.set_min_x(x);
} else if (ceph_argparse_witharg(args, i, &z, err, "--pool_id", (char*)NULL)) {
if (!err.str().empty()) {
cerr << err.str() << std::endl;
return EXIT_FAILURE;
}
tester.set_pool_id(z);
} else if (ceph_argparse_witharg(args, i, &x, err, "--x", (char*)NULL)) {
if (!err.str().empty()) {
cerr << err.str() << std::endl;
return EXIT_FAILURE;
}
tester.set_x(x);
} else if (ceph_argparse_witharg(args, i, &x, err, "--max_rule", (char*)NULL)) {
if (!err.str().empty()) {
cerr << err.str() << std::endl;
return EXIT_FAILURE;
}
tester.set_max_rule(x);
} else if (ceph_argparse_witharg(args, i, &x, err, "--min_rule", (char*)NULL)) {
if (!err.str().empty()) {
cerr << err.str() << std::endl;
return EXIT_FAILURE;
}
tester.set_min_rule(x);
} else if (ceph_argparse_witharg(args, i, &x, err, "--rule", (char*)NULL)) {
if (!err.str().empty()) {
cerr << err.str() << std::endl;
return EXIT_FAILURE;
}
tester.set_rule(x);
} else if (ceph_argparse_witharg(args, i, &x, err, "--batches", (char*)NULL)) {
if (!err.str().empty()) {
cerr << err.str() << std::endl;
return EXIT_FAILURE;
}
tester.set_batches(x);
} else if (ceph_argparse_witharg(args, i, &y, err, "--mark-down-ratio", (char*)NULL)) {
if (!err.str().empty()) {
cerr << err.str() << std::endl;
return EXIT_FAILURE;
}
tester.set_device_down_ratio(y);
} else if (ceph_argparse_witharg(args, i, &y, err, "--mark-down-bucket-ratio", (char*)NULL)) {
if (!err.str().empty()) {
cerr << err.str() << std::endl;
return EXIT_FAILURE;
}
tester.set_bucket_down_ratio(y);
} else if (ceph_argparse_witharg(args, i, &tmp, err, "--weight", (char*)NULL)) {
if (!err.str().empty()) {
cerr << err.str() << std::endl;
return EXIT_FAILURE;
}
int dev = tmp;
if (i == args.end()) {
cerr << "expecting additional argument to --weight" << std::endl;
return EXIT_FAILURE;
}
float f = atof(*i);
i = args.erase(i);
tester.set_device_weight(dev, f);
}
else {
++i;
}
}
if (test && !check && !display && !write_to_file && compare.empty()) {
cerr << "WARNING: no output selected; use --output-csv or --show-X" << std::endl;
}
if (decompile + compile + build > 1) {
cerr << "cannot specify more than one of compile, decompile, and build" << std::endl;
return EXIT_FAILURE;
}
if (!check && !compile && !decompile && !build && !test && !reweight && !adjust && !tree && !dump &&
add_item < 0 && !add_bucket && !move_item && !add_rule && !del_rule && full_location < 0 &&
!bucket_tree &&
!reclassify && !rebuild_class_roots &&
compare.empty() &&
remove_name.empty() && reweight_name.empty()) {
cerr << "no action specified; -h for help" << std::endl;
return EXIT_FAILURE;
}
if ((!build) && (!args.empty())) {
cerr << "unrecognized arguments: " << args << std::endl;
return EXIT_FAILURE;
}
else {
if ((args.size() % 3) != 0U) {
cerr << "remaining args: " << args << std::endl;
cerr << "layers must be specified with 3-tuples of (name, buckettype, size)"
<< std::endl;
return EXIT_FAILURE;
}
for (size_t j = 0; j < args.size(); j += 3) {
layer_t l;
l.name = args[j];
l.buckettype = args[j+1];
l.size = atoi(args[j+2]);
layers.push_back(l);
}
}
/*
if (outfn) cout << "outfn " << outfn << std::endl;
if (cinfn) cout << "cinfn " << cinfn << std::endl;
if (dinfn) cout << "dinfn " << dinfn << std::endl;
*/
bool modified = false;
// input ----
if (!infn.empty()) {
bufferlist bl;
std::string error;
int r = 0;
if (infn == "-") {
if (isatty(STDIN_FILENO)) {
cerr << "stdin must not be from a tty" << std::endl;
return EXIT_FAILURE;
}
r = get_fd_data(STDIN_FILENO, bl);
if (r < 0) {
cerr << "error reading data from STDIN" << std::endl;
return EXIT_FAILURE;
}
} else {
r = bl.read_file(infn.c_str(), &error);
if (r < 0) {
cerr << me << ": error reading '" << infn << "': "
<< error << std::endl;
return EXIT_FAILURE;
}
}
auto p = bl.cbegin();
try {
crush.decode(p);
} catch(...) {
cerr << me << ": unable to decode " << infn << std::endl;
return EXIT_FAILURE;
}
}
if (compile) {
crush.create();
// read the file
ifstream in(srcfn.c_str());
if (!in.is_open()) {
cerr << "input file " << srcfn << " not found" << std::endl;
return -ENOENT;
}
CrushCompiler cc(crush, cerr, verbose);
if (unsafe_tunables)
cc.enable_unsafe_tunables();
int r = cc.compile(in, srcfn.c_str());
if (r < 0)
return EXIT_FAILURE;
modified = true;
}
if (build) {
if (layers.empty()) {
cerr << me << ": must specify at least one layer" << std::endl;
return EXIT_FAILURE;
}
crush.create();
vector<int> lower_items;
vector<int> lower_weights;
crush.set_max_devices(num_osds);
for (int i=0; i<num_osds; i++) {
lower_items.push_back(i);
lower_weights.push_back(0x10000);
crush.set_item_name(i, "osd." + stringify(i));
}
crush.set_type_name(0, "osd");
int type = 1;
for (vector<layer_t>::iterator p = layers.begin(); p != layers.end(); ++p, type++) {
layer_t &l = *p;
dout(2) << "layer " << type
<< " " << l.name
<< " bucket type " << l.buckettype
<< " " << l.size
<< dendl;
crush.set_type_name(type, l.name);
int buckettype = -1;
for (int i = 0; bucket_types[i].name; i++)
if (l.buckettype && strcmp(l.buckettype, bucket_types[i].name) == 0) {
buckettype = bucket_types[i].type;
break;
}
if (buckettype < 0) {
cerr << "unknown bucket type '" << l.buckettype << "'" << std::endl;
return EXIT_FAILURE;
}
// build items
vector<int> cur_items;
vector<int> cur_weights;
unsigned lower_pos = 0; // lower pos
dout(2) << "lower_items " << lower_items << dendl;
dout(2) << "lower_weights " << lower_weights << dendl;
int i = 0;
while (1) {
if (lower_pos == lower_items.size())
break;
int items[num_osds];
int weights[num_osds];
int weight = 0;
int j;
for (j=0; j<l.size || l.size==0; j++) {
if (lower_pos == lower_items.size())
break;
items[j] = lower_items[lower_pos];
weights[j] = lower_weights[lower_pos];
weight += weights[j];
lower_pos++;
dout(2) << " item " << items[j] << " weight " << weights[j] << dendl;
}
int id;
int r = crush.add_bucket(0, buckettype, CRUSH_HASH_DEFAULT, type, j, items, weights, &id);
if (r < 0) {
cerr << " Couldn't add bucket: " << cpp_strerror(r) << std::endl;
return r;
}
char format[20];
format[sizeof(format)-1] = '\0';
if (l.size)
snprintf(format, sizeof(format)-1, "%s%%d", l.name);
else
strncpy(format, l.name, sizeof(format)-1);
char name[20];
snprintf(name, sizeof(name), format, i);
crush.set_item_name(id, name);
dout(2) << " in bucket " << id << " '" << name << "' size " << j << " weight " << weight << dendl;
cur_items.push_back(id);
cur_weights.push_back(weight);
i++;
}
lower_items.swap(cur_items);
lower_weights.swap(cur_weights);
}
string root = layers.back().size == 0 ? layers.back().name :
string(layers.back().name) + "0";
{
set<int> roots;
crush.find_roots(&roots);
if (roots.size() > 1) {
cerr << "The crush rules will use the root " << root << "\n"
<< "and ignore the others.\n"
<< "There are " << roots.size() << " roots, they can be\n"
<< "grouped into a single root by appending something like:\n"
<< " root straw 0\n"
<< std::endl;
}
}
if (OSDMap::build_simple_crush_rules(g_ceph_context, crush, root, &cerr))
return EXIT_FAILURE;
modified = true;
}
// mutate ----
if (choose_local_tries >= 0) {
crush.set_choose_local_tries(choose_local_tries);
modified = true;
}
if (choose_local_fallback_tries >= 0) {
crush.set_choose_local_fallback_tries(choose_local_fallback_tries);
modified = true;
}
if (choose_total_tries >= 0) {
crush.set_choose_total_tries(choose_total_tries);
modified = true;
}
if (chooseleaf_descend_once >= 0) {
crush.set_chooseleaf_descend_once(chooseleaf_descend_once);
modified = true;
}
if (chooseleaf_vary_r >= 0) {
crush.set_chooseleaf_vary_r(chooseleaf_vary_r);
modified = true;
}
if (chooseleaf_stable >= 0) {
crush.set_chooseleaf_stable(chooseleaf_stable);
modified = true;
}
if (straw_calc_version >= 0) {
crush.set_straw_calc_version(straw_calc_version);
modified = true;
}
if (allowed_bucket_algs >= 0) {
crush.set_allowed_bucket_algs(allowed_bucket_algs);
modified = true;
}
if (!reweight_name.empty()) {
cout << me << " reweighting item " << reweight_name << " to " << reweight_weight << std::endl;
int r;
if (!crush.name_exists(reweight_name)) {
cerr << " name " << reweight_name << " dne" << std::endl;
r = -ENOENT;
} else {
int item = crush.get_item_id(reweight_name);
r = crush.adjust_item_weightf(g_ceph_context, item, reweight_weight);
}
if (r >= 0)
modified = true;
else {
cerr << me << " " << cpp_strerror(r) << std::endl;
return r;
}
}
if (!remove_name.empty()) {
cout << me << " removing item " << remove_name << std::endl;
int r;
if (!crush.name_exists(remove_name)) {
cerr << " name " << remove_name << " dne" << std::endl;
r = -ENOENT;
} else {
int remove_item = crush.get_item_id(remove_name);
r = crush.remove_item(g_ceph_context, remove_item, false);
}
if (r == 0)
modified = true;
else {
cerr << me << " " << cpp_strerror(r) << std::endl;
return r;
}
}
if (add_item >= 0) {
int r;
if (update_item) {
r = crush.update_item(g_ceph_context, add_item, add_weight, add_name.c_str(), add_loc);
} else {
r = crush.insert_item(g_ceph_context, add_item, add_weight, add_name.c_str(), add_loc);
}
if (r >= 0) {
modified = true;
} else {
cerr << me << " " << cpp_strerror(r) << std::endl;
return r;
}
}
if (add_bucket) {
if (int r = do_add_bucket(cct.get(), me, crush, add_name, add_type, add_loc); !r) {
modified = true;
} else {
return r;
}
}
if (move_item) {
if (int r = do_move_item(cct.get(), me, crush, move_name, add_loc); !r) {
modified = true;
} else {
return r;
}
}
if (add_rule) {
if (crush.rule_exists(rule_name)) {
cerr << "rule " << rule_name << " already exists" << std::endl;
return EXIT_FAILURE;
}
int r = crush.add_simple_rule(rule_name, rule_root, rule_type,
rule_device_class,
rule_mode, pg_pool_t::TYPE_REPLICATED, &err);
if (r < 0) {
cerr << err.str() << std::endl;
return EXIT_FAILURE;
}
modified = true;
}
if (del_rule) {
if (!crush.rule_exists(rule_name)) {
cerr << "rule " << rule_name << " does not exist" << std::endl;
return 0;
}
int ruleno = crush.get_rule_id(rule_name);
ceph_assert(ruleno >= 0);
int r = crush.remove_rule(ruleno);
if (r < 0) {
cerr << "fail to remove rule " << rule_name << std::endl;
return EXIT_FAILURE;
}
modified = true;
}
if (reweight) {
crush.reweight(g_ceph_context);
modified = true;
}
if (rebuild_class_roots) {
int r = crush.rebuild_roots_with_classes(g_ceph_context);
if (r < 0) {
cerr << "failed to rebuidl roots with classes" << std::endl;
return EXIT_FAILURE;
}
modified = true;
}
for (auto& i : set_subtree_class) {
crush.set_subtree_class(i.first, i.second);
modified = true;
}
if (reclassify) {
int r = crush.reclassify(
g_ceph_context,
cout,
reclassify_root,
reclassify_bucket);
if (r < 0) {
cerr << "failed to reclassify map" << std::endl;
return EXIT_FAILURE;
}
modified = true;
}
// display ---
if (full_location >= 0) {
map<string, string> loc = crush.get_full_location(full_location);
for (map<string,string>::iterator p = loc.begin();
p != loc.end();
++p) {
cout << p->first << "\t" << p->second << std::endl;
}
}
if (tree) {
crush.dump_tree(&cout, NULL, {}, true);
}
if (bucket_tree) {
if (bucket_name.empty()) {
cerr << ": error bucket_name is empty" << std::endl;
}
else {
set<int> osd_ids;
crush.get_leaves(bucket_name.c_str(), &osd_ids);
for (auto &id : osd_ids) {
cout << "osd." << id << std::endl;
}
}
}
if (dump) {
boost::scoped_ptr<Formatter> f(Formatter::create(dump_format, "json-pretty", "json-pretty"));
f->open_object_section("crush_map");
crush.dump(f.get());
f->close_section();
f->flush(cout);
cout << "\n";
}
if (decompile) {
CrushCompiler cc(crush, cerr, verbose);
if (!outfn.empty()) {
ofstream o;
o.open(outfn.c_str(), ios::out | ios::binary | ios::trunc);
if (!o.is_open()) {
cerr << me << ": error writing '" << outfn << "'" << std::endl;
return EXIT_FAILURE;
}
cc.decompile(o);
o.close();
} else {
cc.decompile(cout);
}
}
if (check) {
if (max_id >= 0) {
if (!tester.check_name_maps(max_id)) {
return EXIT_FAILURE;
}
}
}
if (test) {
if (tester.get_output_utilization_all() ||
tester.get_output_utilization())
tester.set_output_statistics(true);
int r = tester.test(cct->get());
if (r < 0)
return EXIT_FAILURE;
}
if (compare.size()) {
CrushWrapper crush2;
bufferlist in;
string error;
int r = in.read_file(compare.c_str(), &error);
if (r < 0) {
cerr << me << ": error reading '" << compare << "': "
<< error << std::endl;
return EXIT_FAILURE;
}
auto p = in.cbegin();
try {
crush2.decode(p);
} catch(...) {
cerr << me << ": unable to decode " << compare << std::endl;
return EXIT_FAILURE;
}
r = tester.compare(crush2);
if (r < 0)
return EXIT_FAILURE;
}
// output ---
if (modified) {
crush.finalize();
if (outfn.empty()) {
cout << me << " successfully built or modified map. Use '-o <file>' to write it out." << std::endl;
} else {
bufferlist bl;
crush.encode(bl, CEPH_FEATURES_SUPPORTED_DEFAULT);
int r = bl.write_file(outfn.c_str());
if (r < 0) {
cerr << me << ": error writing '" << outfn << "': " << cpp_strerror(r) << std::endl;
return EXIT_FAILURE;
}
if (verbose)
cout << "wrote crush map to " << outfn << std::endl;
}
}
return 0;
}
/*
* Local Variables:
* compile-command: "cd .. ; make crushtool && test/run-cli-tests"
* End:
*/
| 44,467 | 32.06171 | 108 | cc |
null | ceph-main/src/tools/histogram_dump.py | #!/usr/bin/env python3
# coding: utf-8
#
# Ceph - scalable distributed file system
#
# Copyright (C) 2017 OVH
# Copyright (C) 2020 Marc Schöchlin <ms-github@256bit.org>
#
# This is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License version 2, as published by the Free Software
# Foundation. See file COPYING.
#
import json
import subprocess
import time
import os
import argparse
import glob
import sys
import textwrap
import datetime
def shorten(val):
    """Render a numeric value with a metric suffix (k/M/G/T).

    Strings pass through unchanged, and values of 10**15 or more are
    returned as-is (no suffix large enough).
    """
    if isinstance(val, str):
        return val
    for exponent, suffix in ((3, ''), (6, 'k'), (9, 'M'), (12, 'G'), (15, 'T')):
        if val < 10 ** exponent:
            scaled = int(val / (10 ** (exponent - 3)))
            return "{}{}".format(scaled, suffix)
    return val
def create_histogram(sockets, counter, last, seconds, batch):
    """Collect one histogram sample from each admin socket and render it.

    Polls ``ceph --admin-daemon <socket> perf histogram dump`` for every
    socket, sums the 2-D counter matrices element-wise, and renders the
    per-cell delta against ``last`` (the matrix returned by the previous
    call).

    Returns a tuple ``(current, content)`` where ``current`` is the combined
    matrix to pass back in as ``last`` next time and ``content`` is the
    rendered text.  If any socket cannot be queried, returns ``last``
    unchanged together with an error message instead.
    """
    current_datasets = {}
    json_d = {}
    for socket in sockets:
        try:
            out = subprocess.check_output(
                "ceph --admin-daemon {} perf histogram dump".format(socket),
                shell=True)
            json_d = json.loads(out.decode('utf-8'))
        except Exception as e:
            # bail out on the first unreachable socket, keeping the
            # caller's previous sample intact
            return (last,
                    "Couldn't connect to admin socket, result: \n{}".format(e))
        current_datasets[socket] = json_d['osd'][counter]['values']
    # NOTE(review): axes are taken from the last socket polled; this assumes
    # every socket reports an identically-shaped histogram -- confirm for
    # mixed OSD versions.
    axes = json_d['osd'][counter]['axes']
    if batch:
        content = "{} : Counter: {} for {}\n\n\n".format(
            datetime.datetime.now().isoformat(), counter,", ".join(sockets))
    else:
        content = "Counter: {} for {}\n(create statistics every {} seconds)\n\n".format(
            counter,", ".join(sockets),seconds)
    # header rows: lower and upper bounds of the second (column) axis
    content += "{}:\n".format(axes[1]['name'])
    for r in axes[1]['ranges']:
        content += "{0: >4} ".format(
            shorten(r['min']) if 'min' in r else '')
    content += "\n"
    for r in axes[1]['ranges']:
        content += "{0: >4} ".format(
            shorten(r['max']) if 'max' in r else '')
    content += "\n"
    # right-align the row-axis caption past the column headers (5 chars
    # per column plus the range labels)
    content += ("{0: >"+str(len(axes[1]['ranges'])*5+14)+"}:\n").format(
        axes[0]['name'])
    # ANSI red highlight for changed cells; disabled in batch mode
    if batch:
        COL = ''
        ENDC = ''
    else:
        COL = '\033[91m'
        ENDC = '\033[0m'
    current = []
    # initialize the combined matrix with zeros
    # NOTE(review): shapes it after ``current_datasets[socket]`` using the
    # loop variable left over from the polling loop (i.e. the last socket);
    # works only under the same identical-shape assumption as above.
    for i in range(len(current_datasets[socket])):
        current.append([])
        for j in range(len(current_datasets[socket][i])):
            current[i].append(0)
    # combine data: element-wise sum across all sockets
    for socket, data in current_datasets.items():
        for i in range(len(data)):
            for j in range(len(data[i])):
                current[i][j] += data[i][j]
    # render the delta versus the previous sample; '-' when no previous
    # value exists (first iteration, or the histogram shape changed)
    for i in range(len(current)):
        for j in range(len(current[i])):
            try:
                diff = current[i][j] - last[i][j]
            except IndexError:
                diff = '-'
            if diff != "-" and diff != 0:
                content += "{0}{1: >4}{2} ".format(COL,shorten(diff),ENDC)
            else:
                content += "{0: >4} ".format(shorten(diff))
        r = axes[0]['ranges'][i]
        content += "{0: >6} : {1}\n".format(
            shorten(r['min']) if 'min' in r else '',
            shorten(r['max']) if 'max' in r else '')
    return (current, content)
def loop_print(sockets, counter, loop_seconds, batch):
    """Render the histogram every ``loop_seconds`` until Ctrl-C."""
    previous = []
    try:
        while True:
            previous, rendered = create_histogram(
                sockets, counter, previous, loop_seconds, batch)
            if not batch:
                # clear the terminal before redrawing
                print(chr(27) + "[2J")
            print(rendered)
            time.sleep(loop_seconds)
    except KeyboardInterrupt:
        print("...interupted")
        sys.exit(0)
def main():
    """Parse command-line options and start the statistics loop."""
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Continuously display ceph performance histogram for selected osd operations')
    arg_parser.add_argument(
        '--asok',
        type=str,
        default=['/var/run/ceph/*.asok'],
        nargs='+',
        help='Path to asok file, you can use wildcards')
    counter_help = textwrap.dedent('''\
            Specify name of the counter to calculate statistics
            see "ceph --admin-daemon /var/run/ceph/<osd>.asok perf histogram dump"
            ''')
    arg_parser.add_argument(
        '--counter',
        type=str,
        help=counter_help,
        default='op_w_latency_in_bytes_histogram')
    arg_parser.add_argument(
        '--batch',
        help='Disable colors and add timestamps',
        action='store_true',
    )
    arg_parser.add_argument(
        '--loop_seconds',
        type=int,
        help='Cycle time in seconds for statistics generation',
        default=5)
    args = arg_parser.parse_args()
    if not sys.stdout.isatty():
        # colors and screen clearing make no sense without a tty
        print("Not running with a tty, automatically switching to batch mode")
        args.batch = True
    # expand wildcards; later patterns are prepended, preserving the
    # historical ordering of the socket list
    socket_paths = []
    for pattern in args.asok:
        socket_paths = glob.glob(pattern) + socket_paths
    if not socket_paths:
        print("no suitable socket at {}".format(args.asok))
        sys.exit(1)
    loop_print(socket_paths, args.counter, args.loop_seconds, args.batch)
# Entry point when executed as a script.
if __name__ == '__main__':
    main()
| 5,002 | 27.588571 | 98 | py |
null | ceph-main/src/tools/kvstore_tool.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "kvstore_tool.h"
#include <iostream>
#include "common/errno.h"
#include "common/url_escape.h"
#include "common/pretty_binary.h"
#include "include/buffer.h"
#include "kv/KeyValueDB.h"
#include "kv/KeyValueHistogram.h"
using namespace std;
// Open (or prepare to repair) a key-value store of the given @p type at
// @p path.  "bluestore-kv" routes through BlueStore's embedded database;
// any other type is opened directly via KeyValueDB::create().  Exits the
// process with status 1 on failure.
StoreTool::StoreTool(const string& type,
                     const string& path,
                     bool to_repair,
                     bool need_stats)
  : store_path(path)
{
  if (need_stats) {
    // enable perf/compaction statistics collection before the DB opens
    g_conf()->rocksdb_perf = true;
    g_conf()->rocksdb_collect_compaction_stats = true;
  }
  if (type == "bluestore-kv") {
#ifdef WITH_BLUESTORE
    if (load_bluestore(path, to_repair) != 0)
      exit(1);
#else
    cerr << "bluestore not compiled in" << std::endl;
    exit(1);
#endif
  } else {
    auto db_ptr = KeyValueDB::create(g_ceph_context, type, path);
    if (!to_repair) {
      // in repair mode the DB is deliberately left unopened here;
      // presumably the repair path opens it itself -- see callers
      if (int r = db_ptr->open(std::cerr); r < 0) {
        cerr << "failed to open type " << type << " path " << path << ": "
             << cpp_strerror(r) << std::endl;
        exit(1);
      }
    }
    db.reset(db_ptr);
  }
}
int StoreTool::load_bluestore(const string& path, bool to_repair)
{
auto bluestore = new BlueStore(g_ceph_context, path);
KeyValueDB *db_ptr;
int r = bluestore->open_db_environment(&db_ptr, to_repair);
if (r < 0) {
return -EINVAL;
}
db = decltype(db){db_ptr, Deleter(bluestore)};
return 0;
}
// Walk the keyspace (optionally restricted to @p prefix), accumulating a
// crc32c (seed 0xffffffff) over every (prefix, key, value) triple.
// If @p out is non-null each key is printed url-escaped; with @p do_crc a
// per-record crc32c (seed 0, independent of the running checksum) is
// appended, and with @p do_value_dump each value is hexdumped to stdout.
// Returns the accumulated checksum.
uint32_t StoreTool::traverse(const string& prefix,
                             const bool do_crc,
                             const bool do_value_dump,
                             ostream *out)
{
  KeyValueDB::WholeSpaceIterator iter = db->get_wholespace_iterator();
  if (prefix.empty())
    iter->seek_to_first();
  else
    iter->seek_to_first(prefix);
  uint32_t crc = -1;  // crc32c seed: all ones
  while (iter->valid()) {
    pair<string,string> rk = iter->raw_key();
    // stop as soon as the iterator leaves the requested prefix
    if (!prefix.empty() && (rk.first != prefix))
      break;
    if (out)
      *out << url_escape(rk.first) << "\t" << url_escape(rk.second);
    if (do_crc) {
      bufferlist bl;
      bl.append(rk.first);
      bl.append(rk.second);
      bl.append(iter->value());
      crc = bl.crc32c(crc);
      if (out) {
        // per-record checksum, seeded with 0 so each record's value is
        // reproducible in isolation
        *out << "\t" << bl.crc32c(0);
      }
    }
    if (out)
      *out << std::endl;
    if (out && do_value_dump) {
      bufferptr bp = iter->value_as_ptr();
      bufferlist value;
      value.append(bp);
      ostringstream os;
      value.hexdump(os);
      std::cout << os.str() << std::endl;
    }
    iter->next();
  }
  return crc;
}
// Print every key under @p prefix to stdout, optionally with per-record
// CRCs and hexdumped values.  Thin wrapper around traverse().
void StoreTool::list(const string& prefix, const bool do_crc,
                     const bool do_value_dump)
{
  traverse(prefix, do_crc, do_value_dump, &std::cout);
}
bool StoreTool::exists(const string& prefix)
{
ceph_assert(!prefix.empty());
KeyValueDB::WholeSpaceIterator iter = db->get_wholespace_iterator();
iter->seek_to_first(prefix);
return (iter->valid() && (iter->raw_key().first == prefix));
}
// Return true if @p key exists under @p prefix.  An empty @p key degrades
// to a prefix-existence check.
bool StoreTool::exists(const string& prefix, const string& key)
{
  ceph_assert(!prefix.empty());
  if (key.empty()) {
    return exists(prefix);
  }
  bool found = false;
  get(prefix, key, found);
  return found;
}
// Fetch the value stored at (@p prefix, @p key).  @p exists reports
// whether the key was present; an empty bufferlist is returned when it
// was not.
bufferlist StoreTool::get(const string& prefix,
                          const string& key,
                          bool& exists)
{
  ceph_assert(!prefix.empty() && !key.empty());
  std::set<std::string> keys{key};
  map<string,bufferlist> result;
  db->get(prefix, keys, &result);
  auto it = result.find(key);
  if (it == result.end()) {
    exists = false;
    return bufferlist();
  }
  exists = true;
  return it->second;
}
// Print the estimated size of each store component and return the total
// estimated size in bytes.
uint64_t StoreTool::get_size()
{
  map<string,uint64_t> extras;
  const uint64_t total = db->get_estimated_size(extras);
  for (const auto& [name, size] : extras) {
    std::cout << name << " - " << size << std::endl;
  }
  std::cout << "total: " << total << std::endl;
  return total;
}
// Store @p val at (@p prefix, @p key) in a single synchronous
// transaction.  Returns true when the commit succeeds.
bool StoreTool::set(const string &prefix, const string &key, bufferlist &val)
{
  ceph_assert(!prefix.empty());
  ceph_assert(!key.empty());
  ceph_assert(val.length() > 0);
  auto tx = db->get_transaction();
  tx->set(prefix, key, val);
  return db->submit_transaction_sync(tx) == 0;
}
// Remove the single key (@p prefix, @p key) in a synchronous
// transaction.  Returns true when the commit succeeds.
bool StoreTool::rm(const string& prefix, const string& key)
{
  ceph_assert(!prefix.empty());
  ceph_assert(!key.empty());
  auto tx = db->get_transaction();
  tx->rmkey(prefix, key);
  return db->submit_transaction_sync(tx) == 0;
}
// Remove every key under @p prefix in a synchronous transaction.
// Returns true when the commit succeeds.
bool StoreTool::rm_prefix(const string& prefix)
{
  ceph_assert(!prefix.empty());
  auto tx = db->get_transaction();
  tx->rmkeys_by_prefix(prefix);
  return db->submit_transaction_sync(tx) == 0;
}
// Print a human-readable summary of a completed copy operation: key
// count, transaction count, byte total, source/destination paths and
// elapsed seconds.
void StoreTool::print_summary(const uint64_t total_keys, const uint64_t total_size,
                              const uint64_t total_txs, const string& store_path,
                              const string& other_path, const int duration) const
{
  std::cout << "summary:" << std::endl
            << "  copied " << total_keys << " keys" << std::endl
            << "  used " << total_txs << " transactions" << std::endl
            << "  total size " << byte_u_t(total_size) << std::endl
            << "  from '" << store_path << "' to '" << other_path << "'"
            << std::endl
            << "  duration " << duration << " seconds" << std::endl;
}
int StoreTool::print_stats() const
{
ostringstream ostr;
Formatter* f = Formatter::create("json-pretty", "json-pretty", "json-pretty");
int ret = -1;
if (g_conf()->rocksdb_perf) {
db->get_statistics(f);
ostr << "db_statistics ";
f->flush(ostr);
ret = 0;
} else {
ostr << "db_statistics not enabled";
f->flush(ostr);
}
std::cout << ostr.str() << std::endl;
delete f;
return ret;
}
// Iterates over every record under @p prefix0 and prints a JSON report of
// key/value size statistics: per-first-byte record counts, max/total key
// and value sizes, and KeyValueHistogram buckets.  Always returns 0.
int StoreTool::build_size_histogram(const string& prefix0) const
{
  ostringstream ostr;
  Formatter* f = Formatter::create("json-pretty", "json-pretty", "json-pretty");
  const size_t MAX_PREFIX = 256;
  uint64_t num[MAX_PREFIX] = {0};
  size_t max_key_size = 0, max_value_size = 0;
  uint64_t total_key_size = 0, total_value_size = 0;
  size_t key_size = 0, value_size = 0;
  KeyValueHistogram hist;
  auto start = coarse_mono_clock::now();
  // NOCACHE so a full scan does not pollute the block cache
  auto iter = db->get_iterator(prefix0, KeyValueDB::ITERATOR_NOCACHE);
  iter->seek_to_first();
  while (iter->valid()) {
    pair<string, string> key(iter->raw_key());
    // count both the column-family prefix and the key itself
    key_size = key.first.size() + key.second.size();
    value_size = iter->value().length();
    hist.value_hist[hist.get_value_slab(value_size)]++;
    max_key_size = std::max(max_key_size, key_size);
    max_value_size = std::max(max_value_size, value_size);
    total_key_size += key_size;
    total_value_size += value_size;
    // bucket by the first byte of the prefix; cast via unsigned char so
    // bytes >= 0x80 do not sign-extend (which would trip the assert on
    // platforms where plain char is signed)
    unsigned prefix = static_cast<unsigned char>(key.first[0]);
    ceph_assert(prefix < MAX_PREFIX);
    num[prefix]++;
    hist.update_hist_entry(hist.key_hist, key.first, key_size, value_size);
    iter->next();
  }
  ceph::timespan duration = coarse_mono_clock::now() - start;
  f->open_object_section("rocksdb_key_value_stats");
  for (size_t i = 0; i < MAX_PREFIX; ++i) {
    if (num[i]) {
      string key = "Records for prefix: ";
      key += pretty_binary_string(string(1, char(i)));
      f->dump_unsigned(key, num[i]);
    }
  }
  f->dump_unsigned("max_key_size", max_key_size);
  f->dump_unsigned("max_value_size", max_value_size);
  f->dump_unsigned("total_key_size", total_key_size);
  f->dump_unsigned("total_value_size", total_value_size);
  hist.dump(f);
  f->close_section();
  f->flush(ostr);
  delete f;
  std::cout << ostr.str() << std::endl;
  std::cout << __func__ << " finished in " << duration << " seconds" << std::endl;
  return 0;
}
// Bulk-copy every key/value pair from the open store into a store of
// @p other_type created (or opened) at @p other_path, batching up to
// @p num_keys_per_tx keys per synchronous transaction and printing
// progress after each batch.  @p type is currently unused.
// Returns 0 on success or a negative errno.
int StoreTool::copy_store_to(const string& type, const string& other_path,
                             const int num_keys_per_tx,
                             const string& other_type)
{
  if (num_keys_per_tx <= 0) {
    std::cerr << "must specify a number of keys/tx > 0" << std::endl;
    return -EINVAL;
  }
  // open or create a RocksDB store at @p other_path; take ownership
  // immediately so the instance is not leaked if create_and_open fails
  // (the original code returned without freeing it)
  boost::scoped_ptr<KeyValueDB> other;
  other.reset(KeyValueDB::create(g_ceph_context,
                                 other_type,
                                 other_path));
  if (int err = other->create_and_open(std::cerr); err < 0) {
    return err;
  }
  KeyValueDB::WholeSpaceIterator it = db->get_wholespace_iterator();
  it->seek_to_first();
  uint64_t total_keys = 0;
  uint64_t total_size = 0;
  uint64_t total_txs = 0;
  // elapsed wall-clock seconds since this lambda was created
  auto duration = [start=coarse_mono_clock::now()] {
    const auto now = coarse_mono_clock::now();
    auto seconds = std::chrono::duration<double>(now - start);
    return seconds.count();
  };
  do {
    int num_keys = 0;
    KeyValueDB::Transaction tx = other->get_transaction();
    // fill one transaction with up to num_keys_per_tx records
    while (it->valid() && num_keys < num_keys_per_tx) {
      auto [prefix, key] = it->raw_key();
      bufferlist v = it->value();
      tx->set(prefix, key, v);
      num_keys++;
      total_size += v.length();
      it->next();
    }
    total_txs++;
    total_keys += num_keys;
    if (num_keys > 0)
      other->submit_transaction_sync(tx);
    // progress line after every batch
    std::cout << "ts = " << duration() << "s, copied " << total_keys
              << " keys so far (" << byte_u_t(total_size) << ")"
              << std::endl;
  } while (it->valid());
  print_summary(total_keys, total_size, total_txs, store_path, other_path,
                duration());
  return 0;
}
// Trigger a compaction of the whole keyspace in the backing store.
void StoreTool::compact()
{
  db->compact();
}
// Compact only the keys under @p prefix.
void StoreTool::compact_prefix(const string& prefix)
{
  db->compact_prefix(prefix);
}
// Compact the key range [@p start, @p end) under @p prefix.
void StoreTool::compact_range(const string& prefix,
                              const string& start,
                              const string& end)
{
  db->compact_range(prefix, start, end);
}
// Run the backend's destructive repair (may discard data); progress is
// written to stdout.  Returns the backend's result code.
int StoreTool::destructive_repair()
{
  return db->repair(std::cout);
}
| 9,897 | 24.979003 | 83 | cc |