Search is not available for this dataset
repo stringlengths 2 152 ⌀ | file stringlengths 15 239 | code stringlengths 0 58.4M | file_length int64 0 58.4M | avg_line_length float64 0 1.81M | max_line_length int64 0 12.7M | extension_type stringclasses 364 values |
|---|---|---|---|---|---|---|
null | ceph-main/src/tools/rbd_mirror/image_sync/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef RBD_MIRROR_IMAGE_SYNC_TYPES_H
#define RBD_MIRROR_IMAGE_SYNC_TYPES_H
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/Types.h"
#include <list>
#include <string>
#include <boost/optional.hpp>
struct Context;
namespace rbd {
namespace mirror {
namespace image_sync {
// Describes a single sync-point snapshot used to checkpoint image sync
// progress: the snapshot to sync to, the optional snapshot to sync from,
// and the last fully-synced object number (if any).
struct SyncPoint {
  typedef boost::optional<uint64_t> ObjectNumber;

  SyncPoint() {
  }
  SyncPoint(const cls::rbd::SnapshotNamespace& snap_ns,
            const std::string& snap,
            const std::string& from_snap,
            const ObjectNumber& object_num)
    : snap_namespace(snap_ns), snap_name(snap),
      from_snap_name(from_snap), object_number(object_num) {
  }

  cls::rbd::SnapshotNamespace snap_namespace =
    {cls::rbd::UserSnapshotNamespace{}};
  std::string snap_name;
  std::string from_snap_name;
  ObjectNumber object_number = boost::none;

  // Field-by-field equality; compares all four members.
  bool operator==(const SyncPoint& rhs) const {
    if (!(snap_namespace == rhs.snap_namespace)) {
      return false;
    }
    if (snap_name != rhs.snap_name) {
      return false;
    }
    if (from_snap_name != rhs.from_snap_name) {
      return false;
    }
    return object_number == rhs.object_number;
  }
};
typedef std::list<SyncPoint> SyncPoints;
// Abstract interface for persisting and retrieving sync points and the
// local/remote snapshot sequence mapping during an image sync.
// Non-copyable; concrete handlers are destroyed via destroy().
struct SyncPointHandler {
public:
  SyncPointHandler(const SyncPointHandler&) = delete;
  SyncPointHandler& operator=(const SyncPointHandler&) = delete;

  // Virtual destructor so derived handlers destruct correctly through a
  // base pointer ('= default' replaces the empty-body form).
  virtual ~SyncPointHandler() = default;

  // Default disposal: self-delete. Overridable for handlers with a
  // different lifetime model.
  virtual void destroy() {
    delete this;
  }

  // Currently recorded sync points (may be empty).
  virtual SyncPoints get_sync_points() const = 0;
  // Snapshot id sequence mapping associated with the sync.
  virtual librbd::SnapSeqs get_snap_seqs() const = 0;

  // Persists the supplied snap seqs / sync points; 'sync_complete'
  // marks the sync as finished. 'on_finish' is completed when done.
  virtual void update_sync_points(const librbd::SnapSeqs& snap_seq,
                                  const SyncPoints& sync_points,
                                  bool sync_complete,
                                  Context* on_finish) = 0;

protected:
  // Constructible only by derived classes.
  SyncPointHandler() = default;
};
} // namespace image_sync
} // namespace mirror
} // namespace rbd
#endif // RBD_MIRROR_IMAGE_SYNC_TYPES_H
| 2,017 | 25.906667 | 70 | h |
null | ceph-main/src/tools/rbd_mirror/image_sync/Utils.cc | // -*- mode:c++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "Utils.h"
namespace rbd {
namespace mirror {
namespace image_sync {
namespace util {
namespace {

// Common prefix for all snapshots created by rbd-mirror.
// NOTE: 'static' is redundant inside an anonymous namespace (the constant
// already has internal linkage), so it is omitted here.
const std::string SNAP_NAME_PREFIX(".rbd-mirror");

} // anonymous namespace

// Returns the snapshot-name prefix used for the given local mirror peer:
// ".rbd-mirror.<local_mirror_uuid>.".
std::string get_snapshot_name_prefix(const std::string& local_mirror_uuid) {
  return SNAP_NAME_PREFIX + "." + local_mirror_uuid + ".";
}
} // namespace util
} // namespace image_sync
} // namespace mirror
} // namespace rbd
| 519 | 19.8 | 76 | cc |
null | ceph-main/src/tools/rbd_mirror/image_sync/Utils.h | // -*- mode:c++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_IMAGE_SYNC_UTILS_H
#define CEPH_RBD_MIRROR_IMAGE_SYNC_UTILS_H

#include <string>

namespace rbd {
namespace mirror {
namespace image_sync {
namespace util {

// Returns the snapshot-name prefix (".rbd-mirror.<local_mirror_uuid>.")
// used for snapshots created on behalf of the given local mirror peer.
std::string get_snapshot_name_prefix(const std::string& local_mirror_uuid);

} // namespace util
} // namespace image_sync
} // namespace mirror
} // namespace rbd

#endif // CEPH_RBD_MIRROR_IMAGE_SYNC_UTILS_H
| 358 | 20.117647 | 75 | h |
null | ceph-main/src/tools/rbd_mirror/instance_watcher/Types.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "Types.h"
#include "include/ceph_assert.h"
#include "include/stringify.h"
#include "common/Formatter.h"
namespace rbd {
namespace mirror {
namespace instance_watcher {
namespace {

// Writes "<uint32 op code><payload body>" for whichever payload type the
// boost::variant currently holds.
class EncodePayloadVisitor : public boost::static_visitor<void> {
public:
  explicit EncodePayloadVisitor(bufferlist &bl) : m_bl(bl) {}
  template <typename Payload>
  inline void operator()(const Payload &payload) const {
    using ceph::encode;
    // The op code precedes the body so the decoder can select the
    // matching variant alternative before decoding it.
    encode(static_cast<uint32_t>(Payload::NOTIFY_OP), m_bl);
    payload.encode(m_bl);
  }
private:
  bufferlist &m_bl;
};

// Decodes the payload body; the op code has already been consumed by
// NotifyMessage::decode, which selected the variant alternative.
class DecodePayloadVisitor : public boost::static_visitor<void> {
public:
  DecodePayloadVisitor(__u8 version, bufferlist::const_iterator &iter)
    : m_version(version), m_iter(iter) {}
  template <typename Payload>
  inline void operator()(Payload &payload) const {
    payload.decode(m_version, m_iter);
  }
private:
  __u8 m_version;
  bufferlist::const_iterator &m_iter;
};

// Dumps the payload plus its symbolic op name to a Formatter.
class DumpPayloadVisitor : public boost::static_visitor<void> {
public:
  explicit DumpPayloadVisitor(Formatter *formatter) : m_formatter(formatter) {}
  template <typename Payload>
  inline void operator()(const Payload &payload) const {
    NotifyOp notify_op = Payload::NOTIFY_OP;
    m_formatter->dump_string("notify_op", stringify(notify_op));
    payload.dump(m_formatter);
  }
private:
  ceph::Formatter *m_formatter;
};

} // anonymous namespace
// PayloadBase wire format: just the request id. Field order below IS the
// wire protocol -- do not reorder.
void PayloadBase::encode(bufferlist &bl) const {
  using ceph::encode;
  encode(request_id, bl);
}
void PayloadBase::decode(__u8 version, bufferlist::const_iterator &iter) {
  using ceph::decode;
  decode(request_id, iter);
}
void PayloadBase::dump(Formatter *f) const {
  f->dump_unsigned("request_id", request_id);
}

// ImagePayloadBase appends the global image id after the base fields.
void ImagePayloadBase::encode(bufferlist &bl) const {
  using ceph::encode;
  PayloadBase::encode(bl);
  encode(global_image_id, bl);
}
void ImagePayloadBase::decode(__u8 version, bufferlist::const_iterator &iter) {
  using ceph::decode;
  PayloadBase::decode(version, iter);
  decode(global_image_id, iter);
}
void ImagePayloadBase::dump(Formatter *f) const {
  PayloadBase::dump(f);
  f->dump_string("global_image_id", global_image_id);
}
// PeerImageRemovedPayload: base fields + global image id + peer uuid,
// in exactly that order (wire protocol).
void PeerImageRemovedPayload::encode(bufferlist &bl) const {
  using ceph::encode;
  PayloadBase::encode(bl);
  encode(global_image_id, bl);
  encode(peer_mirror_uuid, bl);
}
void PeerImageRemovedPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
  using ceph::decode;
  PayloadBase::decode(version, iter);
  decode(global_image_id, iter);
  decode(peer_mirror_uuid, iter);
}
void PeerImageRemovedPayload::dump(Formatter *f) const {
  PayloadBase::dump(f);
  f->dump_string("global_image_id", global_image_id);
  f->dump_string("peer_mirror_uuid", peer_mirror_uuid);
}

// SyncPayloadBase appends the sync id after the base fields.
void SyncPayloadBase::encode(bufferlist &bl) const {
  using ceph::encode;
  PayloadBase::encode(bl);
  encode(sync_id, bl);
}
void SyncPayloadBase::decode(__u8 version, bufferlist::const_iterator &iter) {
  using ceph::decode;
  PayloadBase::decode(version, iter);
  decode(sync_id, iter);
}
void SyncPayloadBase::dump(Formatter *f) const {
  PayloadBase::dump(f);
  f->dump_string("sync_id", sync_id);
}

// UnknownPayload exists only to hold ops received from newer peers; it
// must never be re-encoded and sent on the wire, hence the abort.
void UnknownPayload::encode(bufferlist &bl) const {
  ceph_abort();
}
void UnknownPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
}
void UnknownPayload::dump(Formatter *f) const {
}
// Envelope encoding: versioned header, then "<op code><payload body>"
// produced by the visitor.
void NotifyMessage::encode(bufferlist& bl) const {
  ENCODE_START(2, 2, bl);
  boost::apply_visitor(EncodePayloadVisitor(bl), payload);
  ENCODE_FINISH(bl);
}

void NotifyMessage::decode(bufferlist::const_iterator& iter) {
  DECODE_START(2, iter);
  uint32_t notify_op;
  decode(notify_op, iter);
  // select the correct payload variant based upon the encoded op
  switch (notify_op) {
  case NOTIFY_OP_IMAGE_ACQUIRE:
    payload = ImageAcquirePayload();
    break;
  case NOTIFY_OP_IMAGE_RELEASE:
    payload = ImageReleasePayload();
    break;
  case NOTIFY_OP_PEER_IMAGE_REMOVED:
    payload = PeerImageRemovedPayload();
    break;
  case NOTIFY_OP_SYNC_REQUEST:
    payload = SyncRequestPayload();
    break;
  case NOTIFY_OP_SYNC_START:
    payload = SyncStartPayload();
    break;
  default:
    // forward compatibility: ops from newer peers decode as unknown
    payload = UnknownPayload();
    break;
  }
  apply_visitor(DecodePayloadVisitor(struct_v, iter), payload);
  DECODE_FINISH(iter);
}

// Dumps the active payload (with its symbolic op name) to the formatter.
void NotifyMessage::dump(Formatter *f) const {
  apply_visitor(DumpPayloadVisitor(f), payload);
}

// Sample instances for encode/decode round-trip testing (presumably
// consumed by ceph-dencoder -- confirm before changing the set).
void NotifyMessage::generate_test_instances(std::list<NotifyMessage *> &o) {
  o.push_back(new NotifyMessage(ImageAcquirePayload()));
  o.push_back(new NotifyMessage(ImageAcquirePayload(1, "gid")));
  o.push_back(new NotifyMessage(ImageReleasePayload()));
  o.push_back(new NotifyMessage(ImageReleasePayload(1, "gid")));
  o.push_back(new NotifyMessage(PeerImageRemovedPayload()));
  o.push_back(new NotifyMessage(PeerImageRemovedPayload(1, "gid", "uuid")));
  o.push_back(new NotifyMessage(SyncRequestPayload()));
  o.push_back(new NotifyMessage(SyncRequestPayload(1, "sync_id")));
  o.push_back(new NotifyMessage(SyncStartPayload()));
  o.push_back(new NotifyMessage(SyncStartPayload(1, "sync_id")));
}
// Streams a human-readable name for the op; unrecognized values are
// rendered as "Unknown (<numeric code>)".
std::ostream &operator<<(std::ostream &out, const NotifyOp &op) {
  const char *name = nullptr;
  switch (op) {
  case NOTIFY_OP_IMAGE_ACQUIRE:
    name = "ImageAcquire";
    break;
  case NOTIFY_OP_IMAGE_RELEASE:
    name = "ImageRelease";
    break;
  case NOTIFY_OP_PEER_IMAGE_REMOVED:
    name = "PeerImageRemoved";
    break;
  case NOTIFY_OP_SYNC_REQUEST:
    name = "SyncRequest";
    break;
  case NOTIFY_OP_SYNC_START:
    name = "SyncStart";
    break;
  default:
    break;
  }
  if (name != nullptr) {
    out << name;
  } else {
    out << "Unknown (" << static_cast<uint32_t>(op) << ")";
  }
  return out;
}
// Ack wire format: instance id, request id, return value -- in that order.
void NotifyAckPayload::encode(bufferlist &bl) const {
  using ceph::encode;
  encode(instance_id, bl);
  encode(request_id, bl);
  encode(ret_val, bl);
}
void NotifyAckPayload::decode(bufferlist::const_iterator &iter) {
  using ceph::decode;
  decode(instance_id, iter);
  decode(request_id, iter);
  decode(ret_val, iter);
}
// Dumps the ack fields to the formatter.
void NotifyAckPayload::dump(Formatter *f) const {
  f->dump_string("instance_id", instance_id);
  f->dump_unsigned("request_id", request_id);
  // BUG FIX: ret_val was previously dumped under the duplicate key
  // "request_id", hiding the return value in formatted output.
  f->dump_int("ret_val", ret_val);
}
} // namespace instance_watcher
} // namespace mirror
} // namespace rbd
| 6,335 | 24.756098 | 86 | cc |
null | ceph-main/src/tools/rbd_mirror/instance_watcher/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef RBD_MIRROR_INSTANCE_WATCHER_TYPES_H
#define RBD_MIRROR_INSTANCE_WATCHER_TYPES_H
#include <string>
#include <set>
#include <boost/variant.hpp>
#include "include/buffer_fwd.h"
#include "include/encoding.h"
#include "include/int_types.h"
namespace ceph { class Formatter; }
namespace rbd {
namespace mirror {
namespace instance_watcher {
// Wire op codes; values are persisted in encoded messages -- never renumber.
enum NotifyOp {
  NOTIFY_OP_IMAGE_ACQUIRE = 0,
  NOTIFY_OP_IMAGE_RELEASE = 1,
  NOTIFY_OP_PEER_IMAGE_REMOVED = 2,
  NOTIFY_OP_SYNC_REQUEST = 3,
  NOTIFY_OP_SYNC_START = 4
};

// Common base: every notification carries a request id used to match acks.
struct PayloadBase {
  uint64_t request_id;
  PayloadBase() : request_id(0) {
  }
  PayloadBase(uint64_t request_id) : request_id(request_id) {
  }
  void encode(bufferlist &bl) const;
  void decode(__u8 version, bufferlist::const_iterator &iter);
  void dump(Formatter *f) const;
};

// Base for notifications that reference an image by global image id.
struct ImagePayloadBase : public PayloadBase {
  std::string global_image_id;
  ImagePayloadBase() : PayloadBase() {
  }
  ImagePayloadBase(uint64_t request_id, const std::string &global_image_id)
    : PayloadBase(request_id), global_image_id(global_image_id) {
  }
  void encode(bufferlist &bl) const;
  void decode(__u8 version, bufferlist::const_iterator &iter);
  void dump(Formatter *f) const;
};

// Request that the receiving instance acquire the image.
struct ImageAcquirePayload : public ImagePayloadBase {
  static const NotifyOp NOTIFY_OP = NOTIFY_OP_IMAGE_ACQUIRE;
  ImageAcquirePayload() {
  }
  ImageAcquirePayload(uint64_t request_id, const std::string &global_image_id)
    : ImagePayloadBase(request_id, global_image_id) {
  }
};

// Request that the receiving instance release the image.
struct ImageReleasePayload : public ImagePayloadBase {
  static const NotifyOp NOTIFY_OP = NOTIFY_OP_IMAGE_RELEASE;
  ImageReleasePayload() {
  }
  ImageReleasePayload(uint64_t request_id, const std::string &global_image_id)
    : ImagePayloadBase(request_id, global_image_id) {
  }
};
// Notifies that a peer's image was removed; identifies image and peer.
struct PeerImageRemovedPayload : public PayloadBase {
  static const NotifyOp NOTIFY_OP = NOTIFY_OP_PEER_IMAGE_REMOVED;
  std::string global_image_id;
  std::string peer_mirror_uuid;
  PeerImageRemovedPayload() {
  }
  PeerImageRemovedPayload(uint64_t request_id,
                          const std::string& global_image_id,
                          const std::string& peer_mirror_uuid)
    : PayloadBase(request_id),
      global_image_id(global_image_id), peer_mirror_uuid(peer_mirror_uuid) {
  }
  void encode(bufferlist &bl) const;
  void decode(__u8 version, bufferlist::const_iterator &iter);
  void dump(Formatter *f) const;
};

// Base for sync throttle notifications, keyed by a sync id.
struct SyncPayloadBase : public PayloadBase {
  std::string sync_id;
  SyncPayloadBase() : PayloadBase() {
  }
  SyncPayloadBase(uint64_t request_id, const std::string &sync_id)
    : PayloadBase(request_id), sync_id(sync_id) {
  }
  void encode(bufferlist &bl) const;
  void decode(__u8 version, bufferlist::const_iterator &iter);
  void dump(Formatter *f) const;
};

// Request permission to start a sync.
struct SyncRequestPayload : public SyncPayloadBase {
  static const NotifyOp NOTIFY_OP = NOTIFY_OP_SYNC_REQUEST;
  SyncRequestPayload() : SyncPayloadBase() {
  }
  SyncRequestPayload(uint64_t request_id, const std::string &sync_id)
    : SyncPayloadBase(request_id, sync_id) {
  }
};

// Grant/announce the start of a sync.
struct SyncStartPayload : public SyncPayloadBase {
  static const NotifyOp NOTIFY_OP = NOTIFY_OP_SYNC_START;
  SyncStartPayload() : SyncPayloadBase() {
  }
  SyncStartPayload(uint64_t request_id, const std::string &sync_id)
    : SyncPayloadBase(request_id, sync_id) {
  }
};

// Placeholder for ops received from newer peers; cannot be re-encoded.
struct UnknownPayload {
  static const NotifyOp NOTIFY_OP = static_cast<NotifyOp>(-1);
  UnknownPayload() {
  }
  void encode(bufferlist &bl) const;
  void decode(__u8 version, bufferlist::const_iterator &iter);
  void dump(Formatter *f) const;
};
// Closed set of payload alternatives carried by a NotifyMessage.
typedef boost::variant<ImageAcquirePayload,
                       ImageReleasePayload,
                       PeerImageRemovedPayload,
                       SyncRequestPayload,
                       SyncStartPayload,
                       UnknownPayload> Payload;

// Envelope for a single instance-watcher notification.
struct NotifyMessage {
  NotifyMessage(const Payload &payload = UnknownPayload()) : payload(payload) {
  }
  Payload payload;
  void encode(bufferlist& bl) const;
  void decode(bufferlist::const_iterator& it);
  void dump(Formatter *f) const;
  static void generate_test_instances(std::list<NotifyMessage *> &o);
};
WRITE_CLASS_ENCODER(NotifyMessage);

std::ostream &operator<<(std::ostream &out, const NotifyOp &op);

// Acknowledgement sent back to the notifier: identifies the responding
// instance, echoes the request id, and reports the operation result.
struct NotifyAckPayload {
  std::string instance_id;
  uint64_t request_id;
  int ret_val;
  NotifyAckPayload() : request_id(0), ret_val(0) {
  }
  NotifyAckPayload(const std::string &instance_id, uint64_t request_id,
                   int ret_val)
    : instance_id(instance_id), request_id(request_id), ret_val(ret_val) {
  }
  void encode(bufferlist &bl) const;
  void decode(bufferlist::const_iterator& it);
  void dump(Formatter *f) const;
};
WRITE_CLASS_ENCODER(NotifyAckPayload);
} // namespace instance_watcher
} // namespace mirror
} // namespace rbd
using rbd::mirror::instance_watcher::encode;
using rbd::mirror::instance_watcher::decode;
#endif // RBD_MIRROR_INSTANCE_WATCHER_TYPES_H
| 5,146 | 24.994949 | 79 | h |
null | ceph-main/src/tools/rbd_mirror/instances/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_INSTANCES_TYPES_H
#define CEPH_RBD_MIRROR_INSTANCES_TYPES_H
#include <string>
#include <vector>
namespace rbd {
namespace mirror {
namespace instances {
// Callback interface notified when rbd-mirror instances join or leave.
struct Listener {
  typedef std::vector<std::string> InstanceIds;
  virtual ~Listener() {
  }
  // Invoked with the ids of newly observed instances.
  virtual void handle_added(const InstanceIds& instance_ids) = 0;
  // Invoked with the ids of instances that have gone away.
  virtual void handle_removed(const InstanceIds& instance_ids) = 0;
};
} // namespace instances
} // namespace mirror
} // namespace rbd
#endif // CEPH_RBD_MIRROR_INSTANCES_TYPES_H
| 624 | 20.551724 | 70 | h |
null | ceph-main/src/tools/rbd_mirror/leader_watcher/Types.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "Types.h"
#include "include/ceph_assert.h"
#include "include/stringify.h"
#include "common/Formatter.h"
namespace rbd {
namespace mirror {
namespace leader_watcher {
namespace {

// Writes "<uint32 op code><payload body>" for whichever payload type the
// boost::variant currently holds.
class EncodePayloadVisitor : public boost::static_visitor<void> {
public:
  explicit EncodePayloadVisitor(bufferlist &bl) : m_bl(bl) {}
  template <typename Payload>
  inline void operator()(const Payload &payload) const {
    using ceph::encode;
    // The op code precedes the body so the decoder can select the
    // matching variant alternative before decoding it.
    encode(static_cast<uint32_t>(Payload::NOTIFY_OP), m_bl);
    payload.encode(m_bl);
  }
private:
  bufferlist &m_bl;
};

// Decodes the payload body; the op code has already been consumed by
// NotifyMessage::decode, which selected the variant alternative.
class DecodePayloadVisitor : public boost::static_visitor<void> {
public:
  DecodePayloadVisitor(__u8 version, bufferlist::const_iterator &iter)
    : m_version(version), m_iter(iter) {}
  template <typename Payload>
  inline void operator()(Payload &payload) const {
    payload.decode(m_version, m_iter);
  }
private:
  __u8 m_version;
  bufferlist::const_iterator &m_iter;
};

// Dumps the payload plus its symbolic op name to a Formatter.
class DumpPayloadVisitor : public boost::static_visitor<void> {
public:
  explicit DumpPayloadVisitor(Formatter *formatter) : m_formatter(formatter) {}
  template <typename Payload>
  inline void operator()(const Payload &payload) const {
    NotifyOp notify_op = Payload::NOTIFY_OP;
    m_formatter->dump_string("notify_op", stringify(notify_op));
    payload.dump(m_formatter);
  }
private:
  ceph::Formatter *m_formatter;
};

} // anonymous namespace
// Heartbeat/LockAcquired/LockReleased carry no data beyond their op code,
// so their encode/decode/dump bodies are intentionally empty.
void HeartbeatPayload::encode(bufferlist &bl) const {
}
void HeartbeatPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
}
void HeartbeatPayload::dump(Formatter *f) const {
}
void LockAcquiredPayload::encode(bufferlist &bl) const {
}
void LockAcquiredPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
}
void LockAcquiredPayload::dump(Formatter *f) const {
}
void LockReleasedPayload::encode(bufferlist &bl) const {
}
void LockReleasedPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
}
void LockReleasedPayload::dump(Formatter *f) const {
}
// UnknownPayload exists only to hold ops from newer peers; it must never
// be re-encoded and sent on the wire, hence the abort.
void UnknownPayload::encode(bufferlist &bl) const {
  ceph_abort();
}
void UnknownPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
}
void UnknownPayload::dump(Formatter *f) const {
}
// Envelope encoding: versioned header, then "<op code><payload body>"
// produced by the visitor.
void NotifyMessage::encode(bufferlist& bl) const {
  ENCODE_START(1, 1, bl);
  boost::apply_visitor(EncodePayloadVisitor(bl), payload);
  ENCODE_FINISH(bl);
}

void NotifyMessage::decode(bufferlist::const_iterator& iter) {
  DECODE_START(1, iter);
  uint32_t notify_op;
  decode(notify_op, iter);
  // select the correct payload variant based upon the encoded op
  switch (notify_op) {
  case NOTIFY_OP_HEARTBEAT:
    payload = HeartbeatPayload();
    break;
  case NOTIFY_OP_LOCK_ACQUIRED:
    payload = LockAcquiredPayload();
    break;
  case NOTIFY_OP_LOCK_RELEASED:
    payload = LockReleasedPayload();
    break;
  default:
    // forward compatibility: ops from newer peers decode as unknown
    payload = UnknownPayload();
    break;
  }
  apply_visitor(DecodePayloadVisitor(struct_v, iter), payload);
  DECODE_FINISH(iter);
}

// Dumps the active payload (with its symbolic op name) to the formatter.
void NotifyMessage::dump(Formatter *f) const {
  apply_visitor(DumpPayloadVisitor(f), payload);
}

// Sample instances for encode/decode round-trip testing (presumably
// consumed by ceph-dencoder -- confirm before changing the set).
void NotifyMessage::generate_test_instances(std::list<NotifyMessage *> &o) {
  o.push_back(new NotifyMessage(HeartbeatPayload()));
  o.push_back(new NotifyMessage(LockAcquiredPayload()));
  o.push_back(new NotifyMessage(LockReleasedPayload()));
}
// Streams a human-readable name for the op; unrecognized values are
// rendered as "Unknown (<numeric code>)".
std::ostream &operator<<(std::ostream &out, const NotifyOp &op) {
  const char *name = nullptr;
  switch (op) {
  case NOTIFY_OP_HEARTBEAT:
    name = "Heartbeat";
    break;
  case NOTIFY_OP_LOCK_ACQUIRED:
    name = "LockAcquired";
    break;
  case NOTIFY_OP_LOCK_RELEASED:
    name = "LockReleased";
    break;
  default:
    break;
  }
  if (name != nullptr) {
    out << name;
  } else {
    out << "Unknown (" << static_cast<uint32_t>(op) << ")";
  }
  return out;
}
} // namespace leader_watcher
} // namespace mirror
} // namespace rbd
| 3,880 | 22.95679 | 82 | cc |
null | ceph-main/src/tools/rbd_mirror/leader_watcher/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef RBD_MIRROR_LEADER_WATCHER_TYPES_H
#define RBD_MIRROR_LEADER_WATCHER_TYPES_H
#include "include/int_types.h"
#include "include/buffer_fwd.h"
#include "include/encoding.h"
#include <string>
#include <vector>
#include <boost/variant.hpp>
struct Context;
namespace ceph { class Formatter; }
namespace rbd {
namespace mirror {
namespace leader_watcher {
// Callback interface for leader-election events observed by the watcher.
struct Listener {
  typedef std::vector<std::string> InstanceIds;
  virtual ~Listener() {
  }
  // Invoked after this instance acquires the leader lock.
  virtual void post_acquire_handler(Context *on_finish) = 0;
  // Invoked before this instance releases the leader lock.
  virtual void pre_release_handler(Context *on_finish) = 0;
  // Invoked when a (different) leader instance is detected.
  virtual void update_leader_handler(
    const std::string &leader_instance_id) = 0;
  virtual void handle_instances_added(const InstanceIds& instance_ids) = 0;
  virtual void handle_instances_removed(const InstanceIds& instance_ids) = 0;
};

// Wire op codes; values are persisted in encoded messages -- never renumber.
enum NotifyOp {
  NOTIFY_OP_HEARTBEAT = 0,
  NOTIFY_OP_LOCK_ACQUIRED = 1,
  NOTIFY_OP_LOCK_RELEASED = 2,
};
// Periodic leader liveness announcement; carries no data.
struct HeartbeatPayload {
  static const NotifyOp NOTIFY_OP = NOTIFY_OP_HEARTBEAT;
  HeartbeatPayload() {
  }
  void encode(bufferlist &bl) const;
  void decode(__u8 version, bufferlist::const_iterator &iter);
  void dump(Formatter *f) const;
};

// Announcement that the leader lock was acquired; carries no data.
struct LockAcquiredPayload {
  static const NotifyOp NOTIFY_OP = NOTIFY_OP_LOCK_ACQUIRED;
  LockAcquiredPayload() {
  }
  void encode(bufferlist &bl) const;
  void decode(__u8 version, bufferlist::const_iterator &iter);
  void dump(Formatter *f) const;
};

// Announcement that the leader lock was released; carries no data.
struct LockReleasedPayload {
  static const NotifyOp NOTIFY_OP = NOTIFY_OP_LOCK_RELEASED;
  LockReleasedPayload() {
  }
  void encode(bufferlist &bl) const;
  void decode(__u8 version, bufferlist::const_iterator &iter);
  void dump(Formatter *f) const;
};

// Placeholder for ops received from newer peers; cannot be re-encoded.
struct UnknownPayload {
  static const NotifyOp NOTIFY_OP = static_cast<NotifyOp>(-1);
  UnknownPayload() {
  }
  void encode(bufferlist &bl) const;
  void decode(__u8 version, bufferlist::const_iterator &iter);
  void dump(Formatter *f) const;
};

// Closed set of payload alternatives carried by a NotifyMessage.
typedef boost::variant<HeartbeatPayload,
                       LockAcquiredPayload,
                       LockReleasedPayload,
                       UnknownPayload> Payload;
// Envelope for a single leader-watcher notification.
struct NotifyMessage {
  NotifyMessage(const Payload &payload = UnknownPayload()) : payload(payload) {
  }
  Payload payload;
  void encode(bufferlist& bl) const;
  void decode(bufferlist::const_iterator& it);
  void dump(Formatter *f) const;
  static void generate_test_instances(std::list<NotifyMessage *> &o);
};
WRITE_CLASS_ENCODER(NotifyMessage);
std::ostream &operator<<(std::ostream &out, const NotifyOp &op);
} // namespace leader_watcher
} // namespace mirror
} // namespace rbd
using rbd::mirror::leader_watcher::encode;
using rbd::mirror::leader_watcher::decode;
#endif // RBD_MIRROR_LEADER_WATCHER_TYPES_H
| 2,870 | 23.330508 | 79 | h |
null | ceph-main/src/tools/rbd_mirror/pool_watcher/RefreshImagesRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "tools/rbd_mirror/pool_watcher/RefreshImagesRequest.h"
#include "common/debug.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/Utils.h"
#include <map>
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rbd_mirror
#undef dout_prefix
#define dout_prefix *_dout << "rbd::mirror::pool_watcher::RefreshImagesRequest " \
<< this << " " << __func__ << ": "
namespace rbd {
namespace mirror {
namespace pool_watcher {
static const uint32_t MAX_RETURN = 1024;
using librbd::util::create_rados_callback;
// Entry point: resets the output set and starts paging through the
// mirror image directory.
template <typename I>
void RefreshImagesRequest<I>::send() {
  m_image_ids->clear();
  mirror_image_list();
}

// Issues one async page of the mirror image list, starting after
// m_start_after; completion routed to handle_mirror_image_list().
template <typename I>
void RefreshImagesRequest<I>::mirror_image_list() {
  dout(10) << dendl;
  librados::ObjectReadOperation op;
  librbd::cls_client::mirror_image_list_start(&op, m_start_after, MAX_RETURN);
  m_out_bl.clear();
  librados::AioCompletion *aio_comp = create_rados_callback<
    RefreshImagesRequest<I>,
    &RefreshImagesRequest<I>::handle_mirror_image_list>(this);
  int r = m_remote_io_ctx.aio_operate(RBD_MIRRORING, aio_comp, &op, &m_out_bl);
  ceph_assert(r == 0);
  aio_comp->release();
}
// Handles one page of results: decodes, accumulates, and either pages
// again or completes. -ENOENT (no RBD_MIRRORING object) is treated as an
// empty directory, not an error.
template <typename I>
void RefreshImagesRequest<I>::handle_mirror_image_list(int r) {
  dout(10) << "r=" << r << dendl;
  std::map<std::string, std::string> ids;
  if (r == 0) {
    auto it = m_out_bl.cbegin();
    r = librbd::cls_client::mirror_image_list_finish(&it, &ids);
  }
  if (r < 0 && r != -ENOENT) {
    derr << "failed to list mirrored images: " << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }
  // store as global -> local image ids
  for (auto &id : ids) {
    m_image_ids->emplace(id.second, id.first);
  }
  // a full page implies more entries may remain: continue paging from
  // the last key returned
  if (ids.size() == MAX_RETURN) {
    m_start_after = ids.rbegin()->first;
    mirror_image_list();
    return;
  }
  finish(0);
}

// Completes the caller's context and destroys this self-deleting request.
template <typename I>
void RefreshImagesRequest<I>::finish(int r) {
  dout(10) << "r=" << r << dendl;
  m_on_finish->complete(r);
  delete this;
}
} // namespace pool_watcher
} // namespace mirror
} // namespace rbd
template class rbd::mirror::pool_watcher::RefreshImagesRequest<librbd::ImageCtx>;
| 2,281 | 24.355556 | 82 | cc |
null | ceph-main/src/tools/rbd_mirror/pool_watcher/RefreshImagesRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_POOL_WATCHER_REFRESH_IMAGES_REQUEST_H
#define CEPH_RBD_MIRROR_POOL_WATCHER_REFRESH_IMAGES_REQUEST_H
#include "include/buffer.h"
#include "include/rados/librados.hpp"
#include "tools/rbd_mirror/Types.h"
#include <string>
struct Context;
namespace librbd { struct ImageCtx; }
namespace rbd {
namespace mirror {
namespace pool_watcher {
// Self-deleting async request that pages through the remote pool's mirror
// image directory and fills 'image_ids' with the result. Completes
// 'on_finish' with 0 on success or a negative errno on failure.
template <typename ImageCtxT = librbd::ImageCtx>
class RefreshImagesRequest {
public:
  static RefreshImagesRequest *create(librados::IoCtx &remote_io_ctx,
                                      ImageIds *image_ids, Context *on_finish) {
    return new RefreshImagesRequest(remote_io_ctx, image_ids, on_finish);
  }

  RefreshImagesRequest(librados::IoCtx &remote_io_ctx, ImageIds *image_ids,
                       Context *on_finish)
    : m_remote_io_ctx(remote_io_ctx), m_image_ids(image_ids),
      m_on_finish(on_finish) {
  }

  void send();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    |          /-------------\
   *    |          |             |
   *    v          v             | (more images)
   * MIRROR_IMAGE_LIST ----------/
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  librados::IoCtx &m_remote_io_ctx;
  ImageIds *m_image_ids;       // output: populated by the request
  Context *m_on_finish;        // completed (then 'this' deleted) at the end

  bufferlist m_out_bl;         // raw reply of the in-flight page
  std::string m_start_after;   // pagination cursor (last key of prior page)

  void mirror_image_list();
  void handle_mirror_image_list(int r);
  void finish(int r);
};
} // namespace pool_watcher
} // namespace mirror
} // namespace rbd
extern template class rbd::mirror::pool_watcher::RefreshImagesRequest<librbd::ImageCtx>;
#endif // CEPH_RBD_MIRROR_POOL_WATCHER_REFRESH_IMAGES_REQUEST_H
| 1,718 | 22.22973 | 88 | h |
null | ceph-main/src/tools/rbd_mirror/pool_watcher/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_POOL_WATCHER_TYPES_H
#define CEPH_RBD_MIRROR_POOL_WATCHER_TYPES_H
#include "tools/rbd_mirror/Types.h"
#include <string>
namespace rbd {
namespace mirror {
namespace pool_watcher {
// Callback interface notified when the set of mirrored images in a
// watched pool changes.
struct Listener {
  virtual ~Listener() {
  }
  // Delivers the delta: image ids newly added and those removed.
  virtual void handle_update(const std::string &mirror_uuid,
                             ImageIds &&added_image_ids,
                             ImageIds &&removed_image_ids) = 0;
};
} // namespace pool_watcher
} // namespace mirror
} // namespace rbd
#endif // CEPH_RBD_MIRROR_POOL_WATCHER_TYPES_H
| 656 | 22.464286 | 70 | h |
null | ceph-main/src/tools/rbd_mirror/service_daemon/Types.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "tools/rbd_mirror/service_daemon/Types.h"
#include <iostream>
namespace rbd {
namespace mirror {
namespace service_daemon {
// Streams the lower-case label for a callout level. No default case, so
// adding an enumerator without handling it here raises a -Wswitch warning.
std::ostream& operator<<(std::ostream& os, const CalloutLevel& callout_level) {
  const char* label = "";
  switch (callout_level) {
  case CALLOUT_LEVEL_INFO:
    label = "info";
    break;
  case CALLOUT_LEVEL_WARNING:
    label = "warning";
    break;
  case CALLOUT_LEVEL_ERROR:
    label = "error";
    break;
  }
  os << label;
  return os;
}
} // namespace service_daemon
} // namespace mirror
} // namespace rbd
| 609 | 19.333333 | 79 | cc |
null | ceph-main/src/tools/rbd_mirror/service_daemon/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_SERVICE_DAEMON_TYPES_H
#define CEPH_RBD_MIRROR_SERVICE_DAEMON_TYPES_H
#include "include/int_types.h"
#include <iosfwd>
#include <string>
#include <boost/variant.hpp>
namespace rbd {
namespace mirror {
namespace service_daemon {
// Identifier assigned to a reported service-daemon callout.
typedef uint64_t CalloutId;
// Sentinel meaning "no callout".
const uint64_t CALLOUT_ID_NONE {0};

// Severity of a callout reported to the service daemon.
enum CalloutLevel {
  CALLOUT_LEVEL_INFO,
  CALLOUT_LEVEL_WARNING,
  CALLOUT_LEVEL_ERROR
};

std::ostream& operator<<(std::ostream& os, const CalloutLevel& callout_level);

// Value type for daemon status attributes (bool, counter, or text).
typedef boost::variant<bool, uint64_t, std::string> AttributeValue;
} // namespace service_daemon
} // namespace mirror
} // namespace rbd
#endif // CEPH_RBD_MIRROR_SERVICE_DAEMON_TYPES_H
| 782 | 22.029412 | 78 | h |
null | ceph-main/src/tools/rbd_nbd/nbd-netlink.h | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (C) 2017 Facebook. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#ifndef _UAPILINUX_NBD_NETLINK_H
#define _UAPILINUX_NBD_NETLINK_H
#define NBD_GENL_FAMILY_NAME "nbd"
#define NBD_GENL_VERSION 0x1
#define NBD_GENL_MCAST_GROUP_NAME "nbd_mc_group"
/* Configuration policy attributes, used for CONNECT */
/* NOTE: vendored kernel UAPI header -- attribute/command values are ABI
 * and must never be renumbered or reordered. */
enum {
	NBD_ATTR_UNSPEC,
	NBD_ATTR_INDEX,
	NBD_ATTR_SIZE_BYTES,
	NBD_ATTR_BLOCK_SIZE_BYTES,
	NBD_ATTR_TIMEOUT,
	NBD_ATTR_SERVER_FLAGS,
	NBD_ATTR_CLIENT_FLAGS,
	NBD_ATTR_SOCKETS,
	NBD_ATTR_DEAD_CONN_TIMEOUT,
	NBD_ATTR_DEVICE_LIST,
	NBD_ATTR_BACKEND_IDENTIFIER,
	__NBD_ATTR_MAX,
};
#define NBD_ATTR_MAX (__NBD_ATTR_MAX - 1)
/*
 * This is the format for multiple devices with NBD_ATTR_DEVICE_LIST
 *
 * [NBD_ATTR_DEVICE_LIST]
 *   [NBD_DEVICE_ITEM]
 *     [NBD_DEVICE_INDEX]
 *     [NBD_DEVICE_CONNECTED]
 */
enum {
	NBD_DEVICE_ITEM_UNSPEC,
	NBD_DEVICE_ITEM,
	__NBD_DEVICE_ITEM_MAX,
};
#define NBD_DEVICE_ITEM_MAX (__NBD_DEVICE_ITEM_MAX - 1)
enum {
	NBD_DEVICE_UNSPEC,
	NBD_DEVICE_INDEX,
	NBD_DEVICE_CONNECTED,
	__NBD_DEVICE_MAX,
};
#define NBD_DEVICE_ATTR_MAX (__NBD_DEVICE_MAX - 1)
/*
 * This is the format for multiple sockets with NBD_ATTR_SOCKETS
 *
 * [NBD_ATTR_SOCKETS]
 *   [NBD_SOCK_ITEM]
 *     [NBD_SOCK_FD]
 *   [NBD_SOCK_ITEM]
 *     [NBD_SOCK_FD]
 */
enum {
	NBD_SOCK_ITEM_UNSPEC,
	NBD_SOCK_ITEM,
	__NBD_SOCK_ITEM_MAX,
};
#define NBD_SOCK_ITEM_MAX (__NBD_SOCK_ITEM_MAX - 1)
enum {
	NBD_SOCK_UNSPEC,
	NBD_SOCK_FD,
	__NBD_SOCK_MAX,
};
#define NBD_SOCK_MAX (__NBD_SOCK_MAX - 1)
/* Generic netlink commands understood by the nbd driver. */
enum {
	NBD_CMD_UNSPEC,
	NBD_CMD_CONNECT,
	NBD_CMD_DISCONNECT,
	NBD_CMD_RECONFIGURE,
	NBD_CMD_LINK_DEAD,
	NBD_CMD_STATUS,
	__NBD_CMD_MAX,
};
#define NBD_CMD_MAX (__NBD_CMD_MAX - 1)
#endif /* _UAPILINUX_NBD_NETLINK_H */
| 2,423 | 23 | 68 | h |
null | ceph-main/src/tools/rbd_nbd/rbd-nbd.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* rbd-nbd - RBD in userspace
*
* Copyright (C) 2015 - 2016 Kylin Corporation
*
* Author: Yunchuan Wen <yunchuan.wen@kylin-cloud.com>
* Li Wang <li.wang@kylin-cloud.com>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "acconfig.h"
#include "include/int_types.h"
#include "include/scope_guard.h"
#include <boost/endian/conversion.hpp>
#include <libgen.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>
#include <linux/nbd.h>
#include <linux/fs.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include "nbd-netlink.h"
#include <libnl3/netlink/genl/genl.h>
#include <libnl3/netlink/genl/ctrl.h>
#include <libnl3/netlink/genl/mngt.h>
#include <filesystem>
#include <fstream>
#include <iostream>
#include <memory>
#include <regex>
#include <boost/algorithm/string/predicate.hpp>
#include <boost/lexical_cast.hpp>
#include "common/Formatter.h"
#include "common/Preforker.h"
#include "common/SubProcess.h"
#include "common/TextTable.h"
#include "common/ceph_argparse.h"
#include "common/config.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/event_socket.h"
#include "common/module.h"
#include "common/safe_io.h"
#include "common/version.h"
#include "global/global_init.h"
#include "global/signal_handler.h"
#include "include/rados/librados.hpp"
#include "include/rbd/librbd.hpp"
#include "include/stringify.h"
#include "include/xlist.h"
#include "mon/MonClient.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "rbd-nbd: "
using namespace std;
namespace fs = std::filesystem;
using boost::endian::big_to_native;
using boost::endian::native_to_big;
// Subcommand selected from the rbd-nbd command line (see usage()).
enum Command {
  None,    // no subcommand parsed yet
  Map,     // map image to an nbd device
  Unmap,   // unmap an nbd device
  Attach,  // attach image to an existing nbd device
  Detach,  // detach image from an nbd device
  List     // list mapped nbd devices
};
// Parsed rbd-nbd command-line configuration.
struct Config {
  int nbds_max = 0;            // override for nbd module param nbds_max
  int max_part = 255;          // override for nbd module param max_part
  int io_timeout = -1;         // nbd IO timeout in seconds (-1: unset)
  int reattach_timeout = 30;   // nbd re-attach timeout in seconds

  bool exclusive = false;      // forbid writes by other clients
  bool notrim = false;         // disable trim/discard
  bool quiesce = false;        // use quiesce callbacks
  bool readonly = false;       // map read-only
  bool set_max_part = false;   // whether max_part was explicitly requested
  bool try_netlink = false;    // prefer the netlink interface over ioctl
  bool show_cookie = false;    // display the device cookie after mapping

  std::string poolname;
  std::string nsname;          // rados namespace (optional)
  std::string imgname;
  std::string snapname;        // snapshot name (optional)
  std::string devpath;         // requested /dev/nbd<num> path (optional)

  std::string quiesce_hook = CMAKE_INSTALL_LIBEXECDIR "/rbd-nbd/rbd-nbd_quiesce";

  std::string format;          // output format for list-mapped
  bool pretty_format = false;

  std::vector<librbd::encryption_format_t> encryption_formats;
  std::vector<std::string> encryption_passphrase_files;

  Command command = None;
  int pid = 0;
  std::string cookie;
  uint64_t snapid = CEPH_NOSNAP;

  // Renders "pool/[ns/]image[@snap]" for log and list output.
  std::string image_spec() const {
    std::string spec = poolname + "/";
    if (!nsname.empty()) {
      spec += nsname + "/";
    }
    spec += imgname;
    if (!snapname.empty()) {
      spec += "@" + snapname;
    }
    return spec;
  }
};
// Print command-line help for all subcommands, then the generic daemon
// options (via generic_server_usage()).
static void usage()
{
  std::cout << "Usage: rbd-nbd [options] map <image-or-snap-spec>    Map image to nbd device\n"
            << "               detach <device|image-or-snap-spec>    Detach image from nbd device\n"
            << "               [options] attach <image-or-snap-spec> Attach image to nbd device\n"
            << "               unmap <device|image-or-snap-spec>     Unmap nbd device\n"
            << "               [options] list-mapped                 List mapped nbd devices\n"
            << "Map and attach options:\n"
            << "  --device <device path>        Specify nbd device path (/dev/nbd{num})\n"
            << "  --encryption-format luks|luks1|luks2\n"
            << "                                Image encryption format (default: luks)\n"
            << "  --encryption-passphrase-file  Path of file containing passphrase for unlocking image encryption\n"
            << "  --exclusive                   Forbid writes by other clients\n"
            << "  --notrim                      Turn off trim/discard\n"
            << "  --io-timeout <sec>            Set nbd IO timeout\n"
            << "  --max_part <limit>            Override for module param max_part\n"
            << "  --nbds_max <limit>            Override for module param nbds_max\n"
            << "  --quiesce                     Use quiesce callbacks\n"
            << "  --quiesce-hook <path>         Specify quiesce hook path\n"
            << "                                (default: " << Config().quiesce_hook << ")\n"
            << "  --read-only                   Map read-only\n"
            << "  --reattach-timeout <sec>      Set nbd re-attach timeout\n"
            << "                                (default: " << Config().reattach_timeout << ")\n"
            << "  --try-netlink                 Use the nbd netlink interface\n"
            << "  --show-cookie                 Show device cookie\n"
            << "  --cookie                      Specify device cookie\n"
            << "  --snap-id <snap-id>           Specify snapshot by ID instead of by name\n"
            << "\n"
            << "Unmap and detach options:\n"
            << "  --device <device path>        Specify nbd device path (/dev/nbd{num})\n"
            << "  --snap-id <snap-id>           Specify snapshot by ID instead of by name\n"
            << "\n"
            << "List options:\n"
            << "  --format plain|json|xml Output format (default: plain)\n"
            << "  --pretty-format         Pretty formatting (json and xml)\n"
            << std::endl;
  generic_server_usage();
}
// Open fd of the mapped /dev/nbdX device and its numeric index; both are
// set during map/attach and consumed by the resize/disconnect paths.
static int nbd = -1;
static int nbd_index = -1;
// Event socket the signal handler uses to wake the reader thread.
static EventSocket terminate_event_sock;
// NBD devices are exposed with a 512-byte logical block size.
#define RBD_NBD_BLKSIZE 512UL
#define HELP_INFO 1
#define VERSION_INFO 2
// Forward declarations — definitions appear later in this file.
static int parse_args(vector<const char*>& args, std::ostream *err_msg,
                      Config *cfg);
static int netlink_disconnect(int index);
static int netlink_resize(int nbd_index, uint64_t size);
static int run_quiesce_hook(const std::string &quiesce_hook,
                            const std::string &devpath,
                            const std::string &command);
static std::string get_cookie(const std::string &devpath);
// NBDServer: services the kernel's nbd socket for one mapped rbd image.
//
// Three threads cooperate:
//  - reader_thread: reads nbd requests from the socketpair fd and issues
//    the matching librbd AIO (read/write/flush/discard);
//  - writer_thread: sends completed replies back to the kernel;
//  - quiesce_thread (only with --quiesce): runs the quiesce hook around
//    librbd quiesce/unquiesce notifications.
// IO requests travel io_pending -> (aio_callback) -> io_finished; the
// shared `lock`/`cond` pair protects both lists and the quiesce flag.
class NBDServer
{
public:
  // Handle used for image.quiesce_complete(); assigned by the mapping
  // code that registers the quiesce watch (outside this class).
  uint64_t quiesce_watch_handle = 0;
private:
  int fd;                 // our end of the socketpair connected to the kernel
  librbd::Image &image;
  Config *cfg;
public:
  NBDServer(int fd, librbd::Image& image, Config *cfg)
    : fd(fd)
    , image(image)
    , cfg(cfg)
    , reader_thread(*this, &NBDServer::reader_entry)
    , writer_thread(*this, &NBDServer::writer_entry)
    , quiesce_thread(*this, &NBDServer::quiesce_entry)
  {
    // If the rbd cache is disabled (or writethrough-until-flush is off)
    // an internally-initiated flush is safe even before the client has
    // issued its first flush; see wait_inflight_io().
    std::vector<librbd::config_option_t> options;
    image.config_list(&options);
    for (auto &option : options) {
      if ((option.name == std::string("rbd_cache") ||
           option.name == std::string("rbd_cache_writethrough_until_flush")) &&
          option.value == "false") {
        allow_internal_flush = true;
        break;
      }
    }
  }
  Config *get_cfg() const {
    return cfg;
  }
private:
  int terminate_event_fd = -1;   // eventfd backing terminate_event_sock
  ceph::mutex disconnect_lock =
    ceph::make_mutex("NBDServer::DisconnectLocker");
  ceph::condition_variable disconnect_cond;  // signalled when reader terminates
  std::atomic<bool> terminated = { false };
  std::atomic<bool> allow_internal_flush = { false };
  // One in-flight nbd request: raw request/reply headers plus payload.
  struct IOContext
  {
    xlist<IOContext*>::item item;   // linkage in io_pending / io_finished
    NBDServer *server = nullptr;
    struct nbd_request request;
    struct nbd_reply reply;
    bufferlist data;
    int command = 0;                // NBD_CMD_* (low 16 bits of request.type)
    IOContext()
      : item(this)
    {}
  };
  friend std::ostream &operator<<(std::ostream &os, const IOContext &ctx);
  ceph::mutex lock = ceph::make_mutex("NBDServer::Locker");
  ceph::condition_variable cond;
  xlist<IOContext*> io_pending;    // submitted to librbd, not yet completed
  xlist<IOContext*> io_finished;   // completed, awaiting reply to the kernel
  // Register a request as in-flight.
  void io_start(IOContext *ctx)
  {
    std::lock_guard l{lock};
    io_pending.push_back(&ctx->item);
  }
  // Move a completed request to the finished list and wake the writer.
  void io_finish(IOContext *ctx)
  {
    std::lock_guard l{lock};
    ceph_assert(ctx->item.is_on_list());
    ctx->item.remove_myself();
    io_finished.push_back(&ctx->item);
    cond.notify_all();
  }
  // Block until a completed request is available; returns NULL once the
  // server is terminated and all pending IO has drained.
  IOContext *wait_io_finish()
  {
    std::unique_lock l{lock};
    cond.wait(l, [this] {
      return !io_finished.empty() ||
             (io_pending.empty() && terminated);
    });
    if (io_finished.empty())
      return NULL;
    IOContext *ret = io_finished.front();
    io_finished.pop_front();
    return ret;
  }
  // Wait for all pending IO to complete, then free the finished contexts.
  void wait_clean()
  {
    std::unique_lock l{lock};
    cond.wait(l, [this] { return io_pending.empty(); });
    while(!io_finished.empty()) {
      std::unique_ptr<IOContext> free_ctx(io_finished.front());
      io_finished.pop_front();
    }
  }
  void assert_clean()
  {
    std::unique_lock l{lock};
    ceph_assert(!reader_thread.is_started());
    ceph_assert(!writer_thread.is_started());
    ceph_assert(io_pending.empty());
    ceph_assert(io_finished.empty());
  }
  // librbd AIO completion: translate the result into an nbd reply.
  static void aio_callback(librbd::completion_t cb, void *arg)
  {
    librbd::RBD::AioCompletion *aio_completion =
      reinterpret_cast<librbd::RBD::AioCompletion*>(cb);
    IOContext *ctx = reinterpret_cast<IOContext *>(arg);
    int ret = aio_completion->get_return_value();
    dout(20) << __func__ << ": " << *ctx << dendl;
    if (ret == -EINVAL) {
      // if shrinking an image, a pagecache writeback might reference
      // extents outside of the range of the new image extents
      dout(0) << __func__ << ": masking IO out-of-bounds error" << dendl;
      ctx->data.clear();
      ret = 0;
    }
    if (ret < 0) {
      ctx->reply.error = native_to_big<uint32_t>(-ret);
    } else if ((ctx->command == NBD_CMD_READ) &&
               ret < static_cast<int>(ctx->request.len)) {
      // Short read past the end of the image: pad with zeroes so the
      // kernel receives exactly request.len bytes.
      int pad_byte_count = static_cast<int> (ctx->request.len) - ret;
      ctx->data.append_zero(pad_byte_count);
      dout(20) << __func__ << ": " << *ctx << ": Pad byte count: "
               << pad_byte_count << dendl;
      ctx->reply.error = native_to_big<uint32_t>(0);
    } else {
      ctx->reply.error = native_to_big<uint32_t>(0);
    }
    ctx->server->io_finish(ctx);
    aio_completion->release();
  }
  // Reader thread: poll the nbd socket (and the terminate eventfd), parse
  // requests and dispatch them to librbd as AIO.
  void reader_entry()
  {
    struct pollfd poll_fds[2];
    memset(poll_fds, 0, sizeof(struct pollfd) * 2);
    poll_fds[0].fd = fd;
    poll_fds[0].events = POLLIN;
    poll_fds[1].fd = terminate_event_fd;
    poll_fds[1].events = POLLIN;
    while (true) {
      std::unique_ptr<IOContext> ctx(new IOContext());
      ctx->server = this;
      dout(20) << __func__ << ": waiting for nbd request" << dendl;
      int r = poll(poll_fds, 2, -1);
      if (r == -1) {
        if (errno == EINTR) {
          continue;
        }
        r = -errno;
        derr << "failed to poll nbd: " << cpp_strerror(r) << dendl;
        goto error;
      }
      if ((poll_fds[1].revents & POLLIN) != 0) {
        dout(0) << __func__ << ": terminate received" << dendl;
        goto signal;
      }
      if ((poll_fds[0].revents & POLLIN) == 0) {
        dout(20) << __func__ << ": nothing to read" << dendl;
        continue;
      }
      r = safe_read_exact(fd, &ctx->request, sizeof(struct nbd_request));
      if (r < 0) {
        derr << "failed to read nbd request header: " << cpp_strerror(r)
             << dendl;
        goto error;
      }
      if (ctx->request.magic != htonl(NBD_REQUEST_MAGIC)) {
        derr << "invalid nbd request header" << dendl;
        goto signal;
      }
      // Wire format is big-endian; convert in place.
      ctx->request.from = big_to_native(ctx->request.from);
      ctx->request.type = big_to_native(ctx->request.type);
      ctx->request.len = big_to_native(ctx->request.len);
      ctx->reply.magic = native_to_big<uint32_t>(NBD_REPLY_MAGIC);
      memcpy(ctx->reply.handle, ctx->request.handle, sizeof(ctx->reply.handle));
      // Low 16 bits carry the command; the rest are flags.
      ctx->command = ctx->request.type & 0x0000ffff;
      dout(20) << *ctx << ": start" << dendl;
      switch (ctx->command)
      {
        case NBD_CMD_DISC:
          // NBD_DO_IT will return when pipe is closed
          dout(0) << "disconnect request received" << dendl;
          goto signal;
        case NBD_CMD_WRITE:
          bufferptr ptr(ctx->request.len);
          r = safe_read_exact(fd, ptr.c_str(), ctx->request.len);
          if (r < 0) {
            derr << *ctx << ": failed to read nbd request data: "
                 << cpp_strerror(r) << dendl;
            goto error;
          }
          ctx->data.push_back(ptr);
          break;
      }
      // Ownership of the context passes to the AIO completion path.
      IOContext *pctx = ctx.release();
      io_start(pctx);
      librbd::RBD::AioCompletion *c = new librbd::RBD::AioCompletion(pctx, aio_callback);
      switch (pctx->command)
      {
        case NBD_CMD_WRITE:
          image.aio_write(pctx->request.from, pctx->request.len, pctx->data, c);
          break;
        case NBD_CMD_READ:
          image.aio_read(pctx->request.from, pctx->request.len, pctx->data, c);
          break;
        case NBD_CMD_FLUSH:
          image.aio_flush(c);
          // The client flushes, so an internal flush cannot reorder
          // against anything the client depends on.
          allow_internal_flush = true;
          break;
        case NBD_CMD_TRIM:
          image.aio_discard(pctx->request.from, pctx->request.len, c);
          break;
        default:
          derr << *pctx << ": invalid request command" << dendl;
          c->release();
          goto signal;
      }
    }
error:
    {
      // Force the kernel to drop the connection so NBD_DO_IT returns.
      int r = netlink_disconnect(nbd_index);
      if (r == 1) {
        // Netlink unavailable; fall back to the legacy ioctl.
        ioctl(nbd, NBD_DISCONNECT);
      }
    }
signal:
    std::lock_guard l{lock};
    terminated = true;
    cond.notify_all();
    std::lock_guard disconnect_l{disconnect_lock};
    disconnect_cond.notify_all();
    dout(20) << __func__ << ": terminated" << dendl;
  }
  // Writer thread: send finished replies (header, then data for reads)
  // back to the kernel until the reader terminates and IO drains.
  void writer_entry()
  {
    while (true) {
      dout(20) << __func__ << ": waiting for io request" << dendl;
      std::unique_ptr<IOContext> ctx(wait_io_finish());
      if (!ctx) {
        dout(20) << __func__ << ": no io requests, terminating" << dendl;
        goto done;
      }
      dout(20) << __func__ << ": got: " << *ctx << dendl;
      int r = safe_write(fd, &ctx->reply, sizeof(struct nbd_reply));
      if (r < 0) {
        derr << *ctx << ": failed to write reply header: " << cpp_strerror(r)
             << dendl;
        goto error;
      }
      if (ctx->command == NBD_CMD_READ && ctx->reply.error == htonl(0)) {
        r = ctx->data.write_fd(fd);
        if (r < 0) {
          derr << *ctx << ": failed to write replay data: " << cpp_strerror(r)
               << dendl;
          goto error;
        }
      }
      dout(20) << *ctx << ": finish" << dendl;
    }
error:
    wait_clean();
done:
    ::shutdown(fd, SHUT_RDWR);
    dout(20) << __func__ << ": terminated" << dendl;
  }
  // Block until a quiesce request arrives; false when terminating.
  bool wait_quiesce() {
    dout(20) << __func__ << dendl;
    std::unique_lock locker{lock};
    cond.wait(locker, [this] { return quiesce || terminated; });
    if (terminated) {
      return false;
    }
    dout(20) << __func__ << ": got quiesce request" << dendl;
    return true;
  }
  // Block (with `lock` held via locker) until unquiesce or termination.
  void wait_unquiesce(std::unique_lock<ceph::mutex> &locker) {
    dout(20) << __func__ << dendl;
    cond.wait(locker, [this] { return !quiesce || terminated; });
    dout(20) << __func__ << ": got unquiesce request" << dendl;
  }
  // Flush in-flight IO before completing a quiesce, but only when it is
  // known safe (see allow_internal_flush) and we hold the exclusive lock
  // (or the image has no exclusive-lock feature).
  void wait_inflight_io() {
    if (!allow_internal_flush) {
        return;
    }
    uint64_t features = 0;
    image.features(&features);
    if ((features & RBD_FEATURE_EXCLUSIVE_LOCK) != 0) {
      bool is_owner = false;
      image.is_exclusive_lock_owner(&is_owner);
      if (!is_owner) {
        return;
      }
    }
    dout(20) << __func__ << dendl;
    int r = image.flush();
    if (r < 0) {
      derr << "flush failed: " << cpp_strerror(r) << dendl;
    }
  }
  // Quiesce thread: run the hook, flush, ack to librbd, then wait for the
  // matching unquiesce and run the hook again.
  void quiesce_entry()
  {
    ceph_assert(cfg->quiesce);
    while (wait_quiesce()) {
      int r = run_quiesce_hook(cfg->quiesce_hook, cfg->devpath, "quiesce");
      wait_inflight_io();
      {
        std::unique_lock locker{lock};
        ceph_assert(quiesce == true);
        image.quiesce_complete(quiesce_watch_handle, r);
        if (r < 0) {
          // Hook failed: report the error and skip the unquiesce hook.
          quiesce = false;
          continue;
        }
        wait_unquiesce(locker);
      }
      run_quiesce_hook(cfg->quiesce_hook, cfg->devpath, "unquiesce");
    }
    dout(20) << __func__ << ": terminated" << dendl;
  }
  // Small adapter that runs one NBDServer member function on a Thread.
  class ThreadHelper : public Thread
  {
  public:
    typedef void (NBDServer::*entry_func)();
  private:
    NBDServer &server;
    entry_func func;
  public:
    ThreadHelper(NBDServer &_server, entry_func _func)
      :server(_server)
      ,func(_func)
    {}
  protected:
    void* entry() override
    {
      (server.*func)();
      return NULL;
    }
  } reader_thread, writer_thread, quiesce_thread;
  bool started = false;
  bool quiesce = false;   // quiesce requested and not yet unquiesced
public:
  // Spawn the worker threads (idempotent).
  void start()
  {
    if (!started) {
      dout(10) << __func__ << ": starting" << dendl;
      started = true;
      terminate_event_fd = eventfd(0, EFD_NONBLOCK);
      ceph_assert(terminate_event_fd > 0);
      int r = terminate_event_sock.init(terminate_event_fd,
                                        EVENT_SOCKET_TYPE_EVENTFD);
      ceph_assert(r >= 0);
      reader_thread.create("rbd_reader");
      writer_thread.create("rbd_writer");
      if (cfg->quiesce) {
        quiesce_thread.create("rbd_quiesce");
      }
    }
  }
  // Block the caller until the reader thread signals disconnect.
  void wait_for_disconnect()
  {
    if (!started)
      return;
    std::unique_lock l{disconnect_lock};
    disconnect_cond.wait(l);
  }
  // Called from NBDQuiesceWatchCtx (librbd watch callback context).
  void notify_quiesce() {
    dout(10) << __func__ << dendl;
    ceph_assert(cfg->quiesce);
    std::unique_lock locker{lock};
    ceph_assert(quiesce == false);
    quiesce = true;
    cond.notify_all();
  }
  void notify_unquiesce() {
    dout(10) << __func__ << dendl;
    ceph_assert(cfg->quiesce);
    std::unique_lock locker{lock};
    ceph_assert(quiesce == true);
    quiesce = false;
    cond.notify_all();
  }
  // Wake the reader via the eventfd, join all threads and verify no IO
  // contexts were leaked.
  ~NBDServer()
  {
    if (started) {
      dout(10) << __func__ << ": terminating" << dendl;
      terminate_event_sock.notify();
      reader_thread.join();
      writer_thread.join();
      if (cfg->quiesce) {
        quiesce_thread.join();
      }
      assert_clean();
      close(terminate_event_fd);
      started = false;
    }
  }
};
// Render an IOContext for logging: "[<handle> <CMD> <from>~<len> <err>]".
// Handle, offset and length are printed in hex; the reply error in decimal.
std::ostream &operator<<(std::ostream &os, const NBDServer::IOContext &ctx) {
  // struct nbd_request is packed, so ctx.request.handle is not guaranteed
  // to be suitably aligned for a uint64_t; read it with memcpy instead of
  // a type-punned pointer cast, which is undefined behaviour.
  uint64_t handle;
  memcpy(&handle, ctx.request.handle, sizeof(handle));
  // std::hex intentionally stays in effect for from/len below.
  os << "[" << std::hex << big_to_native(handle);
  switch (ctx.command)
  {
  case NBD_CMD_WRITE:
    os << " WRITE ";
    break;
  case NBD_CMD_READ:
    os << " READ ";
    break;
  case NBD_CMD_FLUSH:
    os << " FLUSH ";
    break;
  case NBD_CMD_TRIM:
    os << " TRIM ";
    break;
  case NBD_CMD_DISC:
    os << " DISC ";
    break;
  default:
    os << " UNKNOWN(" << ctx.command << ") ";
    break;
  }
  os << ctx.request.from << "~" << ctx.request.len << " "
     << std::dec << big_to_native(ctx.reply.error) << "]";
  return os;
}
// Bridges librbd quiesce watch callbacks to the NBDServer quiesce thread.
class NBDQuiesceWatchCtx : public librbd::QuiesceWatchCtx
{
public:
  NBDQuiesceWatchCtx(NBDServer *server) : server(server) {
  }
  void handle_quiesce() override {
    server->notify_quiesce();
  }
  void handle_unquiesce() override {
    server->notify_unquiesce();
  }
private:
  NBDServer *server;   // not owned
};
// NBDWatchCtx: reacts to rbd image update notifications by propagating
// image resizes to the kernel block device (netlink or ioctl), flushing
// caches and triggering a partition-table rescan.
class NBDWatchCtx : public librbd::UpdateWatchCtx
{
private:
  int fd;                   // open fd of the /dev/nbdX device
  int nbd_index;
  bool use_netlink;         // resize via netlink rather than NBD_SET_SIZE
  librados::IoCtx &io_ctx;
  librbd::Image &image;
  unsigned long size;       // last size pushed to the kernel, in bytes
public:
  NBDWatchCtx(int _fd,
              int _nbd_index,
              bool _use_netlink,
              librados::IoCtx &_io_ctx,
              librbd::Image &_image,
              unsigned long _size)
    : fd(_fd)
    , nbd_index(_nbd_index)
    , use_netlink(_use_netlink)
    , io_ctx(_io_ctx)
    , image(_image)
    , size(_size)
  { }
  ~NBDWatchCtx() override {}
  // Called by librbd on image update; detects size changes and pushes
  // the new size to the kernel.
  void handle_notify() override
  {
    librbd::image_info_t info;
    if (image.stat(info, sizeof(info)) == 0) {
      unsigned long new_size = info.size;
      int ret;
      if (new_size != size) {
        dout(5) << "resize detected" << dendl;
        // Drop the pagecache before changing the device size.
        if (ioctl(fd, BLKFLSBUF, NULL) < 0)
          derr << "invalidate page cache failed: " << cpp_strerror(errno)
               << dendl;
        if (use_netlink) {
          ret = netlink_resize(nbd_index, new_size);
        } else {
          ret = ioctl(fd, NBD_SET_SIZE, new_size);
          if (ret < 0)
            derr << "resize failed: " << cpp_strerror(errno) << dendl;
        }
        // Only record the new size if the kernel accepted it.
        if (!ret)
          size = new_size;
        if (ioctl(fd, BLKRRPART, NULL) < 0) {
          derr << "rescan of partition table failed: " << cpp_strerror(errno)
               << dendl;
        }
        if (image.invalidate_cache() < 0)
          derr << "invalidate rbd cache failed" << dendl;
      }
    }
  }
};
// NBDListIterator: walks /sys/block/nbd* in index order and, for each
// device mapped by an rbd-nbd process, reconstructs that process' Config
// by re-parsing its /proc/<pid>/cmdline with parse_args().
class NBDListIterator {
public:
  // Fill *cfg with the next mapped device's configuration.  Returns
  // false once /sys/block/nbd<m_index> no longer exists.
  bool get(Config *cfg) {
    while (true) {
      std::string nbd_path = "/sys/block/nbd" + stringify(m_index);
      if(access(nbd_path.c_str(), F_OK) != 0) {
        return false;
      }
      *cfg = Config();
      cfg->devpath = "/dev/nbd" + stringify(m_index++);
      int pid;
      std::ifstream ifs;
      ifs.open(nbd_path + "/pid", std::ifstream::in);
      if (!ifs.is_open()) {
        // No pid attribute: device exists but is not connected.
        continue;
      }
      ifs >> pid;
      ifs.close();
      // If the rbd-nbd is re-attached the pid may store garbage
      // here. We are sure this is the case when it is negative or
      // zero. Then we just try to find the attached process scanning
      // /proc fs. If it is positive we check the process with this
      // pid first and if it is not rbd-nbd fallback to searching the
      // attached process.
      do {
        if (pid <= 0) {
          pid = find_attached(cfg->devpath);
          if (pid <= 0) {
            break;
          }
        }
        if (get_mapped_info(pid, cfg) >= 0) {
          return true;
        }
        pid = -1;
      } while (true);
    }
  }
private:
  int m_index = 0;                          // next nbd index to probe
  std::map<int, Config> m_mapped_info_cache; // pid -> parsed Config
  // Parse /proc/<pid>/cmdline into *cfg.  Returns 0 on success; negative
  // if the process is not an rbd-nbd map/attach for cfg->devpath.
  int get_mapped_info(int pid, Config *cfg) {
    ceph_assert(!cfg->devpath.empty());
    auto it = m_mapped_info_cache.find(pid);
    if (it != m_mapped_info_cache.end()) {
      if (it->second.devpath != cfg->devpath) {
        return -EINVAL;
      }
      *cfg = it->second;
      return 0;
    }
    // Insert a placeholder so repeated failures are cached too.
    m_mapped_info_cache[pid] = {};
    int r;
    std::string path = "/proc/" + stringify(pid) + "/comm";
    std::ifstream ifs;
    std::string comm;
    ifs.open(path.c_str(), std::ifstream::in);
    if (!ifs.is_open())
      return -1;
    ifs >> comm;
    if (comm != "rbd-nbd") {
      return -EINVAL;
    }
    ifs.close();
    path = "/proc/" + stringify(pid) + "/cmdline";
    std::string cmdline;
    std::vector<const char*> args;
    ifs.open(path.c_str(), std::ifstream::in);
    if (!ifs.is_open())
      return -1;
    ifs >> cmdline;
    if (cmdline.empty()) {
      return -EINVAL;
    }
    // cmdline is NUL-separated; split it into argv-style pointers,
    // skipping argv[0] (the executable name).
    for (unsigned i = 0; i < cmdline.size(); i++) {
      char *arg = &cmdline[i];
      if (i == 0) {
        if (strcmp(basename(arg) , "rbd-nbd") != 0) {
          return -EINVAL;
        }
      } else {
        args.push_back(arg);
      }
      while (cmdline[i] != '\0') {
        i++;
      }
    }
    std::ostringstream err_msg;
    Config c;
    r = parse_args(args, &err_msg, &c);
    if (r < 0) {
      return r;
    }
    if (c.command != Map && c.command != Attach) {
      return -ENOENT;
    }
    c.pid = pid;
    m_mapped_info_cache.erase(pid);
    if (!c.devpath.empty()) {
      m_mapped_info_cache[pid] = c;
      if (c.devpath != cfg->devpath) {
        return -ENOENT;
      }
    } else {
      // Process was started without --device; adopt the device we are
      // currently probing.
      c.devpath = cfg->devpath;
    }
    c.cookie = get_cookie(cfg->devpath);
    *cfg = c;
    return 0;
  }
  // Scan /proc for an rbd-nbd "attach" process serving devpath; returns
  // its pid or -1 if none is found.
  int find_attached(const std::string &devpath) {
    for (auto &entry : fs::directory_iterator("/proc")) {
      if (!fs::is_directory(entry.status())) {
        continue;
      }
      int pid;
      try {
        pid = boost::lexical_cast<uint64_t>(entry.path().filename().c_str());
      } catch (boost::bad_lexical_cast&) {
        // Non-numeric /proc entry (e.g. "meminfo"); skip.
        continue;
      }
      Config cfg;
      cfg.devpath = devpath;
      if (get_mapped_info(pid, &cfg) >=0 && cfg.command == Attach) {
        return cfg.pid;
      }
    }
    return -1;
  }
};
// Owns the librbd encryption spec option structs allocated during arg
// parsing; the destructor zeroizes each passphrase before freeing it so
// secrets do not linger in freed heap memory.
struct EncryptionOptions {
  std::vector<librbd::encryption_spec_t> specs;
  ~EncryptionOptions() {
    for (auto& spec : specs) {
      switch (spec.format) {
      case RBD_ENCRYPTION_FORMAT_LUKS: {
        auto opts =
            static_cast<librbd::encryption_luks_format_options_t*>(spec.opts);
        ceph_memzero_s(opts->passphrase.data(), opts->passphrase.size(),
                       opts->passphrase.size());
        delete opts;
        break;
      }
      case RBD_ENCRYPTION_FORMAT_LUKS1: {
        auto opts =
            static_cast<librbd::encryption_luks1_format_options_t*>(spec.opts);
        ceph_memzero_s(opts->passphrase.data(), opts->passphrase.size(),
                       opts->passphrase.size());
        delete opts;
        break;
      }
      case RBD_ENCRYPTION_FORMAT_LUKS2: {
        auto opts =
            static_cast<librbd::encryption_luks2_format_options_t*>(spec.opts);
        ceph_memzero_s(opts->passphrase.data(), opts->passphrase.size(),
                       opts->passphrase.size());
        delete opts;
        break;
      }
      default:
        // Unknown format can only mean a parsing bug; fail hard.
        ceph_abort();
      }
    }
  }
};
// Read back the cookie (netlink backend identifier) recorded for the
// given /dev/nbdX device.  Returns an empty string when the sysfs
// attribute is absent (device not mapped, or kernel without support).
static std::string get_cookie(const std::string &devpath)
{
  const std::string sysfs_attr =
    "/sys/block/" + devpath.substr(sizeof("/dev/") - 1) + "/backend";
  std::string cookie;
  std::ifstream in(sysfs_attr);
  if (in) {
    std::getline(in, cookie);
  }
  return cookie;
}
// Load the kernel nbd module, forwarding the nbds_max/max_part module
// parameters when requested.  If the module is already loaded the
// parameters cannot take effect, so a warning is printed instead.
// Returns 0 on success or a negative error from module_load().
static int load_module(Config *cfg)
{
  std::ostringstream param;
  if (cfg->nbds_max)
    param << "nbds_max=" << cfg->nbds_max;
  if (cfg->max_part)
    param << " max_part=" << cfg->max_part;
  if (access("/sys/module/nbd", F_OK) == 0) {
    // Module already present: parameters would be silently ignored.
    if (cfg->nbds_max || cfg->set_max_part)
      std::cerr << "rbd-nbd: ignoring kernel module parameter options: "
                   "nbd module already loaded"
                << std::endl;
    return 0;
  }
  const int ret = module_load("nbd", param.str().c_str());
  if (ret < 0)
    std::cerr << "rbd-nbd: failed to load nbd kernel module: "
              << cpp_strerror(-ret) << std::endl;
  return ret;
}
// Cross-check the block device size the kernel reports in sysfs against
// the expected image size.  Guards against old-kernel bugs where large
// image sizes overflow.  Returns 0 on success, -EINVAL on mismatch or if
// the sysfs attribute cannot be read.
static int check_device_size(int nbd_index, unsigned long expected_size)
{
  // There are bugs with some older kernel versions that result in an
  // overflow for large image sizes. This check is to ensure we are
  // not affected.
  const std::string path =
    "/sys/block/nbd" + std::to_string(nbd_index) + "/size";
  std::ifstream ifs(path);
  if (!ifs.is_open()) {
    std::cerr << "rbd-nbd: failed to open " << path << std::endl;
    return -EINVAL;
  }
  unsigned long size = 0;
  ifs >> size;
  size *= RBD_NBD_BLKSIZE;   // sysfs reports 512-byte sectors
  if (size == 0) {
    // Newer kernel versions will report real size only after nbd
    // connect. Assume this is the case and return success.
    return 0;
  }
  if (size != expected_size) {
    std::cerr << "rbd-nbd: kernel reported invalid device size (" << size
              << ", expected " << expected_size << ")" << std::endl;
    return -EINVAL;
  }
  return 0;
}
// Extract the numeric index N from a device path of the form /dev/nbdN.
// Returns the parsed index on success, or a negative error code when the
// path does not match the expected pattern.
static int parse_nbd_index(const std::string& devpath)
{
  int index;
  const int matched = std::sscanf(devpath.c_str(), "/dev/nbd%d", &index);
  if (matched > 0) {
    return index;
  }
  // sscanf returns 0 on an early matching failure and EOF (negative)
  // when input runs out first; normalize the former to -EINVAL and
  // propagate the latter as-is.
  std::cerr << "rbd-nbd: invalid device path: " << devpath
            << " (expected /dev/nbd{num})" << std::endl;
  return (matched == 0) ? -EINVAL : matched;
}
// Set up an nbd device via the legacy ioctl interface: find an unused
// /dev/nbdX (or open the one requested in cfg->devpath), attach the
// socketpair fd with NBD_SET_SOCK and configure block size, device size,
// flags and IO timeout.  On success the globals `nbd' (device fd) and
// `nbd_index' are set and 0 is returned; a negative errno otherwise.
static int try_ioctl_setup(Config *cfg, int fd, uint64_t size,
                           uint64_t blksize, uint64_t flags)
{
  int index = 0, r;
  if (cfg->devpath.empty()) {
    char dev[64];
    const char *path = "/sys/module/nbd/parameters/nbds_max";
    int nbds_max = -1;
    if (access(path, F_OK) == 0) {
      std::ifstream ifs;
      ifs.open(path, std::ifstream::in);
      if (ifs.is_open()) {
        ifs >> nbds_max;
        ifs.close();
      }
    }
    // Probe device nodes in order until one accepts our socket.
    while (true) {
      snprintf(dev, sizeof(dev), "/dev/nbd%d", index);
      nbd = open(dev, O_RDWR);
      if (nbd < 0) {
        // open() returns -1 with the cause in errno.  The previous code
        // compared the fd against -EPERM, which only worked by accident
        // (EPERM == 1, so -EPERM == -1); test errno explicitly and
        // report the real error code.
        if (errno == EPERM && nbds_max != -1 && index < (nbds_max-1)) {
          ++index;
          continue;
        }
        r = -errno;
        cerr << "rbd-nbd: failed to find unused device" << std::endl;
        goto done;
      }
      r = ioctl(nbd, NBD_SET_SOCK, fd);
      if (r < 0) {
        // Device busy; try the next one.
        close(nbd);
        ++index;
        continue;
      }
      cfg->devpath = dev;
      break;
    }
  } else {
    r = parse_nbd_index(cfg->devpath);
    if (r < 0)
      goto done;
    index = r;
    nbd = open(cfg->devpath.c_str(), O_RDWR);
    if (nbd < 0) {
      r = -errno;  // was `r = nbd', i.e. always -1; keep the real errno
      cerr << "rbd-nbd: failed to open device: " << cfg->devpath << std::endl;
      goto done;
    }
    r = ioctl(nbd, NBD_SET_SOCK, fd);
    if (r < 0) {
      r = -errno;
      cerr << "rbd-nbd: the device " << cfg->devpath << " is busy" << std::endl;
      close(nbd);
      goto done;
    }
  }
  r = ioctl(nbd, NBD_SET_BLKSIZE, blksize);
  if (r < 0) {
    r = -errno;
    cerr << "rbd-nbd: NBD_SET_BLKSIZE failed" << std::endl;
    goto close_nbd;
  }
  r = ioctl(nbd, NBD_SET_SIZE, size);
  if (r < 0) {
    r = -errno;  // capture errno before streaming, which may clobber it
    cerr << "rbd-nbd: NBD_SET_SIZE failed" << std::endl;
    goto close_nbd;
  }
  // Flags (read-only, trim support, ...) are advisory; errors ignored.
  ioctl(nbd, NBD_SET_FLAGS, flags);
  if (cfg->io_timeout >= 0) {
    r = ioctl(nbd, NBD_SET_TIMEOUT, (unsigned long)cfg->io_timeout);
    if (r < 0) {
      r = -errno;
      cerr << "rbd-nbd: failed to set IO timeout: " << cpp_strerror(r)
           << std::endl;
      goto close_nbd;
    }
  }
  dout(10) << "ioctl setup complete for " << cfg->devpath << dendl;
  nbd_index = index;
  return 0;
close_nbd:
  if (r < 0) {
    // Undo NBD_SET_SOCK so the device can be reused.
    ioctl(nbd, NBD_CLEAR_SOCK);
    cerr << "rbd-nbd: failed to map, status: " << cpp_strerror(-r) << std::endl;
  }
  close(nbd);
done:
  return r;
}
// Close and free a netlink socket; a null pointer is tolerated so error
// paths can call this unconditionally.
static void netlink_cleanup(struct nl_sock *sock)
{
  if (sock) {
    nl_close(sock);
    nl_socket_free(sock);
  }
}
// Allocate a generic-netlink socket, connect it, and resolve the "nbd"
// family id into *id.  Returns the connected socket on success, or NULL
// when netlink (or the nbd family) is unavailable.
static struct nl_sock *netlink_init(int *id)
{
  struct nl_sock *sock = nl_socket_alloc();
  if (!sock) {
    std::cerr << "rbd-nbd: Could not allocate netlink socket." << std::endl;
    return NULL;
  }
  const int ret = genl_connect(sock);
  if (ret < 0) {
    std::cerr << "rbd-nbd: Could not connect netlink socket. Error " << ret
              << std::endl;
    nl_socket_free(sock);
    return NULL;
  }
  *id = genl_ctrl_resolve(sock, "nbd");
  if (*id < 0) {
    // nbd netlink interface not supported.
    nl_close(sock);
    nl_socket_free(sock);
    return NULL;
  }
  return sock;
}
// Ask the kernel to disconnect nbd device <index> via NBD_CMD_DISCONNECT.
// Returns 0 on success, 1 if netlink is unavailable (caller should fall
// back to the NBD_DISCONNECT ioctl), or -EIO on failure.
static int netlink_disconnect(int index)
{
  struct nl_sock *sock;
  struct nl_msg *msg;
  int ret, nl_id;
  sock = netlink_init(&nl_id);
  if (!sock)
    // Try ioctl
    return 1;
  nl_socket_modify_cb(sock, NL_CB_VALID, NL_CB_CUSTOM, genl_handle_msg, NULL);
  msg = nlmsg_alloc();
  if (!msg) {
    cerr << "rbd-nbd: Could not allocate netlink message." << std::endl;
    goto free_sock;
  }
  if (!genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, nl_id, 0, 0,
                   NBD_CMD_DISCONNECT, 0)) {
    cerr << "rbd-nbd: Could not setup message." << std::endl;
    goto nla_put_failure;
  }
  // NLA_PUT_U32 jumps to nla_put_failure on error.
  NLA_PUT_U32(msg, NBD_ATTR_INDEX, index);
  // nl_send_sync() consumes (frees) msg regardless of outcome.
  ret = nl_send_sync(sock, msg);
  netlink_cleanup(sock);
  if (ret < 0) {
    cerr << "rbd-nbd: netlink disconnect failed: " << nl_geterror(-ret)
         << std::endl;
    return -EIO;
  }
  return 0;
nla_put_failure:
  nlmsg_free(msg);
free_sock:
  netlink_cleanup(sock);
  return -EIO;
}
// Disconnect the nbd device identified by its /dev/nbdX path via
// netlink.  Propagates parse errors; returns 1 when netlink is
// unavailable so the caller can fall back to ioctl.
static int netlink_disconnect_by_path(const std::string& devpath)
{
  const int index = parse_nbd_index(devpath);
  return (index < 0) ? index : netlink_disconnect(index);
}
// Push a new device size to the kernel via NBD_CMD_RECONFIGURE.
// Returns 0 on success, 1 if netlink is unavailable, or -EIO on failure.
static int netlink_resize(int nbd_index, uint64_t size)
{
  struct nl_sock *sock;
  struct nl_msg *msg;
  int nl_id, ret;
  sock = netlink_init(&nl_id);
  if (!sock) {
    cerr << "rbd-nbd: Netlink interface not supported." << std::endl;
    return 1;
  }
  nl_socket_modify_cb(sock, NL_CB_VALID, NL_CB_CUSTOM, genl_handle_msg, NULL);
  msg = nlmsg_alloc();
  if (!msg) {
    cerr << "rbd-nbd: Could not allocate netlink message." << std::endl;
    goto free_sock;
  }
  if (!genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, nl_id, 0, 0,
                   NBD_CMD_RECONFIGURE, 0)) {
    cerr << "rbd-nbd: Could not setup message." << std::endl;
    goto free_msg;
  }
  // NLA_PUT_* jump to nla_put_failure on error.
  NLA_PUT_U32(msg, NBD_ATTR_INDEX, nbd_index);
  NLA_PUT_U64(msg, NBD_ATTR_SIZE_BYTES, size);
  // nl_send_sync() consumes (frees) msg, so failure skips free_msg.
  ret = nl_send_sync(sock, msg);
  if (ret < 0) {
    cerr << "rbd-nbd: netlink resize failed: " << nl_geterror(ret) << std::endl;
    goto free_sock;
  }
  netlink_cleanup(sock);
  dout(10) << "netlink resize complete for nbd" << nbd_index << dendl;
  return 0;
nla_put_failure:
free_msg:
  nlmsg_free(msg);
free_sock:
  netlink_cleanup(sock);
  return -EIO;
}
// Netlink callback for the NBD_CMD_CONNECT reply: extract the device
// index the kernel allocated and record it in cfg->devpath and the
// global nbd_index.
static int netlink_connect_cb(struct nl_msg *msg, void *arg)
{
  struct genlmsghdr *gnlh = (struct genlmsghdr *)nlmsg_data(nlmsg_hdr(msg));
  Config *cfg = (Config *)arg;
  struct nlattr *msg_attr[NBD_ATTR_MAX + 1];
  uint32_t index;
  int ret;
  ret = nla_parse(msg_attr, NBD_ATTR_MAX, genlmsg_attrdata(gnlh, 0),
                  genlmsg_attrlen(gnlh, 0), NULL);
  if (ret) {
    cerr << "rbd-nbd: Unsupported netlink reply" << std::endl;
    return -NLE_MSGTYPE_NOSUPPORT;
  }
  if (!msg_attr[NBD_ATTR_INDEX]) {
    cerr << "rbd-nbd: netlink connect reply missing device index." << std::endl;
    return -NLE_MSGTYPE_NOSUPPORT;
  }
  index = nla_get_u32(msg_attr[NBD_ATTR_INDEX]);
  cfg->devpath = "/dev/nbd" + stringify(index);
  nbd_index = index;
  return NL_OK;
}
// Build and send an NBD_CMD_CONNECT (or NBD_CMD_RECONFIGURE when
// reconnecting to an existing device) message carrying the device index,
// size, block size, flags, timeouts, cookie and the socket fd.  On a
// fresh connect the reply callback records the allocated device.
// Returns 0 on success, -ENOMEM/-EIO on failure.
static int netlink_connect(Config *cfg, struct nl_sock *sock, int nl_id, int fd,
                           uint64_t size, uint64_t flags, bool reconnect)
{
  struct nlattr *sock_attr;
  struct nlattr *sock_opt;
  struct nl_msg *msg;
  int ret;
  if (reconnect) {
    dout(10) << "netlink try reconnect for " << cfg->devpath << dendl;
    nl_socket_modify_cb(sock, NL_CB_VALID, NL_CB_CUSTOM, genl_handle_msg, NULL);
  } else {
    // On fresh connect, parse the reply to learn the allocated index.
    nl_socket_modify_cb(sock, NL_CB_VALID, NL_CB_CUSTOM, netlink_connect_cb,
                        cfg);
  }
  msg = nlmsg_alloc();
  if (!msg) {
    cerr << "rbd-nbd: Could not allocate netlink message." << std::endl;
    return -ENOMEM;
  }
  if (!genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, nl_id, 0, 0,
                   reconnect ? NBD_CMD_RECONFIGURE : NBD_CMD_CONNECT, 0)) {
    cerr << "rbd-nbd: Could not setup message." << std::endl;
    goto free_msg;
  }
  if (!cfg->devpath.empty()) {
    // A specific device was requested; pass its index to the kernel.
    ret = parse_nbd_index(cfg->devpath);
    if (ret < 0)
      goto free_msg;
    NLA_PUT_U32(msg, NBD_ATTR_INDEX, ret);
    if (reconnect) {
      nbd_index = ret;
    }
  }
  if (cfg->io_timeout >= 0)
    NLA_PUT_U64(msg, NBD_ATTR_TIMEOUT, cfg->io_timeout);
  NLA_PUT_U64(msg, NBD_ATTR_SIZE_BYTES, size);
  NLA_PUT_U64(msg, NBD_ATTR_BLOCK_SIZE_BYTES, RBD_NBD_BLKSIZE);
  NLA_PUT_U64(msg, NBD_ATTR_SERVER_FLAGS, flags);
  NLA_PUT_U64(msg, NBD_ATTR_DEAD_CONN_TIMEOUT, cfg->reattach_timeout);
  if (!cfg->cookie.empty())
    NLA_PUT_STRING(msg, NBD_ATTR_BACKEND_IDENTIFIER, cfg->cookie.c_str());
  // The socket fd is carried inside a nested NBD_ATTR_SOCKETS list.
  sock_attr = nla_nest_start(msg, NBD_ATTR_SOCKETS);
  if (!sock_attr) {
    cerr << "rbd-nbd: Could not init sockets in netlink message." << std::endl;
    goto free_msg;
  }
  sock_opt = nla_nest_start(msg, NBD_SOCK_ITEM);
  if (!sock_opt) {
    cerr << "rbd-nbd: Could not init sock in netlink message." << std::endl;
    goto free_msg;
  }
  NLA_PUT_U32(msg, NBD_SOCK_FD, fd);
  nla_nest_end(msg, sock_opt);
  nla_nest_end(msg, sock_attr);
  // nl_send_sync() consumes (frees) msg regardless of outcome.
  ret = nl_send_sync(sock, msg);
  if (ret < 0) {
    cerr << "rbd-nbd: netlink connect failed: " << nl_geterror(ret)
         << std::endl;
    return -EIO;
  }
  dout(10) << "netlink connect complete for " << cfg->devpath << dendl;
  return 0;
nla_put_failure:
free_msg:
  nlmsg_free(msg);
  return -EIO;
}
// Attempt device setup via the nbd netlink interface.  Returns 0 on
// success (global `nbd' holds the open device fd), 1 when netlink is
// unsupported (caller falls back to ioctl), or a negative errno.
static int try_netlink_setup(Config *cfg, int fd, uint64_t size, uint64_t flags,
                             bool reconnect)
{
  struct nl_sock *sock;
  int nl_id, ret;
  sock = netlink_init(&nl_id);
  if (!sock) {
    cerr << "rbd-nbd: Netlink interface not supported. Using ioctl interface."
         << std::endl;
    return 1;
  }
  dout(10) << "netlink interface supported." << dendl;
  ret = netlink_connect(cfg, sock, nl_id, fd, size, flags, reconnect);
  netlink_cleanup(sock);
  if (ret != 0)
    return ret;
  // The kernel has configured the device; open it so later resize and
  // disconnect paths can issue ioctls against it.
  nbd = open(cfg->devpath.c_str(), O_RDWR);
  if (nbd < 0) {
    // open() returns -1 with the error in errno; report the real errno
    // instead of the raw -1 (which was previously returned verbatim).
    ret = -errno;
    cerr << "rbd-nbd: failed to open device: " << cfg->devpath << std::endl;
    return ret;
  }
  return 0;
}
// Execute the quiesce hook as "<quiesce_hook> <devpath> <command>", where
// command is "quiesce" or "unquiesce".  The hook's stderr is captured and
// logged.  Returns 0 on success or a negative value on spawn/exit failure.
static int run_quiesce_hook(const std::string &quiesce_hook,
                            const std::string &devpath,
                            const std::string &command) {
  dout(10) << __func__ << ": " << quiesce_hook << " " << devpath << " "
           << command << dendl;
  SubProcess hook(quiesce_hook.c_str(), SubProcess::CLOSE, SubProcess::PIPE,
                  SubProcess::PIPE);
  hook.add_cmd_args(devpath.c_str(), command.c_str(), NULL);
  bufferlist err;
  int r = hook.spawn();
  if (r < 0) {
    err.append("subprocess spawn failed");
  } else {
    // Capture up to 16K of stderr for the log message below.
    err.read_fd(hook.get_stderr(), 16384);
    r = hook.join();
    if (r > 0) {
      // join() reports a positive exit status; normalize to negative.
      r = -r;
    }
  }
  if (r < 0) {
    derr << __func__ << ": " << quiesce_hook << " " << devpath << " "
         << command << " failed: " << err.to_str() << dendl;
  } else {
    dout(10) << " succeeded: " << err.to_str() << dendl;
  }
  return r;
}
// SIGINT/SIGTERM handler: wake the NBDServer reader thread through the
// terminate event socket so it can shut down cleanly.
static void handle_signal(int signum)
{
  ceph_assert(signum == SIGINT || signum == SIGTERM);
  derr << "*** Got signal " << sig_str(signum) << " ***" << dendl;
  dout(20) << __func__ << ": " << "notifying terminate" << dendl;
  ceph_assert(terminate_event_sock.is_valid());
  terminate_event_sock.notify();
}
// Create and start an NBDServer for the given socket fd and image, and
// install the daemon signal handlers.  Caller owns the returned server.
static NBDServer *start_server(int fd, librbd::Image& image, Config *cfg)
{
  NBDServer *server;
  server = new NBDServer(fd, image, cfg);
  server->start();
  init_async_signal_handler();
  register_async_signal_handler(SIGHUP, sighup_handler);
  register_async_signal_handler_oneshot(SIGINT, handle_signal);
  register_async_signal_handler_oneshot(SIGTERM, handle_signal);
  return server;
}
// Run the mapped device until disconnect.  With netlink the kernel owns
// the connection and we simply wait for the server's disconnect signal;
// with the ioctl interface NBD_DO_IT blocks in the kernel until the
// device is disconnected.  Daemonizes first if configured to.
static void run_server(Preforker& forker, NBDServer *server, bool netlink_used)
{
  if (g_conf()->daemonize) {
    global_init_postfork_finish(g_ceph_context);
    forker.daemonize();
  }
  if (netlink_used)
    server->wait_for_disconnect();
  else
    ioctl(nbd, NBD_DO_IT);
  unregister_async_signal_handler(SIGHUP, sighup_handler);
  unregister_async_signal_handler(SIGINT, handle_signal);
  unregister_async_signal_handler(SIGTERM, handle_signal);
  shutdown_async_signal_handler();
}
// Poll for process <pid> to exit by probing it with kill(pid, 0) twice a
// second for up to <timeout> seconds.  Returns 0 once the process is
// gone, -ETIMEDOUT on timeout, or another negative errno from kill().
// Eventually it should be removed when pidfd_open is widely supported.
static int wait_for_terminate_legacy(int pid, int timeout)
{
  int attempt = 0;
  while (true) {
    if (kill(pid, 0) == -1) {
      if (errno == ESRCH) {
        return 0;   // process has terminated
      }
      int r = -errno;
      std::cerr << "rbd-nbd: kill(" << pid << ", 0) failed: "
                << cpp_strerror(r) << std::endl;
      return r;
    }
    if (attempt >= timeout * 2) {
      break;        // two probes per second => timeout reached
    }
    usleep(500000);
    ++attempt;
  }
  std::cerr << "rbd-nbd: waiting for process exit timed out" << std::endl;
  return -ETIMEDOUT;
}
// Eventually it should be replaced with glibc' pidfd_open
// when it is widely available.
#ifdef __NR_pidfd_open
// Raw syscall wrapper: returns a pollable fd for the process, or -1 with
// errno set on failure.
static int pidfd_open(pid_t pid, unsigned int flags)
{
  return syscall(__NR_pidfd_open, pid, flags);
}
#else
// Syscall number unknown at build time: always report ENOSYS so callers
// fall back to the legacy wait loop.
static int pidfd_open(pid_t pid, unsigned int flags)
{
  errno = ENOSYS;
  return -1;
}
#endif
// Wait up to <timeout> seconds for process <pid> to exit, using
// pidfd_open(2) + poll(2) when available and falling back to the legacy
// kill(2)-probing loop otherwise.  Returns 0 once the process is gone,
// -ETIMEDOUT on timeout, or a negative errno.
static int wait_for_terminate(int pid, int timeout)
{
  int fd = pidfd_open(pid, 0);
  if (fd == -1) {
    if (errno == ENOSYS) {
      // Kernel/libc without pidfd_open support.
      return wait_for_terminate_legacy(pid, timeout);
    }
    if (errno == ESRCH) {
      // Process already exited.
      return 0;
    }
    int r = -errno;
    cerr << "rbd-nbd: pidfd_open(" << pid << ") failed: "
         << cpp_strerror(r) << std::endl;
    return r;
  }
  struct pollfd poll_fds[1];
  memset(poll_fds, 0, sizeof(struct pollfd));
  poll_fds[0].fd = fd;
  poll_fds[0].events = POLLIN;
  int r;
  // Retry on EINTR: a signal delivery is not a failure (previously an
  // interrupted poll was reported as an error).  Note the timeout is
  // restarted on retry, which can only extend the wait.
  do {
    r = poll(poll_fds, 1, timeout * 1000);
  } while (r == -1 && errno == EINTR);
  if (r == -1) {
    r = -errno;
    cerr << "rbd-nbd: failed to poll rbd-nbd process: " << cpp_strerror(r)
         << std::endl;
  } else if ((poll_fds[0].revents & POLLIN) == 0) {
    // poll returned without the pidfd becoming readable: timeout.
    cerr << "rbd-nbd: waiting for process exit timed out" << std::endl;
    r = -ETIMEDOUT;
  } else {
    r = 0;
  }
  close(fd);
  return r;
}
// Map an RBD image to a local /dev/nbdX device and serve I/O for it until
// the device is disconnected. Handles daemonization, optional exclusive
// locking, snapshot selection, encryption loading, and both the netlink
// and legacy-ioctl kernel setup paths. When `reconnect` is true, the
// existing device is re-attached instead of a new one being allocated.
// Never returns normally on the daemon path: forker.exit() terminates
// the process with EXIT_FAILURE iff r < 0.
static int do_map(int argc, const char *argv[], Config *cfg, bool reconnect)
{
  int r;
  librados::Rados rados;
  librbd::RBD rbd;
  librados::IoCtx io_ctx;
  librbd::Image image;
  int read_only = 0;
  unsigned long flags;
  unsigned long size;
  unsigned long blksize = RBD_NBD_BLKSIZE;
  bool use_netlink;
  // fd[0] is handed to the kernel NBD driver, fd[1] is served by NBDServer.
  int fd[2];
  librbd::image_info_t info;
  Preforker forker;
  NBDServer *server;
  auto args = argv_to_vec(argc, argv);
  if (args.empty()) {
    cerr << argv[0] << ": -h or --help for usage" << std::endl;
    exit(1);
  }
  if (ceph_argparse_need_usage(args)) {
    usage();
    exit(0);
  }
  auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
                         CODE_ENVIRONMENT_DAEMON,
                         CINIT_FLAG_UNPRIVILEGED_DAEMON_DEFAULTS);
  g_ceph_context->_conf.set_val_or_die("pid_file", "");
  // Fork early: the parent blocks until the child signals setup success,
  // so `rbd-nbd map` only returns after the device is usable.
  if (global_init_prefork(g_ceph_context) >= 0) {
    std::string err;
    r = forker.prefork(err);
    if (r < 0) {
      cerr << err << std::endl;
      return r;
    }
    if (forker.is_parent()) {
      if (forker.parent_wait(err) != 0) {
        return -ENXIO;
      }
      return 0;
    }
    global_init_postfork_start(g_ceph_context);
  }
  common_init_finish(g_ceph_context);
  global_init_chdir(g_ceph_context);
  if (socketpair(AF_UNIX, SOCK_STREAM, 0, fd) == -1) {
    r = -errno;
    goto close_ret;
  }
  // Open the cluster / pool / image.
  r = rados.init_with_context(g_ceph_context);
  if (r < 0)
    goto close_fd;
  r = rados.connect();
  if (r < 0)
    goto close_fd;
  r = rados.ioctx_create(cfg->poolname.c_str(), io_ctx);
  if (r < 0)
    goto close_fd;
  io_ctx.set_namespace(cfg->nsname);
  r = rbd.open(io_ctx, image, cfg->imgname.c_str());
  if (r < 0)
    goto close_fd;
  if (cfg->exclusive) {
    r = image.lock_acquire(RBD_LOCK_MODE_EXCLUSIVE);
    if (r < 0) {
      cerr << "rbd-nbd: failed to acquire exclusive lock: " << cpp_strerror(r)
           << std::endl;
      goto close_fd;
    }
  }
  // Select the snapshot to serve, if any (by id takes precedence; parse_args
  // already rejects specifying both).
  if (cfg->snapid != CEPH_NOSNAP) {
    r = image.snap_set_by_id(cfg->snapid);
    if (r < 0) {
      cerr << "rbd-nbd: failed to set snap id: " << cpp_strerror(r)
           << std::endl;
      goto close_fd;
    }
  } else if (!cfg->snapname.empty()) {
    r = image.snap_set(cfg->snapname.c_str());
    if (r < 0) {
      cerr << "rbd-nbd: failed to set snap name: " << cpp_strerror(r)
           << std::endl;
      goto close_fd;
    }
  }
  // Load client-side encryption: read each passphrase file and build the
  // format-specific option structs expected by encryption_load2().
  if (!cfg->encryption_formats.empty()) {
    EncryptionOptions encryption_options;
    encryption_options.specs.reserve(cfg->encryption_formats.size());
    for (size_t i = 0; i < cfg->encryption_formats.size(); ++i) {
      std::ifstream file(cfg->encryption_passphrase_files[i],
                         std::ios::in | std::ios::binary);
      if (file.fail()) {
        r = -errno;
        std::cerr << "rbd-nbd: unable to open passphrase file '"
                  << cfg->encryption_passphrase_files[i] << "': "
                  << cpp_strerror(r) << std::endl;
        goto close_fd;
      }
      std::string passphrase((std::istreambuf_iterator<char>(file)),
                             std::istreambuf_iterator<char>());
      file.close();
      switch (cfg->encryption_formats[i]) {
      case RBD_ENCRYPTION_FORMAT_LUKS: {
        auto opts = new librbd::encryption_luks_format_options_t{
            std::move(passphrase)};
        encryption_options.specs.push_back(
            {RBD_ENCRYPTION_FORMAT_LUKS, opts, sizeof(*opts)});
        break;
      }
      case RBD_ENCRYPTION_FORMAT_LUKS1: {
        auto opts = new librbd::encryption_luks1_format_options_t{
            .passphrase = std::move(passphrase)};
        encryption_options.specs.push_back(
            {RBD_ENCRYPTION_FORMAT_LUKS1, opts, sizeof(*opts)});
        break;
      }
      case RBD_ENCRYPTION_FORMAT_LUKS2: {
        auto opts = new librbd::encryption_luks2_format_options_t{
            .passphrase = std::move(passphrase)};
        encryption_options.specs.push_back(
            {RBD_ENCRYPTION_FORMAT_LUKS2, opts, sizeof(*opts)});
        break;
      }
      default:
        // parse_args only produces the three formats above.
        ceph_abort();
      }
    }
    r = image.encryption_load2(encryption_options.specs.data(),
                               encryption_options.specs.size());
    if (r != 0) {
      cerr << "rbd-nbd: failed to load encryption: " << cpp_strerror(r)
           << std::endl;
      goto close_fd;
    }
    // luks2 block size can vary up to 4096, while luks1 always uses 512
    // currently we don't have an rbd API for querying the loaded encryption
    blksize = 4096;
  }
  r = image.stat(info, sizeof(info));
  if (r < 0)
    goto close_fd;
  // Assemble the NBD feature flags advertised to the kernel.
  flags = NBD_FLAG_SEND_FLUSH | NBD_FLAG_HAS_FLAGS;
  if (!cfg->notrim) {
    flags |= NBD_FLAG_SEND_TRIM;
  }
  if (!cfg->snapname.empty() || cfg->readonly) {
    flags |= NBD_FLAG_READ_ONLY;
    read_only = 1;
  }
  if (info.size > ULONG_MAX) {
    r = -EFBIG;
    cerr << "rbd-nbd: image is too large (" << byte_u_t(info.size)
         << ", max is " << byte_u_t(ULONG_MAX) << ")" << std::endl;
    goto close_fd;
  }
  size = info.size;
  r = load_module(cfg);
  if (r < 0)
    goto close_fd;
  server = start_server(fd[1], image, cfg);
  // Prefer the netlink interface; reconnect is only possible via netlink.
  use_netlink = cfg->try_netlink || reconnect;
  if (use_netlink) {
    // generate when the cookie is not supplied at CLI
    if (!reconnect && cfg->cookie.empty()) {
      uuid_d uuid_gen;
      uuid_gen.generate_random();
      cfg->cookie = uuid_gen.to_string();
    }
    r = try_netlink_setup(cfg, fd[0], size, flags, reconnect);
    if (r < 0) {
      goto free_server;
    } else if (r == 1) {
      // Netlink unsupported by this kernel: fall back to ioctl below.
      use_netlink = false;
    }
  }
  if (!use_netlink) {
    r = try_ioctl_setup(cfg, fd[0], size, blksize, flags);
    if (r < 0)
      goto free_server;
  }
  r = check_device_size(nbd_index, size);
  if (r < 0)
    goto close_nbd;
  r = ioctl(nbd, BLKROSET, (unsigned long) &read_only);
  if (r < 0) {
    r = -errno;
    goto close_nbd;
  }
  // Scoped so the watch contexts are destroyed before device teardown.
  {
    NBDQuiesceWatchCtx quiesce_watch_ctx(server);
    if (cfg->quiesce) {
      r = image.quiesce_watch(&quiesce_watch_ctx,
                              &server->quiesce_watch_handle);
      if (r < 0) {
        goto close_nbd;
      }
    }
    uint64_t handle;
    // Watch for image size changes so the block device can be resized live.
    NBDWatchCtx watch_ctx(nbd, nbd_index, use_netlink, io_ctx, image,
                          info.size);
    r = image.update_watch(&watch_ctx, &handle);
    if (r < 0)
      goto close_nbd;
    std::string cookie;
    if (use_netlink) {
      cookie = get_cookie(cfg->devpath);
      ceph_assert(cookie == cfg->cookie || cookie.empty());
    }
    // Report the mapped device (and cookie, when requested) to the user.
    if (cfg->show_cookie && !cookie.empty()) {
      cout << cfg->devpath << " " << cookie << std::endl;
    } else {
      cout << cfg->devpath << std::endl;
    }
    run_server(forker, server, use_netlink);
    if (cfg->quiesce) {
      r = image.quiesce_unwatch(server->quiesce_watch_handle);
      ceph_assert(r == 0);
    }
    r = image.update_unwatch(handle);
    ceph_assert(r == 0);
  }
close_nbd:
  if (r < 0) {
    // Undo the kernel-side setup using whichever interface configured it.
    if (use_netlink) {
      netlink_disconnect(nbd_index);
    } else {
      ioctl(nbd, NBD_CLEAR_SOCK);
      cerr << "rbd-nbd: failed to map, status: " << cpp_strerror(-r)
           << std::endl;
    }
  }
  close(nbd);
free_server:
  delete server;
close_fd:
  close(fd[0]);
  close(fd[1]);
close_ret:
  image.close();
  io_ctx.close();
  rados.shutdown();
  forker.exit(r < 0 ? EXIT_FAILURE : 0);
  // Unreachable;
  return r;
}
// Detach: ask the rbd-nbd daemon serving the device to shut down via
// SIGTERM, then wait for it to actually exit.
// Returns 0 on success or a negative errno.
static int do_detach(Config *cfg)
{
  if (kill(cfg->pid, SIGTERM) == -1) {
    int err = -errno;
    cerr << "rbd-nbd: failed to terminate " << cfg->pid << ": "
         << cpp_strerror(err) << std::endl;
    return err;
  }
  return wait_for_terminate(cfg->pid, cfg->reattach_timeout);
}
// Unmap the NBD device described by cfg and wait for the serving process
// (if known) to exit.
// Returns 0 on success, or a negative errno — including -ETIMEDOUT when
// the daemon does not exit within cfg->reattach_timeout seconds.
static int do_unmap(Config *cfg)
{
  /*
   * The netlink disconnect call supports devices setup with netlink or ioctl,
   * so we always try that first.
   */
  int r = netlink_disconnect_by_path(cfg->devpath);
  if (r < 0) {
    return r;
  }
  if (r == 1) {
    // Netlink is unavailable on this kernel: fall back to the legacy ioctl.
    int nbd = open(cfg->devpath.c_str(), O_RDWR);
    if (nbd < 0) {
      cerr << "rbd-nbd: failed to open device: " << cfg->devpath << std::endl;
      return nbd;
    }
    r = ioctl(nbd, NBD_DISCONNECT);
    if (r < 0) {
      cerr << "rbd-nbd: the device is not used" << std::endl;
    }
    close(nbd);
    if (r < 0) {
      return r;
    }
    r = 0;
  }
  if (cfg->pid > 0) {
    // Propagate the wait status to the caller. Previously the result was
    // assigned but the function unconditionally returned 0, so a daemon
    // that failed to exit (timeout) was reported as success.
    r = wait_for_terminate(cfg->pid, cfg->reattach_timeout);
  }
  return r;
}
// Parse an image spec of the form "[pool/[namespace/]]image[@snap]" into
// the corresponding Config fields. Components that are absent leave the
// existing Config values untouched.
// Returns 0 on success, -EINVAL when the spec does not match.
static int parse_imgpath(const std::string &imgpath, Config *cfg,
                         std::ostream *err_msg) {
  static const std::regex spec_re(
      "^(?:([^/]+)/(?:([^/@]+)/)?)?([^@]+)(?:@([^/@]+))?$");
  std::smatch parsed;
  if (!std::regex_match(imgpath, parsed, spec_re)) {
    std::cerr << "rbd-nbd: invalid spec '" << imgpath << "'" << std::endl;
    return -EINVAL;
  }
  if (parsed[1].matched) {
    cfg->poolname = parsed[1];
  }
  if (parsed[2].matched) {
    cfg->nsname = parsed[2];
  }
  cfg->imgname = parsed[3];
  if (parsed[4].matched) {
    cfg->snapname = parsed[4];
  }
  return 0;
}
// List all currently mapped rbd-nbd devices, either as a plain-text table
// or as JSON/XML when `format` is "json"/"xml".
// Returns 0 on success, -EINVAL for an unknown format string.
static int do_list_mapped_devices(const std::string &format, bool pretty_format)
{
  // Only print the table if at least one device was found.
  bool should_print = false;
  std::unique_ptr<ceph::Formatter> f;
  TextTable tbl;
  if (format == "json") {
    f.reset(new JSONFormatter(pretty_format));
  } else if (format == "xml") {
    f.reset(new XMLFormatter(pretty_format));
  } else if (!format.empty() && format != "plain") {
    std::cerr << "rbd-nbd: invalid output format: " << format << std::endl;
    return -EINVAL;
  }
  if (f) {
    f->open_array_section("devices");
  } else {
    tbl.define_column("id", TextTable::LEFT, TextTable::LEFT);
    tbl.define_column("pool", TextTable::LEFT, TextTable::LEFT);
    tbl.define_column("namespace", TextTable::LEFT, TextTable::LEFT);
    tbl.define_column("image", TextTable::LEFT, TextTable::LEFT);
    tbl.define_column("snap", TextTable::LEFT, TextTable::LEFT);
    tbl.define_column("device", TextTable::LEFT, TextTable::LEFT);
    tbl.define_column("cookie", TextTable::LEFT, TextTable::LEFT);
  }
  Config cfg;
  NBDListIterator it;
  while (it.get(&cfg)) {
    // Snapshots mapped by id are rendered as "@<id>"; otherwise the name.
    std::string snap = (cfg.snapid != CEPH_NOSNAP ?
        "@" + std::to_string(cfg.snapid) : cfg.snapname);
    if (f) {
      f->open_object_section("device");
      f->dump_int("id", cfg.pid);
      f->dump_string("pool", cfg.poolname);
      f->dump_string("namespace", cfg.nsname);
      f->dump_string("image", cfg.imgname);
      f->dump_string("snap", snap);
      f->dump_string("device", cfg.devpath);
      f->dump_string("cookie", cfg.cookie);
      f->close_section();
    } else {
      should_print = true;
      tbl << cfg.pid << cfg.poolname << cfg.nsname << cfg.imgname
          << (snap.empty() ? "-" : snap) << cfg.devpath << cfg.cookie
          << TextTable::endrow;
    }
  }
  if (f) {
    f->close_section(); // devices
    f->flush(std::cout);
  }
  if (should_print) {
    std::cout << tbl;
  }
  return 0;
}
// Search the mapped-device list for an entry whose pool/namespace/image/
// snapshot spec matches *cfg (and, when cfg->devpath is set, whose device
// path matches too). Entries belonging to `skip_pid` are ignored so a
// process can exclude itself. On a hit, *cfg is overwritten with the full
// mapping description and true is returned.
static bool find_mapped_dev_by_spec(Config *cfg, int skip_pid=-1) {
  NBDListIterator it;
  Config candidate;
  while (it.get(&candidate)) {
    if (candidate.pid == skip_pid) {
      continue;
    }
    const bool spec_matches =
        candidate.poolname == cfg->poolname &&
        candidate.nsname == cfg->nsname &&
        candidate.imgname == cfg->imgname &&
        candidate.snapname == cfg->snapname &&
        candidate.snapid == cfg->snapid;
    const bool dev_matches =
        cfg->devpath.empty() || candidate.devpath == cfg->devpath;
    if (spec_matches && dev_matches) {
      *cfg = candidate;
      return true;
    }
  }
  return false;
}
// Find the rbd-nbd process attached to cfg->devpath; on success copy the
// full mapping description into *cfg and return true.
// Return type changed from int to bool for consistency with
// find_mapped_dev_by_spec(): the function only ever produced true/false
// and every caller uses it in a boolean context.
static bool find_proc_by_dev(Config *cfg) {
  Config c;
  NBDListIterator it;
  while (it.get(&c)) {
    if (c.devpath == cfg->devpath) {
      *cfg = c;
      return true;
    }
  }
  return false;
}
// Parse the rbd-nbd command line into *cfg.
// Phase 1: early ceph args + config files; phase 2: rbd-nbd options;
// phase 3: the command word (map/unmap/attach/detach/list-mapped) and its
// positional argument. Unrecognized options are left in `args` so
// do_map()'s global_init() can consume them; anything still left at the
// end is an error.
// Returns 0 on success, HELP_INFO/VERSION_INFO for -h/-v, -EINVAL on error
// (with a message appended to *err_msg).
static int parse_args(vector<const char*>& args, std::ostream *err_msg,
                      Config *cfg) {
  std::string conf_file_list;
  std::string cluster;
  CephInitParameters iparams = ceph_argparse_early_args(
      args, CEPH_ENTITY_TYPE_CLIENT, &cluster, &conf_file_list);
  ConfigProxy config{false};
  config->name = iparams.name;
  config->cluster = cluster;
  if (!conf_file_list.empty()) {
    config.parse_config_files(conf_file_list.c_str(), nullptr, 0);
  } else {
    config.parse_config_files(nullptr, nullptr, 0);
  }
  config.parse_env(CEPH_ENTITY_TYPE_CLIENT);
  config.parse_argv(args);
  // Default pool; may be overridden by the image spec parsed below.
  cfg->poolname = config.get_val<std::string>("rbd_default_pool");
  std::vector<const char*>::iterator i;
  std::ostringstream err;
  std::string arg_value;
  long long snapid;
  for (i = args.begin(); i != args.end(); ) {
    if (ceph_argparse_flag(args, i, "-h", "--help", (char*)NULL)) {
      return HELP_INFO;
    } else if (ceph_argparse_flag(args, i, "-v", "--version", (char*)NULL)) {
      return VERSION_INFO;
    } else if (ceph_argparse_witharg(args, i, &cfg->devpath, "--device", (char *)NULL)) {
    } else if (ceph_argparse_witharg(args, i, &cfg->io_timeout, err,
                                     "--io-timeout", (char *)NULL)) {
      if (!err.str().empty()) {
        *err_msg << "rbd-nbd: " << err.str();
        return -EINVAL;
      }
      if (cfg->io_timeout < 0) {
        *err_msg << "rbd-nbd: Invalid argument for io-timeout!";
        return -EINVAL;
      }
    } else if (ceph_argparse_witharg(args, i, &cfg->nbds_max, err, "--nbds_max", (char *)NULL)) {
      if (!err.str().empty()) {
        *err_msg << "rbd-nbd: " << err.str();
        return -EINVAL;
      }
      if (cfg->nbds_max < 0) {
        *err_msg << "rbd-nbd: Invalid argument for nbds_max!";
        return -EINVAL;
      }
    } else if (ceph_argparse_witharg(args, i, &cfg->max_part, err, "--max_part", (char *)NULL)) {
      if (!err.str().empty()) {
        *err_msg << "rbd-nbd: " << err.str();
        return -EINVAL;
      }
      if ((cfg->max_part < 0) || (cfg->max_part > 255)) {
        *err_msg << "rbd-nbd: Invalid argument for max_part(0~255)!";
        return -EINVAL;
      }
      cfg->set_max_part = true;
    } else if (ceph_argparse_flag(args, i, "--quiesce", (char *)NULL)) {
      cfg->quiesce = true;
    } else if (ceph_argparse_witharg(args, i, &cfg->quiesce_hook,
                                     "--quiesce-hook", (char *)NULL)) {
    } else if (ceph_argparse_flag(args, i, "--read-only", (char *)NULL)) {
      cfg->readonly = true;
    } else if (ceph_argparse_witharg(args, i, &cfg->reattach_timeout, err,
                                     "--reattach-timeout", (char *)NULL)) {
      if (!err.str().empty()) {
        *err_msg << "rbd-nbd: " << err.str();
        return -EINVAL;
      }
      if (cfg->reattach_timeout < 0) {
        *err_msg << "rbd-nbd: Invalid argument for reattach-timeout!";
        return -EINVAL;
      }
    } else if (ceph_argparse_flag(args, i, "--exclusive", (char *)NULL)) {
      cfg->exclusive = true;
    } else if (ceph_argparse_flag(args, i, "--notrim", (char *)NULL)) {
      cfg->notrim = true;
    } else if (ceph_argparse_witharg(args, i, &cfg->io_timeout, err,
                                     "--timeout", (char *)NULL)) {
      // Deprecated alias of --io-timeout; still honored.
      if (!err.str().empty()) {
        *err_msg << "rbd-nbd: " << err.str();
        return -EINVAL;
      }
      if (cfg->io_timeout < 0) {
        *err_msg << "rbd-nbd: Invalid argument for timeout!";
        return -EINVAL;
      }
      *err_msg << "rbd-nbd: --timeout is deprecated (use --io-timeout)";
    } else if (ceph_argparse_witharg(args, i, &cfg->format, err, "--format",
                                     (char *)NULL)) {
    } else if (ceph_argparse_flag(args, i, "--pretty-format", (char *)NULL)) {
      cfg->pretty_format = true;
    } else if (ceph_argparse_flag(args, i, "--try-netlink", (char *)NULL)) {
      cfg->try_netlink = true;
    } else if (ceph_argparse_flag(args, i, "--show-cookie", (char *)NULL)) {
      cfg->show_cookie = true;
    } else if (ceph_argparse_witharg(args, i, &cfg->cookie, "--cookie", (char *)NULL)) {
    } else if (ceph_argparse_witharg(args, i, &snapid, err,
                                     "--snap-id", (char *)NULL)) {
      if (!err.str().empty()) {
        *err_msg << "rbd-nbd: " << err.str();
        return -EINVAL;
      }
      if (snapid < 0) {
        *err_msg << "rbd-nbd: Invalid argument for snap-id!";
        return -EINVAL;
      }
      cfg->snapid = snapid;
    } else if (ceph_argparse_witharg(args, i, &arg_value,
                                     "--encryption-format", (char *)NULL)) {
      if (arg_value == "luks1") {
        cfg->encryption_formats.push_back(RBD_ENCRYPTION_FORMAT_LUKS1);
      } else if (arg_value == "luks2") {
        cfg->encryption_formats.push_back(RBD_ENCRYPTION_FORMAT_LUKS2);
      } else if (arg_value == "luks") {
        cfg->encryption_formats.push_back(RBD_ENCRYPTION_FORMAT_LUKS);
      } else {
        *err_msg << "rbd-nbd: Invalid encryption format";
        return -EINVAL;
      }
    } else if (ceph_argparse_witharg(args, i, &arg_value,
                                     "--encryption-passphrase-file",
                                     (char *)NULL)) {
      cfg->encryption_passphrase_files.push_back(arg_value);
    } else {
      // Unknown option: keep it for global_init() and move on.
      ++i;
    }
  }
  // Passphrase files without explicit formats default to generic LUKS;
  // afterwards formats and passphrase files must pair up one-to-one.
  if (cfg->encryption_formats.empty() &&
      !cfg->encryption_passphrase_files.empty()) {
    cfg->encryption_formats.resize(cfg->encryption_passphrase_files.size(),
                                   RBD_ENCRYPTION_FORMAT_LUKS);
  }
  if (cfg->encryption_formats.size() != cfg->encryption_passphrase_files.size()) {
    *err_msg << "rbd-nbd: Encryption formats count does not match "
             << "passphrase files count";
    return -EINVAL;
  }
  // The first remaining positional argument is the command word.
  Command cmd = None;
  if (args.begin() != args.end()) {
    if (strcmp(*args.begin(), "map") == 0) {
      cmd = Map;
    } else if (strcmp(*args.begin(), "unmap") == 0) {
      cmd = Unmap;
    } else if (strcmp(*args.begin(), "attach") == 0) {
      cmd = Attach;
    } else if (strcmp(*args.begin(), "detach") == 0) {
      cmd = Detach;
    } else if (strcmp(*args.begin(), "list-mapped") == 0) {
      cmd = List;
    } else {
      *err_msg << "rbd-nbd: unknown command: " <<  *args.begin();
      return -EINVAL;
    }
    args.erase(args.begin());
  }
  if (cmd == None) {
    *err_msg << "rbd-nbd: must specify command";
    return -EINVAL;
  }
  // Per-command validation of positional arguments.
  std::string cookie;
  switch (cmd) {
    case Attach:
      if (cfg->devpath.empty()) {
        *err_msg << "rbd-nbd: must specify device to attach";
        return -EINVAL;
      }
      // Allowing attach without --cookie option for kernel without
      // NBD_ATTR_BACKEND_IDENTIFIER support for compatibility
      cookie = get_cookie(cfg->devpath);
      if (!cookie.empty()) {
        if (cfg->cookie.empty()) {
          *err_msg << "rbd-nbd: must specify cookie to attach";
          return -EINVAL;
        } else if (cookie != cfg->cookie) {
          *err_msg << "rbd-nbd: cookie mismatch";
          return -EINVAL;
        }
      } else if (!cfg->cookie.empty()) {
        *err_msg << "rbd-nbd: kernel does not have cookie support";
        return -EINVAL;
      }
      [[fallthrough]];
    case Map:
      if (args.begin() == args.end()) {
        *err_msg << "rbd-nbd: must specify image-or-snap-spec";
        return -EINVAL;
      }
      if (parse_imgpath(*args.begin(), cfg, err_msg) < 0) {
        return -EINVAL;
      }
      args.erase(args.begin());
      break;
    case Detach:
    case Unmap:
      if (args.begin() == args.end()) {
        *err_msg << "rbd-nbd: must specify nbd device or image-or-snap-spec";
        return -EINVAL;
      }
      // The positional argument may be either a device path or an image spec.
      if (boost::starts_with(*args.begin(), "/dev/")) {
        cfg->devpath = *args.begin();
      } else {
        if (parse_imgpath(*args.begin(), cfg, err_msg) < 0) {
          return -EINVAL;
        }
      }
      args.erase(args.begin());
      break;
    default:
      //shut up gcc;
      break;
  }
  if (cfg->snapid != CEPH_NOSNAP && !cfg->snapname.empty()) {
    *err_msg << "rbd-nbd: use either snapname or snapid, not both";
    return -EINVAL;
  }
  if (args.begin() != args.end()) {
    *err_msg << "rbd-nbd: unknown args: " << *args.begin();
    return -EINVAL;
  }
  cfg->command = cmd;
  return 0;
}
// Top-level command dispatcher: parse the CLI and run the requested
// subcommand. Returns 0 on success or a negative error code.
static int rbd_nbd(int argc, const char *argv[])
{
  int r;
  Config cfg;
  auto args = argv_to_vec(argc, argv);
  std::ostringstream err_msg;
  r = parse_args(args, &err_msg, &cfg);
  if (r == HELP_INFO) {
    usage();
    return 0;
  } else if (r == VERSION_INFO) {
    std::cout << pretty_version_to_str() << std::endl;
    return 0;
  } else if (r < 0) {
    cerr << err_msg.str() << std::endl;
    return r;
  }
  // parse_args may leave a non-fatal warning (e.g. deprecated option).
  if (!err_msg.str().empty()) {
    cerr << err_msg.str() << std::endl;
  }
  switch (cfg.command) {
    case Attach:
      ceph_assert(!cfg.devpath.empty());
      // Refuse to attach if another live process (other than us) already
      // serves this spec/device.
      if (find_mapped_dev_by_spec(&cfg, getpid())) {
        cerr << "rbd-nbd: " << cfg.devpath << " has process " << cfg.pid
             << " connected" << std::endl;
        return -EBUSY;
      }
      [[fallthrough]];
    case Map:
      if (cfg.imgname.empty()) {
        cerr << "rbd-nbd: image name was not specified" << std::endl;
        return -EINVAL;
      }
      r = do_map(argc, argv, &cfg, cfg.command == Attach);
      if (r < 0)
        return -EINVAL;
      break;
    case Detach:
      // Accept either a device path or an image spec; resolve to the
      // serving process before signalling it.
      if (cfg.devpath.empty()) {
        if (!find_mapped_dev_by_spec(&cfg)) {
          cerr << "rbd-nbd: " << cfg.image_spec() << " is not mapped"
               << std::endl;
          return -ENOENT;
        }
      } else if (!find_proc_by_dev(&cfg)) {
        cerr << "rbd-nbd: no process attached to " << cfg.devpath << " found"
             << std::endl;
        return -ENOENT;
      }
      r = do_detach(&cfg);
      if (r < 0)
        return -EINVAL;
      break;
    case Unmap:
      if (cfg.devpath.empty()) {
        if (!find_mapped_dev_by_spec(&cfg)) {
          cerr << "rbd-nbd: " << cfg.image_spec() << " is not mapped"
               << std::endl;
          return -ENOENT;
        }
      } else if (!find_proc_by_dev(&cfg)) {
        // still try to send disconnect to the device
      }
      r = do_unmap(&cfg);
      if (r < 0)
        return -EINVAL;
      break;
    case List:
      r = do_list_mapped_devices(cfg.format, cfg.pretty_format);
      if (r < 0)
        return -EINVAL;
      break;
    default:
      usage();
      break;
  }
  return 0;
}
// Process entry point: map any negative status from the dispatcher to the
// conventional failure exit code.
int main(int argc, const char *argv[])
{
  return rbd_nbd(argc, argv) < 0 ? EXIT_FAILURE : 0;
}
| 62,839 | 25.270903 | 116 | cc |
null | ceph-main/src/tools/rbd_recover_tool/test_rbd_recover_tool.sh | #!/usr/bin/env bash
#
# Copyright (C) 2015 Ubuntu Kylin
#
# Author: Min Chen <minchen@ubuntukylin.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
# unit test case for rbd-recover-tool
#prepare:
# - write config files: config/osd_host, config/mon_host, config/storage_path, config/mds_host if exist mds
#step 1. rbd export all images as you need
#step 2. stop all ceph services
#step 3. use ceph_rbd_recover_tool to recover all images
#step 4. compare md5sum of recover image with that of export image who has the same image name
# Fail fast on unreachable hosts when fanning commands out over ssh.
ssh_opt="-o ConnectTimeout=1"
my_dir=$(dirname "$0")
tool_dir=$my_dir
# Host lists for the cluster under test, one hostname per line.
#storage_path=$my_dir/config/storage_path
mon_host=$my_dir/config/mon_host
osd_host=$my_dir/config/osd_host
mds_host=$my_dir/config/mds_host
# The following paths are derived from the storage directory passed to
# init() and are filled in there.
test_dir= # `cat $storage_path`
export_dir= #$test_dir/export
recover_dir= #$test_dir/recover
image_names= #$test_dir/image_names
online_images= #$test_dir/online_images, all images on ceph rbd pool
gen_db= #$test_dir/gen_db, label database if exist
# Pool used for all test images; pool_id is refreshed by get_pool_id().
pool=rbd
pool_id=2
# Resolve the numeric id of $pool into the global pool_id.
# Captures the command output directly: with the previous temp-file
# pipeline (`ceph ...|head|awk >file`), $? reflected awk's status (always
# 0), so a failing `ceph` command was never detected.
function get_pool_id()
{
  local func="get_pool_id"
  local stats
  stats=$(ceph osd pool stats $pool)
  if [ $? -ne 0 ];then
    echo "$func: get pool id failed: pool = $pool"
    exit
  fi
  pool_id=$(echo "$stats"|head -n 1|awk '{print $4}')
  echo "$func: pool_id = $pool_id"
}
# Validate configuration, verify the cluster is reachable, and create the
# working directory tree under the storage path given as $1.
function init()
{
  local func="init"
  if [ $# -eq 0 ];then
    echo "$func: must input <path> to storage images, enough disk space is good"
    exit
  fi
  # All three host-list files must exist (mds_host may be empty).
  if [ ! -s $osd_host ];then
    echo "$func: config/osd_host not exists or empty"
    exit
  fi
  if [ ! -s $mon_host ];then
    echo "$func: config/mon_host not exists or empty"
    exit
  fi
  if [ ! -e $mds_host ];then
    echo "$func: config/mds_host not exists"
    exit
  fi
  # Derive the per-run directory layout from the storage path.
  test_dir=$1
  export_dir=$test_dir/export
  recover_dir=$test_dir/recover
  image_names=$test_dir/image_names
  online_images=$test_dir/online_images
  gen_db=$test_dir/gen_db
  # `ceph -s` doubles as a liveness check; abort cleanly on Ctrl-C.
  trap 'echo "ceph cluster is stopped ..."; exit;' INT
  ceph -s >/dev/null
  get_pool_id
  mkdir -p $test_dir
  mkdir -p $export_dir
  mkdir -p $recover_dir
  # Start from a clean slate for both export and recover results.
  rm -rf $export_dir/*
  rm -rf $recover_dir/*
}
# Build the rbd-recover-tool metadata database from the offline osds,
# unless the flag file already records a successful run.
function do_gen_database()
{
  local func="do_gen_database"
  if [ -s $gen_db ];then
    if [ `cat $gen_db` = 1 ];then
      echo "$func: database already existed"
      exit
    fi
  fi
  bash $tool_dir/rbd-recover-tool database
  # Record success so repeated runs can skip the expensive scan.
  echo 1 >$gen_db
}
#check if all ceph processes are stopped
# Return 0 when no ceph-osd/ceph-mon/ceph-mds process remains on any
# configured host, 1 otherwise. (An unreachable `exit` that followed the
# `return 1` has been removed.)
function check_ceph_service()
{
  local func="check_ceph_service"
  local res=`cat $osd_host $mon_host $mds_host|sort -u|tr -d [:blank:]|xargs -n 1 -I @ ssh $ssh_opt @ "ps aux|grep -E \"(ceph-osd|ceph-mon|ceph-mds)\"|grep -v grep"`
  if [ "$res"x != ""x ];then
    echo "$func: NOT all ceph services are stopped"
    return 1
  fi
  echo "$func: all ceph services are stopped"
  return 0
}
# Kill all ceph daemons across the cluster: osds first (in parallel, one
# background ssh per host), then mons and mds daemons.
function stop_ceph()
{
  local func="stop_ceph"
  #cat osd_host|xargs -n 1 -I @ ssh $ssh_opt @ "killall ceph-osd"
  while read osd
  do
  {
    # Strip whitespace and skip blank lines in the host file.
    osd=`echo $osd|tr -d [:blank:]`
    if [ "$osd"x = ""x ];then
      continue
    fi
    # </dev/null keeps ssh from consuming the loop's stdin.
    #ssh $ssh_opt $osd "killall ceph-osd ceph-mon ceph-mds" </dev/null
    ssh $ssh_opt $osd "killall ceph-osd" </dev/null
  } &
  done < $osd_host
  wait
  echo "waiting kill all osd ..."
  sleep 1
  #cat $mon_host|xargs -n 1 -I @ ssh $ssh_opt @ "killall ceph-mon ceph-osd ceph-mds"
  cat $mon_host|xargs -n 1 -I @ ssh $ssh_opt @ "killall ceph-mon"
  #cat $mds_host|xargs -n 1 -I @ ssh $ssh_opt @ "killall ceph-mds ceph-mon ceph-osd"
  cat $mds_host|xargs -n 1 -I @ ssh $ssh_opt @ "killall ceph-mds"
}
# Create (or reset) an rbd image: <image_name> <size MB> <image_format>.
# If the image already exists it is purged of snapshots and resized,
# rather than deleted and recreated.
function create_image()
{
  local func="create_image"
  if [ ${#} -lt 3 ];then
    echo "create_image: parameters: <image_name> <size> <image_format>"
    exit
  fi
  local image_name=$1
  local size=$2
  local image_format=$3
  if [ $image_format -lt 1 ] || [ $image_format -gt 2 ];then
    echo "$func: image_format must be 1 or 2"
    exit
  fi
  # Anchored grep so e.g. "foo" does not match "foo2".
  local res=`rbd list|grep -E "^$1$"`
  echo "$func $image_name ..."
  if [ "$res"x = ""x ];then
    rbd -p $pool create $image_name --size $size --image_format $image_format
  else
    # Format-2 snapshots may be protected; unprotect before purging.
    if [ $image_format -eq 2 ];then
      rbd snap ls $image_name|tail -n +2|awk '{print $2}'|xargs -n 1 -I % rbd snap unprotect $image_name@%
    fi
    rbd snap purge $image_name
    #rbd rm $image_name
    rbd -p $pool resize --allow-shrink --size $size $image_name
  fi
}
# Create an image without snapshots, fill it with random data via
# rbd-fuse, export it with `rbd export`, and record its md5 in
# @md5_nosnap: <image_name> <image_format> [<image_size MB>].
function export_image()
{
  local func="export_image"
  if [ $# -lt 2 ];then
    echo "$func: parameters: <image_name> <image_format> [<image_size>]"
    exit
  fi
  local image_name=$1
  local format=$(($2))
  local size=$(($3)) #MB
  if [ $format -ne 1 ] && [ $format -ne 2 ];then
    echo "$func: image format must be 1 or 2"
    exit
  fi
  # Default to a small image when no size was given.
  if [ $size -eq 0 ];then
    size=24 #MB
    echo "$func: size = $size"
  fi
  # Mount rbd-fuse once; reuse an existing mount.
  local mnt=/rbdfuse
  mount |grep "rbd-fuse on /rbdfuse" &>/dev/null
  if [ $? -ne 0 ];then
    rbd-fuse $mnt
  fi
  create_image $image_name $size $format
  dd conv=notrunc if=/dev/urandom of=$mnt/$image_name bs=4M count=$(($size/4))
  local export_image_dir=$export_dir/pool_$pool_id/$image_name
  mkdir -p $export_image_dir
  local export_md5_nosnap=$export_image_dir/@md5_nosnap
  >$export_md5_nosnap
  local export_image_path=$export_image_dir/$image_name
  rm -f $export_image_path
  rbd export $pool/$image_name $export_image_path
  md5sum $export_image_path |awk '{print $1}' >$export_md5_nosnap
}
# Rebuild a snapshot-less image head with rbd-recover-tool and record the
# md5 of the result in @md5_nosnap: <image_name>.
# Fixes: the usage message was mislabeled "recover_snapshots"; an unused
# `local snapshot=` declaration was dropped.
function recover_image()
{
  local func="recover_image"
  if [ $# -lt 1 ];then
    echo "$func: parameters: <image_name>"
    exit
  fi
  local image_name=$1
  local recover_image_dir=$recover_dir/pool_$pool_id/$image_name
  mkdir -p $recover_image_dir
  local recover_md5_nosnap=$recover_image_dir/@md5_nosnap
  >$recover_md5_nosnap
  bash $tool_dir/rbd-recover-tool recover $pool_id/$image_name $recover_dir
  md5sum $recover_image_dir/$image_name|awk '{print $1}' >$recover_md5_nosnap
}
# Write <count> MB of random data at offset <seek> MB into <ofile>, take
# snapshot <snap>, export it, and append its md5 to @md5 in
# <export_image_dir>. A no-op (returns early) if the snapshot already
# exists, which makes reruns idempotent.
function make_snapshot()
{
  local func="make_snapshot"
  if [ $# -lt 5 ];then
    echo "$func: parameters: <ofile> <seek> <count> <snap> <export_image_dir>"
    exit
  fi
  local ofile=$1
  local seek=$(($2))
  local count=$(($3))
  local snap=$4
  local export_image_dir=$5
  if [ $seek -lt 0 ];then
    echo "$func: seek can not be minus"
    exit
  fi
  if [ $count -lt 1 ];then
    echo "$func: count must great than zero"
    exit
  fi
  echo "[$snap] $func ..."
  echo "$1 $2 $3 $4"
  # Skip if the snapshot already exists (grep succeeds => return 0).
  rbd snap ls $image_name|grep $snap;
  local res=$?
  if [ $res -eq 0 ];then
    return $res
  fi
  dd conv=notrunc if=/dev/urandom of=$ofile bs=1M count=$count seek=$seek 2>/dev/null
  snapshot=$image_name@$snap
  rbd snap create $snapshot
  rm -f $export_image_dir/$snapshot
  rbd export $pool/$image_name $export_image_dir/$snapshot
  # Record "checksum filename" lines; checked later against the recovery.
  pushd $export_image_dir >/dev/null
  md5sum $snapshot >> @md5
  popd >/dev/null
}
# Recover the image head plus snapshots snap1..snap9 with
# rbd-recover-tool, recording one md5 line per snapshot in @md5 (same
# "checksum name" layout produced on the export side): <image_name>.
function recover_snapshots()
{
  local func="recover_snapshots"
  if [ $# -lt 1 ];then
    echo "$func: parameters: <image_name>"
    exit
  fi
  local image_name=$1
  #pool_id=29
  local recover_image_dir=$recover_dir/pool_$pool_id/$image_name
  mkdir -p $recover_image_dir
  local recover_md5=$recover_image_dir/@md5
  >$recover_md5
  local snapshot=
  # recover head
  bash $tool_dir/rbd-recover-tool recover $pool_id/$image_name $recover_dir
  # recover snapshots
  for((i=1; i<10; i++))
  do
    snapshot=snap$i
    # Each recovery overwrites $image_name with that snapshot's content,
    # so hash it before moving to the next snapshot.
    bash $tool_dir/rbd-recover-tool recover $pool_id/$image_name@$snapshot $recover_dir
    pushd $recover_image_dir >/dev/null
    local chksum=`md5sum $image_name|awk '{print $1}'`
    echo "$chksum $image_name@$snapshot" >>@md5
    popd >/dev/null
  done
}
# Create an image and a series of nine snapshots, each exported and
# md5-recorded for later comparison: <image_name> <image_format>
# [<image_size MB>].
function export_snapshots()
{
  local func="export_snapshots"
  if [ $# -lt 2 ];then
    echo "$func: parameters: <image_name> <image_format> [<image_size>]"
    exit
  fi
  local image_name=$1
  local format=$(($2))
  local size=$(($3)) #MB
  if [ $format -ne 1 ] && [ $format -ne 2 ];then
    echo "$func: image format must be 1 or 2"
    exit
  fi
  if [ $size -eq 0 ];then
    size=24 #MB
    echo "$func: size = $size"
  fi
  # Mount rbd-fuse once; reuse an existing mount.
  local mnt=/rbdfuse
  mount |grep "rbd-fuse on /rbdfuse" &>/dev/null
  if [ $? -ne 0 ];then
    rbd-fuse $mnt
  fi
  create_image $image_name $size $format
  local export_image_dir=$export_dir/pool_$pool_id/$image_name
  mkdir -p $export_image_dir
  local export_md5=$export_image_dir/@md5
  >$export_md5
  # create 9 snapshots
  # image = {object0, object1, object2, object3, object4, object5, ...}
  #
  # snap1 : init/write all objects
  # snap2 : write object0
  # snap3 : write object1
  # snap4 : write object2
  # snap5 : write object3
  # snap6 : write object4
  # snap7 : write object5
  # snap8 : write object0
  # snap9 : write object3
  make_snapshot $mnt/$image_name 0 $size snap1 $export_image_dir
  make_snapshot $mnt/$image_name 0 1 snap2 $export_image_dir
  make_snapshot $mnt/$image_name 4 1 snap3 $export_image_dir
  make_snapshot $mnt/$image_name 8 1 snap4 $export_image_dir
  make_snapshot $mnt/$image_name 12 1 snap5 $export_image_dir
  make_snapshot $mnt/$image_name 16 1 snap6 $export_image_dir
  make_snapshot $mnt/$image_name 20 1 snap7 $export_image_dir
  make_snapshot $mnt/$image_name 1 1 snap8 $export_image_dir
  make_snapshot $mnt/$image_name 13 1 snap9 $export_image_dir
}
# Compare the md5 recorded at export time with the md5 of the recovered
# image and print a PASSED/FAILED verdict:
#   <export_md5_file> <recover_md5_file> <image_name>
# Fix: exit on bad usage (previously only a message was printed and the
# function fell through with unset parameters).
function check_recover_nosnap()
{
  local func="check_recover_nosnap"
  if [ $# -lt 3 ];then
    echo "$func: parameters: <export_md5_file> <recover_md5_file> <image_name>"
    exit
  fi
  local export_md5=$1
  local recover_md5=$2
  local image_name=$3
  local ifpassed="FAILED"
  echo "================ < $image_name nosnap > ================"
  local export_md5sum=`cat $export_md5`
  local recover_md5sum=`cat $recover_md5`
  # An empty export checksum means the export failed: never report PASSED.
  if [ "$export_md5sum"x != ""x ] && [ "$export_md5sum"x = "$recover_md5sum"x ];then
    ifpassed="PASSED"
  fi
  echo "export: $export_md5sum"
  echo "recover: $recover_md5sum $ifpassed"
}
# Compare, snapshot by snapshot, the md5 lines recorded at export time
# with those recorded after recovery and print one PASSED/FAILED verdict
# per snapshot: <export_md5_file> <recover_md5_file> <image_name>
# Each @md5 line has the form "checksum name".
# Fixes: (1) the verdict now compares field 0 (the checksum) — it used to
# compare field 1 (the file name), which is identical on both sides, so
# the check could never fail; (2) ifpassed is reset every iteration — a
# single PASSED used to leak into all later snapshots; (3) exit on bad
# usage, consistent with the other helpers.
function check_recover_snapshots()
{
  local func="check_recover_snapshots"
  if [ $# -lt 3 ];then
    echo "$func: parameters: <export_md5_file> <recover_md5_file> <image_name>"
    exit
  fi
  local export_md5=$1
  local recover_md5=$2
  local image_name=$3
  echo "================ < $image_name snapshots > ================"
  OIFS=$IFS
  IFS=$'\n'
  local export_md5s=(`cat $export_md5`)
  local recover_md5s=(`cat $recover_md5`)
  for((i=0; i<9; i++))
  do
    OOIFS=$IFS
    IFS=$' '
    local x=$(($i+1))
    snapshot=snap$x
    local ifpassed="FAILED"
    local export_arr=(`echo ${export_md5s[$i]}`)
    local recover_arr=(`echo ${recover_md5s[$i]}`)
    echo "export: ${export_md5s[$i]}"
    if [ "${export_arr[0]}"x != ""x ] && [ "${export_arr[0]}"x = "${recover_arr[0]}"x ];then
      ifpassed="PASSED"
    fi
    echo "recover: ${recover_md5s[$i]} $ifpassed"
    IFS=$OOIFS
  done
  IFS=$OIFS
}
# step 1: export image, snapshot
# Export one format-1 and one format-2 image, no snapshots.
function do_export_nosnap()
{
  export_image image_v1_nosnap 1
  export_image image_v2_nosnap 2
}
# Export one format-1 and one format-2 image, each with nine snapshots.
function do_export_snap()
{
  export_snapshots image_v1_snap 1
  export_snapshots image_v2_snap 2
}
# step 2: stop ceph cluster and gen database
# Shut the whole cluster down (retrying until no ceph daemon survives)
# and then build the rbd-recover-tool database from the offline osds.
function stop_cluster_gen_database()
{
  trap 'echo stop ceph cluster failed; exit;' INT HUP
  # Keep killing daemons until check_ceph_service reports all clear.
  while true
  do
    stop_ceph
    sleep 2
    if check_ceph_service; then
      break
    fi
  done
  # Force a fresh database build now that the cluster is down.
  echo 0 >$gen_db
  do_gen_database
}
# step 3: recover image,snapshot
# Recover both snapshot-less test images from the offline cluster.
function do_recover_nosnap()
{
  recover_image image_v1_nosnap
  recover_image image_v2_nosnap
}
# Recover both snapshotted test images (head + snap1..snap9).
function do_recover_snap()
{
  recover_snapshots image_v1_snap
  recover_snapshots image_v2_snap
}
# step 4: check md5sum pair<export_md5sum, recover_md5sum>
# Compare export-time and recover-time md5s for the snapshot-less images.
function do_check_recover_nosnap()
{
  local image1=image_v1_nosnap
  local image2=image_v2_nosnap
  local export_md5_1=$export_dir/pool_$pool_id/$image1/@md5_nosnap
  local export_md5_2=$export_dir/pool_$pool_id/$image2/@md5_nosnap
  local recover_md5_1=$recover_dir/pool_$pool_id/$image1/@md5_nosnap
  local recover_md5_2=$recover_dir/pool_$pool_id/$image2/@md5_nosnap
  check_recover_nosnap $export_md5_1 $recover_md5_1 $image1
  check_recover_nosnap $export_md5_2 $recover_md5_2 $image2
}
# Compare export-time and recover-time md5s for the snapshotted images.
function do_check_recover_snap()
{
  local image1=image_v1_snap
  local image2=image_v2_snap
  local export_md5_1=$export_dir/pool_$pool_id/$image1/@md5
  local export_md5_2=$export_dir/pool_$pool_id/$image2/@md5
  local recover_md5_1=$recover_dir/pool_$pool_id/$image1/@md5
  local recover_md5_2=$recover_dir/pool_$pool_id/$image2/@md5
  check_recover_snapshots $export_md5_1 $recover_md5_1 $image1
  check_recover_snapshots $export_md5_2 $recover_md5_2 $image2
}
# End-to-end test: snapshot-less images only.
function test_case_1()
{
  do_export_nosnap
  stop_cluster_gen_database
  do_recover_nosnap
  do_check_recover_nosnap
}
# End-to-end test: snapshotted images only.
function test_case_2()
{
  do_export_snap
  stop_cluster_gen_database
  do_recover_snap
  do_check_recover_snap
}
# End-to-end test: both snapshot-less and snapshotted images in one run.
function test_case_3()
{
  do_export_nosnap
  do_export_snap
  stop_cluster_gen_database
  do_recover_nosnap
  do_recover_snap
  do_check_recover_nosnap
  do_check_recover_snap
}
# Entry point: $1 is the storage directory for exported/recovered images.
init $*
test_case_3
| 13,603 | 24.053407 | 165 | sh |
null | ceph-main/src/tools/rbd_wnbd/rbd_wnbd.cc | /*
* rbd-wnbd - RBD in userspace
*
* Copyright (C) 2020 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <objidl.h>
// LOCK_WRITE is also defined by objidl.h, we have to avoid
// a collision.
#undef LOCK_WRITE
#include "include/int_types.h"
#include <atomic>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>
#include "wnbd_handler.h"
#include "wnbd_wmi.h"
#include "rbd_wnbd.h"
#include <fstream>
#include <memory>
#include <regex>
#include "common/Formatter.h"
#include "common/TextTable.h"
#include "common/ceph_argparse.h"
#include "common/config.h"
#include "common/debug.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/version.h"
#include "common/win32/service.h"
#include "common/win32/wstring.h"
#include "common/admin_socket_client.h"
#include "global/global_init.h"
#include "include/uuid.h"
#include "include/rados/librados.hpp"
#include "include/rbd/librbd.hpp"
#include <shellapi.h>
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "rbd-wnbd: "
using namespace std;
// Wait 2s before recreating the wmi subscription in case of errors
#define WMI_SUBSCRIPTION_RETRY_INTERVAL 2
// SCSI adapter modification events aren't received until the entire polling
// interval has elapsed (unlike other WMI classes, such as Msvm_ComputerSystem).
// With longer intervals, it even seems to miss events. For this reason,
// we're using a relatively short interval but have adapter state monitoring
// as an optional feature, mainly used for dev / driver certification purposes.
#define WNBD_ADAPTER_WMI_POLL_INTERVAL 2
// Wait for wmi events up to two seconds
#define WMI_EVENT_TIMEOUT 2
// Check whether the process identified by "pid" is still alive.
bool is_process_running(DWORD pid)
{
  HANDLE process = OpenProcess(SYNCHRONIZE, FALSE, pid);
  if (!process) {
    // OpenProcess fails for exited processes (and on access denied).
    // Treat both as "not running" instead of passing a NULL handle
    // to WaitForSingleObject / CloseHandle.
    return false;
  }
  // A zero timeout just polls the process object: WAIT_TIMEOUT means
  // it isn't signaled yet, i.e. the process is still running.
  DWORD ret = WaitForSingleObject(process, 0);
  CloseHandle(process);
  return ret == WAIT_TIMEOUT;
}
// Retrieve the full list of active WNBD connections from the driver.
// On success (returns 0), *conn_list receives a calloc()-ed buffer that
// the caller must free(); on failure a Windows error code is returned
// and no buffer is handed out.
DWORD WNBDActiveDiskIterator::fetch_list(
  PWNBD_CONNECTION_LIST* conn_list)
{
  DWORD curr_buff_sz = 0;
  DWORD buff_sz = 0;
  DWORD err = 0;
  PWNBD_CONNECTION_LIST tmp_list = NULL;
  // We're using a loop because other connections may show up by the time
  // we retry.
  do {
    if (tmp_list)
      free(tmp_list);
    if (buff_sz) {
      tmp_list = (PWNBD_CONNECTION_LIST) calloc(1, buff_sz);
      if (!tmp_list) {
        derr << "Could not allocate " << buff_sz << " bytes." << dendl;
        err = ERROR_NOT_ENOUGH_MEMORY;
        break;
      }
    }
    curr_buff_sz = buff_sz;
    // If the buffer is too small, the return value is 0 and "BufferSize"
    // will contain the required size. This is counterintuitive, but
    // Windows drivers can't return a buffer as well as a non-zero status.
    err = WnbdList(tmp_list, &buff_sz);
    if (err)
      break;
  } while (curr_buff_sz < buff_sz);
  // Only transfer ownership of the buffer on success.
  if (err) {
    if (tmp_list)
      free(tmp_list);
  } else {
    *conn_list = tmp_list;
  }
  return err;
}
// Fetch the active connection list once at construction time. Any
// failure is recorded in "error" and surfaced through get_error().
WNBDActiveDiskIterator::WNBDActiveDiskIterator()
{
  DWORD status = WNBDActiveDiskIterator::fetch_list(&conn_list);
  if (status == ERROR_OPEN_FAILED) {
    // The WNBD adapter device could not be opened.
    error = -ENOENT;
  } else if (status != 0) {
    error = -EINVAL;
  }
}
WNBDActiveDiskIterator::~WNBDActiveDiskIterator()
{
  // Release the driver-provided connection list, if one was fetched.
  if (conn_list != NULL) {
    free(conn_list);
    conn_list = NULL;
  }
}
// Yield the next active WNBD connection owned by rbd-wnbd, combining
// the registry-stored mapping config with live driver state. Returns
// false once the list is exhausted or on error (see get_error()).
bool WNBDActiveDiskIterator::get(Config *cfg)
{
  // Iterate instead of recursing so a long run of foreign disks
  // cannot grow the stack.
  while (true) {
    index += 1;
    *cfg = Config();
    if (!conn_list || index >= (int)conn_list->Count) {
      return false;
    }
    auto conn_info = conn_list->Connections[index];
    auto conn_props = conn_info.Properties;
    // Skip disks mapped by other WNBD consumers.
    if (strncmp(conn_props.Owner, RBD_WNBD_OWNER_NAME,
                WNBD_MAX_OWNER_LENGTH)) {
      dout(10) << "Ignoring disk: " << conn_props.InstanceName
               << ". Owner: " << conn_props.Owner << dendl;
      continue;
    }
    error = load_mapping_config_from_registry(conn_props.InstanceName, cfg);
    if (error) {
      derr << "Could not load registry disk info for: "
           << conn_props.InstanceName << ". Error: " << error << dendl;
      return false;
    }
    cfg->disk_number = conn_info.DiskNumber;
    cfg->serial_number = std::string(conn_props.SerialNumber);
    cfg->pid = conn_props.Pid;
    cfg->active = cfg->disk_number > 0 && is_process_running(conn_props.Pid);
    cfg->wnbd_mapped = true;
    return true;
  }
}
// Open the service registry key and count its subkeys (one per stored
// mapping). Errors are recorded in "error"; a missing key is treated
// as "no mappings" rather than a failure.
RegistryDiskIterator::RegistryDiskIterator()
{
  // NOTE(review): reg_key is heap-allocated here; presumably released
  // by the class destructor declared elsewhere -- confirm no leak.
  reg_key = new RegistryKey(g_ceph_context, HKEY_LOCAL_MACHINE,
                            SERVICE_REG_KEY, false);
  if (!reg_key->hKey) {
    if (!reg_key->missingKey)
      error = -EINVAL;
    return;
  }
  if (RegQueryInfoKey(reg_key->hKey, NULL, NULL, NULL, &subkey_count,
                      NULL, NULL, NULL, NULL, NULL, NULL, NULL)) {
    derr << "Could not query registry key: " << SERVICE_REG_KEY << dendl;
    error = -EINVAL;
    return;
  }
}
// Yield the mapping config stored under the next registry subkey.
// Returns false when the enumeration is exhausted or on error
// (distinguish via get_error()).
bool RegistryDiskIterator::get(Config *cfg)
{
  index += 1;
  *cfg = Config();
  if (!reg_key->hKey || !subkey_count) {
    return false;
  }
  char subkey_name[MAX_PATH] = {0};
  DWORD subkey_name_sz = MAX_PATH;
  int err = RegEnumKeyEx(
    reg_key->hKey, index, subkey_name, &subkey_name_sz,
    NULL, NULL, NULL, NULL);
  switch (err) {
  case 0:
    // ERROR_SUCCESS: got the next subkey name.
    break;
  case ERROR_NO_MORE_ITEMS:
    return false;
  default:
    derr << "Could not enumerate registry. Error: " << err << dendl;
    error = -EINVAL;
    return false;
  }
  if (load_mapping_config_from_registry(subkey_name, cfg)) {
    error = -EINVAL;
    return false;
  }
  return true;
}
// Iterate over every RBD mapping: first the disks the driver reports
// as active, then any remaining registry entries (inactive mappings)
// that were not already yielded.
bool WNBDDiskIterator::get(Config *cfg)
{
  *cfg = Config();
  if (active_iterator.get(cfg)) {
    // Remember active device paths so the registry pass can skip them.
    active_devices.insert(cfg->devpath);
    return true;
  }
  error = active_iterator.get_error();
  if (error) {
    dout(5) << ": WNBD iterator error: " << error << dendl;
    return false;
  }
  bool found = false;
  while (registry_iterator.get(cfg)) {
    if (active_devices.count(cfg->devpath)) {
      // Already yielded from the active disk list.
      continue;
    }
    found = true;
    break;
  }
  if (found) {
    return true;
  }
  error = registry_iterator.get_error();
  if (error) {
    dout(5) << ": Registry iterator error: " << error << dendl;
  }
  return false;
}
// Fill "path" with the absolute path of the current executable.
// Returns 0 on success, -EINVAL on failure.
int get_exe_path(std::string& path) {
  char buffer[MAX_PATH];
  int ret = GetModuleFileNameA(NULL, buffer, MAX_PATH);
  // A zero return means failure; a return equal to the buffer size
  // means the path was truncated. Reject both.
  if (ret == 0 || ret == MAX_PATH) {
    DWORD err = GetLastError();
    derr << "Could not retrieve executable path. "
         << "Error: " << win32_strerror(err) << dendl;
    return -EINVAL;
  }
  path = buffer;
  return 0;
}
// Reconstruct the process command line (minus argv[0]), quoting every
// argument so it can be stored and replayed later.
std::string get_cli_args() {
  std::ostringstream joined;
  for (int idx = 1; idx < __argc; idx++) {
    if (idx > 1) {
      joined << " ";
    }
    joined << std::quoted(__argv[idx]);
  }
  return joined.str();
}
// Forward a "map" request to the running ceph service over its named
// pipe so the mapping is created (and supervised) by the service
// instead of this short-lived CLI process.
// Returns 0 on success, -EINVAL if the pipe transaction fails, or the
// error status reported by the service.
int send_map_request(std::string arguments) {
  dout(15) << __func__ << ": command arguments: " << arguments << dendl;
  BYTE request_buff[SERVICE_PIPE_BUFFSZ] = { 0 };
  ServiceRequest* request = (ServiceRequest*) request_buff;
  request->command = Connect;
  // The buffer is zero-initialized, so the copied arguments stay NUL
  // terminated as long as they don't fill the remaining space exactly.
  // NOTE(review): an argument string of exactly that size would be
  // unterminated -- confirm the service side tolerates this.
  arguments.copy(
    (char*)request->arguments,
    SERVICE_PIPE_BUFFSZ - FIELD_OFFSET(ServiceRequest, arguments));
  ServiceReply reply = { 0 };
  DWORD bytes_read = 0;
  // CallNamedPipe connects, performs one write/read transaction and
  // disconnects in a single call.
  BOOL success = CallNamedPipe(
    SERVICE_PIPE_NAME,
    request_buff,
    SERVICE_PIPE_BUFFSZ,
    &reply,
    sizeof(reply),
    &bytes_read,
    DEFAULT_MAP_TIMEOUT_MS);
  if (!success) {
    DWORD err = GetLastError();
    derr << "Could not send device map request. "
         << "Make sure that the ceph service is running. "
         << "Error: " << win32_strerror(err) << dendl;
    return -EINVAL;
  }
  if (reply.status) {
    derr << "The ceph service failed to map the image. "
         << "Check the log file or pass '-f' (foreground mode) "
         << "for additional information. "
         << "Error: " << cpp_strerror(reply.status)
         << dendl;
  }
  return reply.status;
}
// Spawn a subprocess using the specified "rbd-wnbd" command
// arguments. A pipe is passed to the child process,
// which will allow it to communicate the mapping status
int map_device_using_suprocess(std::string arguments, int timeout_ms)
{
STARTUPINFO si;
PROCESS_INFORMATION pi;
char ch;
DWORD err = 0, status = 0;
int exit_code = 0;
std::ostringstream command_line;
std::string exe_path;
// Windows async IO context
OVERLAPPED connect_o, read_o;
HANDLE connect_event = NULL, read_event = NULL;
// Used for waiting on multiple events that are going to be initialized later.
HANDLE wait_events[2] = { INVALID_HANDLE_VALUE, INVALID_HANDLE_VALUE};
DWORD bytes_read = 0;
// We may get a command line containing an old pipe handle when
// recreating mappings, so we'll have to replace it.
std::regex pipe_pattern("([\'\"]?--pipe-name[\'\"]? +[\'\"]?[^ ]+[\'\"]?)");
uuid_d uuid;
uuid.generate_random();
std::ostringstream pipe_name;
pipe_name << "\\\\.\\pipe\\rbd-wnbd-" << uuid;
// Create an unique named pipe to communicate with the child. */
HANDLE pipe_handle = CreateNamedPipe(
pipe_name.str().c_str(),
PIPE_ACCESS_INBOUND | FILE_FLAG_FIRST_PIPE_INSTANCE |
FILE_FLAG_OVERLAPPED,
PIPE_WAIT,
1, // Only accept one instance
SERVICE_PIPE_BUFFSZ,
SERVICE_PIPE_BUFFSZ,
SERVICE_PIPE_TIMEOUT_MS,
NULL);
if (pipe_handle == INVALID_HANDLE_VALUE) {
err = GetLastError();
derr << "CreateNamedPipe failed: " << win32_strerror(err) << dendl;
exit_code = -ECHILD;
goto finally;
}
connect_event = CreateEvent(0, TRUE, FALSE, NULL);
read_event = CreateEvent(0, TRUE, FALSE, NULL);
if (!connect_event || !read_event) {
err = GetLastError();
derr << "CreateEvent failed: " << win32_strerror(err) << dendl;
exit_code = -ECHILD;
goto finally;
}
connect_o.hEvent = connect_event;
read_o.hEvent = read_event;
status = ConnectNamedPipe(pipe_handle, &connect_o);
err = GetLastError();
if (status || err != ERROR_IO_PENDING) {
if (status)
err = status;
derr << "ConnectNamedPipe failed: " << win32_strerror(err) << dendl;
exit_code = -ECHILD;
goto finally;
}
err = 0;
dout(5) << __func__ << ": command arguments: " << arguments << dendl;
// We'll avoid running arbitrary commands, instead using the executable
// path of this process (expected to be the full rbd-wnbd.exe path).
err = get_exe_path(exe_path);
if (err) {
exit_code = -EINVAL;
goto finally;
}
command_line << std::quoted(exe_path)
<< " " << std::regex_replace(arguments, pipe_pattern, "")
<< " --pipe-name " << pipe_name.str();
dout(5) << __func__ << ": command line: " << command_line.str() << dendl;
GetStartupInfo(&si);
// Create a detached child
if (!CreateProcess(NULL, (char*)command_line.str().c_str(),
NULL, NULL, FALSE, DETACHED_PROCESS,
NULL, NULL, &si, &pi)) {
err = GetLastError();
derr << "CreateProcess failed: " << win32_strerror(err) << dendl;
exit_code = -ECHILD;
goto finally;
}
wait_events[0] = connect_event;
wait_events[1] = pi.hProcess;
status = WaitForMultipleObjects(2, wait_events, FALSE, timeout_ms);
switch(status) {
case WAIT_OBJECT_0:
if (!GetOverlappedResult(pipe_handle, &connect_o, &bytes_read, TRUE)) {
err = GetLastError();
derr << "Couldn't establish a connection with the child process. "
<< "Error: " << win32_strerror(err) << dendl;
exit_code = -ECHILD;
goto clean_process;
}
// We have an incoming connection.
break;
case WAIT_OBJECT_0 + 1:
// The process has exited prematurely.
goto clean_process;
case WAIT_TIMEOUT:
derr << "Timed out waiting for child process connection." << dendl;
goto clean_process;
default:
derr << "Failed waiting for child process. Status: " << status << dendl;
goto clean_process;
}
// Block and wait for child to say it is ready.
dout(5) << __func__ << ": waiting for child notification." << dendl;
if (!ReadFile(pipe_handle, &ch, 1, NULL, &read_o)) {
err = GetLastError();
if (err != ERROR_IO_PENDING) {
derr << "Receiving child process reply failed with: "
<< win32_strerror(err) << dendl;
exit_code = -ECHILD;
goto clean_process;
}
}
wait_events[0] = read_event;
wait_events[1] = pi.hProcess;
// The RBD daemon is expected to write back right after opening the
// pipe. We'll use the same timeout value for now.
status = WaitForMultipleObjects(2, wait_events, FALSE, timeout_ms);
switch(status) {
case WAIT_OBJECT_0:
if (!GetOverlappedResult(pipe_handle, &read_o, &bytes_read, TRUE)) {
err = GetLastError();
derr << "Receiving child process reply failed with: "
<< win32_strerror(err) << dendl;
exit_code = -ECHILD;
goto clean_process;
}
break;
case WAIT_OBJECT_0 + 1:
// The process has exited prematurely.
goto clean_process;
case WAIT_TIMEOUT:
derr << "Timed out waiting for child process message." << dendl;
goto clean_process;
default:
derr << "Failed waiting for child process. Status: " << status << dendl;
goto clean_process;
}
dout(5) << __func__ << ": received child notification." << dendl;
goto finally;
clean_process:
if (!is_process_running(pi.dwProcessId)) {
GetExitCodeProcess(pi.hProcess, (PDWORD)&exit_code);
derr << "Daemon failed with: " << cpp_strerror(exit_code) << dendl;
} else {
// The process closed the pipe without notifying us or exiting.
// This is quite unlikely, but we'll terminate the process.
dout(0) << "Terminating unresponsive process." << dendl;
TerminateProcess(pi.hProcess, 1);
exit_code = -EINVAL;
}
finally:
if (exit_code)
derr << "Could not start RBD daemon." << dendl;
if (pipe_handle)
CloseHandle(pipe_handle);
if (connect_event)
CloseHandle(connect_event);
if (read_event)
CloseHandle(read_event);
return exit_code;
}
// Console control handler (registered via SetConsoleCtrlHandler):
// any control event triggers a clean shutdown of the active WNBD
// handler, if one exists.
BOOL WINAPI console_handler_routine(DWORD dwCtrlType)
{
  dout(0) << "Received control signal: " << dwCtrlType
          << ". Exiting." << dendl;
  // "shutdown_lock" and "handler" are file-level globals; the lock
  // serializes this path with other shutdown/teardown code.
  std::unique_lock l{shutdown_lock};
  if (handler)
    handler->shutdown();
  return true;
}
// Persist the mapping parameters of "cfg" under the service registry
// key (SERVICE_REG_KEY\<devpath>) so the mapping can be recreated
// after a crash or reboot. Returns 0 on success, -EINVAL on failure.
int save_config_to_registry(Config* cfg)
{
  std::string key_path{ SERVICE_REG_KEY };
  key_path.append("\\").append(cfg->devpath);
  auto reg_key = RegistryKey(
    g_ceph_context, HKEY_LOCAL_MACHINE, key_path.c_str(), true);
  if (!reg_key.hKey) {
    return -EINVAL;
  }
  // Registry writes are immediately available to other processes.
  // Still, we'll do a flush to ensure that the mapping can be
  // recreated after a system crash.
  bool failed =
    reg_key.set("pid", getpid()) ||
    reg_key.set("devpath", cfg->devpath) ||
    reg_key.set("poolname", cfg->poolname) ||
    reg_key.set("nsname", cfg->nsname) ||
    reg_key.set("imgname", cfg->imgname) ||
    reg_key.set("snapname", cfg->snapname) ||
    reg_key.set("command_line", get_cli_args()) ||
    reg_key.set("persistent", cfg->persistent) ||
    reg_key.set("admin_sock_path", g_conf()->admin_socket) ||
    reg_key.flush();
  return failed ? -EINVAL : 0;
}
// Drop the registry entry associated with this mapping.
int remove_config_from_registry(Config* cfg)
{
  std::string key_path{ SERVICE_REG_KEY };
  key_path.append("\\").append(cfg->devpath);
  return RegistryKey::remove(
    g_ceph_context, HKEY_LOCAL_MACHINE, key_path.c_str());
}
// Load the mapping parameters stored under SERVICE_REG_KEY\<devpath>.
// Returns -ENOENT when no entry exists, -EINVAL on other registry
// errors and 0 on success. Missing individual values are tolerated,
// leaving the corresponding cfg fields at their defaults.
int load_mapping_config_from_registry(string devpath, Config* cfg)
{
  std::string key_path{ SERVICE_REG_KEY };
  key_path.append("\\").append(devpath);
  auto reg_key = RegistryKey(
    g_ceph_context, HKEY_LOCAL_MACHINE, key_path.c_str(), false);
  if (!reg_key.hKey) {
    return reg_key.missingKey ? -ENOENT : -EINVAL;
  }
  reg_key.get("devpath", cfg->devpath);
  reg_key.get("poolname", cfg->poolname);
  reg_key.get("nsname", cfg->nsname);
  reg_key.get("imgname", cfg->imgname);
  reg_key.get("snapname", cfg->snapname);
  reg_key.get("command_line", cfg->command_line);
  reg_key.get("persistent", cfg->persistent);
  reg_key.get("admin_sock_path", cfg->admin_sock_path);
  return 0;
}
// Recreate all persistent mappings recorded in the registry, using a
// worker pool of "worker_count" threads. "total_timeout" bounds the
// whole operation while "image_map_timeout" bounds each individual
// image map (both in seconds). Non-persistent leftovers are purged.
// Returns 0 on success or the last error encountered.
int restart_registered_mappings(
    int worker_count,
    int total_timeout,
    int image_map_timeout)
{
  Config cfg;
  WNBDDiskIterator iterator;
  int r;
  std::atomic<int> err = 0;
  dout(0) << "remounting persistent disks" << dendl;
  // NOTE(review): the max() with the original value appears to guard
  // against int overflow in the seconds->ms conversion -- confirm.
  int total_timeout_ms = max(total_timeout, total_timeout * 1000);
  int image_map_timeout_ms = max(image_map_timeout, image_map_timeout * 1000);
  LARGE_INTEGER start_t, counter_freq;
  QueryPerformanceFrequency(&counter_freq);
  QueryPerformanceCounter(&start_t);
  boost::asio::thread_pool pool(worker_count);
  while (iterator.get(&cfg)) {
    if (cfg.command_line.empty()) {
      derr << "Could not recreate mapping, missing command line: "
           << cfg.devpath << dendl;
      err = -EINVAL;
      continue;
    }
    if (cfg.wnbd_mapped) {
      dout(1) << __func__ << ": device already mapped: "
              << cfg.devpath << dendl;
      continue;
    }
    if (!cfg.persistent) {
      dout(1) << __func__ << ": cleaning up non-persistent mapping: "
              << cfg.devpath << dendl;
      r = remove_config_from_registry(&cfg);
      if (r) {
        derr << __func__ << ": could not clean up non-persistent mapping: "
             << cfg.devpath << dendl;
      }
      continue;
    }
    // Map each remaining image on the pool; "cfg" is captured by value
    // since the loop keeps reusing it.
    boost::asio::post(pool,
      [cfg, start_t, counter_freq, total_timeout_ms,
       image_map_timeout_ms, &err]()
      {
        // Compute how much of the total budget is left, capped by the
        // per-image timeout.
        LARGE_INTEGER curr_t, elapsed_ms;
        QueryPerformanceCounter(&curr_t);
        elapsed_ms.QuadPart = curr_t.QuadPart - start_t.QuadPart;
        elapsed_ms.QuadPart *= 1000;
        elapsed_ms.QuadPart /= counter_freq.QuadPart;
        int time_left_ms = max(
          0,
          total_timeout_ms - (int)elapsed_ms.QuadPart);
        time_left_ms = min(image_map_timeout_ms, time_left_ms);
        if (!time_left_ms) {
          err = -ETIMEDOUT;
          return;
        }
        dout(1) << "Remapping: " << cfg.devpath
                << ". Timeout: " << time_left_ms << " ms." << dendl;
        // We'll try to map all devices and return a non-zero value
        // if any of them fails.
        int r = map_device_using_suprocess(cfg.command_line, time_left_ms);
        if (r) {
          err = r;
          derr << "Could not create mapping: "
               << cfg.devpath << ". Error: " << r << dendl;
        } else {
          dout(1) << "Successfully remapped: " << cfg.devpath << dendl;
        }
      });
  }
  pool.join();
  r = iterator.get_error();
  if (r) {
    derr << "Could not fetch all mappings. Error: " << r << dendl;
    err = r;
  }
  return err;
}
// Disconnect every active rbd-wnbd mapping, using "worker_count"
// parallel workers. "soft_disconnect_timeout" (seconds, 0 = infinite)
// is a cumulative budget: once it elapses, remaining disks are hard
// disconnected. When "unregister" is set the registry entries are
// removed as well. Returns 0 on success or the last error seen.
int disconnect_all_mappings(
    bool unregister,
    bool hard_disconnect,
    int soft_disconnect_timeout,
    int worker_count)
{
  // Although not generally recommended, soft_disconnect_timeout can be 0,
  // which means infinite timeout.
  ceph_assert(soft_disconnect_timeout >= 0);
  ceph_assert(worker_count > 0);
  int64_t timeout_ms = soft_disconnect_timeout * 1000;
  Config cfg;
  WNBDActiveDiskIterator iterator;
  int r;
  std::atomic<int> err = 0;
  boost::asio::thread_pool pool(worker_count);
  LARGE_INTEGER start_t, counter_freq;
  QueryPerformanceFrequency(&counter_freq);
  QueryPerformanceCounter(&start_t);
  while (iterator.get(&cfg)) {
    // "mutable" so the captured cfg copy can carry per-task disconnect
    // settings computed below.
    boost::asio::post(pool,
      [cfg, start_t, counter_freq, timeout_ms,
       hard_disconnect, unregister, &err]() mutable
      {
        // Remaining share of the cumulative soft-disconnect budget.
        LARGE_INTEGER curr_t, elapsed_ms;
        QueryPerformanceCounter(&curr_t);
        elapsed_ms.QuadPart = curr_t.QuadPart - start_t.QuadPart;
        elapsed_ms.QuadPart *= 1000;
        elapsed_ms.QuadPart /= counter_freq.QuadPart;
        int64_t time_left_ms = max((int64_t)0, timeout_ms - elapsed_ms.QuadPart);
        cfg.hard_disconnect = hard_disconnect || !time_left_ms;
        cfg.hard_disconnect_fallback = true;
        cfg.soft_disconnect_timeout = time_left_ms / 1000;
        dout(1) << "Removing mapping: " << cfg.devpath
                << ". Timeout: " << cfg.soft_disconnect_timeout
                << "s. Hard disconnect: " << cfg.hard_disconnect
                << dendl;
        int r = do_unmap(&cfg, unregister);
        if (r) {
          err = r;
          derr << "Could not remove mapping: " << cfg.devpath
               << ". Error: " << r << dendl;
        } else {
          dout(1) << "Successfully removed mapping: " << cfg.devpath << dendl;
        }
      });
  }
  pool.join();
  r = iterator.get_error();
  if (r == -ENOENT) {
    // No WNBD adapter implies no mappings to tear down.
    dout(0) << __func__ << ": wnbd adapter unavailable, "
            << "assuming that no wnbd mappings exist." << dendl;
    err = 0;
  } else if (r) {
    derr << "Could not fetch all mappings. Error: " << r << dendl;
    err = r;
  }
  return err;
}
// Windows service wrapper for rbd-wnbd: restores persistent mappings at
// startup, accepts new map requests over a named pipe, optionally
// monitors WNBD adapter WMI events, and tears mappings down on stop.
class RBDService : public ServiceBase {
  private:
    // Options forwarded from the "service" CLI subcommand.
    bool hard_disconnect;
    int soft_disconnect_timeout;
    int thread_count;
    int service_start_timeout;
    int image_map_timeout;
    bool remap_failure_fatal;
    bool adapter_monitoring_enabled;
    std::thread adapter_monitor_thread;
    ceph::mutex start_lock = ceph::make_mutex("RBDService::StartLocker");
    ceph::mutex shutdown_lock = ceph::make_mutex("RBDService::ShutdownLocker");
    bool started = false;
    // NOTE(review): typo, should read "stop_requested" (private member,
    // safe to rename within this class).
    std::atomic<bool> stop_requsted = false;
  public:
    RBDService(bool _hard_disconnect,
               int _soft_disconnect_timeout,
               int _thread_count,
               int _service_start_timeout,
               int _image_map_timeout,
               bool _remap_failure_fatal,
               bool _adapter_monitoring_enabled)
      : ServiceBase(g_ceph_context)
      , hard_disconnect(_hard_disconnect)
      , soft_disconnect_timeout(_soft_disconnect_timeout)
      , thread_count(_thread_count)
      , service_start_timeout(_service_start_timeout)
      , image_map_timeout(_image_map_timeout)
      , remap_failure_fatal(_remap_failure_fatal)
      , adapter_monitoring_enabled(_adapter_monitoring_enabled)
    {
    }
    // Dispatch one request received over the admin pipe.
    static int execute_command(ServiceRequest* request)
    {
      switch(request->command) {
        case Connect:
          dout(1) << "Received device connect request. Command line: "
                  << (char*)request->arguments << dendl;
          // TODO: use the configured service map timeout.
          // TODO: add ceph.conf options.
          return map_device_using_suprocess(
            (char*)request->arguments, DEFAULT_MAP_TIMEOUT_MS);
        default:
          dout(1) << "Received unsupported command: "
                  << request->command << dendl;
          return -ENOSYS;
      }
    }
    // Per-connection worker: read one request, execute it, write the
    // reply back and close the pipe instance.
    static DWORD handle_connection(HANDLE pipe_handle)
    {
      PBYTE message[SERVICE_PIPE_BUFFSZ] = { 0 };
      DWORD bytes_read = 0, bytes_written = 0;
      DWORD err = 0;
      DWORD reply_sz = 0;
      ServiceReply reply = { 0 };
      dout(20) << __func__ << ": Receiving message." << dendl;
      BOOL success = ReadFile(
        pipe_handle, message, SERVICE_PIPE_BUFFSZ,
        &bytes_read, NULL);
      if (!success || !bytes_read) {
        err = GetLastError();
        derr << "Could not read service command: "
             << win32_strerror(err) << dendl;
        goto exit;
      }
      dout(20) << __func__ << ": Executing command." << dendl;
      reply.status = execute_command((ServiceRequest*) message);
      reply_sz = sizeof(reply);
      dout(20) << __func__ << ": Sending reply. Status: "
               << reply.status << dendl;
      success = WriteFile(
        pipe_handle, &reply, reply_sz, &bytes_written, NULL);
      if (!success || reply_sz != bytes_written) {
        err = GetLastError();
        derr << "Could not send service command result: "
             << win32_strerror(err) << dendl;
      }
    exit:
      dout(20) << __func__ << ": Cleaning up connection." << dendl;
      FlushFileBuffers(pipe_handle);
      DisconnectNamedPipe(pipe_handle);
      CloseHandle(pipe_handle);
      return err;
    }
    // We have to support Windows server 2016. Unix sockets only work on
    // WS 2019, so we can't use the Ceph admin socket abstraction.
    // Getting the Ceph admin sockets to work with Windows named pipes
    // would require quite a few changes.
    static DWORD accept_pipe_connection() {
      DWORD err = 0;
      // We're currently using default ACLs, which grant full control to the
      // LocalSystem account and administrator as well as the owner.
      dout(20) << __func__ << ": opening new pipe instance" << dendl;
      HANDLE pipe_handle = CreateNamedPipe(
        SERVICE_PIPE_NAME,
        PIPE_ACCESS_DUPLEX,
        PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT,
        PIPE_UNLIMITED_INSTANCES,
        SERVICE_PIPE_BUFFSZ,
        SERVICE_PIPE_BUFFSZ,
        SERVICE_PIPE_TIMEOUT_MS,
        NULL);
      if (pipe_handle == INVALID_HANDLE_VALUE) {
        err = GetLastError();
        derr << "CreatePipe failed: " << win32_strerror(err) << dendl;
        return -EINVAL;
      }
      dout(20) << __func__ << ": waiting for connections." << dendl;
      BOOL connected = ConnectNamedPipe(pipe_handle, NULL);
      if (!connected) {
        err = GetLastError();
        // ERROR_PIPE_CONNECTED means a client connected between
        // CreateNamedPipe and ConnectNamedPipe -- not an error.
        if (err != ERROR_PIPE_CONNECTED) {
          derr << "Pipe connection failed: " << win32_strerror(err) << dendl;
          CloseHandle(pipe_handle);
          return err;
        }
      }
      dout(20) << __func__ << ": Connection received." << dendl;
      // We'll handle the connection in a separate thread and at the same time
      // accept a new connection.
      // NOTE(review): handle_connection takes HANDLE rather than LPVOID;
      // the LPTHREAD_START_ROUTINE cast relies on them being
      // layout-compatible -- confirm this is intentional.
      HANDLE handler_thread = CreateThread(
        NULL, 0, (LPTHREAD_START_ROUTINE) handle_connection, pipe_handle, 0, 0);
      if (!handler_thread) {
        err = GetLastError();
        derr << "Could not start pipe connection handler thread: "
             << win32_strerror(err) << dendl;
        CloseHandle(pipe_handle);
      } else {
        CloseHandle(handler_thread);
      }
      return err;
    }
    // Endless accept loop; each accepted connection is served on its
    // own thread.
    static int pipe_server_loop(LPVOID arg)
    {
      dout(5) << "Accepting admin pipe connections." << dendl;
      while (1) {
        // This call will block until a connection is received, which will
        // then be handled in a separate thread. The function returns, allowing
        // us to accept another simultaneous connection.
        accept_pipe_connection();
      }
      return 0;
    }
    // Launch pipe_server_loop on a background thread.
    int create_pipe_server() {
      HANDLE handler_thread = CreateThread(
        NULL, 0, (LPTHREAD_START_ROUTINE) pipe_server_loop, NULL, 0, 0);
      DWORD err = 0;
      if (!handler_thread) {
        err = GetLastError();
        derr << "Could not start pipe server: " << win32_strerror(err) << dendl;
      } else {
        CloseHandle(handler_thread);
      }
      return err;
    }
    // WMI event loop: watch WNBD adapter lifecycle events and remount
    // the registered images when the adapter is (re)created. Runs until
    // stop_requsted is set; recreates the subscription on WMI errors.
    void monitor_wnbd_adapter()
    {
      dout(5) << __func__ << ": initializing COM" << dendl;
      // Initialize the Windows COM library for this thread.
      COMBootstrapper com_bootstrapper;
      HRESULT hres = com_bootstrapper.initialize();
      if (FAILED(hres)) {
        return;
      }
      WmiSubscription subscription = subscribe_wnbd_adapter_events(
        WNBD_ADAPTER_WMI_POLL_INTERVAL);
      dout(5) << __func__ << ": initializing wmi subscription" << dendl;
      hres = subscription.initialize();
      dout(0) << "monitoring wnbd adapter state changes" << dendl;
      // The event watcher will wait at most WMI_EVENT_TIMEOUT (2s)
      // and exit the loop if the service is being stopped.
      while (!stop_requsted) {
        IWbemClassObject* object;
        ULONG returned = 0;
        if (FAILED(hres)) {
          derr << "couldn't retrieve wnbd adapter events, wmi hresult: "
               << hres << ". Reestablishing wmi listener in "
               << WMI_SUBSCRIPTION_RETRY_INTERVAL << " seconds." << dendl;
          subscription.close();
          Sleep(WMI_SUBSCRIPTION_RETRY_INTERVAL * 1000);
          dout(20) << "recreating wnbd adapter wmi subscription" << dendl;
          subscription = subscribe_wnbd_adapter_events(
            WNBD_ADAPTER_WMI_POLL_INTERVAL);
          hres = subscription.initialize();
          continue;
        }
        dout(20) << "fetching wnbd adapter events" << dendl;
        hres = subscription.next(
          WMI_EVENT_TIMEOUT * 1000,
          1, // we'll process one event at a time
          &object,
          &returned);
        if (!FAILED(hres) && returned) {
          if (WBEM_S_NO_ERROR == object->InheritsFrom(L"__InstanceCreationEvent")) {
            dout(0) << "wnbd adapter (re)created, remounting disks" << dendl;
            restart_registered_mappings(
              thread_count, service_start_timeout, image_map_timeout);
          } else if (WBEM_S_NO_ERROR == object->InheritsFrom(L"__InstanceDeletionEvent")) {
            dout(0) << "wnbd adapter removed" << dendl;
            // nothing to do here
          } else if (WBEM_S_NO_ERROR == object->InheritsFrom(L"__InstanceModificationEvent")) {
            dout(0) << "wnbd adapter changed" << dendl;
            // TODO: look for state changes and log the availability/status
          }
          object->Release();
        }
      }
      dout(10) << "service stop requested, wnbd event monitor exited" << dendl;
    }
    // Service start callback: remount persistent disks, optionally
    // start the adapter monitor, then start serving pipe requests.
    int run_hook() override {
      std::unique_lock l{start_lock};
      if (started) {
        // The run hook is only supposed to be called once per process,
        // however we're staying cautious.
        derr << "Service already running." << dendl;
        return -EALREADY;
      }
      started = true;
      // Restart registered mappings before accepting new ones.
      int r = restart_registered_mappings(
        thread_count, service_start_timeout, image_map_timeout);
      if (r) {
        if (remap_failure_fatal) {
          derr << "Couldn't remap all images. Cleaning up." << dendl;
          return r;
        } else {
          dout(0) << "Ignoring image remap failure." << dendl;
        }
      }
      if (adapter_monitoring_enabled) {
        adapter_monitor_thread = std::thread(&monitor_wnbd_adapter, this);
      } else {
        dout(0) << "WNBD adapter monitoring disabled." << dendl;
      }
      return create_pipe_server();
    }
    // Invoked when the service is requested to stop.
    int stop_hook() override {
      std::unique_lock l{shutdown_lock};
      stop_requsted = true;
      int r = disconnect_all_mappings(
        false, hard_disconnect, soft_disconnect_timeout, thread_count);
      if (adapter_monitor_thread.joinable()) {
        dout(10) << "waiting for wnbd event monitor thread" << dendl;
        adapter_monitor_thread.join();
        dout(10) << "wnbd event monitor stopped" << dendl;
      }
      return r;
    }
    // Invoked when the system is shutting down.
    int shutdown_hook() override {
      return stop_hook();
    }
};
class WNBDWatchCtx : public librbd::UpdateWatchCtx
{
private:
librados::IoCtx &io_ctx;
WnbdHandler* handler;
librbd::Image ℑ
uint64_t size;
public:
WNBDWatchCtx(librados::IoCtx& io_ctx, WnbdHandler* handler,
librbd::Image& image, uint64_t size)
: io_ctx(io_ctx)
, handler(handler)
, image(image)
, size(size)
{ }
~WNBDWatchCtx() override {}
void handle_notify() override
{
uint64_t new_size;
if (image.size(&new_size) == 0 && new_size != size &&
handler->resize(new_size) == 0) {
size = new_size;
}
}
};
// Print the CLI help text, then the generic Ceph daemon options.
static void usage()
{
  const char* usage_str =R"(
Usage: rbd-wnbd [options] map <image-or-snap-spec>  Map an image to wnbd device
                [options] unmap <device|image-or-snap-spec>  Unmap wnbd device
                [options] list  List mapped wnbd devices
                [options] show <image-or-snap-spec>  Show mapped wnbd device
                stats <image-or-snap-spec>  Show IO counters
                [options] service  Windows service entrypoint,
                                   handling device lifecycle
Map options:
  --device <device path>  Optional mapping unique identifier
  --exclusive             Forbid writes by other clients
  --read-only             Map read-only
  --non-persistent        Do not recreate the mapping when the Ceph service
                          restarts. By default, mappings are persistent
  --io-req-workers        The number of workers that dispatch IO requests.
                          Default: 4
  --io-reply-workers      The number of workers that dispatch IO replies.
                          Default: 4
Unmap options:
  --hard-disconnect              Skip attempting a soft disconnect
  --no-hard-disconnect-fallback  Immediately return an error if the soft
                                 disconnect fails instead of attempting a hard
                                 disconnect as fallback
  --soft-disconnect-timeout      Soft disconnect timeout in seconds. The soft
                                 disconnect operation uses PnP to notify the
                                 Windows storage stack that the device is going to
                                 be disconnected. Storage drivers can block this
                                 operation if there are pending operations,
                                 unflushed caches or open handles. Default: 15
Service options:
  --hard-disconnect              Skip attempting a soft disconnect
  --soft-disconnect-timeout      Cumulative soft disconnect timeout in seconds,
                                 used when disconnecting existing mappings. A hard
                                 disconnect will be issued when hitting the timeout
  --service-thread-count         The number of workers used when mapping or
                                 unmapping images. Default: 8
  --start-timeout                The service start timeout in seconds. Default: 120
  --map-timeout                  Individual image map timeout in seconds. Default: 20
  --remap-failure-fatal          If set, the service will stop when failing to remap
                                 an image at start time, unmapping images that have
                                 been mapped so far.
  --adapter-monitoring-enabled   If set, the service will monitor WNBD adapter WMI
                                 events and remount the images when the adapter gets
                                 recreated. Mainly used for development and driver
                                 certification purposes.
Show|List options:
  --format plain|json|xml Output format (default: plain)
  --pretty-format         Pretty formatting (json and xml)
Common options:
  --wnbd-log-level        libwnbd.dll log level
)";
  std::cout << usage_str;
  generic_server_usage();
}
// The subcommand being executed (map/unmap/list/...), set while
// parsing argv and consulted during global initialization.
static Command cmd = None;
// Derive a mapping identifier from the image spec
// ("[pool/][ns/]image[@snap]") when no explicit --device was given.
// Windows doesn't allow us to request specific disk paths when mapping
// an image, so this string only serves as a unique rbd-wnbd/WNBD key.
int construct_devpath_if_missing(Config* cfg)
{
  if (!cfg->devpath.empty()) {
    return 0;
  }
  if (cfg->imgname.empty()) {
    derr << "Missing image name." << dendl;
    return -EINVAL;
  }
  std::string devpath;
  if (!cfg->poolname.empty()) {
    devpath += cfg->poolname;
    devpath += '/';
  }
  if (!cfg->nsname.empty()) {
    devpath += cfg->nsname;
    devpath += '/';
  }
  devpath += cfg->imgname;
  if (!cfg->snapname.empty()) {
    devpath += '@';
    devpath += cfg->snapname;
  }
  cfg->devpath = std::move(devpath);
  return 0;
}
// Perform Ceph global initialization appropriate for the current
// subcommand: daemon-style init for "map" (Connect) and "service",
// utility-style init (no mon config) for everything else.
// Returns the holder keeping the global CephContext alive.
boost::intrusive_ptr<CephContext> do_global_init(
  int argc, const char *argv[], Config *cfg)
{
  auto args = argv_to_vec(argc, argv);
  code_environment_t code_env;
  int flags;
  switch(cmd) {
    case Connect:
      code_env = CODE_ENVIRONMENT_DAEMON;
      flags = CINIT_FLAG_UNPRIVILEGED_DAEMON_DEFAULTS;
      break;
    case Service:
      code_env = CODE_ENVIRONMENT_DAEMON;
      flags = CINIT_FLAG_UNPRIVILEGED_DAEMON_DEFAULTS |
              CINIT_FLAG_NO_MON_CONFIG |
              CINIT_FLAG_NO_DAEMON_ACTIONS;
      break;
    default:
      code_env = CODE_ENVIRONMENT_UTILITY;
      flags = CINIT_FLAG_NO_MON_CONFIG;
      break;
  }
  global_pre_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, code_env, flags);
  // Avoid cluttering the console when spawning a mapping that will run
  // in the background.
  if (g_conf()->daemonize && cfg->parent_pipe.empty()) {
    flags |= CINIT_FLAG_NO_DAEMON_ACTIONS;
  }
  auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
                         code_env, flags, FALSE);
  // There's no fork on Windows, we should be safe calling this anytime.
  common_init_finish(g_ceph_context);
  global_init_chdir(g_ceph_context);
  return cct;
}
// Map the specified RBD image as a WNBD disk and serve IO requests until
// the device gets disconnected.
//
// When daemonizing (and not already running as the detached child), the
// mapping is delegated to a subprocess and this call only forwards the
// request. The child signals successful initialization back through the
// named pipe received via --pipe-name.
static int do_map(Config *cfg)
{
  int r;

  librados::Rados rados;
  librbd::RBD rbd;
  librados::IoCtx io_ctx;
  librbd::Image image;
  librbd::image_info_t info;
  HANDLE parent_pipe_handle = INVALID_HANDLE_VALUE;
  int err = 0;

  if (g_conf()->daemonize && cfg->parent_pipe.empty()) {
    return send_map_request(get_cli_args());
  }

  dout(0) << "Mapping RBD image: " << cfg->devpath << dendl;

  r = rados.init_with_context(g_ceph_context);
  if (r < 0) {
    derr << "rbd-wnbd: couldn't initialize rados: " << cpp_strerror(r)
         << dendl;
    goto close_ret;
  }

  r = rados.connect();
  if (r < 0) {
    derr << "rbd-wnbd: couldn't connect to rados: " << cpp_strerror(r)
         << dendl;
    goto close_ret;
  }

  r = rados.ioctx_create(cfg->poolname.c_str(), io_ctx);
  if (r < 0) {
    derr << "rbd-wnbd: couldn't create IO context: " << cpp_strerror(r)
         << dendl;
    goto close_ret;
  }

  io_ctx.set_namespace(cfg->nsname);

  r = rbd.open(io_ctx, image, cfg->imgname.c_str());
  if (r < 0) {
    derr << "rbd-wnbd: couldn't open rbd image: " << cpp_strerror(r)
         << dendl;
    goto close_ret;
  }

  if (cfg->exclusive) {
    r = image.lock_acquire(RBD_LOCK_MODE_EXCLUSIVE);
    if (r < 0) {
      derr << "rbd-wnbd: failed to acquire exclusive lock: " << cpp_strerror(r)
           << dendl;
      goto close_ret;
    }
  }

  if (!cfg->snapname.empty()) {
    r = image.snap_set(cfg->snapname.c_str());
    if (r < 0) {
      derr << "rbd-wnbd: couldn't use snapshot: " << cpp_strerror(r)
           << dendl;
      goto close_ret;
    }
  }

  r = image.stat(info, sizeof(info));
  if (r < 0)
    goto close_ret;

  if (info.size > _UI64_MAX) {
    r = -EFBIG;
    derr << "rbd-wnbd: image is too large (" << byte_u_t(info.size)
         << ", max is " << byte_u_t(_UI64_MAX) << ")" << dendl;
    goto close_ret;
  }

  // We're storing mapping details in the registry even for non-persistent
  // mappings. This allows us to easily retrieve mapping details such
  // as the rbd pool or admin socket path.
  // We're cleaning up the registry entry when the non-persistent mapping
  // gets disconnected or when the ceph service restarts.
  r = save_config_to_registry(cfg);
  if (r < 0)
    goto close_ret;

  // Snapshots are always exposed read-only.
  handler = new WnbdHandler(image, cfg->devpath,
                            info.size / RBD_WNBD_BLKSIZE,
                            RBD_WNBD_BLKSIZE,
                            !cfg->snapname.empty() || cfg->readonly,
                            g_conf().get_val<bool>("rbd_cache"),
                            cfg->io_req_workers,
                            cfg->io_reply_workers);
  r = handler->start();
  if (r) {
    r = r == ERROR_ALREADY_EXISTS ? -EEXIST : -EINVAL;
    goto close_ret;
  }

  // We're informing the parent processes that the initialization
  // was successful.
  if (!cfg->parent_pipe.empty()) {
    parent_pipe_handle = CreateFile(
      cfg->parent_pipe.c_str(), GENERIC_WRITE, 0, NULL,
      OPEN_EXISTING, 0, NULL);
    if (parent_pipe_handle == INVALID_HANDLE_VALUE) {
      // Fetch the actual error code. The previous behavior reported the
      // stale "err" value, which was always 0 at this point.
      err = GetLastError();
      derr << "Could not open parent pipe: " << win32_strerror(err) << dendl;
    } else if (!WriteFile(parent_pipe_handle, "a", 1, NULL, NULL)) {
      // TODO: consider exiting in this case. The parent didn't wait for us,
      // maybe it was killed after a timeout.
      err = GetLastError();
      derr << "Failed to communicate with the parent: "
           << win32_strerror(err) << dendl;
    } else {
      dout(5) << __func__ << ": submitted parent notification." << dendl;
    }

    if (parent_pipe_handle != INVALID_HANDLE_VALUE)
      CloseHandle(parent_pipe_handle);

    global_init_postfork_finish(g_ceph_context);
  }

  {
    uint64_t watch_handle;
    // React to image size changes (and similar notifications) while the
    // mapping is active.
    WNBDWatchCtx watch_ctx(io_ctx, handler, image, info.size);
    r = image.update_watch(&watch_ctx, &watch_handle);
    if (r < 0) {
      derr << __func__ << ": update_watch failed with error: "
           << cpp_strerror(r) << dendl;

      handler->shutdown();
      goto close_ret;
    }

    // Blocks until the device gets disconnected.
    handler->wait();

    r = image.update_unwatch(watch_handle);
    if (r < 0)
      derr << __func__ << ": update_unwatch failed with error: "
           << cpp_strerror(r) << dendl;

    handler->shutdown();
  }

close_ret:
  // The registry record shouldn't be removed for (already) running mappings.
  if (!cfg->persistent) {
    dout(5) << __func__ << ": cleaning up non-persistent mapping: "
            << cfg->devpath << dendl;
    // Use a separate variable so that a cleanup failure (or success)
    // doesn't mask an earlier mapping error stored in "r".
    int cleanup_r = remove_config_from_registry(cfg);
    if (cleanup_r) {
      derr << __func__ << ": could not clean up non-persistent mapping: "
           << cfg->devpath << dendl;
      if (!r) {
        r = cleanup_r;
      }
    }
  }

  std::unique_lock l{shutdown_lock};

  image.close();
  io_ctx.close();
  rados.shutdown();
  if (handler) {
    delete handler;
    handler = nullptr;
  }

  return r;
}
// Disconnect the given WNBD device. ERROR_FILE_NOT_FOUND (i.e. the device
// is already disconnected) is not treated as a failure. When "unregister"
// is set, the registry record of the mapping is removed as well.
static int do_unmap(Config *cfg, bool unregister)
{
  WNBD_REMOVE_OPTIONS remove_options = {0};
  remove_options.Flags.HardRemove = cfg->hard_disconnect;
  remove_options.Flags.HardRemoveFallback = cfg->hard_disconnect_fallback;
  remove_options.SoftRemoveTimeoutMs = cfg->soft_disconnect_timeout * 1000;
  remove_options.SoftRemoveRetryIntervalMs = SOFT_REMOVE_RETRY_INTERVAL * 1000;

  int err = WnbdRemoveEx(cfg->devpath.c_str(), &remove_options);
  if (err && err != ERROR_FILE_NOT_FOUND) {
    // Log the underlying Windows error code instead of silently
    // collapsing it to -EINVAL.
    derr << "rbd-wnbd: failed to disconnect device: "
         << cfg->devpath << ". Error: " << err << dendl;
    return -EINVAL;
  }

  if (unregister) {
    err = remove_config_from_registry(cfg);
    if (err) {
      derr << "rbd-wnbd: failed to unregister device: "
           << cfg->devpath << ". Error: " << err << dendl;
      return -EINVAL;
    }
  }
  return 0;
}
// Parse an "[pool/[namespace/]]image[@snap]" spec, populating the
// corresponding cfg fields.
//
// Returns 0 on success and -EINVAL for malformed specs. Errors are
// reported through "err_msg" when provided (this runs before logging is
// fully initialized), falling back to the log otherwise.
static int parse_imgpath(const std::string &imgpath, Config *cfg,
                         std::ostream *err_msg)
{
  std::regex pattern("^(?:([^/]+)/(?:([^/@]+)/)?)?([^@]+)(?:@([^/@]+))?$");
  std::smatch match;
  if (!std::regex_match(imgpath, match, pattern)) {
    // Previously, "err_msg" was accepted but never used, so callers
    // relying on it would print an empty error message.
    if (err_msg) {
      *err_msg << "rbd-wnbd: invalid spec '" << imgpath << "'";
    } else {
      derr << "rbd-wnbd: invalid spec '" << imgpath << "'" << dendl;
    }
    return -EINVAL;
  }

  if (match[1].matched) {
    cfg->poolname = match[1];
  }

  if (match[2].matched) {
    cfg->nsname = match[2];
  }

  cfg->imgname = match[3];

  if (match[4].matched)
    cfg->snapname = match[4];

  return 0;
}
// List all rbd-wnbd mappings (both active ones and inactive registry
// records), either as a plain text table or using the requested
// formatter ("json" / "xml"). "plain" and "" select the table output.
static int do_list_mapped_devices(const std::string &format, bool pretty_format)
{
  std::unique_ptr<ceph::Formatter> f;
  TextTable tbl;

  if (format == "json") {
    f.reset(new JSONFormatter(pretty_format));
  } else if (format == "xml") {
    f.reset(new XMLFormatter(pretty_format));
  } else if (!format.empty() && format != "plain") {
    derr << "rbd-wnbd: invalid output format: " << format << dendl;
    return -EINVAL;
  }

  if (f) {
    f->open_array_section("devices");
  } else {
    tbl.define_column("id", TextTable::LEFT, TextTable::LEFT);
    tbl.define_column("pool", TextTable::LEFT, TextTable::LEFT);
    tbl.define_column("namespace", TextTable::LEFT, TextTable::LEFT);
    tbl.define_column("image", TextTable::LEFT, TextTable::LEFT);
    tbl.define_column("snap", TextTable::LEFT, TextTable::LEFT);
    tbl.define_column("device", TextTable::LEFT, TextTable::LEFT);
    tbl.define_column("disk_number", TextTable::LEFT, TextTable::LEFT);
    tbl.define_column("status", TextTable::LEFT, TextTable::LEFT);
  }

  Config cfg;
  WNBDDiskIterator wnbd_disk_iterator;

  // The iterator yields one Config per known mapping.
  while (wnbd_disk_iterator.get(&cfg)) {
    const char* status = cfg.active ?
      WNBD_STATUS_ACTIVE : WNBD_STATUS_INACTIVE;

    if (f) {
      f->open_object_section("device");
      // -1 marks values that are unavailable (e.g. inactive mappings).
      f->dump_int("id", cfg.pid ? cfg.pid : -1);
      f->dump_string("device", cfg.devpath);
      f->dump_string("pool", cfg.poolname);
      f->dump_string("namespace", cfg.nsname);
      f->dump_string("image", cfg.imgname);
      f->dump_string("snap", cfg.snapname);
      f->dump_int("disk_number", cfg.disk_number ? cfg.disk_number : -1);
      f->dump_string("status", status);
      f->close_section();
    } else {
      if (cfg.snapname.empty()) {
        cfg.snapname = "-";
      }
      tbl << (cfg.pid ? cfg.pid : -1) << cfg.poolname << cfg.nsname
          << cfg.imgname << cfg.snapname << cfg.devpath
          << cfg.disk_number << status << TextTable::endrow;
    }
  }
  int error = wnbd_disk_iterator.get_error();
  if (error) {
    derr << "Could not get disk list: " << error << dendl;
    return error;
  }

  if (f) {
    f->close_section();
    f->flush(std::cout);
  } else {
    std::cout << tbl;
  }

  return 0;
}
// Show detailed information about a single mapping, combining the
// registry record with the live driver connection info (when available).
// Plain output is not supported here; it defaults to pretty JSON.
static int do_show_mapped_device(std::string format, bool pretty_format,
                                 std::string devpath)
{
  std::unique_ptr<ceph::Formatter> f;
  TextTable tbl;

  if (format.empty() || format == "plain") {
    format = "json";
    pretty_format = true;
  }
  if (format == "json") {
    f.reset(new JSONFormatter(pretty_format));
  } else if (format == "xml") {
    f.reset(new XMLFormatter(pretty_format));
  } else {
    derr << "rbd-wnbd: invalid output format: " << format << dendl;
    return -EINVAL;
  }

  Config cfg;
  int error = load_mapping_config_from_registry(devpath, &cfg);
  if (error) {
    derr << "Could not load registry disk info for: "
         << devpath << ". Error: " << error << dendl;
    return error;
  }

  WNBD_CONNECTION_INFO conn_info = { 0 };
  // If the device is currently disconnected but there is a persistent
  // mapping record, we'll show that.
  DWORD ret = WnbdShow(devpath.c_str(), &conn_info);
  if (ret && ret != ERROR_FILE_NOT_FOUND) {
    return -EINVAL;
  }

  auto conn_props = conn_info.Properties;
  // A mapping counts as active only if the disk is attached and the
  // owning process is still alive.
  cfg.active = conn_info.DiskNumber > 0 && is_process_running(conn_props.Pid);
  f->open_object_section("device");
  // -1 marks values that are unavailable (e.g. disconnected mappings).
  f->dump_int("id", conn_props.Pid ? conn_props.Pid : -1);
  f->dump_string("device", cfg.devpath);
  f->dump_string("pool", cfg.poolname);
  f->dump_string("namespace", cfg.nsname);
  f->dump_string("image", cfg.imgname);
  f->dump_string("snap", cfg.snapname);
  f->dump_int("persistent", cfg.persistent);
  f->dump_int("disk_number", conn_info.DiskNumber ? conn_info.DiskNumber : -1);
  f->dump_string("status", cfg.active ? WNBD_STATUS_ACTIVE : WNBD_STATUS_INACTIVE);
  f->dump_string("pnp_device_id", to_string(conn_info.PNPDeviceID));
  f->dump_int("readonly", conn_props.Flags.ReadOnly);
  f->dump_int("block_size", conn_props.BlockSize);
  f->dump_int("block_count", conn_props.BlockCount);
  f->dump_int("flush_enabled", conn_props.Flags.FlushSupported);
  f->close_section();
  f->flush(std::cout);

  return 0;
}
static int do_stats(std::string search_devpath)
{
Config cfg;
WNBDDiskIterator wnbd_disk_iterator;
while (wnbd_disk_iterator.get(&cfg)) {
if (cfg.devpath != search_devpath)
continue;
AdminSocketClient client = AdminSocketClient(cfg.admin_sock_path);
std::string output;
std::string result = client.do_request("{\"prefix\":\"wnbd stats\"}",
&output);
if (!result.empty()) {
std::cerr << "Admin socket error: " << result << std::endl;
return -EINVAL;
}
std::cout << output << std::endl;
return 0;
}
int error = wnbd_disk_iterator.get_error();
if (!error) {
error = -ENOENT;
}
derr << "Could not find the specified disk." << dendl;
return error;
}
// Parse the CLI arguments: handle the common Ceph options first, then the
// rbd-wnbd specific flags, and finally the command name plus its
// positional argument (device path or image spec).
//
// Returns 0 on success, HELP_INFO/VERSION_INFO for informational requests
// and a negative error code otherwise, with details in "err_msg".
static int parse_args(std::vector<const char*>& args,
                      std::ostream *err_msg,
                      Command *command, Config *cfg)
{
  std::string conf_file_list;
  std::string cluster;
  // Consumes --cluster/--conf/--id style early arguments.
  CephInitParameters iparams = ceph_argparse_early_args(
    args, CEPH_ENTITY_TYPE_CLIENT, &cluster, &conf_file_list);

  ConfigProxy config{false};
  config->name = iparams.name;
  config->cluster = cluster;

  if (!conf_file_list.empty()) {
    config.parse_config_files(conf_file_list.c_str(), nullptr, 0);
  } else {
    config.parse_config_files(nullptr, nullptr, 0);
  }
  config.parse_env(CEPH_ENTITY_TYPE_CLIENT);
  config.parse_argv(args);
  // Used when the image spec doesn't include an explicit pool.
  cfg->poolname = config.get_val<std::string>("rbd_default_pool");

  std::vector<const char*>::iterator i;
  std::ostringstream err;

  // TODO: consider using boost::program_options like Device.cc does.
  // This should simplify argument parsing. Also, some arguments must be tied
  // to specific commands, for example the disconnect timeout. Luckily,
  // this is enforced by the "rbd device" wrapper.
  // NOTE: the ceph_argparse_* helpers erase matched arguments, advancing
  // "i" only when nothing matched.
  for (i = args.begin(); i != args.end(); ) {
    if (ceph_argparse_flag(args, i, "-h", "--help", (char*)NULL)) {
      return HELP_INFO;
    } else if (ceph_argparse_flag(args, i, "-v", "--version", (char*)NULL)) {
      return VERSION_INFO;
    } else if (ceph_argparse_witharg(args, i, &cfg->devpath, "--device", (char *)NULL)) {
    } else if (ceph_argparse_witharg(args, i, &cfg->format, err, "--format",
                                     (char *)NULL)) {
    } else if (ceph_argparse_flag(args, i, "--read-only", (char *)NULL)) {
      cfg->readonly = true;
    } else if (ceph_argparse_flag(args, i, "--exclusive", (char *)NULL)) {
      cfg->exclusive = true;
    } else if (ceph_argparse_flag(args, i, "--non-persistent", (char *)NULL)) {
      cfg->persistent = false;
    } else if (ceph_argparse_flag(args, i, "--pretty-format", (char *)NULL)) {
      cfg->pretty_format = true;
    } else if (ceph_argparse_flag(args, i, "--remap-failure-fatal", (char *)NULL)) {
      cfg->remap_failure_fatal = true;
    } else if (ceph_argparse_flag(args, i, "--adapter-monitoring-enabled", (char *)NULL)) {
      cfg->adapter_monitoring_enabled = true;
    } else if (ceph_argparse_witharg(args, i, &cfg->parent_pipe, err,
                                     "--pipe-name", (char *)NULL)) {
      if (!err.str().empty()) {
        *err_msg << "rbd-wnbd: " << err.str();
        return -EINVAL;
      }
    } else if (ceph_argparse_witharg(args, i, (int*)&cfg->wnbd_log_level,
                                     err, "--wnbd-log-level", (char *)NULL)) {
      if (!err.str().empty()) {
        *err_msg << "rbd-wnbd: " << err.str();
        return -EINVAL;
      }
      if (cfg->wnbd_log_level < 0) {
        *err_msg << "rbd-wnbd: Invalid argument for wnbd-log-level";
        return -EINVAL;
      }
    } else if (ceph_argparse_witharg(args, i, (int*)&cfg->io_req_workers,
                                     err, "--io-req-workers", (char *)NULL)) {
      if (!err.str().empty()) {
        *err_msg << "rbd-wnbd: " << err.str();
        return -EINVAL;
      }
      if (cfg->io_req_workers <= 0) {
        *err_msg << "rbd-wnbd: Invalid argument for io-req-workers";
        return -EINVAL;
      }
    } else if (ceph_argparse_witharg(args, i, (int*)&cfg->io_reply_workers,
                                     err, "--io-reply-workers", (char *)NULL)) {
      if (!err.str().empty()) {
        *err_msg << "rbd-wnbd: " << err.str();
        return -EINVAL;
      }
      if (cfg->io_reply_workers <= 0) {
        *err_msg << "rbd-wnbd: Invalid argument for io-reply-workers";
        return -EINVAL;
      }
    } else if (ceph_argparse_witharg(args, i, (int*)&cfg->service_thread_count,
                                     err, "--service-thread-count", (char *)NULL)) {
      if (!err.str().empty()) {
        *err_msg << "rbd-wnbd: " << err.str();
        return -EINVAL;
      }
      if (cfg->service_thread_count <= 0) {
        *err_msg << "rbd-wnbd: Invalid argument for service-thread-count";
        return -EINVAL;
      }
    } else if (ceph_argparse_flag(args, i, "--hard-disconnect", (char *)NULL)) {
      cfg->hard_disconnect = true;
    } else if (ceph_argparse_flag(args, i,
                                  "--no-hard-disconnect-fallback", (char *)NULL)) {
      cfg->hard_disconnect_fallback = false;
    } else if (ceph_argparse_witharg(args, i,
                                     (int*)&cfg->soft_disconnect_timeout,
                                     err, "--soft-disconnect-timeout",
                                     (char *)NULL)) {
      if (!err.str().empty()) {
        *err_msg << "rbd-wnbd: " << err.str();
        return -EINVAL;
      }
      if (cfg->soft_disconnect_timeout < 0) {
        *err_msg << "rbd-wnbd: Invalid argument for soft-disconnect-timeout";
        return -EINVAL;
      }
    } else if (ceph_argparse_witharg(args, i,
                                     (int*)&cfg->service_start_timeout,
                                     err, "--start-timeout",
                                     (char *)NULL)) {
      if (!err.str().empty()) {
        *err_msg << "rbd-wnbd: " << err.str();
        return -EINVAL;
      }
      if (cfg->service_start_timeout <= 0) {
        *err_msg << "rbd-wnbd: Invalid argument for start-timeout";
        return -EINVAL;
      }
    } else if (ceph_argparse_witharg(args, i,
                                     (int*)&cfg->image_map_timeout,
                                     err, "--map-timeout",
                                     (char *)NULL)) {
      if (!err.str().empty()) {
        *err_msg << "rbd-wnbd: " << err.str();
        return -EINVAL;
      }
      if (cfg->image_map_timeout <= 0) {
        *err_msg << "rbd-wnbd: Invalid argument for map-timeout";
        return -EINVAL;
      }
    } else {
      ++i;
    }
  }

  // The first remaining argument is the command name.
  Command cmd = None;
  if (args.begin() != args.end()) {
    if (strcmp(*args.begin(), "map") == 0) {
      cmd = Connect;
    } else if (strcmp(*args.begin(), "unmap") == 0) {
      cmd = Disconnect;
    } else if (strcmp(*args.begin(), "list") == 0) {
      cmd = List;
    } else if (strcmp(*args.begin(), "show") == 0) {
      cmd = Show;
    } else if (strcmp(*args.begin(), "service") == 0) {
      cmd = Service;
    } else if (strcmp(*args.begin(), "stats") == 0) {
      cmd = Stats;
    } else if (strcmp(*args.begin(), "help") == 0) {
      return HELP_INFO;
    } else {
      *err_msg << "rbd-wnbd: unknown command: " << *args.begin();
      return -EINVAL;
    }
    args.erase(args.begin());
  }

  if (cmd == None) {
    *err_msg << "rbd-wnbd: must specify command";
    return -EINVAL;
  }

  // Commands operating on a single mapping take one positional argument:
  // a device path or an image/snapshot spec.
  switch (cmd) {
    case Connect:
    case Disconnect:
    case Show:
    case Stats:
      if (args.begin() == args.end()) {
        *err_msg << "rbd-wnbd: must specify wnbd device or image-or-snap-spec";
        return -EINVAL;
      }
      if (parse_imgpath(*args.begin(), cfg, err_msg) < 0) {
        return -EINVAL;
      }
      args.erase(args.begin());
      break;
    default:
      //shut up gcc;
      break;
  }

  if (args.begin() != args.end()) {
    *err_msg << "rbd-wnbd: unknown args: " << *args.begin();
    return -EINVAL;
  }

  *command = cmd;
  return 0;
}
// Main entry point of the rbd-wnbd CLI: parses arguments, performs
// global Ceph initialization and dispatches the requested command.
static int rbd_wnbd(int argc, const char *argv[])
{
  Config cfg;
  auto args = argv_to_vec(argc, argv);

  // Avoid using dout before calling "do_global_init"
  if (args.empty()) {
    std::cout << argv[0] << ": -h or --help for usage" << std::endl;
    exit(1);
  }

  std::ostringstream err_msg;
  int r = parse_args(args, &err_msg, &cmd, &cfg);
  if (r == HELP_INFO) {
    usage();
    return 0;
  } else if (r == VERSION_INFO) {
    std::cout << pretty_version_to_str() << std::endl;
    return 0;
  } else if (r < 0) {
    std::cout << err_msg.str() << std::endl;
    return r;
  }

  auto cct = do_global_init(argc, argv, &cfg);

  // Forward WNBD library messages to the Ceph log.
  WnbdSetLogger(WnbdHandler::LogMessage);
  WnbdSetLogLevel(cfg.wnbd_log_level);

  switch (cmd) {
    case Connect:
      if (construct_devpath_if_missing(&cfg)) {
        return -EINVAL;
      }
      r = do_map(&cfg);
      if (r < 0)
        return r;
      break;
    case Disconnect:
      if (construct_devpath_if_missing(&cfg)) {
        return -EINVAL;
      }
      r = do_unmap(&cfg, true);
      if (r < 0)
        return r;
      break;
    case List:
      r = do_list_mapped_devices(cfg.format, cfg.pretty_format);
      if (r < 0)
        return r;
      break;
    case Show:
      if (construct_devpath_if_missing(&cfg)) {
        // "r" is 0 at this point, so "return r" would have incorrectly
        // reported success. Return an explicit error instead, matching
        // the other commands.
        return -EINVAL;
      }
      r = do_show_mapped_device(cfg.format, cfg.pretty_format, cfg.devpath);
      if (r < 0)
        return r;
      break;
    case Service:
      {
        RBDService service(cfg.hard_disconnect, cfg.soft_disconnect_timeout,
                           cfg.service_thread_count,
                           cfg.service_start_timeout,
                           cfg.image_map_timeout,
                           cfg.remap_failure_fatal,
                           cfg.adapter_monitoring_enabled);
        // This call will block until the service stops.
        r = RBDService::initialize(&service);
        if (r < 0)
          return r;
        break;
      }
    case Stats:
      if (construct_devpath_if_missing(&cfg)) {
        return -EINVAL;
      }
      return do_stats(cfg.devpath);
    default:
      usage();
      break;
  }

  return 0;
}
int main(int argc, const char *argv[])
{
  // Handle CTRL+C / console close events so that mappings are cleaned up.
  SetConsoleCtrlHandler(console_handler_routine, true);
  // Avoid the Windows Error Reporting dialog.
  SetErrorMode(GetErrorMode() | SEM_NOGPFAULTERRORBOX);

  int ret = rbd_wnbd(argc, argv);
  return ret < 0 ? ret : 0;
}
| 58,098 | 30.035791 | 95 | cc |
null | ceph-main/src/tools/rbd_wnbd/rbd_wnbd.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef RBD_WNBD_H
#define RBD_WNBD_H
#include <string.h>
#include <iostream>
#include <vector>
#include "include/compat.h"
#include "common/win32/registry.h"
#include "wnbd_handler.h"
#define SERVICE_REG_KEY "SYSTEM\\CurrentControlSet\\Services\\rbd-wnbd"
#define SERVICE_PIPE_NAME "\\\\.\\pipe\\rbd-wnbd"
#define SERVICE_PIPE_TIMEOUT_MS 5000
#define SERVICE_PIPE_BUFFSZ 4096
#define DEFAULT_MAP_TIMEOUT_MS 30000
#define RBD_WNBD_BLKSIZE 512UL
#define DEFAULT_SERVICE_START_TIMEOUT 120
#define DEFAULT_IMAGE_MAP_TIMEOUT 20
#define HELP_INFO 1
#define VERSION_INFO 2
#define WNBD_STATUS_ACTIVE "active"
#define WNBD_STATUS_INACTIVE "inactive"
#define DEFAULT_SERVICE_THREAD_COUNT 8
// Handler of the mapping served by this process (set by do_map).
static WnbdHandler* handler = nullptr;
// Taken around the teardown sequence in do_map; presumably also used by
// the console ctrl handler to serialize shutdown -- confirm.
ceph::mutex shutdown_lock = ceph::make_mutex("RbdWnbd::ShutdownLock");
// Per-mapping configuration, populated from CLI arguments and/or loaded
// back from the Windows registry record of an existing mapping.
struct Config {
  bool exclusive = false;     // acquire an exclusive image lock before mapping
  bool readonly = false;      // expose the disk read-only
  std::string parent_pipe;    // named pipe used to notify the parent process
                              // once the mapping is up
  std::string poolname;
  std::string nsname;
  std::string imgname;
  std::string snapname;
  std::string devpath;
  std::string format;         // output format for "list" / "show"
  bool pretty_format = false;
  bool hard_disconnect = false;
  int soft_disconnect_timeout = DEFAULT_SOFT_REMOVE_TIMEOUT;
  bool hard_disconnect_fallback = true;
  int service_start_timeout = DEFAULT_SERVICE_START_TIMEOUT;
  int image_map_timeout = DEFAULT_IMAGE_MAP_TIMEOUT;
  bool remap_failure_fatal = false;
  bool adapter_monitoring_enabled = false;
  // TODO: consider moving those fields to a separate structure. Those
  // provide connection information without actually being configurable.
  // The disk number is provided by Windows.
  int disk_number = -1;
  int pid = 0;
  std::string serial_number;
  bool active = false;
  bool wnbd_mapped = false;
  std::string command_line;
  std::string admin_sock_path;
  WnbdLogLevel wnbd_log_level = WnbdLogLevelInfo;
  int io_req_workers = DEFAULT_IO_WORKER_COUNT;
  int io_reply_workers = DEFAULT_IO_WORKER_COUNT;
  int service_thread_count = DEFAULT_SERVICE_THREAD_COUNT;
  // register the mapping, recreating it when the Ceph service starts.
  bool persistent = true;
};
// Commands accepted by the rbd-wnbd CLI.
enum Command {
  None,
  Connect,     // "map"
  Disconnect,  // "unmap"
  List,
  Show,
  Service,
  Stats
};
// Message submitted to the rbd-wnbd service through its named pipe.
// NOTE(review): "arguments" looks like a variable-length trailing
// payload -- confirm against the service request handling code.
typedef struct {
  Command command;
  BYTE arguments[1];
} ServiceRequest;
// Status returned by the service for a ServiceRequest.
typedef struct {
  int status;
} ServiceReply;
// Forward declarations, implemented in rbd_wnbd.cc.
bool is_process_running(DWORD pid);
void unmap_at_exit();
// Disconnect every rbd-wnbd mapping.
int disconnect_all_mappings(
    bool unregister,
    bool hard_disconnect,
    int soft_disconnect_timeout,
    int worker_count);
// Re-create the mappings registered in the Windows registry.
int restart_registered_mappings(
    int worker_count, int total_timeout, int image_map_timeout);
int map_device_using_suprocess(std::string command_line);
int construct_devpath_if_missing(Config* cfg);
// Registry persistence helpers for mapping records.
int save_config_to_registry(Config* cfg);
int remove_config_from_registry(Config* cfg);
int load_mapping_config_from_registry(std::string devpath, Config* cfg);
BOOL WINAPI console_handler_routine(DWORD dwCtrlType);
static int parse_args(std::vector<const char*>& args,
                      std::ostream *err_msg,
                      Command *command, Config *cfg);
static int do_unmap(Config *cfg, bool unregister);
// Common interface for the mapping iterators below. "get" fills in the
// next mapping's Config and returns false when exhausted (or on error,
// in which case get_error() is non-zero).
class BaseIterator {
  public:
    virtual ~BaseIterator() {};
    virtual bool get(Config *cfg) = 0;
    int get_error() {
      return error;
    }
  protected:
    int error = 0;      // sticky error encountered while iterating
    int index = -1;     // position within the underlying listing
};
// Iterate over mapped devices, retrieving info from the driver.
// Iterate over mapped devices, retrieving info from the driver.
class WNBDActiveDiskIterator : public BaseIterator {
  public:
    WNBDActiveDiskIterator();
    ~WNBDActiveDiskIterator();
    bool get(Config *cfg);
  private:
    // Connection list fetched from the WNBD driver, owned by this iterator.
    PWNBD_CONNECTION_LIST conn_list = NULL;
    static DWORD fetch_list(PWNBD_CONNECTION_LIST* conn_list);
};
// Iterate over the Windows registry key, retrieving registered mappings.
// Iterate over the Windows registry key, retrieving registered mappings.
class RegistryDiskIterator : public BaseIterator {
  public:
    RegistryDiskIterator();
    ~RegistryDiskIterator() {
      delete reg_key;
    }
    bool get(Config *cfg);
  private:
    DWORD subkey_count = 0;        // number of registered mappings
    char subkey_name[MAX_PATH];
    RegistryKey* reg_key = NULL;   // owned; freed by the destructor
};
// Iterate over all RBD mappings, getting info from the registry and driver.
// Iterate over all RBD mappings, getting info from the registry and driver.
class WNBDDiskIterator : public BaseIterator {
  public:
    bool get(Config *cfg);
  private:
    // We'll keep track of the active devices so that registry records of
    // already-yielded mappings are not reported twice.
    std::set<std::string> active_devices;

    WNBDActiveDiskIterator active_iterator;
    RegistryDiskIterator registry_iterator;
};
#endif // RBD_WNBD_H
| 4,685 | 23.154639 | 76 | h |
null | ceph-main/src/tools/rbd_wnbd/wnbd_handler.cc | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rbd
#include "wnbd_handler.h"
#define _NTSCSI_USER_MODE_
#include <rpc.h>
#include <ddk/scsi.h>
#include <boost/thread/tss.hpp>
#include "common/debug.h"
#include "common/errno.h"
#include "common/safe_io.h"
#include "common/SubProcess.h"
#include "common/Formatter.h"
#include "global/global_context.h"
#include "rbd_wnbd.h"
// Tear down the handler: request disk removal, drain the reply workers
// and release the WNBD disk.
// NOTE(review): reply_tpool and admin_hook are only freed when the
// handler was started; presumably they are allocated unconditionally in
// the constructor, which would leak them if start() was never called or
// failed -- confirm.
WnbdHandler::~WnbdHandler()
{
  if (started && wnbd_disk) {
    dout(10) << __func__ << ": terminating" << dendl;

    shutdown();
    reply_tpool->join();

    WnbdClose(wnbd_disk);

    started = false;

    delete reply_tpool;
    delete admin_hook;
  }
}
int WnbdHandler::wait()
{
int err = 0;
if (started && wnbd_disk) {
dout(10) << __func__ << ": waiting" << dendl;
err = WnbdWaitDispatcher(wnbd_disk);
if (err) {
derr << __func__ << " failed waiting for dispatcher to stop: "
<< err << dendl;
}
}
return err;
}
// Admin socket dispatcher. "wnbd stats" is the only registered command;
// anything else is unsupported.
int WnbdAdminHook::call (
  std::string_view command, const cmdmap_t& cmdmap,
  const bufferlist&,
  Formatter *f,
  std::ostream& errss,
  bufferlist& out)
{
  if (command != "wnbd stats") {
    return -ENOSYS;
  }
  return m_handler->dump_stats(f);
}
// Dump the userspace IO counters of this disk using the provided
// formatter. Backs the "wnbd stats" admin socket command.
int WnbdHandler::dump_stats(Formatter *f)
{
  if (!f) {
    return -EINVAL;
  }

  WNBD_USR_STATS stats = { 0 };
  DWORD err = WnbdGetUserspaceStats(wnbd_disk, &stats);
  if (err) {
    derr << "Failed to retrieve WNBD userspace stats. Error: " << err << dendl;
    return -EINVAL;
  }

  f->open_object_section("stats");
  f->dump_int("TotalReceivedRequests", stats.TotalReceivedRequests);
  f->dump_int("TotalSubmittedRequests", stats.TotalSubmittedRequests);
  f->dump_int("TotalReceivedReplies", stats.TotalReceivedReplies);
  f->dump_int("UnsubmittedRequests", stats.UnsubmittedRequests);
  f->dump_int("PendingSubmittedRequests", stats.PendingSubmittedRequests);
  f->dump_int("PendingReplies", stats.PendingReplies);
  f->dump_int("ReadErrors", stats.ReadErrors);
  f->dump_int("WriteErrors", stats.WriteErrors);
  f->dump_int("FlushErrors", stats.FlushErrors);
  f->dump_int("UnmapErrors", stats.UnmapErrors);
  f->dump_int("InvalidRequests", stats.InvalidRequests);
  f->dump_int("TotalRWRequests", stats.TotalRWRequests);
  f->dump_int("TotalReadBlocks", stats.TotalReadBlocks);
  f->dump_int("TotalWrittenBlocks", stats.TotalWrittenBlocks);
  f->close_section();

  return 0;
}
// Request disk removal and wait for the dispatcher to drain. Guarded by
// shutdown_lock and the "terminated" flag, so repeated calls are safe.
void WnbdHandler::shutdown()
{
  std::unique_lock l{shutdown_lock};
  if (!terminated && wnbd_disk) {
    // We're requesting the disk to be removed but continue serving IO
    // requests until the driver sends us the "Disconnect" event.
    // TODO: expose PWNBD_REMOVE_OPTIONS, we're using the defaults ATM.
    WnbdRemove(wnbd_disk, NULL);
    wait();
    terminated = true;
  }
}
// Completion callback for all librbd AIO requests: translate the librbd
// result into SCSI sense data and hand the response over to the reply
// thread pool, which sends it back to the driver.
void WnbdHandler::aio_callback(librbd::completion_t cb, void *arg)
{
  librbd::RBD::AioCompletion *aio_completion =
    reinterpret_cast<librbd::RBD::AioCompletion*>(cb);

  WnbdHandler::IOContext* ctx = static_cast<WnbdHandler::IOContext*>(arg);
  int ret = aio_completion->get_return_value();

  dout(20) << __func__ << ": " << *ctx << dendl;

  if (ret == -EINVAL) {
    // if shrinking an image, a pagecache writeback might reference
    // extents outside of the range of the new image extents
    dout(0) << __func__ << ": masking IO out-of-bounds error" << *ctx << dendl;
    ctx->data.clear();
    ret = 0;
  }

  if (ret < 0) {
    ctx->err_code = -ret;
    // TODO: check the actual error.
    ctx->set_sense(SCSI_SENSE_MEDIUM_ERROR,
                   SCSI_ADSENSE_UNRECOVERED_ERROR);
  } else if ((ctx->req_type == WnbdReqTypeRead) &&
              ret < static_cast<int>(ctx->req_size)) {
    // Short read: zero-fill the remainder up to the requested size.
    int pad_byte_count = static_cast<int> (ctx->req_size) - ret;
    ctx->data.append_zero(pad_byte_count);
    dout(20) << __func__ << ": " << *ctx << ": Pad byte count: "
             << pad_byte_count << dendl;
    ctx->err_code = 0;
  } else {
    ctx->err_code = 0;
  }

  // The response is sent from the reply thread pool; the context is
  // released there (send_io_response takes ownership).
  boost::asio::post(
    *ctx->handler->reply_tpool,
    [&, ctx]()
    {
      ctx->handler->send_io_response(ctx);
    });

  aio_completion->release();
}
// Send a completed IO reply back to the WNBD driver. Takes ownership of
// "ctx", which is destroyed when this returns.
void WnbdHandler::send_io_response(WnbdHandler::IOContext *ctx) {
  std::unique_ptr<WnbdHandler::IOContext> pctx{ctx};
  ceph_assert(WNBD_DEFAULT_MAX_TRANSFER_LENGTH >= pctx->data.length());

  WNBD_IO_RESPONSE wnbd_rsp = {0};
  wnbd_rsp.RequestHandle = pctx->req_handle;
  wnbd_rsp.RequestType = pctx->req_type;
  wnbd_rsp.Status = pctx->wnbd_status;
  int err = 0;

  // Use TLS to store an overlapped structure so that we avoid
  // recreating one each time we send a reply.
  static boost::thread_specific_ptr<OVERLAPPED> overlapped_tls(
    // Cleanup routine
    [](LPOVERLAPPED p_overlapped)
    {
      if (p_overlapped->hEvent) {
        CloseHandle(p_overlapped->hEvent);
      }
      delete p_overlapped;
    });

  LPOVERLAPPED overlapped = overlapped_tls.get();
  if (!overlapped)
  {
    // First reply on this thread: lazily create the per-thread
    // OVERLAPPED structure and its event.
    overlapped = new OVERLAPPED{0};
    HANDLE overlapped_evt = CreateEventA(0, TRUE, TRUE, NULL);
    if (!overlapped_evt) {
      err = GetLastError();
      derr << "Could not create event. Error: " << err << dendl;
      return;
    }

    overlapped->hEvent = overlapped_evt;
    overlapped_tls.reset(overlapped);
  }

  if (!ResetEvent(overlapped->hEvent)) {
    err = GetLastError();
    derr << "Could not reset event. Error: " << err << dendl;
    return;
  }

  err = WnbdSendResponseEx(
    pctx->handler->wnbd_disk,
    &wnbd_rsp,
    pctx->data.c_str(),
    pctx->data.length(),
    overlapped);
  if (err == ERROR_IO_PENDING) {
    DWORD returned_bytes = 0;
    err = 0;
    // We've got ERROR_IO_PENDING, which means that the operation is in
    // progress. We'll use GetOverlappedResult to wait for it to complete
    // and then retrieve the result.
    if (!GetOverlappedResult(pctx->handler->wnbd_disk, overlapped,
                             &returned_bytes, TRUE)) {
      err = GetLastError();
      derr << "Could not send response. Request id: " << wnbd_rsp.RequestHandle
           << ". Error: " << err << dendl;
    }
  }
}
// Set the SCSI sense data (including the "information" field) reported
// to the initiator for this request.
void WnbdHandler::IOContext::set_sense(uint8_t sense_key, uint8_t asc, uint64_t info)
{
  WnbdSetSenseEx(&wnbd_status, sense_key, asc, info);
}

// Set the SCSI sense data reported to the initiator for this request.
void WnbdHandler::IOContext::set_sense(uint8_t sense_key, uint8_t asc)
{
  WnbdSetSense(&wnbd_status, sense_key, asc);
}
// WNBD dispatcher callback: submit an async librbd read for the
// requested block range. The reply (including the data, accumulated in
// ctx->data) is sent from aio_callback.
void WnbdHandler::Read(
  PWNBD_DISK Disk,
  UINT64 RequestHandle,
  PVOID Buffer,
  UINT64 BlockAddress,
  UINT32 BlockCount,
  BOOLEAN ForceUnitAccess)
{
  WnbdHandler* handler = nullptr;
  ceph_assert(!WnbdGetUserContext(Disk, (PVOID*)&handler));

  WnbdHandler::IOContext* ctx = new WnbdHandler::IOContext();
  ctx->handler = handler;
  ctx->req_handle = RequestHandle;
  ctx->req_type = WnbdReqTypeRead;
  // Convert the block range to a byte range.
  ctx->req_size = BlockCount * handler->block_size;
  ctx->req_from = BlockAddress * handler->block_size;
  ceph_assert(ctx->req_size <= WNBD_DEFAULT_MAX_TRANSFER_LENGTH);

  int op_flags = 0;
  if (ForceUnitAccess) {
    op_flags |= LIBRADOS_OP_FLAG_FADVISE_FUA;
  }

  dout(20) << *ctx << ": start" << dendl;

  librbd::RBD::AioCompletion *c = new librbd::RBD::AioCompletion(ctx, aio_callback);
  handler->image.aio_read2(ctx->req_from, ctx->req_size, ctx->data, c, op_flags);

  dout(20) << *ctx << ": submitted" << dendl;
}
// WNBD dispatcher callback: submit an async librbd write for the
// requested block range. The data buffer is wrapped (not copied) into a
// bufferlist. The reply is sent from aio_callback.
void WnbdHandler::Write(
  PWNBD_DISK Disk,
  UINT64 RequestHandle,
  PVOID Buffer,
  UINT64 BlockAddress,
  UINT32 BlockCount,
  BOOLEAN ForceUnitAccess)
{
  WnbdHandler* handler = nullptr;
  ceph_assert(!WnbdGetUserContext(Disk, (PVOID*)&handler));

  WnbdHandler::IOContext* ctx = new WnbdHandler::IOContext();
  ctx->handler = handler;
  ctx->req_handle = RequestHandle;
  ctx->req_type = WnbdReqTypeWrite;
  // Convert the block range to a byte range.
  ctx->req_size = BlockCount * handler->block_size;
  ctx->req_from = BlockAddress * handler->block_size;

  bufferptr ptr((char*)Buffer, ctx->req_size);
  ctx->data.push_back(ptr);

  int op_flags = 0;
  if (ForceUnitAccess) {
    op_flags |= LIBRADOS_OP_FLAG_FADVISE_FUA;
  }

  dout(20) << *ctx << ": start" << dendl;

  librbd::RBD::AioCompletion *c = new librbd::RBD::AioCompletion(ctx, aio_callback);
  handler->image.aio_write2(ctx->req_from, ctx->req_size, ctx->data, c, op_flags);

  dout(20) << *ctx << ": submitted" << dendl;
}
// WNBD dispatcher callback: submit an async librbd flush. The reply is
// sent from aio_callback.
void WnbdHandler::Flush(
  PWNBD_DISK Disk,
  UINT64 RequestHandle,
  UINT64 BlockAddress,
  UINT32 BlockCount)
{
  WnbdHandler* handler = nullptr;
  ceph_assert(!WnbdGetUserContext(Disk, (PVOID*)&handler));

  WnbdHandler::IOContext* ctx = new WnbdHandler::IOContext();
  ctx->handler = handler;
  ctx->req_handle = RequestHandle;
  ctx->req_type = WnbdReqTypeFlush;
  // Kept for logging purposes; aio_flush operates on the whole image.
  ctx->req_size = BlockCount * handler->block_size;
  ctx->req_from = BlockAddress * handler->block_size;

  dout(20) << *ctx << ": start" << dendl;

  librbd::RBD::AioCompletion *c = new librbd::RBD::AioCompletion(ctx, aio_callback);
  handler->image.aio_flush(c);

  dout(20) << *ctx << ": submitted" << dendl;
}
// WNBD dispatcher callback: submit an async librbd discard for the
// requested block range. Only a single unmap descriptor is supported
// (MaxUnmapDescCount is set to 1 in start()). The reply is sent from
// aio_callback.
void WnbdHandler::Unmap(
  PWNBD_DISK Disk,
  UINT64 RequestHandle,
  PWNBD_UNMAP_DESCRIPTOR Descriptors,
  UINT32 Count)
{
  WnbdHandler* handler = nullptr;
  ceph_assert(!WnbdGetUserContext(Disk, (PVOID*)&handler));
  ceph_assert(1 == Count);

  WnbdHandler::IOContext* ctx = new WnbdHandler::IOContext();
  ctx->handler = handler;
  ctx->req_handle = RequestHandle;
  ctx->req_type = WnbdReqTypeUnmap;
  // Convert the block range to a byte range.
  ctx->req_size = Descriptors[0].BlockCount * handler->block_size;
  ctx->req_from = Descriptors[0].BlockAddress * handler->block_size;

  dout(20) << *ctx << ": start" << dendl;

  librbd::RBD::AioCompletion *c = new librbd::RBD::AioCompletion(ctx, aio_callback);
  handler->image.aio_discard(ctx->req_from, ctx->req_size, c);

  dout(20) << *ctx << ": submitted" << dendl;
}
// Forward libwnbd log messages to the Ceph log (registered through
// WnbdSetLogger).
void WnbdHandler::LogMessage(
    WnbdLogLevel LogLevel,
    const char* Message,
    const char* FileName,
    UINT32 Line,
    const char* FunctionName)
{
  // We're already passing the log level to WNBD, so we'll use the highest
  // log level here.
  dout(0) << "libwnbd.dll!" << FunctionName << " "
          << WnbdLogLevelToStr(LogLevel) << " " << Message << dendl;
}
int WnbdHandler::resize(uint64_t new_size)
{
int err = 0;
uint64_t new_block_count = new_size / block_size;
dout(5) << "Resizing disk. Block size: " << block_size
<< ". New block count: " << new_block_count
<< ". Old block count: "
<< wnbd_disk->Properties.BlockCount << "." << dendl;
err = WnbdSetDiskSize(wnbd_disk, new_block_count);
if (err) {
derr << "WNBD: Setting disk size failed with error: "
<< win32_strerror(err) << dendl;
return -EINVAL;
}
dout(5) << "Successfully resized disk to: " << new_block_count << " blocks"
<< dendl;
return 0;
}
// Create the WNBD disk and start its IO dispatcher threads.
// Returns 0 on success or a Windows error code.
int WnbdHandler::start()
{
  int err = 0;
  WNBD_PROPERTIES wnbd_props = {0};

  // wnbd_props is zero initialized. Copy at most size - 1 characters so
  // that InstanceName is guaranteed to stay NULL terminated even if the
  // name has to be truncated (std::string::copy doesn't append a NULL
  // character and was previously allowed to fill the entire buffer).
  instance_name.copy(wnbd_props.InstanceName,
                     sizeof(wnbd_props.InstanceName) - 1);
  ceph_assert(strlen(RBD_WNBD_OWNER_NAME) < WNBD_MAX_OWNER_LENGTH);
  strncpy(wnbd_props.Owner, RBD_WNBD_OWNER_NAME, WNBD_MAX_OWNER_LENGTH);

  wnbd_props.BlockCount = block_count;
  wnbd_props.BlockSize = block_size;
  wnbd_props.MaxUnmapDescCount = 1;

  wnbd_props.Flags.ReadOnly = readonly;
  wnbd_props.Flags.UnmapSupported = 1;
  if (rbd_cache_enabled) {
    // Only expose flush / FUA when the librbd cache can buffer writes.
    wnbd_props.Flags.FUASupported = 1;
    wnbd_props.Flags.FlushSupported = 1;
  }

  err = WnbdCreate(&wnbd_props, &RbdWnbdInterface, this, &wnbd_disk);
  if (err)
    goto exit;

  started = true;

  err = WnbdStartDispatcher(wnbd_disk, io_req_workers);
  if (err) {
    derr << "Could not start WNBD dispatcher. Error: " << err << dendl;
  }

exit:
  return err;
}
// Log helper: "[<handle> <TYPE> <offset>~<length> <err_code>]".
std::ostream &operator<<(std::ostream &os, const WnbdHandler::IOContext &ctx) {

  os << "[" << std::hex << ctx.req_handle;

  switch (ctx.req_type)
  {
  case WnbdReqTypeRead:
    os << " READ ";
    break;
  case WnbdReqTypeWrite:
    os << " WRITE ";
    break;
  case WnbdReqTypeFlush:
    os << " FLUSH ";
    break;
  case WnbdReqTypeUnmap:
    os << " TRIM ";
    break;
  default:
    os << " UNKNOWN(" << ctx.req_type << ") ";
    break;
  }

  // err_code is stored in host order (see aio_callback), so print it
  // directly. The previous ntohl() call was a leftover from the rbd-nbd
  // wire format and garbled the value on little-endian hosts.
  os << ctx.req_from << "~" << ctx.req_size << " "
     << std::dec << ctx.err_code << "]";

  return os;
}
| 12,469 | 26.286652 | 85 | cc |
null | ceph-main/src/tools/rbd_wnbd/wnbd_handler.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef WNBD_HANDLER_H
#define WNBD_HANDLER_H
#include <wnbd.h>
#include "common/admin_socket.h"
#include "common/ceph_context.h"
#include "common/Thread.h"
#include "include/rbd/librbd.hpp"
#include "include/xlist.h"
#include "global/global_context.h"
// TODO: make this configurable.
#define RBD_WNBD_MAX_TRANSFER 2 * 1024 * 1024
#define SOFT_REMOVE_RETRY_INTERVAL 2
#define DEFAULT_SOFT_REMOVE_TIMEOUT 15
#define DEFAULT_IO_WORKER_COUNT 4
// Not defined by mingw.
#ifndef SCSI_ADSENSE_UNRECOVERED_ERROR
#define SCSI_ADSENSE_UNRECOVERED_ERROR 0x11
#endif
// The following will be assigned to the "Owner" field of the WNBD
// parameters, which can be used to determine the application managing
// a disk. We'll ignore other disks.
#define RBD_WNBD_OWNER_NAME "ceph-rbd-wnbd"
class WnbdHandler;
class WnbdAdminHook : public AdminSocketHook {
WnbdHandler *m_handler;
public:
explicit WnbdAdminHook(WnbdHandler *handler) :
m_handler(handler) {
g_ceph_context->get_admin_socket()->register_command(
"wnbd stats", this, "get WNBD stats");
}
~WnbdAdminHook() override {
g_ceph_context->get_admin_socket()->unregister_commands(this);
}
int call(std::string_view command, const cmdmap_t& cmdmap,
const bufferlist&,
Formatter *f, std::ostream& errss, bufferlist& out) override;
};
class WnbdHandler
{
private:
librbd::Image ℑ
std::string instance_name;
uint64_t block_count;
uint32_t block_size;
bool readonly;
bool rbd_cache_enabled;
uint32_t io_req_workers;
uint32_t io_reply_workers;
WnbdAdminHook* admin_hook;
boost::asio::thread_pool* reply_tpool;
public:
WnbdHandler(librbd::Image& _image, std::string _instance_name,
uint64_t _block_count, uint32_t _block_size,
bool _readonly, bool _rbd_cache_enabled,
uint32_t _io_req_workers,
uint32_t _io_reply_workers)
: image(_image)
, instance_name(_instance_name)
, block_count(_block_count)
, block_size(_block_size)
, readonly(_readonly)
, rbd_cache_enabled(_rbd_cache_enabled)
, io_req_workers(_io_req_workers)
, io_reply_workers(_io_reply_workers)
{
admin_hook = new WnbdAdminHook(this);
// Instead of relying on librbd's own thread pool, we're going to use a
// separate one. This allows us to make assumptions on the threads that
// are going to send the IO replies and thus be able to cache Windows
// OVERLAPPED structures.
reply_tpool = new boost::asio::thread_pool(_io_reply_workers);
}
int resize(uint64_t new_size);
int start();
// Wait for the handler to stop, which normally happens when the driver
// passes the "Disconnect" request.
int wait();
void shutdown();
int dump_stats(Formatter *f);
~WnbdHandler();
static VOID LogMessage(
WnbdLogLevel LogLevel,
const char* Message,
const char* FileName,
UINT32 Line,
const char* FunctionName);
private:
ceph::mutex shutdown_lock = ceph::make_mutex("WnbdHandler::DisconnectLocker");
bool started = false;
bool terminated = false;
WNBD_DISK* wnbd_disk = nullptr;
struct IOContext
{
xlist<IOContext*>::item item;
WnbdHandler *handler = nullptr;
WNBD_STATUS wnbd_status = {0};
WnbdRequestType req_type = WnbdReqTypeUnknown;
uint64_t req_handle = 0;
uint32_t err_code = 0;
size_t req_size;
uint64_t req_from;
bufferlist data;
IOContext()
: item(this)
{}
void set_sense(uint8_t sense_key, uint8_t asc, uint64_t info);
void set_sense(uint8_t sense_key, uint8_t asc);
};
friend std::ostream &operator<<(std::ostream &os, const IOContext &ctx);
void send_io_response(IOContext *ctx);
static void aio_callback(librbd::completion_t cb, void *arg);
// WNBD IO entry points
static void Read(
PWNBD_DISK Disk,
UINT64 RequestHandle,
PVOID Buffer,
UINT64 BlockAddress,
UINT32 BlockCount,
BOOLEAN ForceUnitAccess);
static void Write(
PWNBD_DISK Disk,
UINT64 RequestHandle,
PVOID Buffer,
UINT64 BlockAddress,
UINT32 BlockCount,
BOOLEAN ForceUnitAccess);
static void Flush(
PWNBD_DISK Disk,
UINT64 RequestHandle,
UINT64 BlockAddress,
UINT32 BlockCount);
static void Unmap(
PWNBD_DISK Disk,
UINT64 RequestHandle,
PWNBD_UNMAP_DESCRIPTOR Descriptors,
UINT32 Count);
static constexpr WNBD_INTERFACE RbdWnbdInterface =
{
Read,
Write,
Flush,
Unmap,
};
};
std::ostream &operator<<(std::ostream &os, const WnbdHandler::IOContext &ctx);
#endif // WNBD_HANDLER_H
| 4,886 | 24.857143 | 80 | h |
null | ceph-main/src/tools/rbd_wnbd/wnbd_wmi.cc | /*
* Ceph - scalable distributed file system
*
* Copyright (c) 2019 SUSE LLC
* Copyright (C) 2022 Cloudbase Solutions
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "wnbd_wmi.h"
#include "common/debug.h"
#include "common/win32/wstring.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "rbd-wnbd: "
// Initializes the COM library for use by the calling thread using
// COINIT_MULTITHREADED.
static HRESULT co_initialize_basic()
{
dout(10) << "initializing COM library" << dendl;
HRESULT hres = CoInitializeEx(0, COINIT_MULTITHREADED);
if (FAILED(hres)) {
derr << "CoInitializeEx failed. HRESULT: " << hres << dendl;
return hres;
}
// CoInitializeSecurity must be called once per process.
static bool com_security_flags_set = false;
if (!com_security_flags_set) {
hres = CoInitializeSecurity(
NULL, -1, NULL, NULL,
RPC_C_AUTHN_LEVEL_DEFAULT,
RPC_C_IMP_LEVEL_IMPERSONATE,
NULL,
EOAC_NONE,
NULL);
if (FAILED(hres)) {
derr << "CoInitializeSecurity failed. HRESULT: " << hres << dendl;
CoUninitialize();
return hres;
}
com_security_flags_set = true;
}
return 0;
}
// co_uninitialize must be called once for every successful
// co_initialize_basic call. Any WMI objects (including connections,
// event subscriptions, etc) must be released beforehand.
static void co_uninitialize()
{
dout(10) << "closing COM library" << dendl;
CoUninitialize();
}
HRESULT COMBootstrapper::initialize()
{
std::unique_lock l{init_lock};
HRESULT hres = co_initialize_basic();
if (!FAILED(hres)) {
initialized = true;
}
return hres;
}
void COMBootstrapper::cleanup()
{
if (initialized) {
co_uninitialize();
initialized = false;
}
}
void WmiConnection::close()
{
dout(20) << "closing wmi conn: " << this
<< ", svc: " << wbem_svc
<< ", loc: " << wbem_loc << dendl;
if (wbem_svc != NULL) {
wbem_svc->Release();
wbem_svc = NULL;
}
if (wbem_loc != NULL) {
wbem_loc->Release();
wbem_loc = NULL;
}
}
HRESULT WmiConnection::initialize()
{
HRESULT hres = CoCreateInstance(
CLSID_WbemLocator, 0, CLSCTX_INPROC_SERVER,
IID_IWbemLocator, (LPVOID*)&wbem_loc);
if (FAILED(hres)) {
derr << "CoCreateInstance failed. HRESULT: " << hres << dendl;
return hres;
}
hres = wbem_loc->ConnectServer(
_bstr_t(ns.c_str()).GetBSTR(), NULL, NULL, NULL,
WBEM_FLAG_CONNECT_USE_MAX_WAIT, NULL, NULL,
&wbem_svc);
if (FAILED(hres)) {
derr << "Could not connect to WMI service. HRESULT: " << hres << dendl;
return hres;
}
if (!wbem_svc) {
hres = MAKE_HRESULT(SEVERITY_ERROR, FACILITY_WIN32,
ERROR_INVALID_HANDLE);
derr << "WMI connection failed, no WMI service object received." << dendl;
return hres;
}
hres = CoSetProxyBlanket(
wbem_svc, RPC_C_AUTHN_WINNT, RPC_C_AUTHZ_NONE, NULL,
RPC_C_AUTHN_LEVEL_CALL, RPC_C_IMP_LEVEL_IMPERSONATE, NULL, EOAC_NONE);
if (FAILED(hres)) {
derr << "CoSetProxyBlanket failed. HRESULT:" << hres << dendl;
}
return hres;
}
HRESULT get_property_str(
IWbemClassObject* cls_obj,
const std::wstring& property,
std::wstring& value)
{
VARIANT vt_prop;
VariantInit(&vt_prop);
HRESULT hres = cls_obj->Get(property.c_str(), 0, &vt_prop, 0, 0);
if (!FAILED(hres)) {
VARIANT vt_bstr_prop;
VariantInit(&vt_bstr_prop);
hres = VariantChangeType(&vt_bstr_prop, &vt_prop, 0, VT_BSTR);
if (!FAILED(hres)) {
value = vt_bstr_prop.bstrVal;
}
VariantClear(&vt_bstr_prop);
}
VariantClear(&vt_prop);
if (FAILED(hres)) {
derr << "Could not get WMI property: " << to_string(property)
<< ". HRESULT: " << hres << dendl;
}
return hres;
}
HRESULT get_property_int(
IWbemClassObject* cls_obj,
const std::wstring& property,
uint32_t& value)
{
VARIANT vt_prop;
VariantInit(&vt_prop);
HRESULT hres = cls_obj->Get(property.c_str(), 0, &vt_prop, 0, 0);
if (!FAILED(hres)) {
VARIANT vt_uint_prop;
VariantInit(&vt_uint_prop);
hres = VariantChangeType(&vt_uint_prop, &vt_prop, 0, VT_UINT);
if (!FAILED(hres)) {
value = vt_uint_prop.intVal;
}
VariantClear(&vt_uint_prop);
}
VariantClear(&vt_prop);
if (FAILED(hres)) {
derr << "Could not get WMI property: " << to_string(property)
<< ". HRESULT: " << hres << dendl;
}
return hres;
}
HRESULT WmiSubscription::initialize()
{
HRESULT hres = conn.initialize();
if (FAILED(hres)) {
derr << "Could not create WMI connection" << dendl;
return hres;
}
hres = conn.wbem_svc->ExecNotificationQuery(
_bstr_t(L"WQL").GetBSTR(),
_bstr_t(query.c_str()).GetBSTR(),
WBEM_FLAG_FORWARD_ONLY | WBEM_FLAG_RETURN_IMMEDIATELY,
NULL,
&event_enum);
if (FAILED(hres)) {
derr << "Notification query failed, unable to subscribe to "
<< "WMI events. HRESULT: " << hres << dendl;
} else {
dout(20) << "wmi subscription initialized: " << this
<< ", event enum: " << event_enum
<< ", conn: " << &conn << ", conn svc: " << conn.wbem_svc << dendl;
}
return hres;
}
void WmiSubscription::close()
{
dout(20) << "closing wmi subscription: " << this
<< ", event enum: " << event_enum << dendl;
if (event_enum != NULL) {
event_enum->Release();
event_enum = NULL;
}
}
HRESULT WmiSubscription::next(
long timeout,
ULONG count,
IWbemClassObject **objects,
ULONG *returned)
{
if (!event_enum) {
HRESULT hres = MAKE_HRESULT(
SEVERITY_ERROR, FACILITY_WIN32,
ERROR_INVALID_HANDLE);
derr << "WMI subscription uninitialized." << dendl;
return hres;
}
HRESULT hres = event_enum->Next(timeout, count, objects, returned);
if (FAILED(hres)) {
derr << "Unable to retrieve WMI events. HRESULT: "
<< hres << dendl;
}
return hres;
}
WmiSubscription subscribe_wnbd_adapter_events(
uint32_t interval)
{
std::wostringstream query_stream;
query_stream
<< L"SELECT * FROM __InstanceOperationEvent "
<< L"WITHIN " << interval
<< L"WHERE TargetInstance ISA 'Win32_ScsiController' "
<< L"AND TargetInstance.Description="
<< L"'WNBD SCSI Virtual Adapter'";
return WmiSubscription(L"root\\cimv2", query_stream.str());
}
| 6,511 | 23.854962 | 78 | cc |
null | ceph-main/src/tools/rbd_wnbd/wnbd_wmi.h | /*
* Ceph - scalable distributed file system
*
* Copyright (c) 2019 SUSE LLC
* Copyright (C) 2022 Cloudbase Solutions
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <comutil.h>
#define _WIN32_DCOM
#include <wbemcli.h>
#include <string>
#include <vector>
#include "common/ceph_mutex.h"
// Convenience helper for initializing and cleaning up the
// Windows COM library using "COINIT_MULTITHREADED" concurrency mode.
// Any WMI objects (including connections, event subscriptions, etc)
// must be released before the COM library gets closed.
class COMBootstrapper
{
private:
bool initialized = false;
ceph::mutex init_lock = ceph::make_mutex("COMBootstrapper::InitLocker");
public:
HRESULT initialize();
void cleanup();
~COMBootstrapper()
{
cleanup();
}
};
class WmiConnection
{
private:
std::wstring ns;
public:
IWbemLocator* wbem_loc;
IWbemServices* wbem_svc;
WmiConnection(std::wstring ns)
: ns(ns)
, wbem_loc(nullptr)
, wbem_svc(nullptr)
{
}
~WmiConnection()
{
close();
}
HRESULT initialize();
void close();
};
HRESULT get_property_str(
IWbemClassObject* cls_obj,
const std::wstring& property,
std::wstring& value);
HRESULT get_property_int(
IWbemClassObject* cls_obj,
const std::wstring& property,
uint32_t& value);
class WmiSubscription
{
private:
std::wstring query;
WmiConnection conn;
IEnumWbemClassObject *event_enum;
public:
WmiSubscription(std::wstring ns, std::wstring query)
: query(query)
, conn(WmiConnection(ns))
, event_enum(nullptr)
{
}
~WmiSubscription()
{
close();
}
HRESULT initialize();
void close();
// IEnumWbemClassObject::Next wrapper
HRESULT next(
long timeout,
ULONG count,
IWbemClassObject **objects,
ULONG *returned);
};
WmiSubscription subscribe_wnbd_adapter_events(uint32_t interval);
| 2,059 | 17.727273 | 74 | h |
null | ceph-main/src/tools/rgw/parse-cr-dump.py | #!/usr/bin/python
from __future__ import print_function
from collections import Counter
import argparse
import json
import re
import sys
def gen_mgrs(args, cr_dump):
""" traverse and return one manager at a time """
mgrs = cr_dump['coroutine_managers']
if args.manager is not None:
yield mgrs[args.manager]
else:
for mgr in mgrs:
yield mgr
def gen_stacks(args, cr_dump):
""" traverse and return one stack at a time """
for mgr in gen_mgrs(args, cr_dump):
for ctx in mgr['run_contexts']:
for stack in ctx['entries']:
yield stack
def gen_ops(args, cr_dump):
""" traverse and return one op at a time """
for stack in gen_stacks(args, cr_dump):
for op in stack['ops']:
yield stack, op
def op_status(op):
""" return op status or (none) """
# "status": {"status": "...", "timestamp": "..."}
return op.get('status', {}).get('status', '(none)')
def do_crs(args, cr_dump):
""" print a sorted list of coroutines """
counter = Counter()
if args.group == 'status':
print('Count:\tStatus:')
for _, op in gen_ops(args, cr_dump):
if args.filter and not re.search(args.filter, op['type']):
continue
counter[op_status(op)] += 1
else:
print('Count:\tCoroutine:')
for _, op in gen_ops(args, cr_dump):
name = op['type']
if args.filter and not re.search(args.filter, name):
continue
counter[name] += 1
crs = counter.most_common();
if args.order == 'asc':
crs.reverse()
if args.limit:
crs = crs[:args.limit]
for op in crs:
print('%d\t%s' % (op[1], op[0]))
print('Total:', sum(counter.values()))
return 0
def match_ops(name, ops):
""" return true if any op matches the given filter """
for op in ops:
if re.search(name, op):
return True
return False
def do_stacks(args, cr_dump):
""" print a list of coroutine stacks """
print('Stack:\t\tCoroutines:')
count = 0
for stack in gen_stacks(args, cr_dump):
stack_id = stack['stack']
ops = [op['type'] for op in stack['ops']]
if args.filter and not match_ops(args.filter, ops):
continue
if args.limit and count == args.limit:
print('...')
break
print('%s\t%s' % (stack_id, ', '.join(ops)))
count += 1
print('Total:', count)
return 0
def traverse_spawned_stacks(args, stack, depth, stacks, callback):
""" recurse through spawned stacks, passing each op to the callback """
for op in stack['ops']:
# only filter ops in base stack
if depth == 0 and args.filter and not re.search(args.filter, op['type']):
continue
if not callback(stack, op, depth):
return False
for spawned in op.get('spawned', []):
s = stacks.get(spawned)
if not s:
continue
if not traverse_spawned_stacks(args, s, depth + 1, stacks, callback):
return False
return True
def do_stack(args, cr_dump):
""" inspect a given stack and its descendents """
# build a lookup table of stacks by id
stacks = {s['stack']: s for s in gen_stacks(args, cr_dump)}
stack = stacks.get(args.stack)
if not stack:
print('Stack %s not found' % args.stack, file=sys.stderr)
return 1
do_stack.count = 0 # for use in closure
def print_stack_op(stack, op, depth):
indent = ' ' * depth * 4
if args.limit and do_stack.count == args.limit:
print('%s...' % indent)
return False # stop traversal
do_stack.count += 1
print('%s[%s] %s: %s' % (indent, stack['stack'], op['type'], op_status(op)))
return True
traverse_spawned_stacks(args, stack, 0, stacks, print_stack_op)
return 0
def do_spawned(args, cr_dump):
""" search all ops for the given spawned stack """
for stack, op in gen_ops(args, cr_dump):
if args.stack in op.get('spawned', []):
print('Stack %s spawned by [%s] %s' % (args.stack, stack['stack'], op['type']))
return 0
print('Stack %s not spawned' % args.stack, file=sys.stderr)
return 1
def main():
parser = argparse.ArgumentParser(description='Parse and inspect the output of the "cr dump" admin socket command.')
parser.add_argument('--filename', type=argparse.FileType(), default=sys.stdin, help='Input filename (or stdin if empty)')
parser.add_argument('--filter', type=str, help='Filter by coroutine type (regex syntax is supported)')
parser.add_argument('--limit', type=int)
parser.add_argument('--manager', type=int, help='Index into coroutine_managers[]')
subparsers = parser.add_subparsers()
crs_parser = subparsers.add_parser('crs', help='Produce a sorted list of coroutines')
crs_parser.add_argument('--group', type=str, choices=['type', 'status'])
crs_parser.add_argument('--order', type=str, choices=['desc', 'asc'])
crs_parser.set_defaults(func=do_crs)
stacks_parser = subparsers.add_parser('stacks', help='Produce a list of coroutine stacks and their ops')
stacks_parser.set_defaults(func=do_stacks)
stack_parser = subparsers.add_parser('stack', help='Inspect a given coroutine stack')
stack_parser.add_argument('stack', type=str)
stack_parser.set_defaults(func=do_stack)
spawned_parser = subparsers.add_parser('spawned', help='Find the op that spawned the given stack')
spawned_parser.add_argument('stack', type=str)
spawned_parser.set_defaults(func=do_spawned)
args = parser.parse_args()
return args.func(args, json.load(args.filename))
if __name__ == "__main__":
result = main()
sys.exit(result)
| 5,850 | 33.621302 | 125 | py |
null | ceph-main/src/tracing/README.md | Installation
============
The LTTng libraries that ship with Ubuntu 12.04 have been very buggy, and the
generated header files using `lttng-gen-tp` have needed to be fixed just to
compile in the Ceph tree. The packages available in Ubuntu 14.04 seem to work
alright, and for older versions please install LTTng from the LTTng PPA.
https://launchpad.net/~lttng/+archive/ppa
Then install as normal
apt-get install lttng-tools liblttng-ust-dev
Add/Update Provider
===================
## Create tracepoint definition file
Add tracepoint definitions for the provider into a `.tp` file. Documentation
on defining a tracepoint can be found in `man lttng-ust`. By convention files
are named according to the logical sub-system they correspond to (e.g.
`mutex.tp`, `pg.tp`). And add a C source file to be compiled into the tracepoint
provider shared object, in which `TRACEPOINT_DEFINE` should be defined. See
[LTTng document](http://lttng.org/docs/#doc-dynamic-linking) for details.
Place the `.tp` and the `.c` files into the `src/tracing` directory
and modify the CMake file `src/tracing/CMakeLists.txt` accordingly.
Function Instrumentation
========================
Ceph supports instrumentation using GCC's `-finstrument-functions` flag.
Supported CMake flags are:
* `-DWITH_OSD_INSTRUMENT_FUNCTIONS=ON`: instrument OSD code
Note that this instrumentation adds an extra function call on each function entry
and exit of Ceph code. This option is currently only supported with GCC. Using it
with Clang has no effect.
The only function tracing implementation at the moment is done using LTTng UST.
In order to use it, Ceph needs to be configured with LTTng using `-DWITH_LTTNG=ON`.
[TraceCompass](http://www.tracecompass.org) can be used to generate flame
charts/graphs and other metrics.
It is also possible to use [libbabeltrace](http://diamon.org/babeltrace/#docs)
to write custom analysis. The entry and exit tracepoints are called
`lttng_ust_cyg_profile:func_enter` and `lttng_ust_cyg_profile:func_exit`
respectively. The payload variable `addr` holds the address of the function
called and the payload variable `call_site` holds the address where it is called.
`nm` can be used to resolve function addresses (`addr` to function name).
| 2,257 | 42.423077 | 83 | md |
null | ceph-main/src/tracing/bluestore.c | #define TRACEPOINT_CREATE_PROBES
/*
* The header containing our TRACEPOINT_EVENTs.
*/
#include "tracing/bluestore.h"
| 119 | 19 | 47 | c |
null | ceph-main/src/tracing/cyg_profile.c | #define TRACEPOINT_CREATE_PROBES
/*
* The header containing our TRACEPOINT_EVENTs.
*/
#include "tracing/cyg_profile.h"
| 122 | 16.571429 | 47 | c |
null | ceph-main/src/tracing/cyg_profile_functions.c | #include "acconfig.h"
#ifdef WITH_LTTNG
#define TRACEPOINT_DEFINE
#define TRACEPOINT_PROBE_DYNAMIC_LINKAGE
#include "tracing/cyg_profile.h"
#undef TRACEPOINT_PROBE_DYNAMIC_LINKAGE
#undef TRACEPOINT_DEFINE
#endif
void __cyg_profile_func_enter(void *this_fn, void *call_site)
__attribute__((no_instrument_function));
void __cyg_profile_func_exit(void *this_fn, void *call_site)
__attribute__((no_instrument_function));
void __cyg_profile_func_enter(void *this_fn, void *call_site)
{
#ifdef WITH_LTTNG
tracepoint(lttng_ust_cyg_profile, func_entry, this_fn, call_site);
#endif
}
void __cyg_profile_func_exit(void *this_fn, void *call_site)
{
#ifdef WITH_LTTNG
tracepoint(lttng_ust_cyg_profile, func_exit, this_fn, call_site);
#endif
}
| 754 | 22.59375 | 70 | c |
null | ceph-main/src/tracing/eventtrace.c | #define TRACEPOINT_CREATE_PROBES
/*
* The header containing our TRACEPOINT_EVENTs.
*/
#include "tracing/eventtrace.h"
| 120 | 19.166667 | 47 | c |
null | ceph-main/src/tracing/librados.c |
#define TRACEPOINT_CREATE_PROBES
/*
* The header containing our TRACEPOINT_EVENTs.
*/
#include "tracing/librados.h"
| 119 | 16.142857 | 47 | c |
null | ceph-main/src/tracing/librbd.c |
#define TRACEPOINT_CREATE_PROBES
/*
* The header containing our TRACEPOINT_EVENTs.
*/
#include "tracing/librbd.h"
| 117 | 15.857143 | 47 | c |
null | ceph-main/src/tracing/objectstore.c |
#define TRACEPOINT_CREATE_PROBES
/*
* The header containing our TRACEPOINT_EVENTs.
*/
#include "tracing/objectstore.h"
| 122 | 16.571429 | 47 | c |
null | ceph-main/src/tracing/oprequest.c |
#define TRACEPOINT_CREATE_PROBES
/*
* The header containing our TRACEPOINT_EVENTs.
*/
#include "tracing/oprequest.h"
| 120 | 16.285714 | 47 | c |
null | ceph-main/src/tracing/osd.c |
#define TRACEPOINT_CREATE_PROBES
/*
* The header containing our TRACEPOINT_EVENTs.
*/
#include "tracing/osd.h"
| 114 | 15.428571 | 47 | c |
null | ceph-main/src/tracing/pg.c |
#define TRACEPOINT_CREATE_PROBES
/*
* The header containing our TRACEPOINT_EVENTs.
*/
#include "tracing/pg.h"
| 113 | 15.285714 | 47 | c |
null | ceph-main/src/tracing/rgw_op.c |
#define TRACEPOINT_CREATE_PROBES
/*
* The header containing our TRACEPOINT_EVENTs.
*/
#include "tracing/rgw_op.h"
| 117 | 15.857143 | 47 | c |
null | ceph-main/src/tracing/rgw_rados.c |
#define TRACEPOINT_CREATE_PROBES
/*
* The header containing our TRACEPOINT_EVENTs.
*/
#include "tracing/rgw_rados.h"
| 120 | 16.285714 | 47 | c |
null | ceph-main/src/tracing/tracing-common.h | #if !defined(TRACING_COMMON_H)
#define TRACING_COMMON_H
// Amount of buffer data to dump when using ceph_ctf_sequence or ceph_ctf_sequencep.
// If 0, then *_data field is omitted entirely.
#if !defined(CEPH_TRACE_BUF_TRUNC_LEN)
#define CEPH_TRACE_BUF_TRUNC_LEN 0u
#endif
// TODO: This is GCC-specific. Replace CEPH_MAX and CEPH_MIN with standard macros, if possible.
#define CEPH_MAX(a,b) \
({ __typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a > _b ? _a : _b; })
#define CEPH_MIN(a,b) \
({ __typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a < _b ? _a : _b; })
// type should be an integer type
// val should have type type*
#define ceph_ctf_integerp(type, field, val) \
ctf_integer(type, field, (val) == NULL ? 0 : (val)) \
ctf_integer(uint8_t, field##_isnull, (val) == NULL)
// val should have type char*
#define ceph_ctf_string(field, val) \
ctf_string(field, (val) == NULL ? "" : (val)) \
ctf_integer(uint8_t, field##_isnull, (val) == NULL)
// val should have type char**
#define ceph_ctf_stringp(field, val) \
ctf_string(field, ((val) == NULL || *(val) == NULL) ? "" : *(val)) \
ctf_integer(uint8_t, field##_isnull, (val) == NULL) \
ctf_integer(uint8_t, field##_data_isnull, (val) == NULL || *(val) == NULL)
// val should have type type*
// lenval should have type lentype
#if CEPH_TRACE_BUF_TRUNC_LEN > 0
#define ceph_ctf_sequence(type, field, val, lentype, lenval) \
ctf_integer_hex(void*, field, val) \
ctf_sequence(type, field##_data, (val) == NULL ? "" : (val), lentype, (val) == NULL ? 0 : CEPH_MIN((lenval), CEPH_TRACE_BUF_TRUNC_LEN)) \
ctf_integer(uint8_t, field##_isnull, (val) == NULL) \
ctf_integer(lentype, field##_len, lenval)
#else
#define ceph_ctf_sequence(type, field, val, lentype, lenval) \
ctf_integer_hex(void*, field, val) \
ctf_integer(uint8_t, field##_isnull, (val) == NULL) \
ctf_integer(lentype, field##_len, lenval)
#endif
// val should have type type**
// lenval should have type lentype*
#if CEPH_TRACE_BUF_TRUNC_LEN > 0
#define ceph_ctf_sequencep(type, field, val, lentype, lenval) \
ctf_integer_hex(void*, field, val) \
ctf_sequence(type, \
field##_data, \
((val) == NULL || *(val) == NULL) ? "" : *(val), \
lentype, \
((val) == NULL || *(val) == NULL || (lenval) == NULL) ? 0 : CEPH_MIN(*(lenval), CEPH_TRACE_BUF_TRUNC_LEN)) \
ctf_integer(uint8_t, field##_isnull, (val) == NULL) \
ctf_integer(uint8_t, field##_data_isnull, ((val) == NULL || *(val) == NULL)) \
ctf_integer(lentype, field##_len, (lenval) == NULL ? 0 : *(lenval)) \
ctf_integer(lentype, field##_len_isnull, (lenval) == NULL)
#else
#define ceph_ctf_sequencep(type, field, val, lentype, lenval) \
ctf_integer_hex(void*, field, val) \
ctf_integer(uint8_t, field##_isnull, (val) == NULL) \
ctf_integer(uint8_t, field##_data_isnull, ((val) == NULL || *(val) == NULL)) \
ctf_integer(lentype, field##_len, (lenval) == NULL ? 0 : *(lenval)) \
ctf_integer(lentype, field##_len_isnull, (lenval) == NULL)
#endif
// p should be of type struct timeval*
#define ceph_ctf_timevalp(field, p) \
ctf_integer(long int, field##_sec, (p) == NULL ? 0 : (p)->tv_sec) \
ctf_integer(long int, field##_usec, (p) == NULL ? 0 : (p)->tv_usec) \
ctf_integer(uint8_t, field##_isnull, (p) == NULL)
// p should be of type struct timespec*
#define ceph_ctf_timespecp(field, p) \
ctf_integer(long int, field##_sec, (p) == NULL ? 0 : (p)->tv_sec) \
ctf_integer(long int, field##_nsec, (p) == NULL ? 0 : (p)->tv_nsec) \
ctf_integer(uint8_t, field##_isnull, (p) == NULL)
// val should be of type time_t
// Currently assumes that time_t is an integer and no more than 64 bits wide.
// This is verified by the configure script.
#define ceph_ctf_time_t(field, val) \
ctf_integer(uint64_t, field, (uint64_t)(val))
// val should be of type time_t*
// Currently assumes that time_t is an integer and no more than 64 bits wide.
// This is verified by the configure script.
// A NULL pointer is logged as 0 with field##_isnull set to 1.
#define ceph_ctf_time_tp(field, val) \
    ctf_integer(uint64_t, field, (val) == NULL ? 0 : (uint64_t)(*val)) \
    ctf_integer(uint8_t, field##_isnull, (val) == NULL)
#endif /* TRACING_COMMON_H */
| 4,279 | 40.553398 | 141 | h |
lightly | lightly-master/.pre-commit-config.yaml | repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v2.3.0
hooks:
- id: detect-private-key # check for private keys
- id: check-added-large-files # prevent commit of files >500kB
args: ['--maxkb=500']
- repo: local
hooks:
- id: pytest-check # run all tests
name: pytest-check
entry: make test
language: system
pass_filenames: false
stages: [push]
# Avoid running tests if non-tested files have changed.
# The regex follows the pattern in the docs: https://pre-commit.com/#regular-expressions
exclude: |
(?x)^(
benchmark_logs/.*|
docs/.*|
examples/.*|
\.gitignore|
CONTRIBUTING\.md|
DOCS\.md|
LICENSE\.txt|
PRECOMMITHOOKS\.md|
README\.md
)$
| 830 | 26.7 | 92 | yaml |
lightly | lightly-master/.readthedocs.yml | # .readthedocs.yml
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
# Required
version: 2
# Build documentation in the docs/ directory with Sphinx
sphinx:
configuration: docs/source/conf.py
# Optionally build your docs in additional formats such as PDF
formats:
- pdf
# Optionally set the version of Python and requirements required to build your docs
python:
version: 3.7
install:
- requirements: requirements/base.txt
- requirements: requirements/dev.txt | 538 | 24.666667 | 83 | yml |
lightly | lightly-master/CONTRIBUTING.md | # How to contribute to lightly?
Everyone is welcome to contribute, and we value everybody's contribution. Code is thus not the only way to help the community. Answering questions, helping others, reaching out and improving the documentation are immensely valuable to the community.
It also helps us if you spread the word: reference the library from blog posts on the awesome projects it made possible, shout out on Twitter every time it has helped you, or simply star the repo to say "thank you".
## You can contribute in so many ways!
There are 4 ways you can contribute to lightly:
* Fixing outstanding issues with the existing code;
* Implementing new models;
* Contributing to the examples or to the documentation;
* Submitting issues related to bugs or desired new features.
*All are equally valuable to the community.*
## Submitting a new issue or feature request
Do your best to follow these guidelines when submitting an issue or a feature
request. It will make it easier for us to come back to you quickly and with good
feedback.
### Did you find a bug?
First, **please make sure the bug was not already reported** (use the search bar on Github under Issues).
* Include your **OS type and version**, the versions of **Python**, **PyTorch**, and **PyTorch Lightning**.
* A code snippet that allows us to reproduce the bug in less than 30s.
* Provide the *full* traceback if an exception is raised.
### Do you want to implement a new self-supervised model?
Awesome! Please provide the following information:
* Short description of the model and link to the paper;
* Link to the implementation if it's open source;
If you are willing to contribute the model yourself, let us know so we can best
guide you.
### Do you want a new feature (that is not a model)?
A world-class feature request addresses the following points:
1. Motivation first:
* Is it related to a problem/frustration with the library? If so, please explain
why. Providing a code snippet that demonstrates the problem is best.
* Is it related to something you would need for a project? We'd love to hear
about it!
* Is it something you worked on and think could benefit the community?
Awesome! Tell us what problem it solved for you.
2. Provide a **code snippet** that demonstrates its future use;
3. Attach any additional information (drawings, screenshots, etc.) you think may help.
## Pull Requests
Before writing code, we strongly advise you to search through the existing PRs or
issues to make sure that nobody is already working on the same thing. If you are
unsure, it is always a good idea to open an issue to get some feedback.
Follow these steps to start contributing:
1. Fork the [repository](https://github.com/lightly-ai/lightly/) by
clicking on the 'Fork' button on the repository's page. This creates a copy of the code
under your GitHub user account.
2. Clone your fork to your local disk, and add the base repository as a remote:
```bash
$ git clone git@github.com:lightly-ai/lightly.git
$ cd lightly
$ git remote add upstream https://github.com/lightly-ai/lightly.git
```
3. Create a new branch to hold your development changes:
```bash
$ git checkout -b a_descriptive_name_for_my_changes
```
**do not** work on the `master` branch.
4. Set up a development environment by running the following command in a virtual environment:
```bash
$ pip install -e ".[dev]"
```
5. Develop the features on your branch.
As you work on the features, you should make sure that the code is formatted and the
test suite passes:
```bash
$ make format
$ make all-checks
```
If you get an error from isort or black, please run `make format` again before
running `make all-checks`.
If you're modifying documents under `docs/source`, make sure to validate that
they can still be built. This check also runs in CI.
```bash
$ cd docs
$ make html
```
Once you're happy with your changes, add changed files using `git add` and
make a commit with `git commit` to record your changes locally:
```bash
$ git add modified_file.py
$ git commit
```
Please write [good commit messages](https://chris.beams.io/posts/git-commit/).
It is a good idea to sync your copy of the code with the original
repository regularly. This way you can quickly account for changes:
```bash
$ git fetch upstream
$ git rebase upstream/develop
```
Push the changes to your account using:
```bash
$ git push -u origin a_descriptive_name_for_my_changes
```
6. Once you are satisfied, go to the webpage of your fork on GitHub.
Click on 'Pull request' to send your changes to the project maintainers for review.
7. It's ok if maintainers ask you for changes. It happens to core contributors
too! So everyone can see the changes in the Pull request, work in your local
branch and push the changes to your fork. They will automatically appear in
the pull request.
### Style guide
`lightly` follows the [Google styleguide](https://google.github.io/styleguide/pyguide.html) and the [PyTorch styleguide](https://github.com/IgorSusmelj/pytorch-styleguide) by Igor Susmelj.
Check our [documentation writing guide](https://github.com/lightly-ai/lightly/docs/README.md) for more information.
#### This guide was inspired by Transformers [transformers guide to contributing](https://github.com/huggingface/transformers/blob/master/CONTRIBUTING.md) which was influenced by Scikit-learn [scikit-learn guide to contributing](https://github.com/scikit-learn/scikit-learn/blob/master/CONTRIBUTING.md). | 5,628 | 37.292517 | 303 | md |
lightly | lightly-master/DOCS.md | # Lightly
Lightly is a PIP package for self-supervised learning.
> We, at [Lightly](https://www.lightly.ai), are passionate engineers who want to make deep learning more efficient. We want to help popularize the use of self-supervised methods to understand and filter raw image data. Our solution can be applied before any data annotation step and the learned representations can be used to analyze and visualize datasets as well as for selecting a core set of samples.
If you are curious to learn more about our work - check out [Lightly](https://www.lightly.ai)!
## Installation
```
pip install lightly
```
## Links
- [Homepage](https://www.lightly.ai)
- [Web-App](https://app.lightly.ai)
- [Documentation](https://docs.lightly.ai)
- [Github](https://github.com/lightly-ai/lightly)
- [Discord](https://discord.gg/xvNJW94) | 830 | 38.571429 | 404 | md |
lightly | lightly-master/PRECOMMITHOOKS.md | # Pre-Commit Hooks
We use pre-commit hooks to identify simple issues before submission to code review. In particular, our hooks currently check for:
* Private keys in the commit
* Large files in the commit (>500kB)
* Units which don't pass their unit tests (on push only)
## Install Pre-Commit
`pre-commit` comes as a pip package and is specified in `requirements/dev.txt`.
To install it either run:
```
$ pip install .[dev]
```
Or, to install it separately:
```
$ pip install pre-commit
```
Test your installation:
```
$ pre-commit --version
```
If the installation failed, try
```
$ curl https://pre-commit.com/install-local.py | python -
```
or see the [documentation of pre-commit](https://pre-commit.com/) for more information.
## Install Pre-Commit Hooks
To install the pre-commit hooks specified in `.pre-commit-config.yaml`, simply run
```
$ pre-commit install
```
Install the pre-push hooks like this
```
$ pre-commit install --hook-type pre-push
```
You can verify that the hooks were installed correctly with
```
$ pre-commit run --all-files
```
The output should look like this:
```
$ pre-commit run --all-files
Detect Private Key................................Passed
Check for added large files.......................Passed
```
## Usage
With the new setup, checks for private keys and large files are made before every commit and all tests must pass for a push.
| 1,383 | 25.113208 | 129 | md |
lightly | lightly-master/README.md |



[](https://pypi.org/project/lightly/)
[](https://pepy.tech/project/lightly)
[](https://github.com/psf/black)
Lightly is a computer vision framework for self-supervised learning.
> We, at [Lightly](https://www.lightly.ai), are passionate engineers who want to make deep learning more efficient. That's why - together with our community - we want to popularize the use of self-supervised methods to understand and curate raw image data. Our solution can be applied before any data annotation step and the learned representations can be used to visualize and analyze datasets. This allows to select the best set of samples for model training through advanced filtering.
- [Homepage](https://www.lightly.ai)
- [Web-App](https://app.lightly.ai)
- [Documentation](https://docs.lightly.ai/self-supervised-learning/)
- [Lightly Solution Documentation (Lightly Worker & API)](https://docs.lightly.ai/)
- [Github](https://github.com/lightly-ai/lightly)
- [Discord](https://discord.gg/xvNJW94) (We have weekly paper sessions!)
## Features
Lightly offers features like
- Modular framework which exposes low-level building blocks such as loss functions and
model heads.
- Easy to use and written in a PyTorch like style.
- Supports custom backbone models for self-supervised pre-training.
- Support for distributed training using PyTorch Lightning.
### Supported Models
You can [find sample code for all the supported models here.](https://docs.lightly.ai/self-supervised-learning/examples/models.html) We provide PyTorch, PyTorch Lightning,
and PyTorch Lightning distributed examples for all models to kickstart your project.
**Models**:
- Barlow Twins, 2021 [paper](https://arxiv.org/abs/2103.03230) [docs](https://docs.lightly.ai/self-supervised-learning/examples/barlowtwins.html)
- BYOL, 2020 [paper](https://arxiv.org/abs/2006.07733) [docs](https://docs.lightly.ai/self-supervised-learning/examples/byol.html)
- DCL & DCLW, 2021 [paper](https://arxiv.org/abs/2110.06848) [docs](https://docs.lightly.ai/self-supervised-learning/examples/dcl.html)
- DINO, 2021 [paper](https://arxiv.org/abs/2104.14294) [docs](https://docs.lightly.ai/self-supervised-learning/examples/dino.html)
- MAE, 2021 [paper](https://arxiv.org/abs/2111.06377) [docs](https://docs.lightly.ai/self-supervised-learning/examples/mae.html)
- MSN, 2022 [paper](https://arxiv.org/abs/2204.07141) [docs](https://docs.lightly.ai/self-supervised-learning/examples/msn.html)
- MoCo, 2019 [paper](https://arxiv.org/abs/1911.05722) [docs](https://docs.lightly.ai/self-supervised-learning/examples/moco.html)
- NNCLR, 2021 [paper](https://arxiv.org/abs/2104.14548) [docs](https://docs.lightly.ai/self-supervised-learning/examples/nnclr.html)
- PMSN, 2022 [paper](https://arxiv.org/abs/2210.07277) [docs](https://docs.lightly.ai/self-supervised-learning/examples/pmsn.html)
- SimCLR, 2020 [paper](https://arxiv.org/abs/2002.05709) [docs](https://docs.lightly.ai/self-supervised-learning/examples/simclr.html)
- SimMIM, 2021 [paper](https://arxiv.org/abs/2111.09886) [docs](https://docs.lightly.ai/self-supervised-learning/examples/simmim.html)
- SimSiam, 2021 [paper](https://arxiv.org/abs/2011.10566) [docs](https://docs.lightly.ai/self-supervised-learning/examples/simsiam.html)
- SMoG, 2022 [paper](https://arxiv.org/abs/2207.06167) [docs](https://docs.lightly.ai/self-supervised-learning/examples/smog.html)
- SwaV, 2020 [paper](https://arxiv.org/abs/2006.09882) [docs](https://docs.lightly.ai/self-supervised-learning/examples/swav.html)
- TiCo, 2022 [paper](https://arxiv.org/abs/2206.10698) [docs](https://docs.lightly.ai/self-supervised-learning/examples/tico.html)
- VICReg, 2022 [paper](https://arxiv.org/abs/2105.04906) [docs](https://docs.lightly.ai/self-supervised-learning/examples/vicreg.html)
- VICRegL, 2022 [paper](https://arxiv.org/abs/2210.01571) [docs](https://docs.lightly.ai/self-supervised-learning/examples/vicregl.html)
## Tutorials
Want to jump to the tutorials and see Lightly in action?
- [Train MoCo on CIFAR-10](https://docs.lightly.ai/self-supervised-learning/tutorials/package/tutorial_moco_memory_bank.html)
- [Train SimCLR on Clothing Data](https://docs.lightly.ai/self-supervised-learning/tutorials/package/tutorial_simclr_clothing.html)
- [Train SimSiam on Satellite Images](https://docs.lightly.ai/self-supervised-learning/tutorials/package/tutorial_simsiam_esa.html)
- [Use Lightly with Custom Augmentations](https://docs.lightly.ai/self-supervised-learning/tutorials/package/tutorial_custom_augmentations.html)
- [Pre-train a Detectron2 Backbone with Lightly](https://docs.lightly.ai/self-supervised-learning/tutorials/package/tutorial_pretrain_detectron2.html)
Tutorials for the Lightly Solution (Lightly Worker & API):
- [General Docs of Lightly Solution](https://docs.lightly.ai)
- [Active Learning Using YOLOv7 and Comma10k](https://docs.lightly.ai/docs/active-learning-yolov7)
- [Active Learning for Driveable Area Segmentation Using Cityscapes](https://docs.lightly.ai/docs/active-learning-for-driveable-area-segmentation-using-cityscapes)
- [Active Learning for Transactions of Images](https://docs.lightly.ai/docs/active-learning-for-transactions-of-images)
- [Improving YOLOv8 using Active Learning on Videos](https://docs.lightly.ai/docs/active-learning-yolov8-video)
- [Assertion-based Active Learning with YOLOv8](https://docs.lightly.ai/docs/assertion-based-active-learning-tutorial)
- and more ...
Community and partner projects:
- [On-Device Deep Learning with Lightly on an ARM microcontroller](https://github.com/ARM-software/EndpointAI/tree/master/ProofOfConcepts/Vision/OpenMvMaskDefaults)
## Quick Start
Lightly requires **Python 3.6+** but we recommend using **Python 3.7+**. We recommend installing Lightly in a **Linux** or **OSX** environment.
### Dependencies
- [PyTorch](https://pytorch.org/)
- [Torchvision](https://pytorch.org/vision/stable/index.html)
- [PyTorch Lightning](https://www.pytorchlightning.ai/index.html) v1.5+
Lightly is compatible with PyTorch and PyTorch Lightning v2.0+!
Vision transformer based models require Torchvision v0.12+.
### Installation
You can install Lightly and its dependencies from PyPI with:
```
pip3 install lightly
```
We strongly recommend that you install Lightly in a dedicated virtualenv, to avoid
conflicting with your system packages.
If you only want to install the API client without torch and torchvision dependencies
follow the docs on [how to install the Lightly Python Client](https://docs.lightly.ai/docs/install-lightly#install-the-lightly-python-client).
### Lightly in Action
With Lightly, you can use the latest self-supervised learning methods in a modular
way using the full power of PyTorch. Experiment with different backbones,
models, and loss functions. The framework has been designed to be easy to use
from the ground up. [Find more examples in our docs](https://docs.lightly.ai/self-supervised-learning/examples/models.html).
```python
import torch
import torchvision
from lightly import loss
from lightly import transforms
from lightly.data import LightlyDataset
from lightly.models.modules import heads
# Create a PyTorch module for the SimCLR model.
class SimCLR(torch.nn.Module):
def __init__(self, backbone):
super().__init__()
self.backbone = backbone
self.projection_head = heads.SimCLRProjectionHead(
input_dim=512, # Resnet18 features have 512 dimensions.
hidden_dim=512,
output_dim=128,
)
def forward(self, x):
features = self.backbone(x).flatten(start_dim=1)
z = self.projection_head(features)
return z
# Use a resnet backbone.
backbone = torchvision.models.resnet18()
# Ignore the classification head as we only want the features.
backbone.fc = torch.nn.Identity()
# Build the SimCLR model.
model = SimCLR(backbone)
# Prepare transform that creates multiple random views for every image.
transform = transforms.SimCLRTransform(input_size=32, cj_prob=0.5)
# Create a dataset from your image folder.
dataset = LightlyDataset(input_dir="./my/cute/cats/dataset/", transform=transform)
# Build a PyTorch dataloader.
dataloader = torch.utils.data.DataLoader(
dataset, # Pass the dataset to the dataloader.
batch_size=128, # A large batch size helps with the learning.
shuffle=True, # Shuffling is important!
)
# Lightly exposes building blocks such as loss functions.
criterion = loss.NTXentLoss(temperature=0.5)
# Get a PyTorch optimizer.
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, weight_decay=1e-6)
# Train the model.
for epoch in range(10):
for (view0, view1), targets, filenames in dataloader:
z0 = model(view0)
z1 = model(view1)
loss = criterion(z0, z1)
loss.backward()
optimizer.step()
optimizer.zero_grad()
print(f"loss: {loss.item():.5f}")
```
You can easily use another model like SimSiam by swapping the model and the
loss function.
```python
# PyTorch module for the SimSiam model.
class SimSiam(torch.nn.Module):
def __init__(self, backbone):
super().__init__()
self.backbone = backbone
self.projection_head = heads.SimSiamProjectionHead(512, 512, 128)
self.prediction_head = heads.SimSiamPredictionHead(128, 64, 128)
def forward(self, x):
features = self.backbone(x).flatten(start_dim=1)
z = self.projection_head(features)
p = self.prediction_head(z)
z = z.detach()
return z, p
model = SimSiam(backbone)
# Use the SimSiam loss function.
criterion = loss.NegativeCosineSimilarity()
```
You can [find a more complete example for SimSiam here.](https://docs.lightly.ai/self-supervised-learning/examples/simsiam.html)
Use PyTorch Lightning to train the model:
```python
from pytorch_lightning import LightningModule, Trainer
class SimCLR(LightningModule):
def __init__(self):
super().__init__()
resnet = torchvision.models.resnet18()
resnet.fc = torch.nn.Identity()
self.backbone = resnet
self.projection_head = heads.SimCLRProjectionHead(512, 512, 128)
self.criterion = loss.NTXentLoss()
def forward(self, x):
features = self.backbone(x).flatten(start_dim=1)
z = self.projection_head(features)
return z
def training_step(self, batch, batch_index):
(view0, view1), _, _ = batch
z0 = self.forward(view0)
z1 = self.forward(view1)
loss = self.criterion(z0, z1)
return loss
def configure_optimizers(self):
optim = torch.optim.SGD(self.parameters(), lr=0.06)
return optim
model = SimCLR()
trainer = Trainer(max_epochs=10, devices=1, accelerator="gpu")
trainer.fit(model, dataloader)
```
See [our docs for a full PyTorch Lightning example.](https://docs.lightly.ai/self-supervised-learning/examples/simclr.html)
Or train the model on 4 GPUs:
```python
# Use distributed version of loss functions.
criterion = loss.NTXentLoss(gather_distributed=True)
trainer = Trainer(
max_epochs=10,
devices=4,
accelerator="gpu",
strategy="ddp",
sync_batchnorm=True,
use_distributed_sampler=True, # or replace_sampler_ddp=True for PyTorch Lightning <2.0
)
trainer.fit(model, dataloader)
```
We provide multi-GPU training examples with distributed gather and synchronized BatchNorm.
[Have a look at our docs regarding distributed training.](https://docs.lightly.ai/self-supervised-learning/getting_started/distributed_training.html)
## Benchmarks
Implemented models and their performance on various datasets. Hyperparameters are not
tuned for maximum accuracy. For detailed results and more info about the benchmarks click
[here](https://docs.lightly.ai/self-supervised-learning/getting_started/benchmarks.html).
### Imagenet
> **Note**: Evaluation settings are based on these papers:
> * Linear: [SimCLR](https://arxiv.org/abs/2002.05709)
> * Finetune: [SimCLR](https://arxiv.org/abs/2002.05709)
> * KNN: [InstDisc](https://arxiv.org/abs/1805.01978)
>
> See the [benchmarking scripts](./benchmarks/imagenet/resnet50/) for details.
| Model | Backbone | Batch Size | Epochs | Linear Top1 | Finetune Top1 | KNN Top1 | Tensorboard | Checkpoint |
|----------------|----------|------------|--------|-------------|---------------|----------|-------------|------------|
| BYOL | Res50 | 256 | 100 | 62.4 | 74.0 | 45.6 | [link](https://tensorboard.dev/experiment/Z0iG2JLaTJe5nuBD7DK1bg) | [link](https://lightly-ssl-checkpoints.s3.amazonaws.com/imagenet_resnet50_byol_2023-07-10_10-37-32/pretrain/version_0/checkpoints/epoch%3D99-step%3D500400.ckpt) |
| DINO | Res50 | 128 | 100 | 68.2 | 72.5 | 49.9 | [link](https://tensorboard.dev/experiment/DvKHX9sNSWWqDrRksllPLA) | [link](https://lightly-ssl-checkpoints.s3.amazonaws.com/imagenet_resnet50_dino_2023-06-06_13-59-48/pretrain/version_0/checkpoints/epoch%3D99-step%3D1000900.ckpt) |
| SimCLR* | Res50 | 256 | 100 | 63.2 | 73.9 | 44.8 | [link](https://tensorboard.dev/experiment/Ugol97adQdezgcVibDYMMA) | [link](https://lightly-ssl-checkpoints.s3.amazonaws.com/imagenet_resnet50_simclr_2023-06-22_09-11-13/pretrain/version_0/checkpoints/epoch%3D99-step%3D500400.ckpt) |
| SimCLR* + DCL | Res50 | 256 | 100 | 65.1 | 73.5 | 49.6 | [link](https://tensorboard.dev/experiment/k4ZonZ77QzmBkc0lXswQlg/) | [link](https://lightly-ssl-checkpoints.s3.amazonaws.com/imagenet_resnet50_dcl_2023-07-04_16-51-40/pretrain/version_0/checkpoints/epoch%3D99-step%3D500400.ckpt) |
| SimCLR* + DCLW | Res50 | 256 | 100 | 64.5 | 73.2 | 48.5 | [link](https://tensorboard.dev/experiment/TrALnpwFQ4OkZV3uvaX7wQ/) | [link](https://lightly-ssl-checkpoints.s3.amazonaws.com/imagenet_resnet50_dclw_2023-07-07_14-57-13/pretrain/version_0/checkpoints/epoch%3D99-step%3D500400.ckpt) |
| SwAV | Res50 | 256 | 100 | 67.2 | 75.4 | 49.5 | [link](https://tensorboard.dev/experiment/Ipx4Oxl5Qkqm5Sl5kWyKKg) | [link](https://lightly-ssl-checkpoints.s3.amazonaws.com/imagenet_resnet50_swav_2023-05-25_08-29-14/pretrain/version_0/checkpoints/epoch%3D99-step%3D500400.ckpt) |
*\*We use square root learning rate scaling instead of linear scaling as it yields
better results for smaller batch sizes. See Appendix B.1 in [SimCLR paper](https://arxiv.org/abs/2002.05709).*
### ImageNette
| Model | Backbone | Batch Size | Epochs | KNN Top1 |
|-------------|----------|------------|--------|----------|
| BarlowTwins | Res18 | 256 | 800 | 0.852 |
| BYOL | Res18 | 256 | 800 | 0.887 |
| DCL | Res18 | 256 | 800 | 0.861 |
| DCLW | Res18 | 256 | 800 | 0.865 |
| DINO | Res18 | 256 | 800 | 0.888 |
| FastSiam | Res18 | 256 | 800 | 0.873 |
| MAE | ViT-S | 256 | 800 | 0.610 |
| MSN | ViT-S | 256 | 800 | 0.828 |
| Moco | Res18 | 256 | 800 | 0.874 |
| NNCLR | Res18 | 256 | 800 | 0.884 |
| PMSN | ViT-S | 256 | 800 | 0.822 |
| SimCLR | Res18 | 256 | 800 | 0.889 |
| SimMIM | ViT-B32 | 256 | 800 | 0.343 |
| SimSiam | Res18 | 256 | 800 | 0.872 |
| SwaV | Res18 | 256 | 800 | 0.902 |
| SwaVQueue | Res18 | 256 | 800 | 0.890 |
| SMoG | Res18 | 256 | 800 | 0.788 |
| TiCo | Res18 | 256 | 800 | 0.856 |
| VICReg | Res18 | 256 | 800 | 0.845 |
| VICRegL | Res18 | 256 | 800 | 0.778 |
### Cifar10
| Model | Backbone | Batch Size | Epochs | KNN Top1 |
|-------------|----------|------------|--------|----------|
| BarlowTwins | Res18 | 512 | 800 | 0.859 |
| BYOL | Res18 | 512 | 800 | 0.910 |
| DCL | Res18 | 512 | 800 | 0.874 |
| DCLW | Res18 | 512 | 800 | 0.871 |
| DINO | Res18 | 512 | 800 | 0.848 |
| FastSiam | Res18 | 512 | 800 | 0.902 |
| Moco | Res18 | 512 | 800 | 0.899 |
| NNCLR | Res18 | 512 | 800 | 0.892 |
| SimCLR | Res18 | 512 | 800 | 0.879 |
| SimSiam | Res18 | 512 | 800 | 0.904 |
| SwaV | Res18 | 512 | 800 | 0.884 |
| SMoG | Res18 | 512 | 800 | 0.800 |
## Terminology
Below you can see a schematic overview of the different concepts in the package.
The terms in bold are explained in more detail in our [documentation](https://docs.lightly.ai/self-supervised-learning/).
<img src="/docs/source/getting_started/images/lightly_overview.png" alt="Overview of the Lightly pip package"/></a>
### Next Steps
Head to the [documentation](https://docs.lightly.ai) and see the things you can achieve with Lightly!
## Development
To install dev dependencies (for example to contribute to the framework) you can use the following command:
```
pip3 install -e ".[dev]"
```
For more information about how to contribute have a look [here](CONTRIBUTING.md).
### Running Tests
Unit tests are within the [tests directory](tests/) and we recommend running them using
[pytest](https://docs.pytest.org/en/stable/). There are two test configurations
available. By default, only a subset will be run:
```
make test-fast
```
To run all tests (including the slow ones) you can use the following command:
```
make test
```
To test a specific file or directory use:
```
pytest <path to file or directory>
```
### Code Formatting
To format code with [black](https://black.readthedocs.io/en/stable/) and [isort](https://pycqa.github.io/isort/) run:
```
make format
```
## Further Reading
**Self-Supervised Learning**:
- Have a look at our [#papers channel on discord](https://discord.com/channels/752876370337726585/815153188487299083)
for the newest self-supervised learning papers.
- [A Cookbook of Self-Supervised Learning, 2023](https://arxiv.org/abs/2304.12210)
- [Masked Autoencoders Are Scalable Vision Learners, 2021](https://arxiv.org/abs/2111.06377)
- [Emerging Properties in Self-Supervised Vision Transformers, 2021](https://arxiv.org/abs/2104.14294)
- [Unsupervised Learning of Visual Features by Contrasting Cluster Assignments, 2021](https://arxiv.org/abs/2006.09882)
- [What Should Not Be Contrastive in Contrastive Learning, 2020](https://arxiv.org/abs/2008.05659)
- [A Simple Framework for Contrastive Learning of Visual Representations, 2020](https://arxiv.org/abs/2002.05709)
- [Momentum Contrast for Unsupervised Visual Representation Learning, 2020](https://arxiv.org/abs/1911.05722)
## FAQ
- Why should I care about self-supervised learning? Aren't pre-trained models from ImageNet much better for transfer learning?
- Self-supervised learning has become increasingly popular among scientists over the last years because the learned representations perform extraordinarily well on downstream tasks. This means that they capture the important information in an image better than other types of pre-trained models. By training a self-supervised model on *your* dataset, you can make sure that the representations have all the necessary information about your images.
- How can I contribute?
- Create an issue if you encounter bugs or have ideas for features we should implement. You can also add your own code by forking this repository and creating a PR. More details about how to contribute with code is in our [contribution guide](CONTRIBUTING.md).
- Is this framework for free?
- Yes, this framework is completely free to use and we provide the source code. We believe that we need to make training deep learning models more data efficient to achieve widespread adoption. One step to achieve this goal is by leveraging self-supervised learning. The company behind Lightly is committed to keep this framework open-source.
- If this framework is free, how is the company behind Lightly making money?
- Training self-supervised models is only one part of our solution.
[The company behind Lightly](https://lightly.ai/) focuses on processing and analyzing embeddings created by self-supervised models.
By building, what we call a self-supervised active learning loop we help companies understand and work with their data more efficiently.
As the [Lightly Solution](https://docs.lightly.ai) is a freemium product, you can try it out for free. However, we will charge for some features.
- In any case this framework will always be free to use, even for commercial purposes.
## Lightly in Research
- [Reverse Engineering Self-Supervised Learning, 2023](https://arxiv.org/abs/2305.15614)
- [Learning Visual Representations via Language-Guided Sampling, 2023](https://arxiv.org/pdf/2302.12248.pdf)
- [Self-Supervised Learning Methods for Label-Efficient Dental Caries Classification, 2022](https://www.mdpi.com/2075-4418/12/5/1237)
- [DPCL: Constrative Representation Learning with Differential Privacy, 2022](https://assets.researchsquare.com/files/rs-1516950/v1_covered.pdf?c=1654486158)
- [Decoupled Contrastive Learning, 2021](https://arxiv.org/abs/2110.06848)
- [solo-learn: A Library of Self-supervised Methods for Visual Representation Learning, 2021](https://www.jmlr.org/papers/volume23/21-1155/21-1155.pdf)
## BibTeX
If you want to cite the framework feel free to use this:
```bibtex
@article{susmelj2020lightly,
title={Lightly},
author={Igor Susmelj and Matthias Heller and Philipp Wirth and Jeremy Prescott and Malte Ebner et al.},
journal={GitHub. Note: https://github.com/lightly-ai/lightly},
year={2020}
}
```
| 22,364 | 48.370861 | 488 | md |
lightly | lightly-master/setup.py | import os
import sys
import setuptools
try:
import builtins
except ImportError:
import __builtin__ as builtins
PATH_ROOT = PATH_ROOT = os.path.dirname(__file__)
builtins.__LIGHTLY_SETUP__ = True
import lightly
def load_description(path_dir=PATH_ROOT, filename="DOCS.md"):
    """Load the long package description from ``path_dir/filename``.

    Args:
        path_dir: Directory containing the description file.
        filename: Name of the markdown file to read.

    Returns:
        The file content as a single string.
    """
    # Read explicitly as UTF-8 so the build does not depend on the locale of
    # the machine running setup.py (the markdown may contain non-ASCII text).
    with open(os.path.join(path_dir, filename), encoding="utf-8") as f:
        long_description = f.read()
    return long_description
def load_requirements(path_dir=PATH_ROOT, filename="base.txt", comment_char="#"):
    """Parse a pip requirements file and return the requirement strings.

    Adapted from the pytorch-lightning repo:
    https://github.com/PyTorchLightning/pytorch-lightning.
    Reads ``path_dir/requirements/filename``, strips comments, and skips
    URL-based requirements (they cannot be declared in ``install_requires``).
    """
    requirements_path = os.path.join(path_dir, "requirements", filename)
    with open(requirements_path, "r") as requirements_file:
        raw_lines = [line.strip() for line in requirements_file.readlines()]
    requirements = []
    for line in raw_lines:
        # Cut off trailing (or full-line) comments.
        if comment_char in line:
            line = line[: line.index(comment_char)].strip()
        # Skip directly installed (URL-based) dependencies.
        if line.startswith("http"):
            continue
        # Keep only non-empty requirement specifiers.
        if line:
            requirements.append(line)
    return requirements
if __name__ == "__main__":
    # Package metadata.
    # NOTE: removed a dead assignment `description = lightly.__doc__` that was
    # immediately overwritten by the literal string below.
    name = "lightly"
    version = lightly.__version__
    author = "Philipp Wirth & Igor Susmelj"
    author_email = "philipp@lightly.ai"
    description = "A deep learning package for self-supervised learning"
    # Console entry points for the lightly CLI tools.
    entry_points = {
        "console_scripts": [
            "lightly-crop = lightly.cli.crop_cli:entry",
            "lightly-train = lightly.cli.train_cli:entry",
            "lightly-embed = lightly.cli.embed_cli:entry",
            "lightly-magic = lightly.cli.lightly_cli:entry",
            "lightly-download = lightly.cli.download_cli:entry",
            "lightly-version = lightly.cli.version_cli:entry",
        ]
    }
    long_description = load_description()
    python_requires = ">=3.6"
    # Requirement groups, each read from a file in requirements/.
    base_requires = load_requirements(filename="base.txt")
    openapi_requires = load_requirements(filename="openapi.txt")
    torch_requires = load_requirements(filename="torch.txt")
    video_requires = load_requirements(filename="video.txt")
    dev_requires = load_requirements(filename="dev.txt")
    setup_requires = ["setuptools>=21"]
    # base + openapi + torch are always installed; video/dev are extras.
    install_requires = base_requires + openapi_requires + torch_requires
    extras_require = {
        "video": video_requires,
        "dev": dev_requires,
        "all": dev_requires + video_requires,
    }
    packages = [
        "lightly",
        "lightly.api",
        "lightly.cli",
        "lightly.cli.config",
        "lightly.data",
        "lightly.embedding",
        "lightly.loss",
        "lightly.loss.regularizer",
        "lightly.models",
        "lightly.models.modules",
        "lightly.transforms",
        "lightly.utils",
        "lightly.utils.benchmarking",
        "lightly.utils.cropping",
        "lightly.active_learning",
        "lightly.active_learning.config",
        "lightly.openapi_generated",
        "lightly.openapi_generated.swagger_client",
        "lightly.openapi_generated.swagger_client.api",
        "lightly.openapi_generated.swagger_client.models",
    ]
    project_urls = {
        "Homepage": "https://www.lightly.ai",
        "Web-App": "https://app.lightly.ai",
        "Documentation": "https://docs.lightly.ai",
        "Github": "https://github.com/lightly-ai/lightly",
        "Discord": "https://discord.gg/xvNJW94",
    }
    classifiers = [
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering",
        "Topic :: Scientific/Engineering :: Mathematics",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "Topic :: Software Development",
        "Topic :: Software Development :: Libraries",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "License :: OSI Approved :: MIT License",
    ]
    setuptools.setup(
        name=name,
        version=version,
        author=author,
        author_email=author_email,
        description=description,
        entry_points=entry_points,
        license="MIT",
        long_description=long_description,
        long_description_content_type="text/markdown",
        setup_requires=setup_requires,
        install_requires=install_requires,
        extras_require=extras_require,
        python_requires=python_requires,
        packages=packages,
        classifiers=classifiers,
        include_package_data=True,
        project_urls=project_urls,
    )
| 4,985 | 32.24 | 90 | py |
lightly | lightly-master/.github/PULL_REQUEST_TEMPLATE/PR_template_checklist.md | closes #issue_number
## Description
- [ ] My change is breaking
Please_describe_what_you_changed_and_why___You_do_not_need_to_repeat_stuff_from_the_issue
## Tests
- [ ] My change is covered by existing tests.
- [ ] My change needs new tests.
- [ ] I have added/adapted the tests accordingly.
- [ ] I have manually tested the change. if_yes_describe_how
## Documentation
- [ ] I have added docstrings to all public functions/methods.
- [ ] My change requires a change to the documentation ( `.rst` files).
- [ ] I have updated the documentation accordingly.
- [ ] The autodocs update the documentation accordingly.
## Implications / comments / further issues
- #e_g_link_to_issue_to_cover_breaking_changes
| 710 | 31.318182 | 89 | md |
lightly | lightly-master/.github/PULL_REQUEST_TEMPLATE/PR_template_checklist_full.md | closes #issue_number
## Description
- [ ] My change is breaking
Please_describe_what_you_changed_and_why___You_do_not_need_to_repeat_stuff_from_the_issue
## Tests
- [ ] My change is covered by existing tests
- [ ] My change needs new tests
- [ ] I have added/adapted tests accordingly.
- [ ] I have manually tested the change.
  If applicable, describe the manual test procedure, e.g.:
```bash
pip uninstall lightly
export BRANCH_NAME="branch_name"
pip install "git+https://github.com/lightly-ai/lightly.git@$BRANCH_NAME"
lightly-cli_do_something_command
```
## Documentation
- [ ] I have added docstrings to all changed/added public functions/methods.
- [ ] My change requires a change to the documentation ( `.rst` files).
- [ ] I have updated the documentation accordingly.
- [ ] The autodocs update the documentation accordingly.`
## Improvements put into another issue:
- #issue_number
## Issues covering the breaking change:
- #link_to_issue_in_other_repo to adapt the other side of the breaking change | 1,012 | 31.677419 | 89 | md |
lightly | lightly-master/.github/PULL_REQUEST_TEMPLATE/PR_template_minimal.md | closes #issue_number
## Description
Please_describe_what_you_changed_and_why___You_do_not_need_to_repeat_stuff_from_the_issue
## Documentation
- [ ] I have updated the documentation.
- [ ] I need help on it.
## Tests
- [ ] I have updated the tests.
- [ ] I need help on it. | 276 | 22.083333 | 89 | md |
lightly | lightly-master/.github/workflows/release_pypi.yml | name: Build and release lightly
# Trigger: manual only for now; the automatic trigger on published GitHub
# releases is intentionally disabled (see TODO below).
on:
  workflow_dispatch:
  # TODO(Philipp, 03/23): Enable me after proper testing.
  # release:
  #   types: [published]
jobs:
  build:
    name: Build and release
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
        with:
          # Full history is needed so git describe / rev-list can see all tags.
          fetch-depth: 0
      - name: Checkout latest release tag
        id: checkout_latest_release_tag
        run: |
          LATEST_TAG=$(git describe --tags `git rev-list --tags --max-count=1`)
          git checkout $LATEST_TAG
          echo "tag_name=$LATEST_TAG" >> $GITHUB_OUTPUT;
      - name: Set up Python 3.10
        uses: actions/setup-python@v4
        with:
          python-version: "3.10"
      - name: Build and release
        id: build_and_release
        run: |
          pip3 install wheel
          pip3 install twine
          make dist
          twine upload -u ${{ secrets.PYPI_USER_NAME }} -p ${{ secrets.PYPI_PASSWORD }} dist/*
      # Map the step outcome (success/failure) to a Slack emoji for the message.
      - name: Convert success/failure strings to emojis
        id: emoji_status
        run: |
          function set-emoji-output {
            if [ "$2" == "success" ];
              then echo "$1=:github-check-mark:" >> $GITHUB_OUTPUT;
              else echo "$1=:github-changes-requested:" >> $GITHUB_OUTPUT;
            fi
          }
          set-emoji-output status ${{ steps.build_and_release.outcome }}
      - name: Slack notification
        if: always()
        uses: rtCamp/action-slack-notify@v2
        env:
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_RELEASES }}
          SLACK_ICON_EMOJI: ":python:"
          SLACK_USERNAME: Release of Lightly PIP Package ${{steps.checkout_latest_release_tag.outputs.tag_name}}
          SLACK_COLOR: ${{ steps.build_and_release.outcome }}
          SLACK_FOOTER: ""
          SLACK_MESSAGE: |
            Release status: ${{steps.emoji_status.outputs.status}}
| 1,905 | 32.438596 | 110 | yml |
lightly | lightly-master/.github/workflows/test.yml | name: Unit Tests
# Runs the unit test suite (with Codecov coverage upload) on pushes, pull
# requests, and manual dispatch. Documentation-only changes are skipped.
on:
  push:
    paths-ignore:
      - 'docs/**'
  pull_request:
    paths-ignore:
      - 'docs/**'
  workflow_dispatch:
jobs:
  test:
    name: Test
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # Oldest and newest supported Python versions.
        python: ["3.7", "3.10"]
    steps:
      - name: Checkout Code
        uses: actions/checkout@v3
      - name: Hack to get setup-python to work on nektos/act
        run: |
          if [ ! -f "/etc/lsb-release" ] ; then
            echo "DISTRIB_RELEASE=18.04" > /etc/lsb-release
          fi
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python }}
      # Cache the whole Python environment, keyed on the requirements files.
      - uses: actions/cache@v2
        with:
          path: ${{ env.pythonLocation }}
          key: cache_v2_${{ env.pythonLocation }}-${{ hashFiles('requirements/**') }}
      - name: Install Dependencies and lightly
        run: pip install -e '.[all]'
      - name: Run Pytest
        run: |
          export LIGHTLY_SERVER_LOCATION="localhost:-1"
          pip install pytest-cov
          python -m pytest -s -v --runslow --cov=./lightly --cov-report=xml --ignore=./lightly/openapi_generated/
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v3
        with:
          fail_ci_if_error: true
          files: ./coverage.xml
| 1,307 | 24.153846 | 111 | yml |
lightly | lightly-master/.github/workflows/test_code_format.yml | name: Code Format Check
# Verifies that the code base passes the formatting check (`make format-check`)
# on every push, pull request, and manual dispatch.
on:
  push:
  pull_request:
  workflow_dispatch:
jobs:
  test:
    name: Check
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Code
        uses: actions/checkout@v3
      - name: Hack to get setup-python to work on nektos/act
        run: |
          if [ ! -f "/etc/lsb-release" ] ; then
            echo "DISTRIB_RELEASE=18.04" > /etc/lsb-release
          fi
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.10"
      # Cache the whole Python environment, keyed on the requirements files.
      - uses: actions/cache@v2
        with:
          path: ${{ env.pythonLocation }}
          key: cache_v2_${{ env.pythonLocation }}-${{ hashFiles('requirements/**') }}
      - name: Install Dependencies and lightly
        run: pip install -e '.[all]'
      - name: Run Format Check
        run: |
          export LIGHTLY_SERVER_LOCATION="localhost:-1"
| 861 | 23.628571 | 83 | yml |
lightly | lightly-master/.github/workflows/test_setup.yml | name: check setup.py
# Smoke-tests a plain `pip install .` of the package: checks that all CLI
# entry points resolve and runs a short end-to-end train/embed on a tiny
# public dataset. Documentation-only changes are skipped.
on:
  push:
    paths-ignore:
      - 'docs/**'
  pull_request:
    paths-ignore:
      - 'docs/**'
  workflow_dispatch:
jobs:
  test:
    name: Test setup.py
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Code
        uses: actions/checkout@v3
      - name: Hack to get setup-python to work on nektos/act
        run: |
          if [ ! -f "/etc/lsb-release" ] ; then
            echo "DISTRIB_RELEASE=18.04" > /etc/lsb-release
          fi
      - name: Set up Python 3.7
        uses: actions/setup-python@v4
        with:
          python-version: 3.7
      # Cache the whole Python environment, keyed on the requirements files.
      - uses: actions/cache@v2
        with:
          path: ${{ env.pythonLocation }}
          key: cache_v2_${{ env.pythonLocation }}-${{ hashFiles('requirements/**') }}
      - name: Install Dependencies and lightly
        run: pip install .
      - name: basic tests of CLI
        run: |
          LIGHTLY_SERVER_LOCATION="localhost:-1"
          lightly-crop --help
          lightly-train --help
          lightly-embed --help
          lightly-magic --help
          lightly-download --help
          lightly-version
      - name: test of CLI on a real dataset
        run: |
          LIGHTLY_SERVER_LOCATION="localhost:-1"
          git clone https://github.com/alexeygrigorev/clothing-dataset-small clothing_dataset_small
          INPUT_DIR_1="clothing_dataset_small/test/dress"
          lightly-train input_dir=$INPUT_DIR_1 trainer.max_epochs=1 loader.num_workers=6
          lightly-embed input_dir=$INPUT_DIR_1
| 1,458 | 27.057692 | 97 | yml |
lightly | lightly-master/.github/workflows/tests_unmocked.yml | name: run unmocked tests
# Manual-only workflow that runs the end-to-end tests against the staging API.
on: [workflow_dispatch]
# Why is this only triggered manually with workflow_dispatch?
# - We have many unittests anyway
# - We ran into quota problems with too many tests (3000mins for the pip package)
# - The tests are unreliable / not deterministic, as they depend on the API from staging
# - The tests take 10mins, which can slow down development
jobs:
  test:
    name: Run unmocked tests
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Code
        uses: actions/checkout@v3
      - name: Set up Python 3.7
        uses: actions/setup-python@v4
        with:
          python-version: 3.7
      # Cache the whole Python environment, keyed on the requirements files.
      - uses: actions/cache@v2
        with:
          path: ${{ env.pythonLocation }}
          key: cache_v2_${{ env.pythonLocation }}-${{ hashFiles('requirements/**') }}
      - name: Install Dependencies and lightly
        run: pip install .
      - name: run unmocked tests
        run: |
          export LIGHTLY_SERVER_LOCATION=${{ secrets.LIGHTLY_SERVER_LOCATION }}
          bash tests/UNMOCKED_end2end_tests/run_all_unmocked_tests.sh ${{ secrets.DUMMY_USER_TOKEN_STAGING }}
| 1,087 | 34.096774 | 107 | yml |
lightly | lightly-master/.github/workflows/weekly_dependency_test.yml | # Install lightly from scratch and run tests.
# Python environment for regular unit tests is cached. In this workflow,
# we always pick up the latest dependencies and therefore check if a new
# external release breaks lightly.
name: Weekly Dependency Test
on:
  workflow_dispatch:
  schedule:
    - cron: '0 22 * * THU' # each Thursday night, at 22:00 UTC
jobs:
  test_fresh_install:
    name: Test fresh install
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      - name: Set up Python 3.10
        uses: actions/setup-python@v4
        with:
          python-version: "3.10"
      # No cache here on purpose — the point is a clean install of the
      # latest dependency versions.
      - name: Install Lightly from scratch
        run: |
          pip3 install ".[dev]"
      - name: Run tests
        id: run_tests
        run: |
          pytest -n auto
      # Reports the test outcome to Slack regardless of success or failure.
      - name: Slack notification
        if: always()
        uses: rtCamp/action-slack-notify@v2
        env:
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_NIGHTLY }}
          SLACK_ICON_EMOJI: ":github:"
          SLACK_USERNAME: Lightly PIP Package ${{ github.workflow }}
          SLACK_COLOR: ${{ steps.run_tests.outcome }}
          SLACK_FOOTER: ""
| 1,127 | 27.923077 | 72 | yml |
lightly | lightly-master/benchmarks/imagenet/resnet50/README.md | # ImageNet ResNet50
Reference implementations for self-supervised learning (SSL) methods on ImageNet with
ResNet50 backbones.
**Note**
> The benchmarks are still in beta phase and there will be breaking changes and
frequent updates. PRs for new methods are highly welcome!
**Goals**
* Provide easy to use/adapt reference implementations of SSL methods.
* Implemented methods should be self-contained and use the Lightly building blocks.
See [simclr.py](simclr.py).
* Remain as framework agnostic as possible. The benchmarks currently only rely on PyTorch and PyTorch Lightning.
**Non-Goals**
* Lightly doesn't strive to be an end-to-end SSL framework with vast configuration options.
Instead, we try to provide building blocks and examples to make it as easy as possible to
build on top of existing SSL methods.
You can find benchmark results in our [docs](https://docs.lightly.ai/self-supervised-learning/getting_started/benchmarks.html).
## Run Benchmark
To run the benchmark first download the ImageNet ILSVRC2012 split from here: https://www.image-net.org/challenges/LSVRC/2012/.
Then start the benchmark with:
```
python main.py --epochs 100 --train-dir /datasets/imagenet/train --val-dir /datasets/imagenet/val --num-workers 12 --devices 2 --batch-size-per-device 128 --skip-finetune-eval
```
Or with SLURM, create the following script (`run_imagenet.sh`):
```
#!/bin/bash
#SBATCH --nodes=1
#SBATCH --gres=gpu:2 # Must match --devices argument
#SBATCH --ntasks-per-node=2 # Must match --devices argument
#SBATCH --cpus-per-task=16 # Must be >= --num-workers argument
#SBATCH --mem=0
eval "$(conda shell.bash hook)"
conda activate lightly-env
srun python main.py --epochs 100 --train-dir /datasets/imagenet/train --val-dir /datasets/imagenet/val --num-workers 12 --devices 2 --batch-size-per-device 128
conda deactivate
```
And run it with sbatch: `sbatch run_imagenet.sh`.
## Configuration
To run the benchmark on specific methods use the `--methods` flag:
```
python main.py --epochs 100 --batch-size-per-device 128 --methods simclr byol
```
Training/evaluation steps can be skipped as follows:
```
python main.py --batch-size-per-device 128 \
--epochs 0 # no pretraining
--skip-knn-eval # no KNN evaluation
--skip-linear-eval # no linear evaluation
--skip-finetune-eval # no finetune evaluation
```
## ImageNet100
For ImageNet100 you have to adapt the dataset location and set number of classes to 100:
```
python main.py --train-dir /datasets/imagenet100/train --val-dir /datasets/imagenet100/val --num-classes 100 --epochs 100 --num-workers 12 --devices 2 --batch-size-per-device 128
```
## Imagenette
For [Imagenette](https://github.com/fastai/imagenette) you have to adapt the dataset location and set number of classes to 10:
```
python main.py --train-dir /datasets/imagenette2-320/train --val-dir /datasets/imagenette2-320/val --num-classes 10 --epochs 100 --num-workers 12 --devices 2 --batch-size-per-device 128
``` | 3,022 | 34.564706 | 185 | md |
lightly | lightly-master/benchmarks/imagenet/resnet50/byol.py | import copy
from typing import List, Tuple
import torch
from pytorch_lightning import LightningModule
from torch import Tensor
from torch.nn import Identity
from torchvision.models import resnet50
from lightly.loss import NegativeCosineSimilarity
from lightly.models.modules import BYOLPredictionHead, BYOLProjectionHead
from lightly.models.utils import get_weight_decay_parameters, update_momentum
from lightly.transforms import SimCLRTransform
from lightly.utils.benchmarking import OnlineLinearClassifier
from lightly.utils.lars import LARS
from lightly.utils.scheduler import CosineWarmupScheduler, cosine_schedule
class BYOL(LightningModule):
    """BYOL pretraining with a ResNet-50 backbone.
    Naming note: ``self.backbone``/``self.projection_head`` act as the
    *teacher* — they are updated as an exponential moving average of the
    ``student_*`` modules (see ``update_momentum`` calls below) and only the
    student modules receive gradients (see ``configure_optimizers``). An
    ``OnlineLinearClassifier`` is trained on detached teacher features to
    monitor representation quality during pretraining.
    """
    def __init__(self, batch_size_per_device: int, num_classes: int) -> None:
        """Args:
            batch_size_per_device: Per-device batch size; used to scale the LR.
            num_classes: Number of classes for the online linear classifier.
        """
        super().__init__()
        self.save_hyperparameters()
        self.batch_size_per_device = batch_size_per_device
        resnet = resnet50()
        resnet.fc = Identity()  # Ignore classification head
        self.backbone = resnet
        self.projection_head = BYOLProjectionHead()
        # Student starts as an exact copy of the teacher backbone.
        self.student_backbone = copy.deepcopy(self.backbone)
        self.student_projection_head = BYOLProjectionHead()
        # Only the student has a prediction head; the teacher has none.
        self.student_prediction_head = BYOLPredictionHead()
        self.criterion = NegativeCosineSimilarity()
        self.online_classifier = OnlineLinearClassifier(num_classes=num_classes)
    def forward(self, x: Tensor) -> Tensor:
        """Return (teacher) backbone features for images ``x``."""
        return self.backbone(x)
    @torch.no_grad()
    def forward_teacher(self, x: Tensor) -> Tuple[Tensor, Tensor]:
        """Teacher forward pass (no gradients): returns (features, projections)."""
        features = self(x).flatten(start_dim=1)
        projections = self.projection_head(features)
        return features, projections
    def forward_student(self, x: Tensor) -> Tensor:
        """Student forward pass: backbone -> projection head -> prediction head."""
        features = self.student_backbone(x).flatten(start_dim=1)
        projections = self.student_projection_head(features)
        predictions = self.student_prediction_head(projections)
        return predictions
    def training_step(
        self, batch: Tuple[List[Tensor], Tensor, List[str]], batch_idx: int
    ) -> Tensor:
        """One BYOL step on a batch of (views, targets, filenames)."""
        # Momentum update teacher.
        # Settings follow original code for 100 epochs which are slightly different
        # from the paper, see:
        # https://github.com/deepmind/deepmind-research/blob/f5de0ede8430809180254ee957abf36ed62579ef/byol/configs/byol.py#L21-L23
        momentum = cosine_schedule(
            step=self.trainer.global_step,
            max_steps=self.trainer.estimated_stepping_batches,
            start_value=0.99,
            end_value=1.0,
        )
        # EMA: teacher weights (2nd arg) move towards student weights (1st arg).
        update_momentum(self.student_backbone, self.backbone, m=momentum)
        update_momentum(self.student_projection_head, self.projection_head, m=momentum)
        # Forward pass and loss calculation.
        views, targets = batch[0], batch[1]
        teacher_features_0, teacher_projections_0 = self.forward_teacher(views[0])
        _, teacher_projections_1 = self.forward_teacher(views[1])
        student_predictions_0 = self.forward_student(views[0])
        student_predictions_1 = self.forward_student(views[1])
        # Symmetric loss: each view's student prediction is matched against the
        # other view's teacher projection.
        # NOTE: Factor 2 because: L2(norm(x), norm(y)) = 2 - 2 * cossim(x, y)
        loss_0 = 2 * self.criterion(teacher_projections_0, student_predictions_1)
        loss_1 = 2 * self.criterion(teacher_projections_1, student_predictions_0)
        # NOTE: No mean because original code only takes mean over batch dimension, not
        # views.
        loss = loss_0 + loss_1
        self.log(
            "train_loss", loss, prog_bar=True, sync_dist=True, batch_size=len(targets)
        )
        # Online linear evaluation.
        cls_loss, cls_log = self.online_classifier.training_step(
            (teacher_features_0.detach(), targets), batch_idx
        )
        self.log_dict(cls_log, sync_dist=True, batch_size=len(targets))
        return loss + cls_loss
    def validation_step(
        self, batch: Tuple[Tensor, Tensor, List[str]], batch_idx: int
    ) -> Tensor:
        """Evaluate the online classifier on teacher features."""
        images, targets = batch[0], batch[1]
        features = self.forward(images).flatten(start_dim=1)
        cls_loss, cls_log = self.online_classifier.validation_step(
            (features.detach(), targets), batch_idx
        )
        self.log_dict(cls_log, prog_bar=True, sync_dist=True, batch_size=len(targets))
        return cls_loss
    def configure_optimizers(self):
        """LARS over the *student* modules only; teacher is EMA-updated."""
        # Don't use weight decay for batch norm, bias parameters, and classification
        # head to improve performance.
        params, params_no_weight_decay = get_weight_decay_parameters(
            [
                self.student_backbone,
                self.student_projection_head,
                self.student_prediction_head,
            ]
        )
        optimizer = LARS(
            [
                {"name": "byol", "params": params},
                {
                    "name": "byol_no_weight_decay",
                    "params": params_no_weight_decay,
                    "weight_decay": 0.0,
                },
                {
                    "name": "online_classifier",
                    "params": self.online_classifier.parameters(),
                    "weight_decay": 0.0,
                },
            ],
            # Settings follow original code for 100 epochs which are slightly different
            # from the paper, see:
            # https://github.com/deepmind/deepmind-research/blob/f5de0ede8430809180254ee957abf36ed62579ef/byol/configs/byol.py#L21-L23
            lr=0.45 * self.batch_size_per_device * self.trainer.world_size / 256,
            momentum=0.9,
            weight_decay=1e-6,
        )
        scheduler = {
            "scheduler": CosineWarmupScheduler(
                optimizer=optimizer,
                # 10-epoch warmup, expressed in steps.
                warmup_epochs=(
                    self.trainer.estimated_stepping_batches
                    / self.trainer.max_epochs
                    * 10
                ),
                max_epochs=self.trainer.estimated_stepping_batches,
            ),
            "interval": "step",
        }
        return [optimizer], [scheduler]
# BYOL uses same transform as SimCLR (two augmented views per image).
transform = SimCLRTransform()
| 6,095 | 39.912752 | 134 | py |
lightly | lightly-master/benchmarks/imagenet/resnet50/dcl.py | import math
from typing import List, Tuple
import torch
from pytorch_lightning import LightningModule
from torch import Tensor
from torch.nn import Identity
from torchvision.models import resnet50
from lightly.loss.dcl_loss import DCLLoss
from lightly.models.modules import SimCLRProjectionHead
from lightly.models.utils import get_weight_decay_parameters
from lightly.transforms import SimCLRTransform
from lightly.utils.benchmarking import OnlineLinearClassifier
from lightly.utils.lars import LARS
from lightly.utils.scheduler import CosineWarmupScheduler
class DCL(LightningModule):
    """Decoupled Contrastive Learning (DCL) pretraining module.

    Trains a ResNet-50 backbone with the DCL loss on two augmented views per
    image. An ``OnlineLinearClassifier`` is fitted on detached features to
    monitor representation quality during pretraining.
    """

    def __init__(self, batch_size_per_device: int, num_classes: int) -> None:
        """Args:
            batch_size_per_device: Per-device batch size; used to scale the LR.
            num_classes: Number of classes for the online linear classifier.
        """
        super().__init__()
        self.save_hyperparameters()
        self.batch_size_per_device = batch_size_per_device

        encoder = resnet50()
        encoder.fc = Identity()  # Drop the supervised classification head.
        self.backbone = encoder
        # DCL reuses the SimCLR projection head.
        self.projection_head = SimCLRProjectionHead()
        self.criterion = DCLLoss(temperature=0.1, gather_distributed=True)
        self.online_classifier = OnlineLinearClassifier(num_classes=num_classes)

    def forward(self, x: Tensor) -> Tensor:
        """Return backbone features for input images."""
        return self.backbone(x)

    def training_step(
        self, batch: Tuple[List[Tensor], Tensor, List[str]], batch_idx: int
    ) -> Tensor:
        """One DCL step on a batch of (views, targets, filenames)."""
        views, targets = batch[0], batch[1]
        num_views = len(views)

        # Single backbone pass over all views stacked along the batch dim.
        feats = self.forward(torch.cat(views)).flatten(start_dim=1)
        proj_a, proj_b = self.projection_head(feats).chunk(num_views)
        loss = self.criterion(proj_a, proj_b)
        self.log(
            "train_loss", loss, prog_bar=True, sync_dist=True, batch_size=len(targets)
        )

        # Online linear probe on detached features (no gradient to backbone).
        cls_loss, cls_log = self.online_classifier.training_step(
            (feats.detach(), targets.repeat(num_views)), batch_idx
        )
        self.log_dict(cls_log, sync_dist=True, batch_size=len(targets))
        return loss + cls_loss

    def validation_step(
        self, batch: Tuple[Tensor, Tensor, List[str]], batch_idx: int
    ) -> Tensor:
        """Evaluate the online classifier on backbone features."""
        images, targets = batch[0], batch[1]
        feats = self.forward(images).flatten(start_dim=1)
        cls_loss, cls_log = self.online_classifier.validation_step(
            (feats.detach(), targets), batch_idx
        )
        self.log_dict(cls_log, prog_bar=True, sync_dist=True, batch_size=len(targets))
        return cls_loss

    def configure_optimizers(self):
        """LARS with square-root LR scaling and cosine warmup schedule."""
        # Exclude batch norm, biases, and the classifier head from weight
        # decay, as this improves performance.
        decay_params, no_decay_params = get_weight_decay_parameters(
            [self.backbone, self.projection_head]
        )
        param_groups = [
            {"name": "dcl", "params": decay_params},
            {
                "name": "dcl_no_weight_decay",
                "params": no_decay_params,
                "weight_decay": 0.0,
            },
            {
                "name": "online_classifier",
                "params": self.online_classifier.parameters(),
                "weight_decay": 0.0,
            },
        ]
        # DCL follows SimCLR's square-root LR scaling, which works well for
        # batch sizes <= 2048 and short schedules (<= 200 epochs). For larger
        # setups, linear scaling can be used instead:
        # lr=0.3 * self.batch_size_per_device * self.trainer.world_size / 256
        # See Appendix B.1. in the SimCLR paper https://arxiv.org/abs/2002.05709
        global_batch_size = self.batch_size_per_device * self.trainer.world_size
        optimizer = LARS(
            param_groups,
            lr=0.075 * math.sqrt(global_batch_size),
            momentum=0.9,
            # Note: Paper uses weight decay of 1e-6 but reference code 1e-4. See:
            # https://github.com/google-research/simclr/blob/2fc637bdd6a723130db91b377ac15151e01e4fc2/README.md?plain=1#L103
            weight_decay=1e-6,
        )
        # 10-epoch warmup, expressed in optimizer steps.
        warmup_steps = (
            self.trainer.estimated_stepping_batches / self.trainer.max_epochs * 10
        )
        scheduler = {
            "scheduler": CosineWarmupScheduler(
                optimizer=optimizer,
                warmup_epochs=warmup_steps,
                max_epochs=self.trainer.estimated_stepping_batches,
            ),
            "interval": "step",
        }
        return [optimizer], [scheduler]
# DCL uses SimCLR augmentations (two augmented views per image).
transform = SimCLRTransform()
| 4,528 | 38.72807 | 124 | py |
lightly | lightly-master/benchmarks/imagenet/resnet50/dclw.py | import math
from typing import List, Tuple
import torch
from pytorch_lightning import LightningModule
from torch import Tensor
from torch.nn import Identity
from torchvision.models import resnet50
from lightly.loss.dcl_loss import DCLWLoss
from lightly.models.modules import SimCLRProjectionHead
from lightly.models.utils import get_weight_decay_parameters
from lightly.transforms import SimCLRTransform
from lightly.utils.benchmarking import OnlineLinearClassifier
from lightly.utils.lars import LARS
from lightly.utils.scheduler import CosineWarmupScheduler
class DCLW(LightningModule):
    """Decoupled Contrastive Learning with von Mises-Fisher weighting (DCLW).

    Trains a ResNet-50 backbone with the DCLW loss on two augmented views per
    image. An ``OnlineLinearClassifier`` is fitted on detached features to
    monitor representation quality during pretraining.
    """

    def __init__(self, batch_size_per_device: int, num_classes: int) -> None:
        """Args:
            batch_size_per_device: Per-device batch size; used to scale the LR.
            num_classes: Number of classes for the online linear classifier.
        """
        super().__init__()
        self.save_hyperparameters()
        self.batch_size_per_device = batch_size_per_device

        encoder = resnet50()
        encoder.fc = Identity()  # Drop the supervised classification head.
        self.backbone = encoder
        # DCLW reuses the SimCLR projection head.
        self.projection_head = SimCLRProjectionHead()
        self.criterion = DCLWLoss(temperature=0.1, sigma=0.5, gather_distributed=True)
        self.online_classifier = OnlineLinearClassifier(num_classes=num_classes)

    def forward(self, x: Tensor) -> Tensor:
        """Return backbone features for input images."""
        return self.backbone(x)

    def training_step(
        self, batch: Tuple[List[Tensor], Tensor, List[str]], batch_idx: int
    ) -> Tensor:
        """One DCLW step on a batch of (views, targets, filenames)."""
        views, targets = batch[0], batch[1]
        num_views = len(views)

        # Single backbone pass over all views stacked along the batch dim.
        feats = self.forward(torch.cat(views)).flatten(start_dim=1)
        proj_a, proj_b = self.projection_head(feats).chunk(num_views)
        loss = self.criterion(proj_a, proj_b)
        self.log(
            "train_loss", loss, prog_bar=True, sync_dist=True, batch_size=len(targets)
        )

        # Online linear probe on detached features (no gradient to backbone).
        cls_loss, cls_log = self.online_classifier.training_step(
            (feats.detach(), targets.repeat(num_views)), batch_idx
        )
        self.log_dict(cls_log, sync_dist=True, batch_size=len(targets))
        return loss + cls_loss

    def validation_step(
        self, batch: Tuple[Tensor, Tensor, List[str]], batch_idx: int
    ) -> Tensor:
        """Evaluate the online classifier on backbone features."""
        images, targets = batch[0], batch[1]
        feats = self.forward(images).flatten(start_dim=1)
        cls_loss, cls_log = self.online_classifier.validation_step(
            (feats.detach(), targets), batch_idx
        )
        self.log_dict(cls_log, prog_bar=True, sync_dist=True, batch_size=len(targets))
        return cls_loss

    def configure_optimizers(self):
        """LARS with square-root LR scaling and cosine warmup schedule."""
        # Exclude batch norm, biases, and the classifier head from weight
        # decay, as this improves performance.
        decay_params, no_decay_params = get_weight_decay_parameters(
            [self.backbone, self.projection_head]
        )
        param_groups = [
            {"name": "dclw", "params": decay_params},
            {
                "name": "dclw_no_weight_decay",
                "params": no_decay_params,
                "weight_decay": 0.0,
            },
            {
                "name": "online_classifier",
                "params": self.online_classifier.parameters(),
                "weight_decay": 0.0,
            },
        ]
        # DCLW follows SimCLR's square-root LR scaling, which works well for
        # batch sizes <= 2048 and short schedules (<= 200 epochs). For larger
        # setups, linear scaling can be used instead:
        # lr=0.3 * self.batch_size_per_device * self.trainer.world_size / 256
        # See Appendix B.1. in the SimCLR paper https://arxiv.org/abs/2002.05709
        global_batch_size = self.batch_size_per_device * self.trainer.world_size
        optimizer = LARS(
            param_groups,
            lr=0.075 * math.sqrt(global_batch_size),
            momentum=0.9,
            # Note: Paper uses weight decay of 1e-6 but reference code 1e-4. See:
            # https://github.com/google-research/simclr/blob/2fc637bdd6a723130db91b377ac15151e01e4fc2/README.md?plain=1#L103
            weight_decay=1e-6,
        )
        # 10-epoch warmup, expressed in optimizer steps.
        warmup_steps = (
            self.trainer.estimated_stepping_batches / self.trainer.max_epochs * 10
        )
        scheduler = {
            "scheduler": CosineWarmupScheduler(
                optimizer=optimizer,
                warmup_epochs=warmup_steps,
                max_epochs=self.trainer.estimated_stepping_batches,
            ),
            "interval": "step",
        }
        return [optimizer], [scheduler]
# DCLW uses SimCLR augmentations (two augmented views per image).
transform = SimCLRTransform()
| 4,547 | 38.894737 | 124 | py |
lightly | lightly-master/benchmarks/imagenet/resnet50/dino.py | import copy
from typing import List, Tuple, Union
import torch
from pytorch_lightning import LightningModule
from torch import Tensor
from torch.nn import Identity
from torch.optim import SGD
from torch.optim.optimizer import Optimizer
from torchvision.models import resnet50
from lightly.loss import DINOLoss
from lightly.models.modules import DINOProjectionHead
from lightly.models.utils import (
activate_requires_grad,
deactivate_requires_grad,
get_weight_decay_parameters,
update_momentum,
)
from lightly.transforms import DINOTransform
from lightly.utils.benchmarking import OnlineLinearClassifier
from lightly.utils.scheduler import CosineWarmupScheduler, cosine_schedule
class DINO(LightningModule):
def __init__(self, batch_size_per_device: int, num_classes: int) -> None:
super().__init__()
self.save_hyperparameters()
self.batch_size_per_device = batch_size_per_device
resnet = resnet50()
resnet.fc = Identity() # Ignore classification head
self.backbone = resnet
self.projection_head = DINOProjectionHead(freeze_last_layer=1)
self.student_backbone = copy.deepcopy(self.backbone)
self.student_projection_head = DINOProjectionHead()
self.criterion = DINOLoss()
self.online_classifier = OnlineLinearClassifier(num_classes=num_classes)
def forward(self, x: Tensor) -> Tensor:
return self.backbone(x)
def forward_student(self, x: Tensor) -> Tensor:
features = self.student_backbone(x).flatten(start_dim=1)
projections = self.student_projection_head(features)
return projections
def on_train_start(self) -> None:
deactivate_requires_grad(self.backbone)
deactivate_requires_grad(self.projection_head)
def on_train_end(self) -> None:
activate_requires_grad(self.backbone)
activate_requires_grad(self.projection_head)
def training_step(
self, batch: Tuple[List[Tensor], Tensor, List[str]], batch_idx: int
) -> Tensor:
# Momentum update teacher.
momentum = cosine_schedule(
step=self.trainer.global_step,
max_steps=self.trainer.estimated_stepping_batches,
start_value=0.996,
end_value=1.0,
)
update_momentum(self.student_backbone, self.backbone, m=momentum)
update_momentum(self.student_projection_head, self.projection_head, m=momentum)
views, targets = batch[0], batch[1]
global_views = torch.cat(views[:2])
local_views = torch.cat(views[2:])
teacher_features = self.forward(global_views).flatten(start_dim=1)
teacher_projections = self.projection_head(teacher_features)
student_projections = torch.cat(
[self.forward_student(global_views), self.forward_student(local_views)]
)
loss = self.criterion(
teacher_out=teacher_projections.chunk(2),
student_out=student_projections.chunk(len(views)),
epoch=self.current_epoch,
)
self.log_dict(
{"train_loss": loss, "ema_momentum": momentum},
prog_bar=True,
sync_dist=True,
batch_size=len(targets),
)
# Online classification.
cls_loss, cls_log = self.online_classifier.training_step(
(teacher_features.chunk(2)[0].detach(), targets), batch_idx
)
self.log_dict(cls_log, sync_dist=True, batch_size=len(targets))
return loss + cls_loss
def validation_step(
self, batch: Tuple[Tensor, Tensor, List[str]], batch_idx: int
) -> Tensor:
images, targets = batch[0], batch[1]
features = self.forward(images).flatten(start_dim=1)
cls_loss, cls_log = self.online_classifier.validation_step(
(features.detach(), targets), batch_idx
)
self.log_dict(cls_log, prog_bar=True, sync_dist=True, batch_size=len(targets))
return cls_loss
    def configure_optimizers(self):
        """Configures SGD with linear LR scaling and a cosine warmup schedule.

        Only the student modules and the online classifier are optimized; the
        teacher is updated via EMA in training_step.
        """
        # Don't use weight decay for batch norm, bias parameters, and classification
        # head to improve performance.
        params, params_no_weight_decay = get_weight_decay_parameters(
            [self.student_backbone, self.student_projection_head]
        )
        # For ResNet50 we use SGD instead of AdamW/LARS as recommended by the authors:
        # https://github.com/facebookresearch/dino#resnet-50-and-other-convnets-trainings
        optimizer = SGD(
            [
                {"name": "dino", "params": params},
                {
                    "name": "dino_no_weight_decay",
                    "params": params_no_weight_decay,
                    "weight_decay": 0.0,
                },
                {
                    "name": "online_classifier",
                    "params": self.online_classifier.parameters(),
                    "weight_decay": 0.0,
                },
            ],
            # Linear learning rate scaling with the global batch size.
            lr=0.03 * self.batch_size_per_device * self.trainer.world_size / 256,
            momentum=0.9,
            weight_decay=1e-4,
        )
        scheduler = {
            "scheduler": CosineWarmupScheduler(
                optimizer=optimizer,
                # Warm up for 10 epochs, expressed in optimizer steps.
                warmup_epochs=(
                    self.trainer.estimated_stepping_batches
                    / self.trainer.max_epochs
                    * 10
                ),
                max_epochs=self.trainer.estimated_stepping_batches,
            ),
            # Step the scheduler after every optimizer step, not every epoch.
            "interval": "step",
        }
        return [optimizer], [scheduler]
    def configure_gradient_clipping(
        self,
        optimizer: Optimizer,
        gradient_clip_val: Union[int, float, None] = None,
        gradient_clip_algorithm: Union[str, None] = None,
    ) -> None:
        """Clips gradients and freezes the last projection layer early on.

        Note: The passed `gradient_clip_val`/`gradient_clip_algorithm` are
        intentionally ignored; DINO always clips by norm with a fixed value.
        """
        self.clip_gradients(
            optimizer=optimizer,
            gradient_clip_val=3.0,
            gradient_clip_algorithm="norm",
        )
        # Cancels gradients of the projection head's last layer depending on
        # the current epoch — presumably the DINO "freeze last layer" trick.
        self.student_projection_head.cancel_last_layer_gradients(self.current_epoch)
# For ResNet50 we adjust crop scales as recommended by the authors:
# https://github.com/facebookresearch/dino#resnet-50-and-other-convnets-trainings
# Module-level transform picked up by the benchmark runner (METHODS in main.py).
transform = DINOTransform(global_crop_scale=(0.14, 1), local_crop_scale=(0.05, 0.14))
| 6,242 | 36.383234 | 89 | py |
lightly | lightly-master/benchmarks/imagenet/resnet50/finetune_eval.py | from pathlib import Path
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import DeviceStatsMonitor, LearningRateMonitor
from pytorch_lightning.loggers import TensorBoardLogger
from torch.nn import Module
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision import transforms as T
from lightly.data import LightlyDataset
from lightly.transforms.utils import IMAGENET_NORMALIZE
from lightly.utils.benchmarking import LinearClassifier, MetricCallback
from lightly.utils.scheduler import CosineWarmupScheduler
class FinetuneLinearClassifier(LinearClassifier):
    """Linear classifier variant that also optimizes the backbone parameters."""

    def configure_optimizers(self):
        # Unlike plain linear eval, the backbone (self.model) is trained too.
        trainable = [
            *self.classification_head.parameters(),
            *self.model.parameters(),
        ]
        optimizer = SGD(
            trainable,
            lr=0.05 * self.batch_size_per_device * self.trainer.world_size / 256,
            momentum=0.9,
            weight_decay=0.0,
        )
        lr_scheduler = CosineWarmupScheduler(
            optimizer=optimizer,
            warmup_epochs=0,
            max_epochs=self.trainer.estimated_stepping_batches,
        )
        # Step the cosine schedule once per optimizer step.
        return [optimizer], [{"scheduler": lr_scheduler, "interval": "step"}]
def finetune_eval(
    model: Module,
    train_dir: Path,
    val_dir: Path,
    log_dir: Path,
    batch_size_per_device: int,
    num_workers: int,
    accelerator: str,
    devices: int,
    precision: str,
    num_classes: int,
) -> None:
    """Runs fine-tune evaluation on the given model.

    Parameters follow SimCLR [0] settings.
    The most important settings are:
    - Backbone: Fine-tuned together with the classification head
      (freeze_model=False; the optimizer also updates the backbone parameters)
    - Epochs: 30
    - Optimizer: SGD
    - Base Learning Rate: 0.05
    - Momentum: 0.9
    - Weight Decay: 0.0
    - LR Schedule: Cosine without warmup
    References:
        - [0]: SimCLR, 2020, https://arxiv.org/abs/2002.05709
    """
    print("Running fine-tune evaluation...")
    # Setup training data.
    train_transform = T.Compose(
        [
            T.RandomResizedCrop(224),
            T.RandomHorizontalFlip(),
            T.ToTensor(),
            T.Normalize(mean=IMAGENET_NORMALIZE["mean"], std=IMAGENET_NORMALIZE["std"]),
        ]
    )
    train_dataset = LightlyDataset(input_dir=str(train_dir), transform=train_transform)
    train_dataloader = DataLoader(
        train_dataset,
        batch_size=batch_size_per_device,
        shuffle=True,
        num_workers=num_workers,
        drop_last=True,
        persistent_workers=True,
    )
    # Setup validation data. Deterministic resize + center crop, no augmentation.
    val_transform = T.Compose(
        [
            T.Resize(256),
            T.CenterCrop(224),
            T.ToTensor(),
            T.Normalize(mean=IMAGENET_NORMALIZE["mean"], std=IMAGENET_NORMALIZE["std"]),
        ]
    )
    val_dataset = LightlyDataset(input_dir=str(val_dir), transform=val_transform)
    val_dataloader = DataLoader(
        val_dataset,
        batch_size=batch_size_per_device,
        shuffle=False,
        num_workers=num_workers,
        persistent_workers=True,
    )
    # Train linear classifier (with unfrozen backbone).
    metric_callback = MetricCallback()
    trainer = Trainer(
        max_epochs=30,
        accelerator=accelerator,
        devices=devices,
        callbacks=[
            LearningRateMonitor(),
            DeviceStatsMonitor(),
            metric_callback,
        ],
        logger=TensorBoardLogger(save_dir=str(log_dir), name="finetune_eval"),
        precision=precision,
        strategy="ddp_find_unused_parameters_true",
    )
    classifier = FinetuneLinearClassifier(
        model=model,
        batch_size_per_device=batch_size_per_device,
        feature_dim=2048,
        num_classes=num_classes,
        freeze_model=False,
    )
    trainer.fit(
        model=classifier,
        train_dataloaders=train_dataloader,
        val_dataloaders=val_dataloader,
    )
    # Report the best top-1/top-5 accuracy seen across all validation epochs.
    for metric in ["val_top1", "val_top5"]:
        print(f"max finetune {metric}: {max(metric_callback.val_metrics[metric])}")
| 4,040 | 29.156716 | 88 | py |
lightly | lightly-master/benchmarks/imagenet/resnet50/knn_eval.py | from pathlib import Path
import torch
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.callbacks import DeviceStatsMonitor
from pytorch_lightning.loggers import TensorBoardLogger
from torch.utils.data import DataLoader
from torchvision import transforms as T
from lightly.data import LightlyDataset
from lightly.transforms.utils import IMAGENET_NORMALIZE
from lightly.utils.benchmarking import KNNClassifier, MetricCallback
def knn_eval(
    model: LightningModule,
    train_dir: Path,
    val_dir: Path,
    log_dir: Path,
    batch_size_per_device: int,
    num_workers: int,
    accelerator: str,
    devices: int,
    num_classes: int,
) -> None:
    """Runs KNN evaluation on the given model.

    Parameters follow InstDisc [0] settings.
    The most important settings are:
    - Num nearest neighbors: 200
    - Temperature: 0.1
    References:
        - [0]: InstDisc, 2018, https://arxiv.org/abs/1805.01978
    """
    print("Running KNN evaluation...")
    # Setup training data. The same deterministic transform is used for the
    # feature bank (train) and the query (val) images.
    transform = T.Compose(
        [
            T.Resize(256),
            T.CenterCrop(224),
            T.ToTensor(),
            T.Normalize(mean=IMAGENET_NORMALIZE["mean"], std=IMAGENET_NORMALIZE["std"]),
        ]
    )
    train_dataset = LightlyDataset(input_dir=str(train_dir), transform=transform)
    train_dataloader = DataLoader(
        train_dataset,
        batch_size=batch_size_per_device,
        shuffle=False,
        num_workers=num_workers,
        drop_last=False,
    )
    # Setup validation data.
    val_dataset = LightlyDataset(input_dir=str(val_dir), transform=transform)
    val_dataloader = DataLoader(
        val_dataset,
        batch_size=batch_size_per_device,
        shuffle=False,
        num_workers=num_workers,
    )
    classifier = KNNClassifier(
        model=model,
        num_classes=num_classes,
        # Half precision features keep the memory footprint of the bank small.
        feature_dtype=torch.float16,
    )
    # Run KNN evaluation. A single "epoch" builds the feature bank and scores
    # the validation set; no parameters are trained.
    metric_callback = MetricCallback()
    trainer = Trainer(
        max_epochs=1,
        accelerator=accelerator,
        devices=devices,
        logger=TensorBoardLogger(save_dir=str(log_dir), name="knn_eval"),
        callbacks=[
            DeviceStatsMonitor(),
            metric_callback,
        ],
        strategy="ddp_find_unused_parameters_true",
    )
    trainer.fit(
        model=classifier,
        train_dataloaders=train_dataloader,
        val_dataloaders=val_dataloader,
    )
    for metric in ["val_top1", "val_top5"]:
        print(f"knn {metric}: {max(metric_callback.val_metrics[metric])}")
| 2,573 | 26.978261 | 88 | py |
lightly | lightly-master/benchmarks/imagenet/resnet50/linear_eval.py | from pathlib import Path
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import DeviceStatsMonitor, LearningRateMonitor
from pytorch_lightning.loggers import TensorBoardLogger
from torch.nn import Module
from torch.utils.data import DataLoader
from torchvision import transforms as T
from lightly.data import LightlyDataset
from lightly.transforms.utils import IMAGENET_NORMALIZE
from lightly.utils.benchmarking import LinearClassifier, MetricCallback
def linear_eval(
    model: Module,
    train_dir: Path,
    val_dir: Path,
    log_dir: Path,
    batch_size_per_device: int,
    num_workers: int,
    accelerator: str,
    devices: int,
    precision: str,
    num_classes: int,
) -> None:
    """Runs a linear evaluation on the given model.

    Parameters follow SimCLR [0] settings.
    The most important settings are:
    - Backbone: Frozen
    - Epochs: 90
    - Optimizer: SGD
    - Base Learning Rate: 0.1
    - Momentum: 0.9
    - Weight Decay: 0.0
    - LR Schedule: Cosine without warmup
    References:
        - [0]: SimCLR, 2020, https://arxiv.org/abs/2002.05709
    """
    print("Running linear evaluation...")
    # Setup training data. Standard crop/flip augmentation for the classifier.
    train_transform = T.Compose(
        [
            T.RandomResizedCrop(224),
            T.RandomHorizontalFlip(),
            T.ToTensor(),
            T.Normalize(mean=IMAGENET_NORMALIZE["mean"], std=IMAGENET_NORMALIZE["std"]),
        ]
    )
    train_dataset = LightlyDataset(input_dir=str(train_dir), transform=train_transform)
    train_dataloader = DataLoader(
        train_dataset,
        batch_size=batch_size_per_device,
        shuffle=True,
        num_workers=num_workers,
        drop_last=True,
        persistent_workers=True,
    )
    # Setup validation data. Deterministic resize + center crop.
    val_transform = T.Compose(
        [
            T.Resize(256),
            T.CenterCrop(224),
            T.ToTensor(),
            T.Normalize(mean=IMAGENET_NORMALIZE["mean"], std=IMAGENET_NORMALIZE["std"]),
        ]
    )
    val_dataset = LightlyDataset(input_dir=str(val_dir), transform=val_transform)
    val_dataloader = DataLoader(
        val_dataset,
        batch_size=batch_size_per_device,
        shuffle=False,
        num_workers=num_workers,
        persistent_workers=True,
    )
    # Train linear classifier on top of the frozen backbone.
    metric_callback = MetricCallback()
    trainer = Trainer(
        max_epochs=90,
        accelerator=accelerator,
        devices=devices,
        callbacks=[
            LearningRateMonitor(),
            DeviceStatsMonitor(),
            metric_callback,
        ],
        logger=TensorBoardLogger(save_dir=str(log_dir), name="linear_eval"),
        precision=precision,
        strategy="ddp_find_unused_parameters_true",
    )
    classifier = LinearClassifier(
        model=model,
        batch_size_per_device=batch_size_per_device,
        feature_dim=2048,
        num_classes=num_classes,
        freeze_model=True,
    )
    trainer.fit(
        model=classifier,
        train_dataloaders=train_dataloader,
        val_dataloaders=val_dataloader,
    )
    # Report the best top-1/top-5 accuracy seen across all validation epochs.
    for metric in ["val_top1", "val_top5"]:
        print(f"max linear {metric}: {max(metric_callback.val_metrics[metric])}")
| 3,233 | 28.135135 | 88 | py |
lightly | lightly-master/benchmarks/imagenet/resnet50/main.py | from argparse import ArgumentParser
from datetime import datetime
from pathlib import Path
from typing import Sequence, Union
import byol
import dcl
import dclw
import dino
import finetune_eval
import knn_eval
import linear_eval
import simclr
import swav
import torch
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.callbacks import (
DeviceStatsMonitor,
EarlyStopping,
LearningRateMonitor,
)
from pytorch_lightning.loggers import TensorBoardLogger
from torch.utils.data import DataLoader
from torchvision import transforms as T
from lightly.data import LightlyDataset
from lightly.transforms.utils import IMAGENET_NORMALIZE
from lightly.utils.benchmarking import MetricCallback
# Command line interface for the ImageNet ResNet-50 benchmark runner.
parser = ArgumentParser("ImageNet ResNet50 Benchmarks")
parser.add_argument("--train-dir", type=Path, default="/datasets/imagenet/train")
parser.add_argument("--val-dir", type=Path, default="/datasets/imagenet/val")
parser.add_argument("--log-dir", type=Path, default="benchmark_logs")
parser.add_argument("--batch-size-per-device", type=int, default=128)
parser.add_argument("--epochs", type=int, default=100)
parser.add_argument("--num-workers", type=int, default=8)
parser.add_argument("--accelerator", type=str, default="gpu")
parser.add_argument("--devices", type=int, default=1)
parser.add_argument("--precision", type=str, default="16-mixed")
parser.add_argument("--compile-model", action="store_true")
# If --methods is omitted, all methods in METHODS are run (see main()).
parser.add_argument("--methods", type=str, nargs="+")
parser.add_argument("--num-classes", type=int, default=1000)
parser.add_argument("--skip-knn-eval", action="store_true")
parser.add_argument("--skip-linear-eval", action="store_true")
parser.add_argument("--skip-finetune-eval", action="store_true")

# Maps each CLI method name to its LightningModule class and the pretraining
# transform applied by the training dataloader.
METHODS = {
    "byol": {"model": byol.BYOL, "transform": byol.transform},
    "dcl": {"model": dcl.DCL, "transform": dcl.transform},
    "dclw": {"model": dclw.DCLW, "transform": dclw.transform},
    "dino": {"model": dino.DINO, "transform": dino.transform},
    "simclr": {"model": simclr.SimCLR, "transform": simclr.transform},
    "swav": {"model": swav.SwAV, "transform": swav.transform},
}
def main(
    train_dir: Path,
    val_dir: Path,
    log_dir: Path,
    batch_size_per_device: int,
    epochs: int,
    num_workers: int,
    accelerator: str,
    devices: int,
    precision: str,
    compile_model: bool,
    methods: Union[Sequence[str], None],
    num_classes: int,
    skip_knn_eval: bool,
    skip_linear_eval: bool,
    skip_finetune_eval: bool,
) -> None:
    """Runs pretraining and the requested evaluations for each method.

    For every method a timestamped log directory is created and, unless
    skipped, pretraining, KNN, linear, and fine-tune evaluation are run in
    sequence.

    Args:
        methods:
            Names of the methods to run (keys of METHODS). If None, all
            methods are run.
        epochs:
            Number of pretraining epochs; values <= 0 skip pretraining.
    """
    torch.set_float32_matmul_precision("high")
    method_names = methods or METHODS.keys()
    for method in method_names:
        # One timestamped log directory per method and run.
        method_dir = (
            log_dir / method / datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        ).resolve()
        model = METHODS[method]["model"](
            batch_size_per_device=batch_size_per_device, num_classes=num_classes
        )
        if compile_model and hasattr(torch, "compile"):
            # Compile model if PyTorch supports it.
            print("Compiling model...")
            model = torch.compile(model)
        if epochs <= 0:
            print("Epochs <= 0, skipping pretraining.")
        else:
            pretrain(
                model=model,
                method=method,
                train_dir=train_dir,
                val_dir=val_dir,
                log_dir=method_dir,
                batch_size_per_device=batch_size_per_device,
                epochs=epochs,
                num_workers=num_workers,
                accelerator=accelerator,
                devices=devices,
                precision=precision,
            )
        if skip_knn_eval:
            print("Skipping KNN eval.")
        else:
            knn_eval.knn_eval(
                model=model,
                num_classes=num_classes,
                train_dir=train_dir,
                val_dir=val_dir,
                log_dir=method_dir,
                batch_size_per_device=batch_size_per_device,
                num_workers=num_workers,
                accelerator=accelerator,
                devices=devices,
            )
        if skip_linear_eval:
            print("Skipping linear eval.")
        else:
            linear_eval.linear_eval(
                model=model,
                num_classes=num_classes,
                train_dir=train_dir,
                val_dir=val_dir,
                log_dir=method_dir,
                batch_size_per_device=batch_size_per_device,
                num_workers=num_workers,
                accelerator=accelerator,
                devices=devices,
                precision=precision,
            )
        if skip_finetune_eval:
            print("Skipping fine-tune eval.")
        else:
            finetune_eval.finetune_eval(
                model=model,
                num_classes=num_classes,
                train_dir=train_dir,
                val_dir=val_dir,
                log_dir=method_dir,
                batch_size_per_device=batch_size_per_device,
                num_workers=num_workers,
                accelerator=accelerator,
                devices=devices,
                precision=precision,
            )
def pretrain(
    model: LightningModule,
    method: str,
    train_dir: Path,
    val_dir: Path,
    log_dir: Path,
    batch_size_per_device: int,
    epochs: int,
    num_workers: int,
    accelerator: str,
    devices: int,
    precision: str,
) -> None:
    """Pretrains the model with the method-specific self-supervised transform.

    The training dataloader applies the transform registered for `method` in
    METHODS; validation uses a plain resize/center-crop pipeline so the
    online classifier metrics are comparable across methods.
    """
    print(f"Running pretraining for {method}...")
    # Setup training data.
    train_transform = METHODS[method]["transform"]
    train_dataset = LightlyDataset(input_dir=str(train_dir), transform=train_transform)
    train_dataloader = DataLoader(
        train_dataset,
        batch_size=batch_size_per_device,
        shuffle=True,
        num_workers=num_workers,
        drop_last=True,
        persistent_workers=True,
    )
    # Setup validation data.
    val_transform = T.Compose(
        [
            T.Resize(256),
            T.CenterCrop(224),
            T.ToTensor(),
            T.Normalize(mean=IMAGENET_NORMALIZE["mean"], std=IMAGENET_NORMALIZE["std"]),
        ]
    )
    val_dataset = LightlyDataset(input_dir=str(val_dir), transform=val_transform)
    val_dataloader = DataLoader(
        val_dataset,
        batch_size=batch_size_per_device,
        shuffle=False,
        num_workers=num_workers,
        persistent_workers=True,
    )
    # Train model.
    metric_callback = MetricCallback()
    trainer = Trainer(
        max_epochs=epochs,
        accelerator=accelerator,
        devices=devices,
        callbacks=[
            LearningRateMonitor(),
            # Stop if training loss diverges (becomes NaN/inf); the huge
            # patience effectively disables early stopping on plateaus.
            EarlyStopping(monitor="train_loss", patience=int(1e12), check_finite=True),
            DeviceStatsMonitor(),
            metric_callback,
        ],
        logger=TensorBoardLogger(save_dir=str(log_dir), name="pretrain"),
        precision=precision,
        strategy="ddp_find_unused_parameters_true",
        sync_batchnorm=True,
    )
    trainer.fit(
        model=model,
        train_dataloaders=train_dataloader,
        val_dataloaders=val_dataloader,
    )
    # Report the best online classifier accuracy observed during pretraining.
    for metric in ["val_online_cls_top1", "val_online_cls_top5"]:
        print(f"max {metric}: {max(metric_callback.val_metrics[metric])}")
if __name__ == "__main__":
    # Parse CLI arguments and dispatch to main().
    args = parser.parse_args()
    main(**vars(args))
| 7,370 | 30.909091 | 88 | py |
lightly | lightly-master/benchmarks/imagenet/resnet50/simclr.py | import math
from typing import List, Tuple
import torch
from pytorch_lightning import LightningModule
from torch import Tensor
from torch.nn import Identity
from torchvision.models import resnet50
from lightly.loss.ntx_ent_loss import NTXentLoss
from lightly.models.modules import SimCLRProjectionHead
from lightly.models.utils import get_weight_decay_parameters
from lightly.transforms import SimCLRTransform
from lightly.utils.benchmarking import OnlineLinearClassifier
from lightly.utils.lars import LARS
from lightly.utils.scheduler import CosineWarmupScheduler
class SimCLR(LightningModule):
    """SimCLR pretraining module with an online linear classifier.

    A ResNet-50 backbone (classification head replaced by Identity) is
    followed by a SimCLR projection head. Training minimizes the NT-Xent
    contrastive loss between two augmented views of each image, while an
    online linear classifier is trained on detached backbone features to
    monitor representation quality.
    """

    def __init__(self, batch_size_per_device: int, num_classes: int) -> None:
        """Initializes backbone, projection head, loss, and online classifier.

        Args:
            batch_size_per_device:
                Per-device batch size; used to scale the learning rate.
            num_classes:
                Number of classes for the online classifier.
        """
        super().__init__()
        self.save_hyperparameters()
        self.batch_size_per_device = batch_size_per_device
        resnet = resnet50()
        resnet.fc = Identity()  # Ignore classification head
        self.backbone = resnet
        self.projection_head = SimCLRProjectionHead()
        # gather_distributed=True collects negatives from all devices,
        # increasing the effective contrastive batch size.
        self.criterion = NTXentLoss(temperature=0.1, gather_distributed=True)
        self.online_classifier = OnlineLinearClassifier(num_classes=num_classes)

    def forward(self, x: Tensor) -> Tensor:
        """Returns backbone features for the given images."""
        return self.backbone(x)

    def training_step(
        self, batch: Tuple[List[Tensor], Tensor, List[str]], batch_idx: int
    ) -> Tensor:
        """Computes the NT-Xent loss plus the online classification loss."""
        views, targets = batch[0], batch[1]
        # Run all views through the backbone in a single forward pass.
        features = self.forward(torch.cat(views)).flatten(start_dim=1)
        z = self.projection_head(features)
        # Assumes exactly two views per image (the SimCLRTransform default).
        z0, z1 = z.chunk(len(views))
        loss = self.criterion(z0, z1)
        self.log(
            "train_loss", loss, prog_bar=True, sync_dist=True, batch_size=len(targets)
        )
        # Online classification on detached features; targets are repeated to
        # match the concatenated views.
        cls_loss, cls_log = self.online_classifier.training_step(
            (features.detach(), targets.repeat(len(views))), batch_idx
        )
        self.log_dict(cls_log, sync_dist=True, batch_size=len(targets))
        return loss + cls_loss

    def validation_step(
        self, batch: Tuple[Tensor, Tensor, List[str]], batch_idx: int
    ) -> Tensor:
        """Evaluates the online linear classifier on one validation batch."""
        images, targets = batch[0], batch[1]
        features = self.forward(images).flatten(start_dim=1)
        cls_loss, cls_log = self.online_classifier.validation_step(
            (features.detach(), targets), batch_idx
        )
        self.log_dict(cls_log, prog_bar=True, sync_dist=True, batch_size=len(targets))
        return cls_loss

    def configure_optimizers(self):
        """Configures LARS with sqrt LR scaling and a cosine warmup schedule."""
        # Don't use weight decay for batch norm, bias parameters, and classification
        # head to improve performance.
        params, params_no_weight_decay = get_weight_decay_parameters(
            [self.backbone, self.projection_head]
        )
        optimizer = LARS(
            [
                {"name": "simclr", "params": params},
                {
                    "name": "simclr_no_weight_decay",
                    "params": params_no_weight_decay,
                    "weight_decay": 0.0,
                },
                {
                    "name": "online_classifier",
                    "params": self.online_classifier.parameters(),
                    "weight_decay": 0.0,
                },
            ],
            # Square root learning rate scaling improves performance for small
            # batch sizes (<=2048) and few training epochs (<=200). Alternatively,
            # linear scaling can be used for larger batches and longer training:
            #   lr=0.3 * self.batch_size_per_device * self.trainer.world_size / 256
            # See Appendix B.1. in the SimCLR paper https://arxiv.org/abs/2002.05709
            lr=0.075 * math.sqrt(self.batch_size_per_device * self.trainer.world_size),
            momentum=0.9,
            # Note: Paper uses weight decay of 1e-6 but reference code 1e-4. See:
            # https://github.com/google-research/simclr/blob/2fc637bdd6a723130db91b377ac15151e01e4fc2/README.md?plain=1#L103
            weight_decay=1e-6,
        )
        scheduler = {
            "scheduler": CosineWarmupScheduler(
                optimizer=optimizer,
                # Warm up for 10 epochs, expressed in optimizer steps.
                warmup_epochs=(
                    self.trainer.estimated_stepping_batches
                    / self.trainer.max_epochs
                    * 10
                ),
                max_epochs=self.trainer.estimated_stepping_batches,
            ),
            "interval": "step",
        }
        return [optimizer], [scheduler]
# Module-level transform picked up by the benchmark runner (METHODS in main.py).
transform = SimCLRTransform()
| 4,429 | 38.553571 | 124 | py |
lightly | lightly-master/benchmarks/imagenet/resnet50/swav.py | import math
from typing import List, Tuple
import torch
from pytorch_lightning import LightningModule
from torch import Tensor
from torch.nn import Identity, ModuleList
from torch.nn import functional as F
from torchvision.models import resnet50
from lightly.loss.memory_bank import MemoryBankModule
from lightly.loss.swav_loss import SwaVLoss
from lightly.models.modules import SwaVProjectionHead, SwaVPrototypes
from lightly.models.utils import get_weight_decay_parameters
from lightly.transforms import SwaVTransform
from lightly.utils.benchmarking import OnlineLinearClassifier
from lightly.utils.lars import LARS
from lightly.utils.scheduler import CosineWarmupScheduler
CROP_COUNTS: Tuple[int, int] = (2, 6)
class SwAV(LightningModule):
    """SwAV pretraining module with an online linear classifier.

    A ResNet-50 backbone is followed by a SwAV projection head and learnable
    prototypes. Training assigns crops to prototype clusters via the SwAV
    loss; a memory-bank queue of past projections stabilizes the assignments
    for small batch sizes. An online linear classifier trained on detached
    features monitors representation quality.
    """

    def __init__(self, batch_size_per_device: int, num_classes: int) -> None:
        """Initializes backbone, projection head, prototypes, loss, and queues.

        Args:
            batch_size_per_device:
                Per-device batch size; also sizes the projection queues.
            num_classes:
                Number of classes for the online classifier.
        """
        super().__init__()
        self.save_hyperparameters()
        self.batch_size_per_device = batch_size_per_device
        resnet = resnet50()
        resnet.fc = Identity()  # Ignore classification head
        self.backbone = resnet
        self.projection_head = SwaVProjectionHead()
        # Prototypes are frozen during the first training step(s).
        self.prototypes = SwaVPrototypes(n_steps_frozen_prototypes=1)
        self.criterion = SwaVLoss(sinkhorn_gather_distributed=True)
        self.online_classifier = OnlineLinearClassifier(num_classes=num_classes)
        # Use a queue for small batch sizes (<= 256).
        self.start_queue_at_epoch = 15
        self.n_batches_in_queue = 15
        # One queue per high resolution crop.
        self.queues = ModuleList(
            [
                MemoryBankModule(
                    size=self.n_batches_in_queue * self.batch_size_per_device
                )
                for _ in range(CROP_COUNTS[0])
            ]
        )

    def forward(self, x: Tensor) -> Tensor:
        """Returns backbone features for the given images."""
        return self.backbone(x)

    def project(self, x: Tensor) -> Tensor:
        """Projects features and L2-normalizes them along the feature dim."""
        x = self.projection_head(x)
        return F.normalize(x, dim=1, p=2)

    def training_step(
        self, batch: Tuple[List[Tensor], Tensor, List[str]], batch_idx: int
    ) -> Tensor:
        """Computes the SwAV loss plus the online classification loss."""
        # Normalize the prototypes so they are on the unit sphere.
        self.prototypes.normalize()
        # The dataloader returns a list of image crops where the
        # first few items are high resolution crops and the rest are low
        # resolution crops.
        multi_crops, targets = batch[0], batch[1]
        # Forward pass through backbone and projection head.
        multi_crop_features = [
            self.forward(crops).flatten(start_dim=1) for crops in multi_crops
        ]
        multi_crop_projections = [
            self.project(features) for features in multi_crop_features
        ]
        # Get the queue projections and logits. The queue is only filled from
        # start_queue_at_epoch onward and only used once it holds
        # n_batches_in_queue batches.
        queue_crop_logits = None
        with torch.no_grad():
            if self.current_epoch >= self.start_queue_at_epoch:
                # Start filling the queue.
                queue_crop_projections = _update_queue(
                    projections=multi_crop_projections[: CROP_COUNTS[0]],
                    queues=self.queues,
                )
                if batch_idx > self.n_batches_in_queue:
                    # The queue is filled, so we can start using it.
                    queue_crop_logits = [
                        self.prototypes(projections, step=self.current_epoch)
                        for projections in queue_crop_projections
                    ]
        # Get the rest of the multi-crop logits.
        multi_crop_logits = [
            self.prototypes(projections, step=self.current_epoch)
            for projections in multi_crop_projections
        ]
        # Calculate the SwAV loss.
        loss = self.criterion(
            high_resolution_outputs=multi_crop_logits[: CROP_COUNTS[0]],
            low_resolution_outputs=multi_crop_logits[CROP_COUNTS[0] :],
            queue_outputs=queue_crop_logits,
        )
        self.log(
            "train_loss",
            loss,
            prog_bar=True,
            sync_dist=True,
            batch_size=len(targets),
        )
        # Calculate the classification loss on detached features of the first
        # high resolution crop.
        cls_loss, cls_log = self.online_classifier.training_step(
            (multi_crop_features[0].detach(), targets), batch_idx
        )
        self.log_dict(cls_log, sync_dist=True, batch_size=len(targets))
        return loss + cls_loss

    def validation_step(
        self, batch: Tuple[Tensor, Tensor, List[str]], batch_idx: int
    ) -> Tensor:
        """Evaluates the online linear classifier on one validation batch."""
        images, targets = batch[0], batch[1]
        features = self.forward(images).flatten(start_dim=1)
        cls_loss, cls_log = self.online_classifier.validation_step(
            (features.detach(), targets), batch_idx
        )
        self.log_dict(cls_log, prog_bar=True, sync_dist=True, batch_size=len(targets))
        return cls_loss

    def configure_optimizers(self):
        """Configures LARS with linear LR scaling and a cosine warmup schedule."""
        # Don't use weight decay for batch norm, bias parameters, and classification
        # head to improve performance.
        params, params_no_weight_decay = get_weight_decay_parameters(
            [self.backbone, self.projection_head, self.prototypes]
        )
        optimizer = LARS(
            [
                {"name": "swav", "params": params},
                {
                    "name": "swav_no_weight_decay",
                    "params": params_no_weight_decay,
                    "weight_decay": 0.0,
                },
                {
                    "name": "online_classifier",
                    "params": self.online_classifier.parameters(),
                    "weight_decay": 0.0,
                },
            ],
            # Smaller learning rate for smaller batches: lr=0.6 for batch_size=256
            # scaled linearly by batch size to lr=4.8 for batch_size=2048.
            # See Appendix A.1. and A.6. in SwAV paper https://arxiv.org/pdf/2006.09882.pdf
            lr=0.6 * (self.batch_size_per_device * self.trainer.world_size) / 256,
            momentum=0.9,
            weight_decay=1e-6,
        )
        scheduler = {
            "scheduler": CosineWarmupScheduler(
                optimizer=optimizer,
                # Warm up for 10 epochs, expressed in optimizer steps.
                warmup_epochs=(
                    self.trainer.estimated_stepping_batches
                    / self.trainer.max_epochs
                    * 10
                ),
                max_epochs=self.trainer.estimated_stepping_batches,
                # Final learning rate, also scaled linearly with batch size.
                end_value=0.0006
                * (self.batch_size_per_device * self.trainer.world_size)
                / 256,
            ),
            "interval": "step",
        }
        return [optimizer], [scheduler]
# Module-level transform picked up by the benchmark runner (METHODS in main.py).
transform = SwaVTransform(crop_counts=CROP_COUNTS)
@torch.no_grad()
def _update_queue(
projections: List[Tensor],
queues: ModuleList,
):
"""Adds the high resolution projections to the queues and returns the queues."""
if len(projections) != len(queues):
raise ValueError(
f"The number of queues ({len(queues)}) should be equal to the number of high "
f"resolution inputs ({len(projections)})."
)
# Get the queue projections
queue_projections = []
for i in range(len(queues)):
_, queue_proj = queues[i](projections[i], update=True)
# Queue projections are in (num_ftrs X queue_length) shape, while the high res
# projections are in (batch_size_per_device X num_ftrs). Swap the axes for interoperability.
queue_proj = torch.permute(queue_proj, (1, 0))
queue_projections.append(queue_proj)
return queue_projections
| 7,494 | 36.475 | 100 | py |
lightly | lightly-master/docs/README.md | # Documentation Guide
## Prerequisites
Make sure you installed dev dependencies:
```
pip install -r ../requirements/dev.txt
```
You may have to set up a clean environment (e.g. with Conda) and use setuptools from the parent directory:
```
conda create -n lightly python=3.7
conda activate lightly
pip install -e .["all"]
```
For building docs with python files (including tutorials) install detectron2.
This isn't handled in requirements because the version you'll need depends on your GPU/hardware.
[Follow instructions](https://detectron2.readthedocs.io/en/latest/tutorials/install.html)
## Build the Docs
`sphinx` provides a Makefile, so to build the `html` documentation, simply type:
```
make html
```
To build docs without running python files (tutorials) use
```
make html-noplot
```
Shortcut to build the docs (with env variables for active-learning tutorial) use:
```
LIGHTLY_SERVER_LOCATION='https://api.lightly.ai' LIGHTLY_TOKEN='YOUR_TOKEN' AL_TUTORIAL_DATASET_ID='YOUR_DATASET_ID' make html && python -m http.server 1234 -d build/html
```
You can host the docs after building using the following python command
`python -m http.server 1234 -d build/html` from the docs folder.
Open a browser and go to `http://localhost:1234` to see the documentation.
Once the docs are built they are cached in `docs/build`. A new build will only recompile changed files.
The cache can be cleared with `make clean`.
## Deploy the Docs
Only Lightly core team members will have access to deploy new docs.
1. Open a terminal and go to the `docs/` folder.
1. If not done yet, authenticate your account using `gcloud auth login`
1. Deploy to app engine using `gcloud app deploy app.yaml`
## Docstrings and Style Guide
We build our code based on the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html).
Important notes:
- Always use three double-quotes (`"""`).
- A function must have a docstring, unless it meets all of the following criteria: not externally visible, very short, obvious.
- Always use type hints when possible.
- Don't overlook the `Raises`.
- Use punctuation.
- Provide examples only for cli commands and core.py atm.
- **Please look carefully at the examples provided below (from the styleguide)**.
### Packages and Modules
Packages (i.e. the `__init__.py` files) and modules should start with a docstring describing the contents and usage of the package / module.
Example:
```python
"""A one line summary of the module or program, terminated by a period.
Leave one blank line. The rest of this docstring should contain an
overall description of the module or program. Optionally, it may also
contain a brief description of exported classes and functions and/or usage
examples.
Typical usage example:
foo = ClassFoo()
bar = foo.FunctionBar()
"""
````
### Functions
Example:
```python
def fetch_smalltable_rows(table_handle: smalltable.Table,
keys: Sequence[Union[bytes, str]],
require_all_keys: bool = False,
) -> Mapping[bytes, Tuple[str]]:
"""Fetches rows from a Smalltable.
Retrieves rows pertaining to the given keys from the Table instance
represented by table_handle. String keys will be UTF-8 encoded.
Args:
table_handle:
An open smalltable.Table instance.
keys:
A sequence of strings representing the key of each table row to
fetch. String keys will be UTF-8 encoded.
require_all_keys:
Optional; If require_all_keys is True only rows with values set
for all keys will be returned.
Returns:
A dict mapping keys to the corresponding table row data
fetched. Each row is represented as a tuple of strings. For
example:
{b'Serak': ('Rigel VII', 'Preparer'),
b'Zim': ('Irk', 'Invader'),
b'Lrrr': ('Omicron Persei 8', 'Emperor')}
Returned keys are always bytes. If a key from the keys argument is
missing from the dictionary, then that row was not found in the
table (and require_all_keys must have been False).
Raises:
IOError: An error occurred accessing the smalltable.
"""
```
### Classes
Attributes of a class should follow the same rules as the arguments for a function.
Example:
```python
class SampleClass:
"""Summary of class here.
Longer class information....
Longer class information....
Attributes:
likes_spam:
A boolean indicating if we like SPAM or not.
eggs:
An integer count of the eggs we have laid.
"""
def __init__(self, likes_spam=False):
"""Inits SampleClass with blah."""
self.likes_spam = likes_spam
self.eggs = 0
def public_method(self):
"""Performs operation blah."""
def public_method_2(self, x: str):
"""Performs operation blah 2.
Args:
x:
Some explanation for x.
"""
```
| 4,904 | 30.044304 | 170 | md |
lightly | lightly-master/docs/app.yaml | # When using the first time you need to authenticate your machine
# running `gcloud auth login` in your terminal.
#
# To deploy the app simply run `gcloud app deploy` from a terminal
# within the docs folder.
runtime: python27
api_version: 1
threadsafe: true
service: docs
handlers:
- url: /
secure: always
redirect_http_response_code: 301
static_files: build/html/index.html
upload: build/html/index.html
- url: /(.*)
secure: always
redirect_http_response_code: 301
static_files: build/html/\1
upload: build/html/(.*)
| 538 | 21.458333 | 66 | yaml |
lightly | lightly-master/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("../.."))
import sphinx_rtd_theme
import lightly
# -- Project information -----------------------------------------------------
project = "lightly"
copyright_year = "2020"
copyright = "Lightly AG"
website_url = "https://www.lightly.ai/"
author = "Philipp Wirth, Igor Susmelj"
# The full version, including alpha/beta/rc tags
release = lightly.__version__
master_doc = "index"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.napoleon",
"sphinx_rtd_theme",
"sphinx.ext.autosummary",
"sphinx_gallery.gen_gallery",
"sphinx_tabs.tabs",
"sphinx_copybutton",
"sphinx_design",
"sphinx_reredirects",
]
sphinx_gallery_conf = {
"examples_dirs": ["tutorials_source/package", "tutorials_source/platform"],
"gallery_dirs": [
"tutorials/package",
"tutorials/platform",
], # path to where to save gallery generated output
"filename_pattern": "/tutorial_",
}
napoleon_google_docstring = True
napoleon_numpy_docstring = False
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = False
napoleon_use_rtype = False
napoleon_type_aliases = None
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_options = {
"collapse_navigation": False, # set to false to prevent menu item collapse
"logo_only": True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_favicon = "favicon.png"
html_logo = "../logos/lightly_logo_crop_white_text.png"
# Exposes variables so that they can be used by django
html_context = {
"copyright_year": copyright_year,
"website_url": website_url,
}
| 3,329 | 29.833333 | 79 | py |
lightly | lightly-master/docs/source/_static/my-styles.css | body{
font-family:Arial, Helvetica, sans-serif;
}
h1{
font-family:Arial, Helvetica, sans-serif !important;
} | 116 | 18.5 | 56 | css |
lightly | lightly-master/docs/source/_templates/footer.html | <!--
Copied from https://github.com/readthedocs/sphinx_rtd_theme/blob/ddf840cb7206d7c45270560f077b12daa147f915/sphinx_rtd_theme/footer.html
Adapted to add a link to the website from the copyright notice in the footer; see the comment below
-->
<footer>
{% if (theme_prev_next_buttons_location == 'bottom' or theme_prev_next_buttons_location == 'both') and (next or prev) %}
<div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
{% if next %}
<a href="{{ next.link|e }}" class="btn btn-neutral float-right" title="{{ next.title|striptags|e }}" accesskey="n" rel="next">{{ _('Next') }} <span class="fa fa-arrow-circle-right"></span></a>
{% endif %}
{% if prev %}
<a href="{{ prev.link|e }}" class="btn btn-neutral float-left" title="{{ prev.title|striptags|e }}" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left"></span> {{ _('Previous') }}</a>
{% endif %}
</div>
{% endif %}
<hr/>
<div role="contentinfo">
<p>
{%- if show_copyright %}
{%- if hasdoc('copyright') %}
{% set path = pathto('copyright') %}
{% set copyright = copyright|e %}
© <a href="{{ path }}">{% trans %}Copyright{% endtrans %}</a> {{ copyright }}
{%- else %}
{% set copyright = copyright|e %}
<!-- Adapted to include link to website -->
© {% trans %}Copyright{% endtrans %} {{ copyright_year }}, <a href="{{ website_url }}">{{ copyright }}</a>
{%- endif %}
{%- endif %}
{%- if build_id and build_url %}
<span class="build">
{# Translators: Build is a noun, not a verb #}
{% trans %}Build{% endtrans %}
<a href="{{ build_url }}">{{ build_id }}</a>.
</span>
{%- elif commit %}
<span class="commit">
{% trans %}Revision{% endtrans %} <code>{{ commit }}</code>.
</span>
{%- elif last_updated %}
<span class="lastupdated">
{% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %}
</span>
{%- endif %}
</p>
</div>
{%- if show_sphinx %}
{% set sphinx_web = '<a href="http://sphinx-doc.org/">Sphinx</a>' %}
{% set readthedocs_web = '<a href="https://readthedocs.org">Read the Docs</a>' %}
{% trans sphinx_web=sphinx_web, readthedocs_web=readthedocs_web %}Built with {{ sphinx_web }} using a{% endtrans %} <a href="https://github.com/rtfd/sphinx_rtd_theme">{% trans %}theme{% endtrans %}</a> {% trans %}provided by {{ readthedocs_web }}{% endtrans %}.
{%- endif %}
{%- block extrafooter %} {% endblock %}
</footer>
| 2,704 | 44.083333 | 269 | html |
lightly | lightly-master/docs/source/_templates/layout.html | {% extends "!layout.html" %}
{# Custom CSS overrides #}
{% set bootswatch_css_custom = ['/_static/my-styles.css'] %}
<!--
Copied from https://github.com/readthedocs/sphinx_rtd_theme/blob/ddf840cb7206d7c45270560f077b12daa147f915/sphinx_rtd_theme/layout.html#L184-L205
We need this block to override the content section so that our customized footer.html is included
-->
{%- block content %}
{% if theme_style_external_links|tobool %}
<div class="rst-content style-external-links">
{% else %}
<div class="rst-content">
{% endif %}
{% include "breadcrumbs.html" %}
<div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
{%- block document %}
<div itemprop="articleBody">
{% block body %}{% endblock %}
</div>
{% if self.comments()|trim %}
<div class="articleComments">
{% block comments %}{% endblock %}
</div>
{% endif%}
</div>
{%- endblock %}
{% include "footer.html" %}
</div>
{%- endblock %}
{% block footer %}
{{ super() }}
<!-- Google Analytics -->
<script>
(function (i, s, o, g, r, a, m) {
i['GoogleAnalyticsObject'] = r; i[r] = i[r] || function () {
(i[r].q = i[r].q || []).push(arguments)
}, i[r].l = 1 * new Date(); a = s.createElement(o),
m = s.getElementsByTagName(o)[0]; a.async = 1; a.src = g; m.parentNode.insertBefore(a, m)
})(window, document, 'script', 'https://www.google-analytics.com/analytics.js', 'ga');
var host = window.location.hostname;
if (host != "localhost") {
ga('create', 'UA-147883152-5', 'auto');
ga('send', 'pageview');
}
</script>
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-147883152-5"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag() { dataLayer.push(arguments); }
gtag('js', new Date());
gtag('config', 'UA-147883152-5');
</script>
<style>
/* Sidebar header (and topbar for mobile) */
.wy-side-nav-search,
.wy-nav-top {
background: #092643;
}
/* Sidebar */
.wy-nav-side {
background: #092643;
}
/*body{
font-family: "Arial, Helvetica, sans-serif";
}*/
</style>
{% endblock %} | 2,190 | 25.083333 | 145 | html |
lightly | lightly-master/docs/source/docker/advanced/code_examples/load_model_from_checkpoint.py | from collections import OrderedDict
import torch
import lightly
def load_ckpt(ckpt_path, model_name="resnet-18", model_width=1, map_location="cpu"):
ckpt = torch.load(ckpt_path, map_location=map_location)
state_dict = OrderedDict()
for key, value in ckpt["state_dict"].items():
if ("projection_head" in key) or ("backbone.7" in key):
# drop layers used for projection head
continue
state_dict[key.replace("model.backbone.", "")] = value
resnet = lightly.models.ResNetGenerator(name=model_name, width=model_width)
model = torch.nn.Sequential(
lightly.models.batchnorm.get_norm_layer(3, 0),
*list(resnet.children())[:-1],
torch.nn.AdaptiveAvgPool2d(1),
torch.nn.Flatten(1),
)
try:
model.load_state_dict(state_dict)
except RuntimeError:
raise RuntimeError(
f"It looks like you tried loading a checkpoint from a model that is not a {model_name} with width={model_width}! "
f"Please set model_name and model_width to the lightly.model.name and lightly.model.width parameters from the "
f"configuration you used to run Lightly. The configuration from a Lightly worker run can be found in output_dir/config/config.yaml"
)
return model
# loading the model
model = load_ckpt("output_dir/lightly_epoch_X.ckpt")
# example usage
image_batch = torch.rand(16, 3, 224, 224)
out = model(image_batch)
print(out.shape) # prints: torch.Size([16, 512])
# creating a classifier from the pre-trained model
num_classes = 10
classifier = torch.nn.Sequential(
model, torch.nn.Linear(512, num_classes) # use 2048 instead of 512 for resnet-50
)
out = classifier(image_batch)
print(out.shape) # prints: torch.Size(16, 10)
| 1,782 | 32.018519 | 143 | py |
lightly | lightly-master/docs/source/docker/advanced/code_examples/python_create_dataset_azure_example.py | import json
import lightly
from lightly.openapi_generated.swagger_client.models.dataset_type import DatasetType
from lightly.openapi_generated.swagger_client.models.datasource_purpose import (
DatasourcePurpose,
)
# Create the Lightly client to connect to the API.
client = lightly.api.ApiWorkflowClient(token="YOUR_TOKEN")
# Create a new dataset on the Lightly Platform.
client.create_dataset("pedestrian-videos-datapool", dataset_type=DatasetType.VIDEOS)
# Azure Blob Storage
# Input bucket
client.set_azure_config(
container_name="my-container/input/",
account_name="ACCOUNT-NAME",
sas_token="SAS-TOKEN",
purpose=DatasourcePurpose.INPUT,
)
# Output bucket
client.set_azure_config(
container_name="my-container/output/",
account_name="ACCOUNT-NAME",
sas_token="SAS-TOKEN",
purpose=DatasourcePurpose.LIGHTLY,
)
| 852 | 27.433333 | 84 | py |
lightly | lightly-master/docs/source/docker/advanced/code_examples/python_create_dataset_gcs_example.py | import json
import lightly
from lightly.openapi_generated.swagger_client.models.dataset_type import DatasetType
from lightly.openapi_generated.swagger_client.models.datasource_purpose import (
DatasourcePurpose,
)
# Create the Lightly client to connect to the API.
client = lightly.api.ApiWorkflowClient(token="YOUR_TOKEN")
# Create a new dataset on the Lightly Platform.
client.create_dataset("pedestrian-videos-datapool", dataset_type=DatasetType.VIDEOS)
# Google Cloud Storage
# Input bucket
client.set_gcs_config(
resource_path="gs://bucket/input/",
project_id="PROJECT-ID",
credentials=json.dumps(json.load(open("credentials_read.json"))),
purpose=DatasourcePurpose.INPUT,
)
# Output bucket
client.set_gcs_config(
resource_path="gs://bucket/output/",
project_id="PROJECT-ID",
credentials=json.dumps(json.load(open("credentials_write.json"))),
purpose=DatasourcePurpose.LIGHTLY,
)
| 925 | 29.866667 | 84 | py |
lightly | lightly-master/docs/source/docker/advanced/code_examples/python_create_dataset_s3_example.py | import json
import lightly
from lightly.openapi_generated.swagger_client.models.dataset_type import DatasetType
from lightly.openapi_generated.swagger_client.models.datasource_purpose import (
DatasourcePurpose,
)
# Create the Lightly client to connect to the API.
client = lightly.api.ApiWorkflowClient(token="YOUR_TOKEN")
# Create a new dataset on the Lightly Platform.
client.create_dataset("pedestrian-videos-datapool", dataset_type=DatasetType.VIDEOS)
# AWS S3
# Input bucket
client.set_s3_config(
resource_path="s3://bucket/input/",
region="eu-central-1",
access_key="S3-ACCESS-KEY",
secret_access_key="S3-SECRET-ACCESS-KEY",
purpose=DatasourcePurpose.INPUT,
)
# Output bucket
client.set_s3_config(
resource_path="s3://bucket/output/",
region="eu-central-1",
access_key="S3-ACCESS-KEY",
secret_access_key="S3-SECRET-ACCESS-KEY",
purpose=DatasourcePurpose.LIGHTLY,
)
| 920 | 27.78125 | 84 | py |
lightly | lightly-master/docs/source/docker/advanced/code_examples/python_create_frame_predictions.py | import json
from pathlib import Path
from typing import Dict, List
import av
dataset_dir = Path("/datasets/my_dataset")
predictions_dir = dataset_dir / ".lightly" / "predictions" / "my_prediction_task"
def model_predict(frame) -> List[Dict]:
# This function must be overwritten to generate predictions for a frame using
# a prediction model of your choice. Here we just return an example prediction.
# See https://docs.lightly.ai/docker/advanced/datasource_predictions.html#prediction-format
# for possible prediction formats.
return [{"category_id": 0, "bbox": [0, 10, 100, 30], "score": 0.8}]
for video_path in dataset_dir.glob("**/*.mp4"):
# get predictions for frames
predictions = []
with av.open(str(video_path)) as container:
stream = container.streams.video[0]
for frame in container.decode(stream):
predictions.append(model_predict(frame.to_image()))
# save predictions
num_frames = len(predictions)
zero_padding = len(str(num_frames))
for frame_index, frame_predictions in enumerate(predictions):
video_name = video_path.relative_to(dataset_dir).with_suffix("")
frame_name = Path(
f"{video_name}-{frame_index:0{zero_padding}}-{video_path.suffix[1:]}.png"
)
prediction = {
"file_name": str(frame_name),
"predictions": frame_predictions,
}
out_path = predictions_dir / frame_name.with_suffix(".json")
out_path.parent.mkdir(parents=True, exist_ok=True)
with open(out_path, "w") as file:
json.dump(prediction, file)
# example directory structure before
# .
# ├── test
# │ └── video_0.mp4
# └── train
# ├── video_1.mp4
# └── video_2.mp4
#
# example directory structure after
# .
# ├── .lightly
# │ └── predictions
# │ └── my_prediction_task
# │ ├── test
# │ │ ├── video_0-000-mp4.json
# │ │ ├── video_0-001-mp4.json
# │ │ ├── video_0-002-mp4.json
# │ │ └── ...
# │ └── train
# │ ├── video_1-000-mp4.json
# │ ├── video_1-001-mp4.json
# │ ├── video_1-002-mp4.json
# | ├── ...
# | ├── video_2-000-mp4.json
# | ├── video_2-001-mp4.json
# | ├── video_2-002-mp4.json
# │ └── ...
# ├── test
# │ └── video_0.mp4
# └── train
# ├── video_1.mp4
# └── video_2.mp4
| 2,462 | 30.987013 | 95 | py |
lightly | lightly-master/docs/source/docker/advanced/code_examples/python_run_datapool_example.py | import lightly
# Create the Lightly client to connect to the API.
client = lightly.api.ApiWorkflowClient(token="YOUR_TOKEN")
# Let's fetch the dataset we created above, by name
client.set_dataset_id_by_name("pedestrian-videos-datapool")
# Schedule the compute run using our custom config.
# We show here the full default config so you can easily edit the
# values according to your needs.
client.schedule_compute_worker_run(
worker_config={
"enable_corruptness_check": True,
"remove_exact_duplicates": True,
"enable_training": False,
"pretagging": False,
"pretagging_debug": False,
},
selection_config={
"n_samples": 100,
"strategies": [
{
"input": {"type": "EMBEDDINGS"},
"strategy": {
"type": "DIVERSITY",
"stopping_condition_minimum_distance": 0.1,
},
}
],
},
lightly_config={
"loader": {
"batch_size": 128,
"shuffle": True,
"num_workers": -1,
"drop_last": True,
},
"model": {"name": "resnet-18", "out_dim": 128, "num_ftrs": 32, "width": 1},
"trainer": {"gpus": 1, "max_epochs": 1, "precision": 16},
"criterion": {"temperature": 0.5},
"optimizer": {"lr": 1, "weight_decay": 0.00001},
"collate": {
"input_size": 64,
"cj_prob": 0.8,
"cj_bright": 0.7,
"cj_contrast": 0.7,
"cj_sat": 0.7,
"cj_hue": 0.2,
"min_scale": 0.15,
"random_gray_scale": 0.2,
"gaussian_blur": 0.0,
"kernel_size": 0.1,
"vf_prob": 0,
"hf_prob": 0.5,
"rr_prob": 0,
},
},
)
| 1,812 | 29.216667 | 83 | py |
lightly | lightly-master/docs/source/docker/advanced/code_examples/python_run_datapool_example_2.py | import lightly
# Create the Lightly client to connect to the API.
client = lightly.api.ApiWorkflowClient(token="YOUR_TOKEN")
# Let's fetch the dataset we created above, by name
client.set_dataset_id_by_name("pedestrian-videos-datapool")
# Schedule the compute run using our custom config.
# We show here the full default config so you can easily edit the
# values according to your needs.
client.schedule_compute_worker_run(
worker_config={
"enable_corruptness_check": True,
"remove_exact_duplicates": True,
"enable_training": False,
"pretagging": False,
"pretagging_debug": False,
},
selection_config={
"n_samples": 100,
"strategies": [
{
"input": {"type": "EMBEDDINGS"},
"strategy": {
"type": "DIVERSITY",
"stopping_condition_minimum_distance": 0.2,
},
}
],
},
lightly_config={
"loader": {
"batch_size": 128,
"shuffle": True,
"num_workers": -1,
"drop_last": True,
},
"model": {"name": "resnet-18", "out_dim": 128, "num_ftrs": 32, "width": 1},
"trainer": {"gpus": 1, "max_epochs": 1, "precision": 16},
"criterion": {"temperature": 0.5},
"optimizer": {"lr": 1, "weight_decay": 0.00001},
"collate": {
"input_size": 64,
"cj_prob": 0.8,
"cj_bright": 0.7,
"cj_contrast": 0.7,
"cj_sat": 0.7,
"cj_hue": 0.2,
"min_scale": 0.15,
"random_gray_scale": 0.2,
"gaussian_blur": 0.0,
"kernel_size": 0.1,
"vf_prob": 0,
"hf_prob": 0.5,
"rr_prob": 0,
},
},
)
| 1,812 | 29.216667 | 83 | py |
lightly | lightly-master/docs/source/docker/advanced/code_examples/python_run_object_level.py | import json
import lightly
from lightly.openapi_generated.swagger_client.models.dataset_type import DatasetType
from lightly.openapi_generated.swagger_client.models.datasource_purpose import (
DatasourcePurpose,
)
# Create the Lightly client to connect to the API.
client = lightly.api.ApiWorkflowClient(token="YOUR_TOKEN")
# Create a new dataset on the Lightly Platform.
client.create_dataset("dataset-name", dataset_type=DatasetType.IMAGES)
# Pick one of the following three blocks depending on where your data is
# AWS S3
# Input bucket
client.set_s3_config(
resource_path="s3://bucket/input/",
region="eu-central-1",
access_key="S3-ACCESS-KEY",
secret_access_key="S3-SECRET-ACCESS-KEY",
purpose=DatasourcePurpose.INPUT,
)
# Output bucket
client.set_s3_config(
resource_path="s3://bucket/output/",
region="eu-central-1",
access_key="S3-ACCESS-KEY",
secret_access_key="S3-SECRET-ACCESS-KEY",
purpose=DatasourcePurpose.LIGHTLY,
)
# or Google Cloud Storage
# Input bucket
client.set_gcs_config(
resource_path="gs://bucket/input/",
project_id="PROJECT-ID",
credentials=json.dumps(json.load(open("credentials_read.json"))),
purpose=DatasourcePurpose.INPUT,
)
# Output bucket
client.set_gcs_config(
resource_path="gs://bucket/output/",
project_id="PROJECT-ID",
credentials=json.dumps(json.load(open("credentials_write.json"))),
purpose=DatasourcePurpose.LIGHTLY,
)
# or Azure Blob Storage
# Input bucket
client.set_azure_config(
container_name="my-container/input/",
account_name="ACCOUNT-NAME",
sas_token="SAS-TOKEN",
purpose=DatasourcePurpose.INPUT,
)
# Output bucket
client.set_azure_config(
container_name="my-container/output/",
account_name="ACCOUNT-NAME",
sas_token="SAS-TOKEN",
purpose=DatasourcePurpose.LIGHTLY,
)
# Schedule the docker run with the "object_level.task_name" argument set.
# All other settings are default values and we show them so you can easily edit
# the values according to your need.
client.schedule_compute_worker_run(
worker_config={
"enable_corruptness_check": True,
"remove_exact_duplicates": True,
"enable_training": False,
"pretagging": False,
"pretagging_debug": False,
"object_level": { # used for object level workflow
"task_name": "vehicles_object_detections"
},
},
selection_config={
"n_samples": 100,
"strategies": [
{
"input": {"type": "EMBEDDINGS"},
"strategy": {
"type": "DIVERSITY",
},
},
# Optionally, you can combine diversity selection with active learning
# to prefer selecting objects the model struggles with.
# If you want that, just include the following code:
"""
{
"input": {
"type": "SCORES",
"task": "vehicles_object_detections", # change to your task
"score": "uncertainty_entropy" # change to your preferred score
},
"strategy": {
"type": "WEIGHTS"
}
}
""",
],
},
lightly_config={
"loader": {
"batch_size": 16,
"shuffle": True,
"num_workers": -1,
"drop_last": True,
},
"model": {"name": "resnet-18", "out_dim": 128, "num_ftrs": 32, "width": 1},
"trainer": {"gpus": 1, "max_epochs": 100, "precision": 32},
"criterion": {"temperature": 0.5},
"optimizer": {"lr": 1, "weight_decay": 0.00001},
"collate": {
"input_size": 64,
"cj_prob": 0.8,
"cj_bright": 0.7,
"cj_contrast": 0.7,
"cj_sat": 0.7,
"cj_hue": 0.2,
"min_scale": 0.15,
"random_gray_scale": 0.2,
"gaussian_blur": 0.5,
"kernel_size": 0.1,
"vf_prob": 0,
"hf_prob": 0.5,
"rr_prob": 0,
},
},
)
| 4,122 | 29.316176 | 84 | py |
lightly | lightly-master/docs/source/docker/advanced/code_examples/python_run_object_level_pretagging.py | import json
import lightly
from lightly.openapi_generated.swagger_client.models.dataset_type import DatasetType
from lightly.openapi_generated.swagger_client.models.datasource_purpose import (
DatasourcePurpose,
)
# Create the Lightly client to connect to the API.
client = lightly.api.ApiWorkflowClient(token="YOUR_TOKEN")
# Create a new dataset on the Lightly Platform.
client.create_dataset("dataset-name", dataset_type=DatasetType.IMAGES)
# Pick one of the following three blocks depending on where your data is
# AWS S3
# Input bucket
client.set_s3_config(
resource_path="s3://bucket/input/",
region="eu-central-1",
access_key="S3-ACCESS-KEY",
secret_access_key="S3-SECRET-ACCESS-KEY",
purpose=DatasourcePurpose.INPUT,
)
# Output bucket
client.set_s3_config(
resource_path="s3://bucket/output/",
region="eu-central-1",
access_key="S3-ACCESS-KEY",
secret_access_key="S3-SECRET-ACCESS-KEY",
purpose=DatasourcePurpose.LIGHTLY,
)
# or Google Cloud Storage
# Input bucket
client.set_gcs_config(
resource_path="gs://bucket/input/",
project_id="PROJECT-ID",
credentials=json.dumps(json.load(open("credentials_read.json"))),
purpose=DatasourcePurpose.INPUT,
)
# Output bucket
client.set_gcs_config(
resource_path="gs://bucket/output/",
project_id="PROJECT-ID",
credentials=json.dumps(json.load(open("credentials_write.json"))),
purpose=DatasourcePurpose.LIGHTLY,
)
# or Azure Blob Storage
# Input bucket
client.set_azure_config(
container_name="my-container/input/",
account_name="ACCOUNT-NAME",
sas_token="SAS-TOKEN",
purpose=DatasourcePurpose.INPUT,
)
# Output bucket
client.set_azure_config(
container_name="my-container/output/",
account_name="ACCOUNT-NAME",
sas_token="SAS-TOKEN",
purpose=DatasourcePurpose.LIGHTLY,
)
# Schedule the docker run with the "object_level.task_name" argument set to
# "lightly_pretagging" and with "pretagging" set to True.
# All other settings are default values and we show them so you can easily edit
# the values according to your need.
client.schedule_compute_worker_run(
worker_config={
"enable_corruptness_check": True,
"remove_exact_duplicates": True,
"enable_training": False,
"pretagging": True,
"pretagging_debug": False,
"object_level": {"task_name": "lightly_pretagging"},
},
selection_config={
"n_samples": 100,
"strategies": [
{
"input": {"type": "EMBEDDINGS"},
"strategy": {
"type": "DIVERSITY",
},
}
],
},
lightly_config={
"loader": {
"batch_size": 16,
"shuffle": True,
"num_workers": -1,
"drop_last": True,
},
"model": {"name": "resnet-18", "out_dim": 128, "num_ftrs": 32, "width": 1},
"trainer": {"gpus": 1, "max_epochs": 100, "precision": 32},
"criterion": {"temperature": 0.5},
"optimizer": {"lr": 1, "weight_decay": 0.00001},
"collate": {
"input_size": 64,
"cj_prob": 0.8,
"cj_bright": 0.7,
"cj_contrast": 0.7,
"cj_sat": 0.7,
"cj_hue": 0.2,
"min_scale": 0.15,
"random_gray_scale": 0.2,
"gaussian_blur": 0.5,
"kernel_size": 0.1,
"vf_prob": 0,
"hf_prob": 0.5,
"rr_prob": 0,
},
},
)
| 3,506 | 27.983471 | 84 | py |
lightly | lightly-master/docs/source/docker/advanced/code_examples/python_run_pretagging.py | import json
import lightly
from lightly.openapi_generated.swagger_client.models.dataset_type import DatasetType
from lightly.openapi_generated.swagger_client.models.datasource_purpose import (
DatasourcePurpose,
)
# Create the Lightly client to connect to the API.
client = lightly.api.ApiWorkflowClient(token="YOUR_TOKEN")
# Create a new dataset on the Lightly Platform. In this example we use pretagging
# on images. We can also use videos instead by setting dataset_type=DatasetType.VIDEOS
client.create_dataset("your-dataset-name", dataset_type=DatasetType.IMAGES)
# Pick one of the following three blocks depending on where your data is
# AWS S3
# Input bucket
client.set_s3_config(
resource_path="s3://bucket/input/",
region="eu-central-1",
access_key="S3-ACCESS-KEY",
secret_access_key="S3-SECRET-ACCESS-KEY",
purpose=DatasourcePurpose.INPUT,
)
# Output bucket
client.set_s3_config(
resource_path="s3://bucket/output/",
region="eu-central-1",
access_key="S3-ACCESS-KEY",
secret_access_key="S3-SECRET-ACCESS-KEY",
purpose=DatasourcePurpose.LIGHTLY,
)
# or Google Cloud Storage
# Input bucket
client.set_gcs_config(
resource_path="gs://bucket/input/",
project_id="PROJECT-ID",
credentials=json.dumps(json.load(open("credentials_read.json"))),
purpose=DatasourcePurpose.INPUT,
)
# Output bucket
client.set_gcs_config(
resource_path="gs://bucket/output/",
project_id="PROJECT-ID",
credentials=json.dumps(json.load(open("credentials_write.json"))),
purpose=DatasourcePurpose.LIGHTLY,
)
# or Azure Blob Storage
# Input bucket
client.set_azure_config(
container_name="my-container/input/",
account_name="ACCOUNT-NAME",
sas_token="SAS-TOKEN",
purpose=DatasourcePurpose.INPUT,
)
# Output bucket
client.set_azure_config(
container_name="my-container/output/",
account_name="ACCOUNT-NAME",
sas_token="SAS-TOKEN",
purpose=DatasourcePurpose.LIGHTLY,
)
# Schedule the compute run using our custom config.
# We show here the full default config so you can easily edit the
# values according to your needs.
client.schedule_compute_worker_run(
worker_config={
"enable_corruptness_check": True,
"remove_exact_duplicates": True,
"enable_training": False,
"pretagging": True, # to enable pretagging
"pretagging_debug": True, # we also want debugging images in the report
},
selection_config={
"n_samples": 100,
"strategies": [
{
"input": {"type": "EMBEDDINGS"},
"strategy": {
"type": "DIVERSITY",
},
}
],
},
lightly_config={
"loader": {
"batch_size": 128,
"shuffle": True,
"num_workers": -1,
"drop_last": True,
},
"model": {"name": "resnet-18", "out_dim": 128, "num_ftrs": 32, "width": 1},
"trainer": {"gpus": 1, "max_epochs": 1, "precision": 16},
"criterion": {"temperature": 0.5},
"optimizer": {"lr": 1, "weight_decay": 0.00001},
"collate": {
"input_size": 64,
"cj_prob": 0.8,
"cj_bright": 0.7,
"cj_contrast": 0.7,
"cj_sat": 0.7,
"cj_hue": 0.2,
"min_scale": 0.15,
"random_gray_scale": 0.2,
"gaussian_blur": 0.0,
"kernel_size": 0.1,
"vf_prob": 0,
"hf_prob": 0.5,
"rr_prob": 0,
},
},
)
| 3,540 | 28.756303 | 86 | py |
lightly | lightly-master/docs/source/docker/advanced/code_examples/python_run_sequence_selection.py | import json
import lightly
from lightly.openapi_generated.swagger_client.models.dataset_type import DatasetType
from lightly.openapi_generated.swagger_client.models.datasource_purpose import (
DatasourcePurpose,
)
# Create the Lightly client to connect to the API.
client = lightly.api.ApiWorkflowClient(token="YOUR_TOKEN")
# Create a new dataset on the Lightly Platform.
client.create_dataset("pexels", dataset_type=DatasetType.VIDEOS)
# Pick one of the following three blocks depending on where your data is
# AWS S3
# Input bucket
client.set_s3_config(
resource_path="s3://bucket/input/",
region="eu-central-1",
access_key="S3-ACCESS-KEY",
secret_access_key="S3-SECRET-ACCESS-KEY",
purpose=DatasourcePurpose.INPUT,
)
# Output bucket
client.set_s3_config(
resource_path="s3://bucket/output/",
region="eu-central-1",
access_key="S3-ACCESS-KEY",
secret_access_key="S3-SECRET-ACCESS-KEY",
purpose=DatasourcePurpose.LIGHTLY,
)
# or Google Cloud Storage
# Input bucket
client.set_gcs_config(
resource_path="gs://bucket/input/",
project_id="PROJECT-ID",
credentials=json.dumps(json.load(open("credentials_read.json"))),
purpose=DatasourcePurpose.INPUT,
)
# Output bucket
client.set_gcs_config(
resource_path="gs://bucket/output/",
project_id="PROJECT-ID",
credentials=json.dumps(json.load(open("credentials_write.json"))),
purpose=DatasourcePurpose.LIGHTLY,
)
# or Azure Blob Storage
# Input bucket
client.set_azure_config(
container_name="my-container/input/",
account_name="ACCOUNT-NAME",
sas_token="SAS-TOKEN",
purpose=DatasourcePurpose.INPUT,
)
# Output bucket
client.set_azure_config(
container_name="my-container/output/",
account_name="ACCOUNT-NAME",
sas_token="SAS-TOKEN",
purpose=DatasourcePurpose.LIGHTLY,
)
# Schedule the compute run using our custom config.
# We show here the full default config so you can easily edit the
# values according to your needs.
client.schedule_compute_worker_run(
worker_config={
"enable_corruptness_check": False,
"remove_exact_duplicates": False,
"enable_training": False,
"pretagging": False,
"pretagging_debug": False,
"method": "coreset",
"stopping_condition": {
"n_samples": 200, # select 200 frames of length 10 frames -> 20 sequences
"min_distance": -1,
},
"selected_sequence_length": 10, # we want sequences of 10 frames lenght
},
lightly_config={
"loader": {
"batch_size": 128,
"shuffle": True,
"num_workers": -1,
"drop_last": True,
},
"model": {"name": "resnet-18", "out_dim": 128, "num_ftrs": 32, "width": 1},
"trainer": {"gpus": 1, "max_epochs": 1, "precision": 16},
"criterion": {"temperature": 0.5},
"optimizer": {"lr": 1, "weight_decay": 0.00001},
"collate": {
"input_size": 64,
"cj_prob": 0.8,
"cj_bright": 0.7,
"cj_contrast": 0.7,
"cj_sat": 0.7,
"cj_hue": 0.2,
"min_scale": 0.15,
"random_gray_scale": 0.2,
"gaussian_blur": 0.0,
"kernel_size": 0.1,
"vf_prob": 0,
"hf_prob": 0.5,
"rr_prob": 0,
},
},
)
| 3,355 | 28.699115 | 86 | py |
lightly | lightly-master/docs/source/docker/advanced/code_examples/semantic_segmentation_inference.py | import json
import os
import numpy as np
TASK_NAME = "lightly_semantic_segmentation"
CATEGORIES = ["background", "car", "person"]
def get_dummy_prediction(height: int = 500, width: int = 500):
"""Returns a dummy prediction of shape h x w x n_classes.
Height and width are in pixels.
"""
return np.random.rand(height, width, len(CATEGORIES))
def filename_to_json(filename: str):
"""Turns an image filename into the respective json filename."""
root, _ = os.path.splitext(filename)
return f"{root}.json"
def binary_to_rle(binary_mask: np.ndarray) -> np.ndarray:
"""Converts a binary segmentation mask to RLE."""
# Flatten mask and add -1 at beginning and end of array
flat = np.concatenate(([-1], np.ravel(binary_mask), [-1]))
# Find indices where a change to 0 or 1 happens
borders = np.nonzero(np.diff(flat))[0]
# Find counts of subsequent 0s and 1s
rle = np.diff(borders)
if flat[1]:
# The first value in the encoding must always be the count
# of initial 0s. If the mask starts with a 1 we must set
# this count to 0.
rle = np.concatenate(([0], rle))
return rle
def convert_to_lightly_prediction(filename: str, seg_map: np.ndarray):
"""Converts a segmentation map of shape W x H x C to Lightly format."""
seg_map_argmax = np.argmax(seg_map, axis=-1)
prediction = {"file_name": filename, "predictions": []}
for category_id in np.unique(seg_map_argmax):
rle = binary_to_rle(seg_map_argmax == category_id)
logits = np.mean(seg_map[seg_map_argmax == category_id], axis=0)
assert np.argmax(logits) == category_id
probabilities = np.exp(logits) / np.sum(np.exp(logits))
assert abs(np.sum(probabilities) - 1.0) < 1e-6
prediction["predictions"].append(
{
"category_id": int(category_id),
"segmentation": [int(r) for r in rle],
"score": float(probabilities[category_id]),
"probabilities": [float(p) for p in probabilities],
}
)
return prediction
# The following code will generate a tasks.json, a schema.json, and a dummy
# prediction file called my_image.json. To use them with the Lightly worker,
# arrange them as follows in a .lightly directory
#
# .lightly/
#  L predictions/
#     L tasks.json
#     L lightly_semantic_segmentation/
#        L schema.json
#        L // add the real prediction files here
#
# add tasks.json
tasks = [TASK_NAME]
with open("tasks.json", "w") as f:
    json.dump(tasks, f)

# add schema.json
schema = {
    "task_type": "semantic-segmentation",
    "categories": [
        {
            "id": i,
            "name": name,
        }
        for i, name in enumerate(CATEGORIES)
    ],
}
with open("schema.json", "w") as f:
    json.dump(schema, f)

# generate a dummy prediction and convert it to the Lightly format.
# Note: this now reuses convert_to_lightly_prediction() defined above;
# the original script duplicated that function's body inline.
filename = "my_image.png"
prediction = get_dummy_prediction()  # this is a h x w x n_classes numpy array
lightly_prediction = convert_to_lightly_prediction(filename, prediction)

# write the prediction file, named after the image (e.g. my_image.json)
with open(filename_to_json(filename), "w") as f:
    json.dump(lightly_prediction, f)
| 3,854 | 30.598361 | 78 | py |
lightly | lightly-master/docs/source/docker/integration/examples/create_dataset.py | import lightly
# Create the Lightly client to connect to the API.
# Replace "LIGHTLY_TOKEN" with the API token from your Lightly account.
client = lightly.api.ApiWorkflowClient(token="LIGHTLY_TOKEN")
# Create a new dataset on the Lightly Platform.
client.create_dataset("dataset-name")
# Connect the dataset to your cloud bucket.
# NOTE(review): the three blocks below are alternatives — configure only the
# one matching your storage provider; each call presumably replaces the
# previous datasource configuration (TODO confirm against the API docs).
# AWS S3
client.set_s3_config(
    resource_path="s3://bucket/dataset/",
    region="eu-central-1",
    access_key="ACCESS-KEY",
    secret_access_key="SECRET",
    thumbnail_suffix=None,
)
# Google Cloud Storage
import json

# The service-account JSON key is read from disk and passed as a string.
client.set_gcs_config(
    resource_path="gs://bucket/dataset/",
    project_id="PROJECT-ID",
    credentials=json.dumps(json.load(open("credentials.json"))),
    thumbnail_suffix=None,
)
# Azure Blob Storage
client.set_azure_config(
    container_name="container/dataset/",
    account_name="ACCOUNT-NAME",
    sas_token="SAS-TOKEN",
    thumbnail_suffix=None,
)
| 854 | 22.108108 | 64 | py |
lightly | lightly-master/docs/source/docker/integration/examples/trigger_job.py | import time
from lightly.openapi_generated.swagger_client import (
DockerRunScheduledState,
DockerRunState,
)
# You can reuse the client from previous scripts. If you want to create a new
# one you can uncomment the following line:
# import lightly
# client = lightly.api.ApiWorkflowClient(token="LIGHTLY_TOKEN", dataset_id="DATASET_ID")

# Schedule the compute run using a custom config.
# You can easily edit the values according to your needs.
# The returned id identifies the scheduled run and is used below to poll its
# progress.
scheduled_run_id = client.schedule_compute_worker_run(
    worker_config={
        "enable_corruptness_check": True,
        "remove_exact_duplicates": True,
        "enable_training": False,
    },
    # Select 50 samples by embedding diversity.
    selection_config={
        "n_samples": 50,
        "strategies": [
            {"input": {"type": "EMBEDDINGS"}, "strategy": {"type": "DIVERSITY"}}
        ],
    },
    lightly_config={
        "loader": {
            "batch_size": 16,
            "shuffle": True,
            # num_workers=-1 presumably means "auto-detect" — verify in docs.
            "num_workers": -1,
            "drop_last": True,
        },
        "model": {"name": "resnet-18", "out_dim": 128, "num_ftrs": 32, "width": 1},
        "trainer": {"gpus": 1, "max_epochs": 100, "precision": 32},
        "criterion": {"temperature": 0.5},
        "optimizer": {"lr": 1, "weight_decay": 0.00001},
        # Augmentation parameters for self-supervised training.
        "collate": {
            "input_size": 64,
            "cj_prob": 0.8,
            "cj_bright": 0.7,
            "cj_contrast": 0.7,
            "cj_sat": 0.7,
            "cj_hue": 0.2,
            "min_scale": 0.15,
            "random_gray_scale": 0.2,
            "gaussian_blur": 0.5,
            "kernel_size": 0.1,
            "vf_prob": 0,
            "hf_prob": 0.5,
            "rr_prob": 0,
        },
    },
)
"""
Optionally, You can use this code to track and print the state of the compute worker.
The loop will end once the compute worker run has finished, was canceled or aborted/failed.
"""
for run_info in client.compute_worker_run_info_generator(
scheduled_run_id=scheduled_run_id
):
print(
f"Compute worker run is now in state='{run_info.state}' with message='{run_info.message}'"
)
if run_info.ended_successfully():
print("SUCCESS")
else:
print("FAILURE")
| 2,169 | 28.726027 | 98 | py |
lightly | lightly-master/docs/source/docker_archive/advanced/code_examples/python_run_active_learning.py | import lightly
# Create the Lightly client to connect to the API.
client = lightly.api.ApiWorkflowClient(token="LIGHTLY_TOKEN", dataset_id="DATASET_ID")
# Schedule the docker run with
# - "active_learning.task_name" set to your task name
# - "method" set to "coral"
# All other settings are default values and we show them so you can easily edit
# the values according to your need.
client.schedule_compute_worker_run(
worker_config={
"enable_corruptness_check": True,
"remove_exact_duplicates": True,
"enable_training": False,
"pretagging": False,
"pretagging_debug": False,
"method": "coral",
"stopping_condition": {"n_samples": 0.1, "min_distance": -1},
"scorer": "object-frequency",
"scorer_config": {"frequency_penalty": 0.25, "min_score": 0.9},
"active_learning": {
"task_name": "my-classification-task",
"score_name": "uncertainty_margin",
},
},
lightly_config={
"loader": {
"batch_size": 16,
"shuffle": True,
"num_workers": -1,
"drop_last": True,
},
"model": {"name": "resnet-18", "out_dim": 128, "num_ftrs": 32, "width": 1},
"trainer": {"gpus": 1, "max_epochs": 100, "precision": 32},
"criterion": {"temperature": 0.5},
"optimizer": {"lr": 1, "weight_decay": 0.00001},
"collate": {
"input_size": 64,
"cj_prob": 0.8,
"cj_bright": 0.7,
"cj_contrast": 0.7,
"cj_sat": 0.7,
"cj_hue": 0.2,
"min_scale": 0.15,
"random_gray_scale": 0.2,
"gaussian_blur": 0.5,
"kernel_size": 0.1,
"vf_prob": 0,
"hf_prob": 0.5,
"rr_prob": 0,
},
},
)
| 1,832 | 32.327273 | 86 | py |
lightly | lightly-master/docs/source/docker_archive/advanced/code_examples/python_run_object_level.py | import lightly
# Create the Lightly client to connect to the API.
# Replace the placeholders with your own API token and dataset id.
client = lightly.api.ApiWorkflowClient(token="LIGHTLY_TOKEN", dataset_id="DATASET_ID")
# Schedule the docker run with the "object_level.task_name" argument set.
# All other settings are default values and we show them so you can easily edit
# the values according to your need.
client.schedule_compute_worker_run(
    worker_config={
        # Run selection on object crops taken from this prediction task.
        "object_level": {"task_name": "vehicles_object_detections"},
        "enable_corruptness_check": True,
        "remove_exact_duplicates": True,
        "enable_training": False,
        "pretagging": False,
        "pretagging_debug": False,
        "method": "coreset",
        "stopping_condition": {"n_samples": 0.1, "min_distance": -1},
        "scorer": "object-frequency",
        "scorer_config": {"frequency_penalty": 0.25, "min_score": 0.9},
        "active_learning": {"task_name": "", "score_name": "uncertainty_margin"},
    },
    lightly_config={
        "loader": {
            "batch_size": 16,
            "shuffle": True,
            "num_workers": -1,
            "drop_last": True,
        },
        "model": {"name": "resnet-18", "out_dim": 128, "num_ftrs": 32, "width": 1},
        "trainer": {"gpus": 1, "max_epochs": 100, "precision": 32},
        "criterion": {"temperature": 0.5},
        "optimizer": {"lr": 1, "weight_decay": 0.00001},
        # Augmentation parameters for self-supervised training.
        "collate": {
            "input_size": 64,
            "cj_prob": 0.8,
            "cj_bright": 0.7,
            "cj_contrast": 0.7,
            "cj_sat": 0.7,
            "cj_hue": 0.2,
            "min_scale": 0.15,
            "random_gray_scale": 0.2,
            "gaussian_blur": 0.5,
            "kernel_size": 0.1,
            "vf_prob": 0,
            "hf_prob": 0.5,
            "rr_prob": 0,
        },
    },
)
| 1,805 | 34.411765 | 86 | py |
lightly | lightly-master/docs/source/docker_archive/advanced/code_examples/python_run_object_level_pretagging.py | import lightly
# Create the Lightly client to connect to the API.
# Replace the placeholders with your own API token and dataset id.
client = lightly.api.ApiWorkflowClient(token="LIGHTLY_TOKEN", dataset_id="DATASET_ID")
# Schedule the docker run with the "object_level.task_name" argument set to
# "lightly_pretagging" and with "pretagging" set to True.
# All other settings are default values and we show them so you can easily edit
# the values according to your need.
client.schedule_compute_worker_run(
    worker_config={
        # Use Lightly's built-in pretagging model as the object-level task.
        "object_level": {"task_name": "lightly_pretagging"},
        "enable_corruptness_check": True,
        "remove_exact_duplicates": True,
        "enable_training": False,
        "pretagging": True,
        "pretagging_debug": False,
        "method": "coreset",
        "stopping_condition": {"n_samples": 0.1, "min_distance": -1},
        "scorer": "object-frequency",
        "scorer_config": {"frequency_penalty": 0.25, "min_score": 0.9},
        "active_learning": {"task_name": "", "score_name": "uncertainty_margin"},
    },
    lightly_config={
        "loader": {
            "batch_size": 16,
            "shuffle": True,
            "num_workers": -1,
            "drop_last": True,
        },
        "model": {"name": "resnet-18", "out_dim": 128, "num_ftrs": 32, "width": 1},
        "trainer": {"gpus": 1, "max_epochs": 100, "precision": 32},
        "criterion": {"temperature": 0.5},
        "optimizer": {"lr": 1, "weight_decay": 0.00001},
        # Augmentation parameters for self-supervised training.
        "collate": {
            "input_size": 64,
            "cj_prob": 0.8,
            "cj_bright": 0.7,
            "cj_contrast": 0.7,
            "cj_sat": 0.7,
            "cj_hue": 0.2,
            "min_scale": 0.15,
            "random_gray_scale": 0.2,
            "gaussian_blur": 0.5,
            "kernel_size": 0.1,
            "vf_prob": 0,
            "hf_prob": 0.5,
            "rr_prob": 0,
        },
    },
)
| 1,856 | 34.711538 | 86 | py |
lightly | lightly-master/docs/source/docker_archive/integration/examples/create_dataset.py | import lightly
# Create the Lightly client to connect to the API.
# Replace "LIGHTLY_TOKEN" with the API token from your Lightly account.
client = lightly.api.ApiWorkflowClient(token="LIGHTLY_TOKEN")
# Create a new dataset on the Lightly Platform.
client.create_dataset("dataset-name")
# Connect the dataset to your cloud bucket.
# NOTE(review): the three blocks below are alternatives — configure only the
# one matching your storage provider; each call presumably replaces the
# previous datasource configuration (TODO confirm against the API docs).
# AWS S3
client.set_s3_config(
    resource_path="s3://bucket/dataset/",
    region="eu-central-1",
    access_key="ACCESS-KEY",
    secret_access_key="SECRET",
    thumbnail_suffix=None,
)
# Google Cloud Storage
import json

# The service-account JSON key is read from disk and passed as a string.
client.set_gcs_config(
    resource_path="gs://bucket/dataset/",
    project_id="PROJECT-ID",
    credentials=json.dumps(json.load(open("credentials.json"))),
    thumbnail_suffix=None,
)
# Azure Blob Storage
client.set_azure_config(
    container_name="container/dataset/",
    account_name="ACCOUNT-NAME",
    sas_token="SAS-TOKEN",
    thumbnail_suffix=None,
)
| 854 | 22.108108 | 64 | py |
lightly | lightly-master/docs/source/docker_archive/integration/examples/trigger_job.py | # You can reuse the client from the previous script. If you want to create a new
# one you can uncomment the following line:
# client = lightly.api.ApiWorkflowClient(token="LIGHTLY_TOKEN", dataset_id="DATASET_ID")
# Schedule the compute run using our custom config.
# We show here the full default config so you can easily edit the
# values according to your needs.
client.schedule_compute_worker_run(
    worker_config={
        "enable_corruptness_check": True,
        "remove_exact_duplicates": True,
        "enable_training": False,
        "pretagging": False,
        "pretagging_debug": False,
        "method": "coreset",
        # n_samples < 1 is presumably a fraction of the dataset (here 10%);
        # TODO confirm against the worker documentation.
        "stopping_condition": {"n_samples": 0.1, "min_distance": -1},
        "scorer": "object-frequency",
        "scorer_config": {"frequency_penalty": 0.25, "min_score": 0.9},
    },
    lightly_config={
        "loader": {
            "batch_size": 16,
            "shuffle": True,
            "num_workers": -1,
            "drop_last": True,
        },
        "model": {"name": "resnet-18", "out_dim": 128, "num_ftrs": 32, "width": 1},
        "trainer": {"gpus": 1, "max_epochs": 100, "precision": 32},
        "criterion": {"temperature": 0.5},
        "optimizer": {"lr": 1, "weight_decay": 0.00001},
        # Augmentation parameters for self-supervised training.
        "collate": {
            "input_size": 64,
            "cj_prob": 0.8,
            "cj_bright": 0.7,
            "cj_contrast": 0.7,
            "cj_sat": 0.7,
            "cj_hue": 0.2,
            "min_scale": 0.15,
            "random_gray_scale": 0.2,
            "gaussian_blur": 0.5,
            "kernel_size": 0.1,
            "vf_prob": 0,
            "hf_prob": 0.5,
            "rr_prob": 0,
        },
    },
)
| 1,675 | 33.916667 | 88 | py |
lightly | lightly-master/docs/source/getting_started/benchmarks/cifar10_benchmark.py | # -*- coding: utf-8 -*-
"""
Benchmark Results
Updated: 27.03.2023 (42a6a924b1b6d5b6cc89a6b2a0a0942cc4af93ab)
------------------------------------------------------------------------------------------
| Model | Batch Size | Epochs | KNN Test Accuracy | Time | Peak GPU Usage |
------------------------------------------------------------------------------------------
| BarlowTwins | 128 | 200 | 0.842 | 375.9 Min | 1.7 GByte |
| BYOL | 128 | 200 | 0.869 | 121.9 Min | 1.6 GByte |
| DCL | 128 | 200 | 0.844 | 102.2 Min | 1.5 GByte |
| DCLW | 128 | 200 | 0.833 | 100.4 Min | 1.5 GByte |
| DINO | 128 | 200 | 0.840 | 120.3 Min | 1.6 GByte |
| FastSiam | 128 | 200 | 0.906 | 164.0 Min | 2.7 GByte |
| Moco | 128 | 200 | 0.838 | 128.8 Min | 1.7 GByte |
| NNCLR | 128 | 200 | 0.834 | 101.5 Min | 1.5 GByte |
| SimCLR | 128 | 200 | 0.847 | 97.7 Min | 1.5 GByte |
| SimSiam | 128 | 200 | 0.819 | 97.3 Min | 1.6 GByte |
| SwaV | 128 | 200 | 0.812 | 99.6 Min | 1.5 GByte |
| SMoG | 128 | 200 | 0.743 | 192.2 Min | 1.2 GByte |
------------------------------------------------------------------------------------------
| BarlowTwins | 512 | 200 | 0.819 | 153.3 Min | 5.1 GByte |
| BYOL | 512 | 200 | 0.868 | 108.3 Min | 5.6 GByte |
| DCL | 512 | 200 | 0.840 | 88.2 Min | 4.9 GByte |
| DCLW | 512 | 200 | 0.824 | 87.9 Min | 4.9 GByte |
| DINO | 512 | 200 | 0.813 | 108.6 Min | 5.0 GByte |
| FastSiam | 512 | 200 | 0.788 | 146.9 Min | 9.5 GByte |
| Moco (*) | 512 | 200 | 0.847 | 112.2 Min | 5.6 GByte |
| NNCLR (*) | 512 | 200 | 0.815 | 88.1 Min | 5.0 GByte |
| SimCLR | 512 | 200 | 0.848 | 87.1 Min | 4.9 GByte |
| SimSiam | 512 | 200 | 0.764 | 87.8 Min | 5.0 GByte |
| SwaV | 512 | 200 | 0.842 | 88.7 Min | 4.9 GByte |
| SMoG | 512 | 200 | 0.686 | 110.0 Min | 3.4 GByte |
------------------------------------------------------------------------------------------
| BarlowTwins | 512 | 800 | 0.859 | 517.5 Min | 7.9 GByte |
| BYOL | 512 | 800 | 0.910 | 400.9 Min | 5.4 GByte |
| DCL | 512 | 800 | 0.874 | 334.6 Min | 4.9 GByte |
| DCLW | 512 | 800 | 0.871 | 333.3 Min | 4.9 GByte |
| DINO | 512 | 800 | 0.848 | 405.2 Min | 5.0 GByte |
| FastSiam | 512 | 800 | 0.902 | 582.0 Min | 9.5 GByte |
| Moco (*) | 512 | 800 | 0.899 | 417.8 Min | 5.4 GByte |
| NNCLR (*) | 512 | 800 | 0.892 | 335.0 Min | 5.0 GByte |
| SimCLR | 512 | 800 | 0.879 | 331.1 Min | 4.9 GByte |
| SimSiam | 512 | 800 | 0.904 | 333.7 Min | 5.1 GByte |
| SwaV | 512 | 800 | 0.884 | 330.5 Min | 5.0 GByte |
| SMoG | 512 | 800 | 0.800 | 415.6 Min | 3.2 GByte |
------------------------------------------------------------------------------------------
(*): Increased size of memory bank from 4096 to 8192 to avoid too quickly
changing memory bank due to larger batch size.
The benchmarks were created on a single NVIDIA RTX A6000.
Note that this benchmark also supports a multi-GPU setup. If you run it on
a system with multiple GPUs make sure that you kill all the processes when
killing the application. Due to the way we setup this benchmark the distributed
processes might continue the benchmark if one of the nodes is killed.
If you know how to fix this don't hesitate to create an issue or PR :)
"""
import copy
import os
import time
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torchvision
from pytorch_lightning.loggers import TensorBoardLogger
from lightly.data import LightlyDataset
from lightly.loss import (
BarlowTwinsLoss,
DCLLoss,
DCLWLoss,
DINOLoss,
NegativeCosineSimilarity,
NTXentLoss,
SwaVLoss,
memory_bank,
)
from lightly.models import ResNetGenerator, modules, utils
from lightly.models.modules import heads
from lightly.transforms import (
DINOTransform,
FastSiamTransform,
SimCLRTransform,
SimSiamTransform,
SMoGTransform,
SwaVTransform,
)
from lightly.transforms.utils import IMAGENET_NORMALIZE
from lightly.utils.benchmarking import BenchmarkModule
# Tensorboard logs are written below this directory.
logs_root_dir = os.path.join(os.getcwd(), "benchmark_logs")
# set max_epochs to 800 for long run (takes around 10h on a single V100)
max_epochs = 200
num_workers = 8
# kNN evaluation parameters; presumably consumed by BenchmarkModule —
# TODO confirm (knn_k: neighbour count, knn_t: temperature).
knn_k = 200
knn_t = 0.1
classes = 10
# Set to True to enable Distributed Data Parallel training.
distributed = False
# Set to True to enable Synchronized Batch Norm (requires distributed=True).
# If enabled the batch norm is calculated over all gpus, otherwise the batch
# norm is only calculated from samples on the same gpu.
sync_batchnorm = False
# Set to True to gather features from all gpus before calculating
# the loss (requires distributed=True).
# If enabled then the loss on every gpu is calculated with features from all
# gpus, otherwise only features from the same gpu are used.
gather_distributed = False
# benchmark
n_runs = 1  # optional, increase to create multiple runs and report mean + std
batch_size = 128
lr_factor = batch_size / 128  # scales the learning rate linearly with batch size

# Number of devices and hardware to use for training.
devices = torch.cuda.device_count() if torch.cuda.is_available() else 1
accelerator = "gpu" if torch.cuda.is_available() else "cpu"

if distributed:
    strategy = "ddp"
    # reduce batch size for distributed training
    batch_size = batch_size // devices
else:
    strategy = None  # Set to "auto" if using PyTorch Lightning >= 2.0
    # limit to single device if not using distributed training
    devices = min(devices, 1)
# Adapted from our MoCo Tutorial on CIFAR-10
#
# Replace the path with the location of your CIFAR-10 dataset.
# We assume we have a train folder with subfolders
# for each class and .png images inside.
#
# You can download `CIFAR-10 in folders from kaggle
# <https://www.kaggle.com/swaroopkml/cifar10-pngs-in-folders>`_.
# The dataset structure should be like this:
# cifar10/train/
# L airplane/
# L 10008_airplane.png
# L ...
# L automobile/
# L bird/
# L cat/
# L deer/
# L dog/
# L frog/
# L horse/
# L ship/
# L truck/
path_to_train = "/datasets/cifar10/train/"
path_to_test = "/datasets/cifar10/test/"
# Use SimCLR augmentations
simclr_transform = SimCLRTransform(
input_size=32,
cj_strength=0.5,
gaussian_blur=0.0,
)
# Use SimSiam augmentations
simsiam_transform = SimSiamTransform(
input_size=32,
gaussian_blur=0.0,
)
# Multi crop augmentation for FastSiam
fast_siam_transform = FastSiamTransform(input_size=32, gaussian_blur=0.0)
# Multi crop augmentation for SwAV, additionally, disable blur for cifar10
swav_transform = SwaVTransform(
crop_sizes=[32],
crop_counts=[2], # 2 crops @ 32x32px
crop_min_scales=[0.14],
cj_strength=0.5,
gaussian_blur=0,
)
# Multi crop augmentation for DINO, additionally, disable blur for cifar10
dino_transform = DINOTransform(
global_crop_size=32,
n_local_views=0,
cj_strength=0.5,
gaussian_blur=(0, 0, 0),
)
# Two crops for SMoG
smog_transform = SMoGTransform(
crop_sizes=(32, 32),
crop_counts=(1, 1),
cj_strength=0.5,
gaussian_blur_probs=(0.0, 0.0),
crop_min_scales=(0.2, 0.2),
crop_max_scales=(1.0, 1.0),
)
# No additional augmentations for the test set
test_transforms = torchvision.transforms.Compose(
[
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
mean=IMAGENET_NORMALIZE["mean"],
std=IMAGENET_NORMALIZE["std"],
),
]
)
# we use test transformations for getting the feature for kNN on train data
dataset_train_kNN = LightlyDataset(input_dir=path_to_train, transform=test_transforms)
dataset_test = LightlyDataset(input_dir=path_to_test, transform=test_transforms)
def create_dataset_train_ssl(model):
    """Helper method to apply the correct transform for ssl.

    Args:
        model:
            Model class for which to select the transform.
    """
    # Each benchmark model class maps to the augmentation pipeline it trains with.
    transform_by_model = {
        BarlowTwinsModel: simclr_transform,
        BYOLModel: simclr_transform,
        DCL: simclr_transform,
        DCLW: simclr_transform,
        DINOModel: dino_transform,
        FastSiamModel: fast_siam_transform,
        MocoModel: simclr_transform,
        NNCLRModel: simclr_transform,
        SimCLRModel: simclr_transform,
        SimSiamModel: simsiam_transform,
        SwaVModel: swav_transform,
        SMoGModel: smog_transform,
    }
    return LightlyDataset(
        input_dir=path_to_train, transform=transform_by_model[model]
    )
def get_data_loaders(batch_size: int, dataset_train_ssl):
    """Helper method to create dataloaders for ssl, kNN train and kNN test.

    Args:
        batch_size: Desired batch size for all dataloaders.
    """

    def _make_loader(dataset, shuffle, drop_last):
        # All three loaders share batch size and worker count.
        return torch.utils.data.DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=shuffle,
            drop_last=drop_last,
            num_workers=num_workers,
        )

    dataloader_train_ssl = _make_loader(dataset_train_ssl, shuffle=True, drop_last=True)
    dataloader_train_kNN = _make_loader(dataset_train_kNN, shuffle=False, drop_last=False)
    dataloader_test = _make_loader(dataset_test, shuffle=False, drop_last=False)
    return dataloader_train_ssl, dataloader_train_kNN, dataloader_test
class MocoModel(BenchmarkModule):
    """MoCo benchmark model: ResNet-18 with a momentum encoder and memory bank."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # create a ResNet backbone and remove the classification head
        # (split batch norm is used unless sync batch norm is enabled)
        num_splits = 0 if sync_batchnorm else 8
        resnet = ResNetGenerator("resnet-18", num_splits=num_splits)
        self.backbone = nn.Sequential(
            *list(resnet.children())[:-1], nn.AdaptiveAvgPool2d(1)
        )

        # create a moco model based on ResNet
        self.projection_head = heads.MoCoProjectionHead(512, 512, 128)
        # Momentum copies receive no gradients; they are updated in
        # training_step via utils.update_momentum.
        self.backbone_momentum = copy.deepcopy(self.backbone)
        self.projection_head_momentum = copy.deepcopy(self.projection_head)
        utils.deactivate_requires_grad(self.backbone_momentum)
        utils.deactivate_requires_grad(self.projection_head_momentum)

        # create our loss with the optional memory bank
        self.criterion = NTXentLoss(
            temperature=0.1,
            memory_bank_size=4096,
        )

    def forward(self, x):
        x = self.backbone(x).flatten(start_dim=1)
        return self.projection_head(x)

    def training_step(self, batch, batch_idx):
        (x0, x1), _, _ = batch

        # update momentum
        utils.update_momentum(self.backbone, self.backbone_momentum, 0.99)
        utils.update_momentum(self.projection_head, self.projection_head_momentum, 0.99)

        def step(x0_, x1_):
            # The key batch is shuffled before the momentum encoder and
            # unshuffled afterwards — presumably to counteract batch-norm
            # information leakage (cf. the MoCo paper).
            x1_, shuffle = utils.batch_shuffle(x1_, distributed=distributed)
            x0_ = self.backbone(x0_).flatten(start_dim=1)
            x0_ = self.projection_head(x0_)

            x1_ = self.backbone_momentum(x1_).flatten(start_dim=1)
            x1_ = self.projection_head_momentum(x1_)
            x1_ = utils.batch_unshuffle(x1_, shuffle, distributed=distributed)
            return x0_, x1_

        # We use a symmetric loss (model trains faster at little compute overhead)
        # https://colab.research.google.com/github/facebookresearch/moco/blob/colab-notebook/colab/moco_cifar10_demo.ipynb
        loss_1 = self.criterion(*step(x0, x1))
        loss_2 = self.criterion(*step(x1, x0))

        loss = 0.5 * (loss_1 + loss_2)
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        # Only the student backbone and projection head are optimized.
        params = list(self.backbone.parameters()) + list(
            self.projection_head.parameters()
        )
        optim = torch.optim.SGD(
            params,
            lr=6e-2 * lr_factor,
            momentum=0.9,
            weight_decay=5e-4,
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
class SimCLRModel(BenchmarkModule):
    """SimCLR benchmark model: ResNet-18 backbone, projection head, NT-Xent loss."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # Backbone: ResNet-18 without its classification head.
        resnet = ResNetGenerator("resnet-18")
        trunk = list(resnet.children())[:-1]
        self.backbone = nn.Sequential(*trunk, nn.AdaptiveAvgPool2d(1))
        self.projection_head = heads.SimCLRProjectionHead(512, 512, 128)
        self.criterion = NTXentLoss()

    def forward(self, x):
        # Embed the image and project it into the contrastive space.
        features = self.backbone(x).flatten(start_dim=1)
        return self.projection_head(features)

    def training_step(self, batch, batch_index):
        (view_a, view_b), _, _ = batch
        loss = self.criterion(self.forward(view_a), self.forward(view_b))
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        optim = torch.optim.SGD(
            self.parameters(), lr=6e-2 * lr_factor, momentum=0.9, weight_decay=5e-4
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
class SimSiamModel(BenchmarkModule):
    """SimSiam benchmark model: stop-gradient siamese network, no negatives."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # create a ResNet backbone and remove the classification head
        resnet = ResNetGenerator("resnet-18")
        self.backbone = nn.Sequential(
            *list(resnet.children())[:-1], nn.AdaptiveAvgPool2d(1)
        )
        self.prediction_head = heads.SimSiamPredictionHead(2048, 512, 2048)
        # use a 2-layer projection head for cifar10 as described in the paper
        self.projection_head = heads.ProjectionHead(
            [
                (512, 2048, nn.BatchNorm1d(2048), nn.ReLU(inplace=True)),
                (2048, 2048, nn.BatchNorm1d(2048), None),
            ]
        )
        self.criterion = NegativeCosineSimilarity()

    def forward(self, x):
        f = self.backbone(x).flatten(start_dim=1)
        z = self.projection_head(f)
        p = self.prediction_head(z)
        # stop-gradient on the projection: only the prediction branch
        # receives gradients from the loss
        z = z.detach()
        return z, p

    def training_step(self, batch, batch_idx):
        (x0, x1), _, _ = batch
        z0, p0 = self.forward(x0)
        z1, p1 = self.forward(x1)
        # symmetric negative cosine similarity between crossed branches
        loss = 0.5 * (self.criterion(z0, p1) + self.criterion(z1, p0))
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        optim = torch.optim.SGD(
            self.parameters(),
            lr=6e-2,  # no lr-scaling, results in better training stability
            momentum=0.9,
            weight_decay=5e-4,
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
class FastSiamModel(SimSiamModel):
    """FastSiam: SimSiam variant trained on multiple views per image."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)

    def training_step(self, batch, batch_idx):
        views, _, _ = batch
        features = [self.forward(view) for view in views]
        zs = torch.stack([z for z, _ in features])
        ps = torch.stack([p for _, p in features])

        loss = 0.0
        for i in range(len(views)):
            # Each prediction is compared with the mean projection of all
            # OTHER views (the mask excludes view i).
            mask = torch.arange(len(views), device=self.device) != i
            loss += self.criterion(ps[i], torch.mean(zs[mask], dim=0)) / len(views)

        self.log("train_loss_ssl", loss)
        return loss
class BarlowTwinsModel(BenchmarkModule):
    """Barlow Twins benchmark model: ResNet-18 backbone + 2-layer projector."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # Backbone: ResNet-18 without its classification head.
        resnet = ResNetGenerator("resnet-18")
        trunk = list(resnet.children())[:-1]
        self.backbone = nn.Sequential(*trunk, nn.AdaptiveAvgPool2d(1))
        # A 2-layer projection head is used for cifar10 as described in the paper.
        self.projection_head = heads.ProjectionHead(
            [
                (512, 2048, nn.BatchNorm1d(2048), nn.ReLU(inplace=True)),
                (2048, 2048, None, None),
            ]
        )
        self.criterion = BarlowTwinsLoss(gather_distributed=gather_distributed)

    def forward(self, x):
        embedding = self.backbone(x).flatten(start_dim=1)
        return self.projection_head(embedding)

    def training_step(self, batch, batch_index):
        (view_a, view_b), _, _ = batch
        loss = self.criterion(self.forward(view_a), self.forward(view_b))
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        optim = torch.optim.SGD(
            self.parameters(), lr=6e-2 * lr_factor, momentum=0.9, weight_decay=5e-4
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
class BYOLModel(BenchmarkModule):
    """BYOL benchmark model: online/momentum network pair, no negatives."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # create a ResNet backbone and remove the classification head
        resnet = ResNetGenerator("resnet-18")
        self.backbone = nn.Sequential(
            *list(resnet.children())[:-1], nn.AdaptiveAvgPool2d(1)
        )

        # create a byol model based on ResNet
        self.projection_head = heads.BYOLProjectionHead(512, 1024, 256)
        self.prediction_head = heads.BYOLPredictionHead(256, 1024, 256)

        # Momentum (target) copies receive no gradients; they are updated
        # via utils.update_momentum in training_step.
        self.backbone_momentum = copy.deepcopy(self.backbone)
        self.projection_head_momentum = copy.deepcopy(self.projection_head)
        utils.deactivate_requires_grad(self.backbone_momentum)
        utils.deactivate_requires_grad(self.projection_head_momentum)

        self.criterion = NegativeCosineSimilarity()

    def forward(self, x):
        # Online network: backbone -> projection -> prediction.
        y = self.backbone(x).flatten(start_dim=1)
        z = self.projection_head(y)
        p = self.prediction_head(z)
        return p

    def forward_momentum(self, x):
        # Target network: momentum backbone -> momentum projection, detached.
        y = self.backbone_momentum(x).flatten(start_dim=1)
        z = self.projection_head_momentum(y)
        z = z.detach()
        return z

    def training_step(self, batch, batch_idx):
        utils.update_momentum(self.backbone, self.backbone_momentum, m=0.99)
        utils.update_momentum(
            self.projection_head, self.projection_head_momentum, m=0.99
        )
        (x0, x1), _, _ = batch
        p0 = self.forward(x0)
        z0 = self.forward_momentum(x0)
        p1 = self.forward(x1)
        z1 = self.forward_momentum(x1)
        # symmetric loss between crossed online/target outputs
        loss = 0.5 * (self.criterion(p0, z1) + self.criterion(p1, z0))
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        # Only the online network is optimized.
        params = (
            list(self.backbone.parameters())
            + list(self.projection_head.parameters())
            + list(self.prediction_head.parameters())
        )
        optim = torch.optim.SGD(
            params,
            lr=6e-2 * lr_factor,
            momentum=0.9,
            weight_decay=5e-4,
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
class SwaVModel(BenchmarkModule):
    """SwAV benchmark model: online clustering against learned prototypes."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # create a ResNet backbone and remove the classification head
        resnet = ResNetGenerator("resnet-18")
        self.backbone = nn.Sequential(
            *list(resnet.children())[:-1], nn.AdaptiveAvgPool2d(1)
        )

        self.projection_head = heads.SwaVProjectionHead(512, 512, 128)
        self.prototypes = heads.SwaVPrototypes(128, 512)  # use 512 prototypes

        self.criterion = SwaVLoss(sinkhorn_gather_distributed=gather_distributed)

    def forward(self, x):
        x = self.backbone(x).flatten(start_dim=1)
        x = self.projection_head(x)
        # L2-normalize before computing prototype similarities.
        x = nn.functional.normalize(x, dim=1, p=2)
        return self.prototypes(x)

    def training_step(self, batch, batch_idx):
        # normalize the prototypes so they are on the unit sphere
        self.prototypes.normalize()

        # the multi-crop dataloader returns a list of image crops where the
        # first two items are the high resolution crops and the rest are low
        # resolution crops
        multi_crops, _, _ = batch
        multi_crop_features = [self.forward(x) for x in multi_crops]

        # split list of crop features into high and low resolution
        high_resolution_features = multi_crop_features[:2]
        low_resolution_features = multi_crop_features[2:]

        # calculate the SwaV loss
        loss = self.criterion(high_resolution_features, low_resolution_features)

        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        optim = torch.optim.Adam(
            self.parameters(),
            lr=1e-3 * lr_factor,
            weight_decay=1e-6,
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
class NNCLRModel(BenchmarkModule):
    """NNCLR benchmark model: SimCLR-style training where projections are
    swapped with entries from a nearest-neighbour memory bank."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # create a ResNet backbone and remove the classification head
        resnet = ResNetGenerator("resnet-18")
        self.backbone = nn.Sequential(
            *list(resnet.children())[:-1], nn.AdaptiveAvgPool2d(1)
        )
        self.prediction_head = heads.NNCLRPredictionHead(256, 4096, 256)
        # use only a 2-layer projection head for cifar10
        self.projection_head = heads.ProjectionHead(
            [
                (512, 2048, nn.BatchNorm1d(2048), nn.ReLU(inplace=True)),
                (2048, 256, nn.BatchNorm1d(256), None),
            ]
        )
        self.criterion = NTXentLoss()
        self.memory_bank = modules.NNMemoryBankModule(size=4096)

    def forward(self, x):
        # backbone -> projection (detached) and prediction outputs
        y = self.backbone(x).flatten(start_dim=1)
        z = self.projection_head(y)
        p = self.prediction_head(z)
        z = z.detach()
        return z, p

    def training_step(self, batch, batch_idx):
        (x0, x1), _, _ = batch
        z0, p0 = self.forward(x0)
        z1, p1 = self.forward(x1)
        # replace projections by their nearest neighbours from the memory bank
        z0 = self.memory_bank(z0, update=False)
        z1 = self.memory_bank(z1, update=True)
        loss = 0.5 * (self.criterion(z0, p1) + self.criterion(z1, p0))
        # Fix: log the training loss for consistency with every other model
        # in this benchmark (the original returned without logging).
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        optim = torch.optim.SGD(
            self.parameters(),
            lr=6e-2 * lr_factor,
            momentum=0.9,
            weight_decay=5e-4,
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
class DINOModel(BenchmarkModule):
    """DINO benchmark model: student/teacher self-distillation on ResNet-18."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # create a ResNet backbone and remove the classification head
        resnet = ResNetGenerator("resnet-18")
        self.backbone = nn.Sequential(
            *list(resnet.children())[:-1], nn.AdaptiveAvgPool2d(1)
        )
        self.head = self._build_projection_head()
        # The teacher is a momentum copy of the student; it receives no
        # gradients and is updated via utils.update_momentum in training_step.
        self.teacher_backbone = copy.deepcopy(self.backbone)
        self.teacher_head = self._build_projection_head()

        utils.deactivate_requires_grad(self.teacher_backbone)
        utils.deactivate_requires_grad(self.teacher_head)

        self.criterion = DINOLoss(output_dim=2048)

    def _build_projection_head(self):
        # Build the standard DINO head, then swap its MLP for a shallower one.
        head = heads.DINOProjectionHead(512, 2048, 256, 2048, batch_norm=True)
        # use only 2 layers for cifar10
        head.layers = heads.ProjectionHead(
            [
                (512, 2048, nn.BatchNorm1d(2048), nn.GELU()),
                (2048, 256, None, None),
            ]
        ).layers
        return head

    def forward(self, x):
        # Student path.
        y = self.backbone(x).flatten(start_dim=1)
        z = self.head(y)
        return z

    def forward_teacher(self, x):
        # Teacher path (no gradients flow here).
        y = self.teacher_backbone(x).flatten(start_dim=1)
        z = self.teacher_head(y)
        return z

    def training_step(self, batch, batch_idx):
        utils.update_momentum(self.backbone, self.teacher_backbone, m=0.99)
        utils.update_momentum(self.head, self.teacher_head, m=0.99)
        views, _, _ = batch
        views = [view.to(self.device) for view in views]
        # The teacher only sees the first two (global) views; the student
        # sees all of them.
        global_views = views[:2]
        teacher_out = [self.forward_teacher(view) for view in global_views]
        student_out = [self.forward(view) for view in views]
        loss = self.criterion(teacher_out, student_out, epoch=self.current_epoch)
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        # Only the student backbone and head are optimized.
        param = list(self.backbone.parameters()) + list(self.head.parameters())
        optim = torch.optim.SGD(
            param,
            lr=6e-2 * lr_factor,
            momentum=0.9,
            weight_decay=5e-4,
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
class DCL(BenchmarkModule):
    """Decoupled Contrastive Learning benchmark with a ResNet-18 backbone."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # ResNet-18 backbone without the final classification layer
        resnet = ResNetGenerator("resnet-18")
        trunk = list(resnet.children())[:-1]
        self.backbone = nn.Sequential(*trunk, nn.AdaptiveAvgPool2d(1))
        self.projection_head = heads.SimCLRProjectionHead(512, 512, 128)
        self.criterion = DCLLoss()

    def forward(self, x):
        """Embed a batch of images into the 128-d projection space."""
        features = self.backbone(x).flatten(start_dim=1)
        return self.projection_head(features)

    def training_step(self, batch, batch_index):
        (view_a, view_b), _, _ = batch
        loss = self.criterion(self.forward(view_a), self.forward(view_b))
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        optimizer = torch.optim.SGD(
            self.parameters(), lr=6e-2 * lr_factor, momentum=0.9, weight_decay=5e-4
        )
        schedule = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, max_epochs)
        return [optimizer], [schedule]
class DCLW(BenchmarkModule):
    """Decoupled Contrastive Learning (weighted variant) benchmark model."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # ResNet-18 backbone without the final classification layer
        resnet = ResNetGenerator("resnet-18")
        trunk = list(resnet.children())[:-1]
        self.backbone = nn.Sequential(*trunk, nn.AdaptiveAvgPool2d(1))
        self.projection_head = heads.SimCLRProjectionHead(512, 512, 128)
        self.criterion = DCLWLoss()

    def forward(self, x):
        """Embed a batch of images into the 128-d projection space."""
        features = self.backbone(x).flatten(start_dim=1)
        return self.projection_head(features)

    def training_step(self, batch, batch_index):
        (view_a, view_b), _, _ = batch
        loss = self.criterion(self.forward(view_a), self.forward(view_b))
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        optimizer = torch.optim.SGD(
            self.parameters(), lr=6e-2 * lr_factor, momentum=0.9, weight_decay=5e-4
        )
        schedule = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, max_epochs)
        return [optimizer], [schedule]
from sklearn.cluster import KMeans
class SMoGModel(BenchmarkModule):
    """SMoG benchmark model (Synchronous Momentum Grouping).

    Keeps frozen momentum copies of backbone and projection head, assigns
    momentum-branch features to learned group prototypes, and periodically
    resets both the groups (via k-means over a memory bank) and the momentum
    weights, following https://arxiv.org/pdf/2207.06167.pdf Table 7b).
    """

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # create a ResNet backbone and remove the classification head
        resnet = ResNetGenerator("resnet-18")
        self.backbone = nn.Sequential(
            *list(resnet.children())[:-1], nn.AdaptiveAvgPool2d(1)
        )
        # create a model based on ResNet
        self.projection_head = heads.SMoGProjectionHead(512, 2048, 128)
        self.prediction_head = heads.SMoGPredictionHead(128, 2048, 128)
        # momentum branch: frozen copies updated by EMA (or reset, see below)
        self.backbone_momentum = copy.deepcopy(self.backbone)
        self.projection_head_momentum = copy.deepcopy(self.projection_head)
        utils.deactivate_requires_grad(self.backbone_momentum)
        utils.deactivate_requires_grad(self.projection_head_momentum)
        # smog
        self.n_groups = 300
        memory_bank_size = 10000
        self.memory_bank = memory_bank.MemoryBankModule(size=memory_bank_size)
        # create our loss: groups start as random unit vectors
        group_features = torch.nn.functional.normalize(
            torch.rand(self.n_groups, 128), dim=1
        )
        self.smog = heads.SMoGPrototypes(group_features=group_features, beta=0.99)
        self.criterion = nn.CrossEntropyLoss()

    def _cluster_features(self, features: torch.Tensor) -> torch.Tensor:
        """Run k-means on CPU and return normalized cluster centers."""
        features = features.cpu().numpy()
        kmeans = KMeans(self.n_groups).fit(features)
        clustered = torch.from_numpy(kmeans.cluster_centers_).float()
        clustered = torch.nn.functional.normalize(clustered, dim=1)
        return clustered

    def _reset_group_features(self):
        # see https://arxiv.org/pdf/2207.06167.pdf Table 7b)
        features = self.memory_bank.bank
        group_features = self._cluster_features(features.t())
        self.smog.set_group_features(group_features)

    def _reset_momentum_weights(self):
        # see https://arxiv.org/pdf/2207.06167.pdf Table 7b)
        self.backbone_momentum = copy.deepcopy(self.backbone)
        self.projection_head_momentum = copy.deepcopy(self.projection_head)
        utils.deactivate_requires_grad(self.backbone_momentum)
        utils.deactivate_requires_grad(self.projection_head_momentum)

    def training_step(self, batch, batch_idx):
        if self.global_step > 0 and self.global_step % 300 == 0:
            # reset group features and weights every 300 iterations
            self._reset_group_features()
            self._reset_momentum_weights()
        else:
            # update momentum
            utils.update_momentum(self.backbone, self.backbone_momentum, 0.99)
            utils.update_momentum(
                self.projection_head, self.projection_head_momentum, 0.99
            )
        (x0, x1), _, _ = batch
        if batch_idx % 2:
            # swap batches every second iteration
            x0, x1 = x1, x0
        x0_features = self.backbone(x0).flatten(start_dim=1)
        x0_encoded = self.projection_head(x0_features)
        x0_predicted = self.prediction_head(x0_encoded)
        x1_features = self.backbone_momentum(x1).flatten(start_dim=1)
        x1_encoded = self.projection_head_momentum(x1_features)
        # update group features and get group assignments
        assignments = self.smog.assign_groups(x1_encoded)
        group_features = self.smog.get_updated_group_features(x0_encoded)
        logits = self.smog(x0_predicted, group_features, temperature=0.1)
        self.smog.set_group_features(group_features)
        loss = self.criterion(logits, assignments)
        # use memory bank to periodically reset the group features with k-means
        self.memory_bank(x0_encoded, update=True)
        # log the loss for consistency with the other benchmark models
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        params = (
            list(self.backbone.parameters())
            + list(self.projection_head.parameters())
            + list(self.prediction_head.parameters())
        )
        optim = torch.optim.SGD(
            params,
            lr=0.01,
            momentum=0.9,
            weight_decay=1e-6,
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
# All benchmark model classes that will be trained and evaluated below.
models = [
    BarlowTwinsModel,
    BYOLModel,
    DCL,
    DCLW,
    DINOModel,
    MocoModel,
    NNCLRModel,
    SimCLRModel,
    SimSiamModel,
    SwaVModel,
    SMoGModel,
]
bench_results = dict()
experiment_version = None
# loop through configurations and train models
for BenchmarkModel in models:
    runs = []
    model_name = BenchmarkModel.__name__.replace("Model", "")
    # one full training per seed; multiple seeds give mean +- std below
    for seed in range(n_runs):
        pl.seed_everything(seed)
        dataset_train_ssl = create_dataset_train_ssl(BenchmarkModel)
        dataloader_train_ssl, dataloader_train_kNN, dataloader_test = get_data_loaders(
            batch_size=batch_size, dataset_train_ssl=dataset_train_ssl
        )
        benchmark_model = BenchmarkModel(dataloader_train_kNN, classes)
        # Save logs to: {CWD}/benchmark_logs/cifar10/{experiment_version}/{model_name}/
        # If multiple runs are specified a subdirectory for each run is created.
        sub_dir = model_name if n_runs <= 1 else f"{model_name}/run{seed}"
        logger = TensorBoardLogger(
            save_dir=os.path.join(logs_root_dir, "cifar10"),
            name="",
            sub_dir=sub_dir,
            version=experiment_version,
        )
        if experiment_version is None:
            # Save results of all models under same version directory
            experiment_version = logger.version
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            dirpath=os.path.join(logger.log_dir, "checkpoints")
        )
        trainer = pl.Trainer(
            max_epochs=max_epochs,
            devices=devices,
            accelerator=accelerator,
            default_root_dir=logs_root_dir,
            strategy=strategy,
            sync_batchnorm=sync_batchnorm,
            logger=logger,
            callbacks=[checkpoint_callback],
        )
        start = time.time()
        trainer.fit(
            benchmark_model,
            train_dataloaders=dataloader_train_ssl,
            val_dataloaders=dataloader_test,
        )
        end = time.time()
        run = {
            "model": model_name,
            "batch_size": batch_size,
            "epochs": max_epochs,
            # best kNN accuracy seen during validation (presumably tracked by
            # BenchmarkModule — see lightly.utils.benchmarking)
            "max_accuracy": benchmark_model.max_accuracy,
            "runtime": end - start,
            "gpu_memory_usage": torch.cuda.max_memory_allocated(),
            "seed": seed,
        }
        runs.append(run)
        print(run)
        # delete model and trainer + free up cuda memory
        del benchmark_model
        del trainer
        torch.cuda.reset_peak_memory_stats()
        torch.cuda.empty_cache()
    bench_results[model_name] = runs
# print results table (aggregates all runs per model)
header = (
    f"| {'Model':<13} | {'Batch Size':>10} | {'Epochs':>6} "
    f"| {'KNN Test Accuracy':>18} | {'Time':>10} | {'Peak GPU Usage':>14} |"
)
print("-" * len(header))
print(header)
print("-" * len(header))
for model, results in bench_results.items():
    runtime = np.array([result["runtime"] for result in results])
    runtime = runtime.mean() / 60 # convert to min
    accuracy = np.array([result["max_accuracy"] for result in results])
    gpu_memory_usage = np.array([result["gpu_memory_usage"] for result in results])
    gpu_memory_usage = gpu_memory_usage.max() / (1024**3) # convert to gbyte
    if len(accuracy) > 1:
        # multiple seeds: report mean +- standard deviation
        accuracy_msg = f"{accuracy.mean():>8.3f} +- {accuracy.std():>4.3f}"
    else:
        accuracy_msg = f"{accuracy.mean():>18.3f}"
    print(
        f"| {model:<13} | {batch_size:>10} | {max_epochs:>6} "
        f"| {accuracy_msg} | {runtime:>6.1f} Min "
        f"| {gpu_memory_usage:>8.1f} GByte |",
        flush=True,
    )
print("-" * len(header))
| 36,355 | 36.022403 | 122 | py |
lightly | lightly-master/docs/source/getting_started/benchmarks/imagenet100_benchmark.py | # -*- coding: utf-8 -*-
"""
Benchmark Results
Updated: 13.02.2023
------------------------------------------------------------------------------------------
| Model | Batch Size | Epochs | KNN Test Accuracy | Time | Peak GPU Usage |
------------------------------------------------------------------------------------------
| BarlowTwins | 256 | 200 | 0.465 | 1319.3 Min | 11.3 GByte |
| BYOL | 256 | 200 | 0.439 | 1315.4 Min | 12.9 GByte |
| DINO | 256 | 200 | 0.518 | 1868.5 Min | 17.4 GByte |
| FastSiam | 256 | 200 | 0.559 | 1856.2 Min | 22.0 GByte |
| Moco | 256 | 200 | 0.560 | 1314.2 Min | 13.1 GByte |
| NNCLR | 256 | 200 | 0.453 | 1198.6 Min | 11.8 GByte |
| SimCLR | 256 | 200 | 0.469 | 1207.7 Min | 11.3 GByte |
| SimSiam | 256 | 200 | 0.534 | 1175.0 Min | 11.1 GByte |
| SwaV | 256 | 200 | 0.678 | 1642.8 Min | 16.9 GByte |
------------------------------------------------------------------------------------------
Note that this benchmark also supports a multi-GPU setup. If you run it on
a system with multiple GPUs make sure that you kill all the processes when
killing the application. Due to the way we setup this benchmark the distributed
processes might continue the benchmark if one of the nodes is killed.
If you know how to fix this don't hesitate to create an issue or PR :)
Code has been tested on a A6000 GPU with 48GBytes of memory.
"""
import copy
import os
import time
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torchvision
from pl_bolts.optimizers.lars import LARS
from pl_bolts.optimizers.lr_scheduler import linear_warmup_decay
from pytorch_lightning.loggers import TensorBoardLogger
from torch.optim.lr_scheduler import LambdaLR
from lightly.data import LightlyDataset
from lightly.loss import (
BarlowTwinsLoss,
DINOLoss,
NegativeCosineSimilarity,
NTXentLoss,
SwaVLoss,
)
from lightly.models import modules, utils
from lightly.models.modules import heads
from lightly.transforms import (
DINOTransform,
FastSiamTransform,
SimCLRTransform,
SimSiamTransform,
SwaVTransform,
)
from lightly.transforms.utils import IMAGENET_NORMALIZE
from lightly.utils.benchmarking import BenchmarkModule
# Where TensorBoard logs and checkpoints are written.
logs_root_dir = os.path.join(os.getcwd(), "benchmark_logs")
num_workers = 12
# Memory bank size shared by MoCo's NTXentLoss and NNCLR's NN memory bank.
memory_bank_size = 2**16
# set max_epochs to 800 for long run (takes around 10h on a single V100)
max_epochs = 200
# kNN evaluation settings: number of neighbours and softmax temperature.
knn_k = 20
knn_t = 0.1
classes = 100
input_size = 224
# Set to True to enable Distributed Data Parallel training.
distributed = False
# Set to True to enable Synchronized Batch Norm (requires distributed=True).
# If enabled the batch norm is calculated over all gpus, otherwise the batch
# norm is only calculated from samples on the same gpu.
sync_batchnorm = False
# Set to True to gather features from all gpus before calculating
# the loss (requires distributed=True).
# If enabled then the loss on every gpu is calculated with features from all
# gpus, otherwise only features from the same gpu are used.
gather_distributed = False
# benchmark
n_runs = 1 # optional, increase to create multiple runs and report mean + std
batch_size = 256
lr_factor = batch_size / 256 # scales the learning rate linearly with batch size
# Number of devices and hardware to use for training.
devices = torch.cuda.device_count() if torch.cuda.is_available() else 1
accelerator = "gpu" if torch.cuda.is_available() else "cpu"
if distributed:
    strategy = "ddp"
    # reduce batch size for distributed training
    batch_size = batch_size // devices
else:
    strategy = None # Set to "auto" if using PyTorch Lightning >= 2.0
    # limit to single device if not using distributed training
    devices = min(devices, 1)
# The dataset structure should be like this:
# (presumably an ImageFolder-style layout with one subdirectory per class —
# verify against the ImageNette/ImageNet download instructions)
path_to_train = "/datasets/imagenet100/train/"
path_to_test = "/datasets/imagenet100/val/"
# Use SimCLR augmentations
simclr_transform = SimCLRTransform(input_size=input_size)
# Use SimSiam augmentations
simsiam_transform = SimSiamTransform(input_size=input_size)
# Multi crop augmentation for FastSiam
fast_siam_transform = FastSiamTransform(input_size=input_size)
# Multi crop augmentation for SwAV
swav_transform = SwaVTransform()
# Multi crop augmentation for DINO, additionally, disable blur for cifar10
dino_transform = DINOTransform()
# No additional augmentations for the test set
test_transforms = torchvision.transforms.Compose(
    [
        torchvision.transforms.Resize(input_size),
        torchvision.transforms.CenterCrop(224),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(
            mean=IMAGENET_NORMALIZE["mean"],
            std=IMAGENET_NORMALIZE["std"],
        ),
    ]
)
# SSL training data; the per-model transform is attached later in
# create_dataset_train_ssl.
dataset_train_ssl = LightlyDataset(input_dir=path_to_train)
# we use test transformations for getting the feature for kNN on train data
dataset_train_kNN = LightlyDataset(input_dir=path_to_train, transform=test_transforms)
dataset_test = LightlyDataset(input_dir=path_to_test, transform=test_transforms)
# used by the warmup/cosine schedulers below
steps_per_epoch = len(LightlyDataset(input_dir=path_to_train)) // batch_size
def create_dataset_train_ssl(model):
    """Return the SSL training dataset with the transform matching ``model``.

    Args:
        model:
            Model class for which to select the transform.
    """
    transform_by_model = {
        BarlowTwinsModel: simclr_transform,
        BYOLModel: simclr_transform,
        DINOModel: dino_transform,
        FastSiamModel: fast_siam_transform,
        MocoModel: simclr_transform,
        NNCLRModel: simclr_transform,
        SimCLRModel: simclr_transform,
        SimSiamModel: simsiam_transform,
        SwaVModel: swav_transform,
    }
    return LightlyDataset(
        input_dir=path_to_train, transform=transform_by_model[model]
    )
def get_data_loaders(batch_size: int, dataset_train_ssl):
    """Create the dataloaders for SSL training, kNN train and kNN test.

    Args:
        batch_size: Desired batch size for all dataloaders.
    """

    def _make_loader(dataset, shuffle, drop_last):
        # all three loaders share batch size and worker count
        return torch.utils.data.DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=shuffle,
            drop_last=drop_last,
            num_workers=num_workers,
        )

    dataloader_train_ssl = _make_loader(dataset_train_ssl, shuffle=True, drop_last=True)
    dataloader_train_kNN = _make_loader(dataset_train_kNN, shuffle=False, drop_last=False)
    dataloader_test = _make_loader(dataset_test, shuffle=False, drop_last=False)
    return dataloader_train_ssl, dataloader_train_kNN, dataloader_test
class MocoModel(BenchmarkModule):
    """MoCo benchmark model.
    Keeps frozen momentum copies of backbone and projection head (updated by
    EMA each step) and trains with a symmetrized NTXent loss backed by a
    feature memory bank.
    """
    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes, knn_k=knn_k, knn_t=knn_t)
        # create a ResNet backbone and remove the classification head
        num_splits = 0 if sync_batchnorm else 8  # NOTE(review): unused until the TODO below is done
        # TODO: Add split batch norm to the resnet model
        resnet = torchvision.models.resnet18()
        feature_dim = list(resnet.children())[-1].in_features
        self.backbone = nn.Sequential(*list(resnet.children())[:-1])
        # create a moco model based on ResNet
        self.projection_head = heads.MoCoProjectionHead(feature_dim, 2048, 128)
        # key encoder: frozen copies whose weights follow the query encoder via EMA
        self.backbone_momentum = copy.deepcopy(self.backbone)
        self.projection_head_momentum = copy.deepcopy(self.projection_head)
        utils.deactivate_requires_grad(self.backbone_momentum)
        utils.deactivate_requires_grad(self.projection_head_momentum)
        # create our loss with the optional memory bank
        self.criterion = NTXentLoss(temperature=0.07, memory_bank_size=memory_bank_size)
    def forward(self, x):
        # query-encoder embedding: backbone features -> projection
        x = self.backbone(x).flatten(start_dim=1)
        return self.projection_head(x)
    def training_step(self, batch, batch_idx):
        (x0, x1), _, _ = batch
        # update momentum
        utils.update_momentum(self.backbone, self.backbone_momentum, 0.999)
        utils.update_momentum(
            self.projection_head, self.projection_head_momentum, 0.999
        )
        def step(x0_, x1_):
            # shuffle the key batch before the momentum branch and restore the
            # original order afterwards (MoCo-style batch shuffle)
            x1_, shuffle = utils.batch_shuffle(x1_, distributed=distributed)
            x0_ = self.backbone(x0_).flatten(start_dim=1)
            x0_ = self.projection_head(x0_)
            x1_ = self.backbone_momentum(x1_).flatten(start_dim=1)
            x1_ = self.projection_head_momentum(x1_)
            x1_ = utils.batch_unshuffle(x1_, shuffle, distributed=distributed)
            return x0_, x1_
        # We use a symmetric loss (model trains faster at little compute overhead)
        # https://colab.research.google.com/github/facebookresearch/moco/blob/colab-notebook/colab/moco_cifar10_demo.ipynb
        loss_1 = self.criterion(*step(x0, x1))
        loss_2 = self.criterion(*step(x1, x0))
        loss = 0.5 * (loss_1 + loss_2)
        self.log("train_loss_ssl", loss)
        return loss
    def configure_optimizers(self):
        # only query-encoder parameters are optimized; the key encoder follows via EMA
        params = list(self.backbone.parameters()) + list(
            self.projection_head.parameters()
        )
        optim = torch.optim.SGD(
            params,
            lr=0.03 * lr_factor,
            momentum=0.9,
            weight_decay=1e-4,
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
class SimCLRModel(BenchmarkModule):
    """SimCLR benchmark model: ResNet-18 backbone, projection head, NTXent loss."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes, knn_k=knn_k, knn_t=knn_t)
        # ResNet-18 backbone without the final classification layer
        resnet = torchvision.models.resnet18()
        feature_dim = list(resnet.children())[-1].in_features
        self.backbone = nn.Sequential(*list(resnet.children())[:-1])
        self.projection_head = heads.SimCLRProjectionHead(feature_dim, feature_dim, 128)
        self.criterion = NTXentLoss(temperature=0.1)

    def forward(self, x):
        """Embed a batch of images into the 128-d projection space."""
        features = self.backbone(x).flatten(start_dim=1)
        return self.projection_head(features)

    def training_step(self, batch, batch_index):
        (view_a, view_b), _, _ = batch
        loss = self.criterion(self.forward(view_a), self.forward(view_b))
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        # LARS with 10-epoch linear warmup followed by cosine decay, per step
        optimizer = LARS(
            self.parameters(),
            lr=0.3 * lr_factor,
            momentum=0.9,
            weight_decay=1e-6,
        )
        warmup_cosine = LambdaLR(
            optimizer=optimizer,
            lr_lambda=linear_warmup_decay(
                warmup_steps=steps_per_epoch * 10,
                total_steps=steps_per_epoch * max_epochs,
                cosine=True,
            ),
        )
        scheduler = {
            "scheduler": warmup_cosine,
            "interval": "step",
            "frequency": 1,
        }
        return [optimizer], [scheduler]
class SimSiamModel(BenchmarkModule):
    """SimSiam benchmark model with stop-gradient via detached projections."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes, knn_k=knn_k, knn_t=knn_t)
        # ResNet-18 backbone without the final classification layer
        resnet = torchvision.models.resnet18()
        feature_dim = list(resnet.children())[-1].in_features
        self.backbone = nn.Sequential(*list(resnet.children())[:-1])
        self.prediction_head = heads.SimSiamPredictionHead(2048, 512, 2048)
        self.projection_head = heads.SimSiamProjectionHead(feature_dim, 512, 2048)
        self.criterion = NegativeCosineSimilarity()

    def forward(self, x):
        """Return (detached projection, prediction) for one batch of images."""
        features = self.backbone(x).flatten(start_dim=1)
        projection = self.projection_head(features)
        prediction = self.prediction_head(projection)
        # detach implements SimSiam's stop-gradient on the target branch
        return projection.detach(), prediction

    def training_step(self, batch, batch_idx):
        (view_a, view_b), _, _ = batch
        z_a, p_a = self.forward(view_a)
        z_b, p_b = self.forward(view_b)
        loss = 0.5 * (self.criterion(z_a, p_b) + self.criterion(z_b, p_a))
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        optimizer = torch.optim.SGD(
            self.parameters(),
            lr=0.05 * lr_factor,
            momentum=0.9,
            weight_decay=1e-4,
        )
        schedule = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, max_epochs)
        return [optimizer], [schedule]
class FastSiamModel(SimSiamModel):
    """FastSiam: SimSiam trained on multiple views per image."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)

    def training_step(self, batch, batch_idx):
        views, _, _ = batch
        outputs = [self.forward(view) for view in views]
        projections = torch.stack([z for z, _ in outputs])
        predictions = torch.stack([p for _, p in outputs])
        n_views = len(views)
        loss = 0.0
        for i in range(n_views):
            # compare each prediction to the mean projection of all OTHER
            # views (stop-gradient already applied inside forward)
            keep = torch.arange(n_views, device=self.device) != i
            target = torch.mean(projections[keep], dim=0)
            loss += self.criterion(predictions[i], target) / n_views
        self.log("train_loss_ssl", loss)
        return loss
class BarlowTwinsModel(BenchmarkModule):
    """Barlow Twins benchmark model (redundancy-reduction objective)."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes, knn_k=knn_k, knn_t=knn_t)
        # ResNet-18 backbone without the final classification layer
        resnet = torchvision.models.resnet18()
        feature_dim = list(resnet.children())[-1].in_features
        self.backbone = nn.Sequential(*list(resnet.children())[:-1])
        # use a 2-layer projection head for cifar10 as described in the paper
        self.projection_head = heads.BarlowTwinsProjectionHead(feature_dim, 2048, 2048)
        self.criterion = BarlowTwinsLoss(gather_distributed=gather_distributed)

    def forward(self, x):
        """Embed a batch of images into the 2048-d projection space."""
        features = self.backbone(x).flatten(start_dim=1)
        return self.projection_head(features)

    def training_step(self, batch, batch_index):
        (view_a, view_b), _, _ = batch
        loss = self.criterion(self.forward(view_a), self.forward(view_b))
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        # LARS with 10-epoch linear warmup followed by cosine decay, per step
        optimizer = LARS(
            self.parameters(),
            lr=0.2 * lr_factor,
            momentum=0.9,
            weight_decay=1.5 * 1e-6,
        )
        warmup_cosine = LambdaLR(
            optimizer=optimizer,
            lr_lambda=linear_warmup_decay(
                warmup_steps=steps_per_epoch * 10,
                total_steps=steps_per_epoch * max_epochs,
                cosine=True,
            ),
        )
        scheduler = {
            "scheduler": warmup_cosine,
            "interval": "step",
            "frequency": 1,
        }
        return [optimizer], [scheduler]
class BYOLModel(BenchmarkModule):
    """BYOL benchmark model.
    Online network (backbone + projection + prediction head) is trained; the
    target network is a frozen copy updated by EMA each training step.
    """
    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes, knn_k=knn_k, knn_t=knn_t)
        # create a ResNet backbone and remove the classification head
        resnet = torchvision.models.resnet18()
        feature_dim = list(resnet.children())[-1].in_features
        self.backbone = nn.Sequential(*list(resnet.children())[:-1])
        # create a byol model based on ResNet
        self.projection_head = heads.BYOLProjectionHead(feature_dim, 4096, 256)
        self.prediction_head = heads.BYOLProjectionHead(256, 4096, 256)
        # target network: frozen copies, weights follow the online network via EMA
        self.backbone_momentum = copy.deepcopy(self.backbone)
        self.projection_head_momentum = copy.deepcopy(self.projection_head)
        utils.deactivate_requires_grad(self.backbone_momentum)
        utils.deactivate_requires_grad(self.projection_head_momentum)
        self.criterion = NegativeCosineSimilarity()
    def forward(self, x):
        # online branch: backbone -> projection -> prediction
        y = self.backbone(x).flatten(start_dim=1)
        z = self.projection_head(y)
        p = self.prediction_head(z)
        return p
    def forward_momentum(self, x):
        # target branch: detached so no gradients flow into the EMA copies
        y = self.backbone_momentum(x).flatten(start_dim=1)
        z = self.projection_head_momentum(y)
        z = z.detach()
        return z
    def training_step(self, batch, batch_idx):
        # EMA update of the target network BEFORE the forward passes
        utils.update_momentum(self.backbone, self.backbone_momentum, m=0.999)
        utils.update_momentum(
            self.projection_head, self.projection_head_momentum, m=0.999
        )
        (x0, x1), _, _ = batch
        p0 = self.forward(x0)
        z0 = self.forward_momentum(x0)
        p1 = self.forward(x1)
        z1 = self.forward_momentum(x1)
        # symmetrized negative cosine similarity between branches
        loss = 0.5 * (self.criterion(p0, z1) + self.criterion(p1, z0))
        self.log("train_loss_ssl", loss)
        return loss
    def configure_optimizers(self):
        # only online-network parameters are optimized; target follows via EMA
        params = (
            list(self.backbone.parameters())
            + list(self.projection_head.parameters())
            + list(self.prediction_head.parameters())
        )
        optim = LARS(
            params,
            lr=0.2 * lr_factor,
            momentum=0.9,
            weight_decay=1.5 * 1e-6,
        )
        scheduler = {
            "scheduler": LambdaLR(
                optimizer=optim,
                lr_lambda=linear_warmup_decay(
                    warmup_steps=steps_per_epoch * 10,
                    total_steps=steps_per_epoch * max_epochs,
                    cosine=True,
                ),
            ),
            "interval": "step",
            "frequency": 1,
        }
        return [optim], [scheduler]
class NNCLRModel(BenchmarkModule):
    """NNCLR benchmark model with a ResNet-18 backbone.

    Projections are swapped with their nearest neighbours from a memory bank
    before the symmetrized NTXent loss is applied.
    """

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes, knn_k=knn_k, knn_t=knn_t)
        # create a ResNet backbone and remove the classification head
        resnet = torchvision.models.resnet18()
        feature_dim = list(resnet.children())[-1].in_features
        self.backbone = nn.Sequential(*list(resnet.children())[:-1])
        self.prediction_head = heads.NNCLRPredictionHead(256, 4096, 256)
        self.projection_head = heads.NNCLRProjectionHead(feature_dim, 4096, 256)
        self.criterion = NTXentLoss()
        self.memory_bank = modules.NNMemoryBankModule(size=memory_bank_size)

    def forward(self, x):
        """Return (detached projection, prediction) for one batch of images."""
        y = self.backbone(x).flatten(start_dim=1)
        z = self.projection_head(y)
        p = self.prediction_head(z)
        z = z.detach()
        return z, p

    def training_step(self, batch, batch_idx):
        (x0, x1), _, _ = batch
        z0, p0 = self.forward(x0)
        z1, p1 = self.forward(x1)
        # swap projections with nearest neighbours from the memory bank;
        # only the second call updates the bank to avoid double insertion
        z0 = self.memory_bank(z0, update=False)
        z1 = self.memory_bank(z1, update=True)
        loss = 0.5 * (self.criterion(z0, p1) + self.criterion(z1, p0))
        # log the loss for consistency with the other benchmark models
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        optim = LARS(
            self.parameters(),
            lr=0.3 * lr_factor,
            momentum=0.9,
            weight_decay=1e-6,
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
class SwaVModel(BenchmarkModule):
    """SwaV benchmark model: projection head + 3000 learnable prototypes,
    trained on multi-crop views with the Sinkhorn-based SwaV loss.
    """
    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes, knn_k=knn_k, knn_t=knn_t)
        # create a ResNet backbone and remove the classification head
        resnet = torchvision.models.resnet18()
        feature_dim = list(resnet.children())[-1].in_features
        self.backbone = nn.Sequential(*list(resnet.children())[:-1])
        self.projection_head = heads.SwaVProjectionHead(feature_dim, 2048, 128)
        self.prototypes = heads.SwaVPrototypes(128, 3000) # use 3000 prototypes
        self.criterion = SwaVLoss(sinkhorn_gather_distributed=gather_distributed)
    def forward(self, x):
        # project, l2-normalize, then score against the prototypes
        x = self.backbone(x).flatten(start_dim=1)
        x = self.projection_head(x)
        x = nn.functional.normalize(x, dim=1, p=2)
        return self.prototypes(x)
    def training_step(self, batch, batch_idx):
        # normalize the prototypes so they are on the unit sphere
        self.prototypes.normalize()
        # the multi-crop dataloader returns a list of image crops where the
        # first two items are the high resolution crops and the rest are low
        # resolution crops
        multi_crops, _, _ = batch
        multi_crop_features = [self.forward(x) for x in multi_crops]
        # split list of crop features into high and low resolution
        high_resolution_features = multi_crop_features[:2]
        low_resolution_features = multi_crop_features[2:]
        # calculate the SwaV loss
        loss = self.criterion(high_resolution_features, low_resolution_features)
        self.log("train_loss_ssl", loss)
        return loss
    def configure_optimizers(self):
        # large base LR (4.8 * lr_factor) combined with the warmup below
        optim = torch.optim.SGD(
            self.parameters(),
            lr=4.8 * lr_factor,
            momentum=0.9,
            weight_decay=1e-6,
        )
        scheduler = {
            "scheduler": LambdaLR(
                optimizer=optim,
                lr_lambda=linear_warmup_decay(
                    warmup_steps=steps_per_epoch * 10,
                    total_steps=steps_per_epoch * max_epochs,
                    cosine=True,
                ),
            ),
            "interval": "step",
            "frequency": 1,
        }
        return [optim], [scheduler]
class DINOModel(BenchmarkModule):
    """DINO benchmark model with a ResNet-18 student/teacher pair.
    The teacher is a frozen copy of the student whose weights follow the
    student via EMA updates in ``training_step``.
    """
    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes, knn_k=knn_k, knn_t=knn_t)
        # create a ResNet backbone and remove the classification head
        resnet = torchvision.models.resnet18()
        feature_dim = list(resnet.children())[-1].in_features
        self.backbone = nn.Sequential(*list(resnet.children())[:-1])
        self.head = heads.DINOProjectionHead(
            feature_dim, 2048, 256, 2048, batch_norm=True
        )
        # teacher branch: frozen copies, no gradients flow through them
        self.teacher_backbone = copy.deepcopy(self.backbone)
        self.teacher_head = heads.DINOProjectionHead(
            feature_dim, 2048, 256, 2048, batch_norm=True
        )
        utils.deactivate_requires_grad(self.teacher_backbone)
        utils.deactivate_requires_grad(self.teacher_head)
        self.criterion = DINOLoss(output_dim=2048)
    def forward(self, x):
        # student branch: backbone features -> projection
        y = self.backbone(x).flatten(start_dim=1)
        z = self.head(y)
        return z
    def forward_teacher(self, x):
        # teacher branch: frozen backbone/head copies
        y = self.teacher_backbone(x).flatten(start_dim=1)
        z = self.teacher_head(y)
        return z
    def training_step(self, batch, batch_idx):
        # EMA update of the teacher BEFORE the forward passes
        utils.update_momentum(self.backbone, self.teacher_backbone, m=0.999)
        utils.update_momentum(self.head, self.teacher_head, m=0.999)
        views, _, _ = batch
        views = [view.to(self.device) for view in views]
        # teacher only sees the first two (global) crops, student sees all
        global_views = views[:2]
        teacher_out = [self.forward_teacher(view) for view in global_views]
        student_out = [self.forward(view) for view in views]
        loss = self.criterion(teacher_out, student_out, epoch=self.current_epoch)
        self.log("train_loss_ssl", loss)
        return loss
    def configure_optimizers(self):
        # only student parameters are optimized; the teacher follows via EMA
        param = list(self.backbone.parameters()) + list(self.head.parameters())
        optim = LARS(
            param,
            lr=0.3 * lr_factor,
            weight_decay=1e-6,
            momentum=0.9,
        )
        scheduler = {
            "scheduler": LambdaLR(
                optimizer=optim,
                lr_lambda=linear_warmup_decay(
                    warmup_steps=steps_per_epoch * 10,
                    total_steps=steps_per_epoch * max_epochs,
                    cosine=True,
                ),
            ),
            "interval": "step",
            "frequency": 1,
        }
        return [optim], [scheduler]
# All benchmark model classes that will be trained and evaluated below.
# FastSiamModel is included: it has a transform registered in
# create_dataset_train_ssl and its results are reported in the module
# docstring, so its omission here looked accidental.
models = [
    BarlowTwinsModel,
    BYOLModel,
    DINOModel,
    FastSiamModel,
    MocoModel,
    NNCLRModel,
    SimCLRModel,
    SimSiamModel,
    SwaVModel,
]
bench_results = dict()
experiment_version = None
# loop through configurations and train models
for BenchmarkModel in models:
    runs = []
    model_name = BenchmarkModel.__name__.replace("Model", "")
    # one full training per seed; multiple seeds give mean +- std below
    for seed in range(n_runs):
        pl.seed_everything(seed)
        dataset_train_ssl = create_dataset_train_ssl(BenchmarkModel)
        dataloader_train_ssl, dataloader_train_kNN, dataloader_test = get_data_loaders(
            batch_size=batch_size, dataset_train_ssl=dataset_train_ssl
        )
        benchmark_model = BenchmarkModel(dataloader_train_kNN, classes)
        # Save logs to: {CWD}/benchmark_logs/imagenet/{experiment_version}/{model_name}/
        # If multiple runs are specified a subdirectory for each run is created.
        sub_dir = model_name if n_runs <= 1 else f"{model_name}/run{seed}"
        logger = TensorBoardLogger(
            save_dir=os.path.join(logs_root_dir, "imagenet"),
            name="",
            sub_dir=sub_dir,
            version=experiment_version,
        )
        if experiment_version is None:
            # Save results of all models under same version directory
            experiment_version = logger.version
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            dirpath=os.path.join(logger.log_dir, "checkpoints")
        )
        trainer = pl.Trainer(
            max_epochs=max_epochs,
            devices=devices,
            accelerator=accelerator,
            default_root_dir=logs_root_dir,
            strategy=strategy,
            sync_batchnorm=sync_batchnorm,
            logger=logger,
            callbacks=[checkpoint_callback],
        )
        start = time.time()
        trainer.fit(
            benchmark_model,
            train_dataloaders=dataloader_train_ssl,
            val_dataloaders=dataloader_test,
        )
        end = time.time()
        run = {
            "model": model_name,
            "batch_size": batch_size,
            "epochs": max_epochs,
            # best kNN accuracy seen during validation (presumably tracked by
            # BenchmarkModule — see lightly.utils.benchmarking)
            "max_accuracy": benchmark_model.max_accuracy,
            "runtime": end - start,
            "gpu_memory_usage": torch.cuda.max_memory_allocated(),
            "seed": seed,
        }
        runs.append(run)
        print(run)
        # delete model and trainer + free up cuda memory
        del benchmark_model
        del trainer
        torch.cuda.reset_peak_memory_stats()
        torch.cuda.empty_cache()
    bench_results[model_name] = runs
# print results table (aggregates all runs per model)
header = (
    f"| {'Model':<13} | {'Batch Size':>10} | {'Epochs':>6} "
    f"| {'KNN Test Accuracy':>18} | {'Time':>10} | {'Peak GPU Usage':>14} |"
)
print("-" * len(header))
print(header)
print("-" * len(header))
for model, results in bench_results.items():
    runtime = np.array([result["runtime"] for result in results])
    runtime = runtime.mean() / 60 # convert to min
    accuracy = np.array([result["max_accuracy"] for result in results])
    gpu_memory_usage = np.array([result["gpu_memory_usage"] for result in results])
    gpu_memory_usage = gpu_memory_usage.max() / (1024**3) # convert to gbyte
    if len(accuracy) > 1:
        # multiple seeds: report mean +- standard deviation
        accuracy_msg = f"{accuracy.mean():>8.3f} +- {accuracy.std():>4.3f}"
    else:
        accuracy_msg = f"{accuracy.mean():>18.3f}"
    print(
        f"| {model:<13} | {batch_size:>10} | {max_epochs:>6} "
        f"| {accuracy_msg} | {runtime:>6.1f} Min "
        f"| {gpu_memory_usage:>8.1f} GByte |",
        flush=True,
    )
print("-" * len(header))
| 27,467 | 35 | 122 | py |
lightly | lightly-master/docs/source/getting_started/benchmarks/imagenette_benchmark.py | # -*- coding: utf-8 -*-
"""
Note that this benchmark also supports a multi-GPU setup. If you run it on
a system with multiple GPUs make sure that you kill all the processes when
killing the application. Due to the way we setup this benchmark the distributed
processes might continue the benchmark if one of the nodes is killed.
If you know how to fix this don't hesitate to create an issue or PR :)
You can download the ImageNette dataset from here: https://github.com/fastai/imagenette
Code has been tested on a A6000 GPU with 48GBytes of memory.
Code to reproduce the benchmark results:
Results (4.5.2023):
-------------------------------------------------------------------------------------------------
| Model | Batch Size | Epochs | KNN Top1 Val Accuracy | Time | Peak GPU Usage |
-------------------------------------------------------------------------------------------------
| BarlowTwins | 256 | 200 | 0.651 | 85.0 Min | 4.0 GByte |
| BYOL | 256 | 200 | 0.705 | 54.4 Min | 4.3 GByte |
| DCL | 256 | 200 | 0.809 | 48.7 Min | 3.7 GByte |
| DCLW | 256 | 200 | 0.783 | 47.3 Min | 3.7 GByte |
| DINO (Res18) | 256 | 200 | 0.873 | 75.4 Min | 6.6 GByte |
| FastSiam | 256 | 200 | 0.779 | 88.2 Min | 7.3 GByte |
| MAE (ViT-S) | 256 | 200 | 0.454 | 62.0 Min | 4.4 GByte |
| MSN (ViT-S) | 256 | 200 | 0.713 | 127.0 Min | 14.7 GByte |
| Moco | 256 | 200 | 0.786 | 57.5 Min | 4.3 GByte |
| NNCLR | 256 | 200 | 0.809 | 51.5 Min | 3.8 GByte |
| PMSN (ViT-S) | 256 | 200 | 0.705 | 126.9 Min | 14.7 GByte |
| SimCLR | 256 | 200 | 0.835 | 49.7 Min | 3.7 GByte |
| SimMIM (ViT-B32) | 256 | 200 | 0.315 | 115.5 Min | 9.7 GByte |
| SimSiam | 256 | 200 | 0.752 | 58.2 Min | 3.9 GByte |
| SwaV | 256 | 200 | 0.861 | 73.3 Min | 6.4 GByte |
| SwaVQueue | 256 | 200 | 0.827 | 72.6 Min | 6.4 GByte |
| SMoG | 256 | 200 | 0.663 | 58.7 Min | 2.6 GByte |
| TiCo | 256 | 200 | 0.742 | 45.6 Min | 2.5 GByte |
| VICReg | 256 | 200 | 0.763 | 53.2 Min | 4.0 GByte |
| VICRegL | 256 | 200 | 0.689 | 56.7 Min | 4.0 GByte |
-------------------------------------------------------------------------------------------------
| BarlowTwins | 256 | 800 | 0.852 | 298.5 Min | 4.0 GByte |
| BYOL | 256 | 800 | 0.887 | 214.8 Min | 4.3 GByte |
| DCL | 256 | 800 | 0.861 | 189.1 Min | 3.7 GByte |
| DCLW | 256 | 800 | 0.865 | 192.2 Min | 3.7 GByte |
| DINO (Res18) | 256 | 800 | 0.888 | 312.3 Min | 6.6 GByte |
| FastSiam | 256 | 800 | 0.873 | 299.6 Min | 7.3 GByte |
| MAE (ViT-S) | 256 | 800 | 0.610 | 248.2 Min | 4.4 GByte |
| MSN (ViT-S) | 256 | 800 | 0.828 | 515.5 Min | 14.7 GByte |
| Moco | 256 | 800 | 0.874 | 231.7 Min | 4.3 GByte |
| NNCLR | 256 | 800 | 0.884 | 212.5 Min | 3.8 GByte |
| PMSN (ViT-S) | 256 | 800 | 0.822 | 505.8 Min | 14.7 GByte |
| SimCLR | 256 | 800 | 0.889 | 193.5 Min | 3.7 GByte |
| SimMIM (ViT-B32) | 256 | 800 | 0.343 | 446.5 Min | 9.7 GByte |
| SimSiam | 256 | 800 | 0.872 | 206.4 Min | 3.9 GByte |
| SwaV | 256 | 800 | 0.902 | 283.2 Min | 6.4 GByte |
| SwaVQueue | 256 | 800 | 0.890 | 282.7 Min | 6.4 GByte |
| SMoG | 256 | 800 | 0.788 | 232.1 Min | 2.6 GByte |
| TiCo | 256 | 800 | 0.856 | 177.8 Min | 2.5 GByte |
| VICReg | 256 | 800 | 0.845 | 205.6 Min | 4.0 GByte |
| VICRegL | 256 | 800 | 0.778 | 218.7 Min | 4.0 GByte |
-------------------------------------------------------------------------------------------------
"""
import copy
import os
import time
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torchvision
from pl_bolts.optimizers.lars import LARS
from pytorch_lightning.loggers import TensorBoardLogger
from lightly.data import LightlyDataset
from lightly.loss import (
BarlowTwinsLoss,
DCLLoss,
DCLWLoss,
DINOLoss,
MSNLoss,
NegativeCosineSimilarity,
NTXentLoss,
PMSNLoss,
SwaVLoss,
TiCoLoss,
VICRegLLoss,
VICRegLoss,
memory_bank,
)
from lightly.models import modules, utils
from lightly.models.modules import heads, masked_autoencoder
from lightly.transforms import (
DINOTransform,
FastSiamTransform,
MAETransform,
MSNTransform,
SimCLRTransform,
SimSiamTransform,
SMoGTransform,
SwaVTransform,
VICRegLTransform,
VICRegTransform,
)
from lightly.transforms.utils import IMAGENET_NORMALIZE
from lightly.utils import scheduler
from lightly.utils.benchmarking import BenchmarkModule
# All benchmark logs and checkpoints are written below the current working dir.
logs_root_dir = os.path.join(os.getcwd(), "benchmark_logs")

num_workers = 12
# Size of the NT-Xent memory bank used by the MoCo model below.
memory_bank_size = 4096

# set max_epochs to 800 for long run (takes around 10h on a single V100)
max_epochs = 200
# kNN evaluation parameters (neighbour count / softmax temperature) --
# presumably consumed by BenchmarkModule; confirm against its definition.
knn_k = 200
knn_t = 0.1
classes = 10
input_size = 128

# Set to True to enable Distributed Data Parallel training.
distributed = False

# Set to True to enable Synchronized Batch Norm (requires distributed=True).
# If enabled the batch norm is calculated over all gpus, otherwise the batch
# norm is only calculated from samples on the same gpu.
sync_batchnorm = False

# Set to True to gather features from all gpus before calculating
# the loss (requires distributed=True).
# If enabled then the loss on every gpu is calculated with features from all
# gpus, otherwise only features from the same gpu are used.
gather_distributed = False

# benchmark
n_runs = 1  # optional, increase to create multiple runs and report mean + std
batch_size = 256
lr_factor = batch_size / 256  # scales the learning rate linearly with batch size

# Number of devices and hardware to use for training.
devices = torch.cuda.device_count() if torch.cuda.is_available() else 1
accelerator = "gpu" if torch.cuda.is_available() else "cpu"

if distributed:
    strategy = "ddp"
    # reduce batch size for distributed training
    batch_size = batch_size // devices
else:
    strategy = None  # Set to "auto" if using PyTorch Lightning >= 2.0
    # limit to single device if not using distributed training
    devices = min(devices, 1)
# The dataset structure should be like this:
path_to_train = "/datasets/imagenette2-160/train/"
path_to_test = "/datasets/imagenette2-160/val/"

# Use SimCLR augmentations
simclr_transform = SimCLRTransform(
    input_size=input_size,
    cj_strength=0.5,
)

# Use SimCLR augmentations with larger image size for SimMIM
simmim_transform = SimCLRTransform(input_size=224)

# Use SimSiam augmentations
simsiam_transform = SimSiamTransform(input_size=input_size)

# Multi crop augmentation for FastSiam
fast_siam_transform = FastSiamTransform(input_size=input_size)

# Multi crop augmentation for SwAV
swav_transform = SwaVTransform(
    crop_sizes=(128, 64),
    crop_counts=(2, 6),  # 2 crops @ 128x128px and 6 crops @ 64x64px
    cj_strength=0.5,
)

# Multi crop augmentation for DINO, additionally, disable blur for cifar10
dino_transform = DINOTransform(
    global_crop_size=128,
    local_crop_size=64,
    cj_strength=0.5,
)

# Two crops for SMoG
smog_transform = SMoGTransform(
    crop_sizes=(128, 128),
    crop_counts=(1, 1),
    crop_min_scales=(0.2, 0.2),
    crop_max_scales=(1.0, 1.0),
    cj_strength=0.5,
)

# Single crop augmentation for MAE
mae_transform = MAETransform()

# Multi crop augmentation for MSN
msn_transform = MSNTransform(
    random_size=128,
    focal_size=64,
    cj_strength=1.0,  # Higher cj_strength works better for MSN on imagenette
)

# Two-view augmentation for VICReg
vicreg_transform = VICRegTransform(
    input_size=input_size,
    cj_strength=0.5,
)

# Transform passing geometrical transformation for VICRegL
vicregl_transform = VICRegLTransform(
    global_crop_size=128,
    n_local_views=0,
    global_grid_size=4,
    cj_strength=0.5,
)

# Standard ImageNet channel statistics used by all evaluation transforms.
normalize_transform = torchvision.transforms.Normalize(
    mean=IMAGENET_NORMALIZE["mean"],
    std=IMAGENET_NORMALIZE["std"],
)

# No additional augmentations for the test set
test_transforms = torchvision.transforms.Compose(
    [
        torchvision.transforms.Resize(input_size),
        torchvision.transforms.CenterCrop(128),
        torchvision.transforms.ToTensor(),
        normalize_transform,
    ]
)

# we use test transformations for getting the feature for kNN on train data
dataset_train_kNN = LightlyDataset(input_dir=path_to_train, transform=test_transforms)

dataset_test = LightlyDataset(input_dir=path_to_test, transform=test_transforms)
def create_dataset_train_ssl(model):
    """Build the SSL training dataset with the transform matching `model`.

    Args:
        model:
            Model class for which to select the transform.
    """
    # Each benchmark model class maps to the augmentation pipeline it was
    # designed for; unknown classes raise a KeyError.
    transform_by_model = {
        BarlowTwinsModel: simclr_transform,
        BYOLModel: simclr_transform,
        DCL: simclr_transform,
        DCLW: simclr_transform,
        DINOModel: dino_transform,
        FastSiamModel: fast_siam_transform,
        MAEModel: mae_transform,
        MSNModel: msn_transform,
        MocoModel: simclr_transform,
        NNCLRModel: simclr_transform,
        PMSNModel: msn_transform,
        SimCLRModel: simclr_transform,
        SimMIMModel: simmim_transform,
        SimSiamModel: simsiam_transform,
        SwaVModel: swav_transform,
        SwaVQueueModel: swav_transform,
        SMoGModel: smog_transform,
        TiCoModel: simclr_transform,
        VICRegModel: vicreg_transform,
        VICRegLModel: vicregl_transform,
    }
    return LightlyDataset(
        input_dir=path_to_train, transform=transform_by_model[model]
    )
def get_data_loaders(batch_size: int, dataset_train_ssl):
    """Create the three dataloaders used by the benchmark: SSL training,
    kNN feature extraction on the train set, and kNN evaluation on the
    test set.

    Args:
        batch_size: Desired batch size for all dataloaders.
    """

    def make_loader(dataset, shuffle):
        # Only the SSL training loader shuffles; it also drops the last
        # partial batch, so the two flags always move together here.
        return torch.utils.data.DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=shuffle,
            drop_last=shuffle,
            num_workers=num_workers,
        )

    dataloader_train_ssl = make_loader(dataset_train_ssl, shuffle=True)
    dataloader_train_kNN = make_loader(dataset_train_kNN_, shuffle=False) if False else make_loader(dataset_train_kNN, shuffle=False)
    dataloader_test = make_loader(dataset_test, shuffle=False)

    return dataloader_train_ssl, dataloader_train_kNN, dataloader_test
class MocoModel(BenchmarkModule):
    """MoCo benchmark model: query encoder plus frozen EMA key encoder,
    trained with an NT-Xent loss backed by a memory bank."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)

        # create a ResNet backbone and remove the classification head
        num_splits = 0 if sync_batchnorm else 8
        # TODO: Add split batch norm to the resnet model
        resnet = torchvision.models.resnet18()
        feature_dim = list(resnet.children())[-1].in_features
        self.backbone = nn.Sequential(*list(resnet.children())[:-1])

        # create a moco model based on ResNet
        self.projection_head = heads.MoCoProjectionHead(feature_dim, 2048, 128)
        # Key (momentum) encoder: frozen deep copies, updated by EMA in
        # training_step instead of by gradients.
        self.backbone_momentum = copy.deepcopy(self.backbone)
        self.projection_head_momentum = copy.deepcopy(self.projection_head)
        utils.deactivate_requires_grad(self.backbone_momentum)
        utils.deactivate_requires_grad(self.projection_head_momentum)

        # create our loss with the optional memory bank
        self.criterion = NTXentLoss(temperature=0.1, memory_bank_size=memory_bank_size)

    def forward(self, x):
        # Query branch: backbone features -> projection.
        x = self.backbone(x).flatten(start_dim=1)
        return self.projection_head(x)

    def training_step(self, batch, batch_idx):
        (x0, x1), _, _ = batch

        # update momentum
        utils.update_momentum(self.backbone, self.backbone_momentum, 0.99)
        utils.update_momentum(self.projection_head, self.projection_head_momentum, 0.99)

        def step(x0_, x1_):
            # Shuffle the key batch before the momentum encoder and unshuffle
            # afterwards -- presumably MoCo's shuffled-BN trick to stop batch
            # norm from leaking intra-batch information.
            x1_, shuffle = utils.batch_shuffle(x1_, distributed=distributed)
            x0_ = self.backbone(x0_).flatten(start_dim=1)
            x0_ = self.projection_head(x0_)

            x1_ = self.backbone_momentum(x1_).flatten(start_dim=1)
            x1_ = self.projection_head_momentum(x1_)
            x1_ = utils.batch_unshuffle(x1_, shuffle, distributed=distributed)
            return x0_, x1_

        # We use a symmetric loss (model trains faster at little compute overhead)
        # https://colab.research.google.com/github/facebookresearch/moco/blob/colab-notebook/colab/moco_cifar10_demo.ipynb
        loss_1 = self.criterion(*step(x0, x1))
        loss_2 = self.criterion(*step(x1, x0))

        loss = 0.5 * (loss_1 + loss_2)
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        # Only the query encoder is optimized; the momentum copies are frozen.
        params = list(self.backbone.parameters()) + list(
            self.projection_head.parameters()
        )
        optim = torch.optim.SGD(
            params,
            lr=6e-2 * lr_factor,
            momentum=0.9,
            weight_decay=5e-4,
        )
        cosine_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [cosine_scheduler]
class SimCLRModel(BenchmarkModule):
    """SimCLR benchmark model: ResNet-18 encoder with a projection head,
    trained with the NT-Xent contrastive loss."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # ResNet-18 trunk without its final classification layer.
        trunk = torchvision.models.resnet18()
        children = list(trunk.children())
        num_features = children[-1].in_features
        self.backbone = nn.Sequential(*children[:-1])
        self.projection_head = heads.SimCLRProjectionHead(
            num_features, num_features, 128
        )
        self.criterion = NTXentLoss()

    def forward(self, x):
        # Embed a batch of views and project into the contrastive space.
        features = self.backbone(x).flatten(start_dim=1)
        return self.projection_head(features)

    def training_step(self, batch, batch_index):
        (view_a, view_b), _, _ = batch
        loss = self.criterion(self.forward(view_a), self.forward(view_b))
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        # SGD with cosine decay; lr scales linearly with the batch size.
        optimizer = torch.optim.SGD(
            self.parameters(),
            lr=6e-2 * lr_factor,
            momentum=0.9,
            weight_decay=5e-4,
        )
        lr_schedule = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, max_epochs)
        return [optimizer], [lr_schedule]
class SimSiamModel(BenchmarkModule):
    """SimSiam benchmark model: siamese ResNet-18 with projection and
    prediction heads, stop-gradient, and negative cosine similarity."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # ResNet-18 trunk without its final classification layer.
        trunk = torchvision.models.resnet18()
        children = list(trunk.children())
        num_features = children[-1].in_features
        self.backbone = nn.Sequential(*children[:-1])
        self.projection_head = heads.SimSiamProjectionHead(num_features, 2048, 2048)
        self.prediction_head = heads.SimSiamPredictionHead(2048, 512, 2048)
        self.criterion = NegativeCosineSimilarity()

    def forward(self, x):
        embedding = self.backbone(x).flatten(start_dim=1)
        projection = self.projection_head(embedding)
        prediction = self.prediction_head(projection)
        # Stop-gradient on the projection branch, as in the SimSiam paper.
        return projection.detach(), prediction

    def training_step(self, batch, batch_idx):
        (view_a, view_b), _, _ = batch
        z_a, p_a = self.forward(view_a)
        z_b, p_b = self.forward(view_b)
        # Symmetrized loss: each prediction targets the other view's projection.
        loss = 0.5 * (self.criterion(z_a, p_b) + self.criterion(z_b, p_a))
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        optimizer = torch.optim.SGD(
            self.parameters(),
            lr=6e-2,  # no lr-scaling, results in better training stability
            momentum=0.9,
            weight_decay=5e-4,
        )
        lr_schedule = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, max_epochs)
        return [optimizer], [lr_schedule]
class FastSiamModel(SimSiamModel):
    """FastSiam: SimSiam variant over multiple crops where each view's
    prediction is matched against the mean projection of all other views."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)

    def training_step(self, batch, batch_idx):
        views, _, _ = batch
        # Each element is (detached projection, prediction) from SimSiamModel.forward.
        features = [self.forward(view) for view in views]
        zs = torch.stack([z for z, _ in features])
        ps = torch.stack([p for _, p in features])

        loss = 0.0
        for i in range(len(views)):
            # Target for view i: the average projection of every *other* view.
            mask = torch.arange(len(views), device=self.device) != i
            loss += self.criterion(ps[i], torch.mean(zs[mask], dim=0)) / len(views)

        self.log("train_loss_ssl", loss)
        return loss
class BarlowTwinsModel(BenchmarkModule):
    """Barlow Twins benchmark model: ResNet-18 encoder with a two-layer
    2048-d projection head, trained with the cross-correlation loss."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # ResNet-18 trunk without its final classification layer.
        trunk = torchvision.models.resnet18()
        children = list(trunk.children())
        num_features = children[-1].in_features
        self.backbone = nn.Sequential(*children[:-1])
        self.projection_head = heads.BarlowTwinsProjectionHead(num_features, 2048, 2048)
        self.criterion = BarlowTwinsLoss(gather_distributed=gather_distributed)

    def forward(self, x):
        features = self.backbone(x).flatten(start_dim=1)
        return self.projection_head(features)

    def training_step(self, batch, batch_index):
        (view_a, view_b), _, _ = batch
        loss = self.criterion(self.forward(view_a), self.forward(view_b))
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        optimizer = torch.optim.SGD(
            self.parameters(),
            lr=6e-2 * lr_factor,
            momentum=0.9,
            weight_decay=5e-4,
        )
        lr_schedule = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, max_epochs)
        return [optimizer], [lr_schedule]
class BYOLModel(BenchmarkModule):
    """BYOL benchmark model: online network (backbone + projection +
    prediction) regressing the projections of an EMA target network."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # create a ResNet backbone and remove the classification head
        resnet = torchvision.models.resnet18()
        feature_dim = list(resnet.children())[-1].in_features
        self.backbone = nn.Sequential(*list(resnet.children())[:-1])

        # create a byol model based on ResNet
        self.projection_head = heads.BYOLProjectionHead(feature_dim, 4096, 256)
        self.prediction_head = heads.BYOLPredictionHead(256, 4096, 256)

        # Target network: frozen copies updated by EMA in training_step.
        self.backbone_momentum = copy.deepcopy(self.backbone)
        self.projection_head_momentum = copy.deepcopy(self.projection_head)

        utils.deactivate_requires_grad(self.backbone_momentum)
        utils.deactivate_requires_grad(self.projection_head_momentum)

        self.criterion = NegativeCosineSimilarity()

    def forward(self, x):
        # Online branch: backbone -> projection -> prediction.
        y = self.backbone(x).flatten(start_dim=1)
        z = self.projection_head(y)
        p = self.prediction_head(z)
        return p

    def forward_momentum(self, x):
        # Target branch: momentum copies, output detached (no gradients).
        y = self.backbone_momentum(x).flatten(start_dim=1)
        z = self.projection_head_momentum(y)
        z = z.detach()
        return z

    def training_step(self, batch, batch_idx):
        # EMA-update the target network before the forward passes.
        utils.update_momentum(self.backbone, self.backbone_momentum, m=0.99)
        utils.update_momentum(
            self.projection_head, self.projection_head_momentum, m=0.99
        )
        (x0, x1), _, _ = batch
        p0 = self.forward(x0)
        z0 = self.forward_momentum(x0)
        p1 = self.forward(x1)
        z1 = self.forward_momentum(x1)
        # Symmetrized loss: each online prediction matches the other view's target.
        loss = 0.5 * (self.criterion(p0, z1) + self.criterion(p1, z0))
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        # Only the online network is optimized; the momentum copies are frozen.
        params = (
            list(self.backbone.parameters())
            + list(self.projection_head.parameters())
            + list(self.prediction_head.parameters())
        )
        optim = torch.optim.SGD(
            params,
            lr=6e-2 * lr_factor,
            momentum=0.9,
            weight_decay=5e-4,
        )
        cosine_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [cosine_scheduler]
class NNCLRModel(BenchmarkModule):
    """NNCLR benchmark model: SimCLR-style contrastive learning where each
    projection is swapped for its nearest neighbour from a support-set
    memory bank before the NT-Xent loss is computed."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # create a ResNet backbone and remove the classification head
        resnet = torchvision.models.resnet18()
        feature_dim = list(resnet.children())[-1].in_features
        self.backbone = nn.Sequential(*list(resnet.children())[:-1])
        self.projection_head = heads.NNCLRProjectionHead(feature_dim, 2048, 256)
        self.prediction_head = heads.NNCLRPredictionHead(256, 4096, 256)
        self.criterion = NTXentLoss()
        # Support set used for the nearest-neighbour lookup.
        self.memory_bank = modules.NNMemoryBankModule(size=4096)

    def forward(self, x):
        y = self.backbone(x).flatten(start_dim=1)
        z = self.projection_head(y)
        p = self.prediction_head(z)
        # Stop-gradient on the projection branch.
        z = z.detach()
        return z, p

    def training_step(self, batch, batch_idx):
        (x0, x1), _, _ = batch
        z0, p0 = self.forward(x0)
        z1, p1 = self.forward(x1)
        # Replace each projection with its nearest neighbour from the bank;
        # only the second lookup pushes new embeddings into the bank.
        z0 = self.memory_bank(z0, update=False)
        z1 = self.memory_bank(z1, update=True)
        loss = 0.5 * (self.criterion(z0, p1) + self.criterion(z1, p0))
        # Log the SSL loss like every other benchmark model so it appears in
        # TensorBoard (this logging call was previously missing here).
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        optim = torch.optim.SGD(
            self.parameters(),
            lr=6e-2 * lr_factor,
            momentum=0.9,
            weight_decay=5e-4,
        )
        cosine_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [cosine_scheduler]
class SwaVModel(BenchmarkModule):
    """SwAV benchmark model: online clustering with swapped prototype
    assignments over multi-crop views."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # create a ResNet backbone and remove the classification head
        resnet = torchvision.models.resnet18()
        feature_dim = list(resnet.children())[-1].in_features
        self.backbone = nn.Sequential(*list(resnet.children())[:-1])

        self.projection_head = heads.SwaVProjectionHead(feature_dim, 2048, 128)
        self.prototypes = heads.SwaVPrototypes(128, 3000)  # use 3000 prototypes
        self.criterion = SwaVLoss(sinkhorn_gather_distributed=gather_distributed)

    def forward(self, x):
        x = self.backbone(x).flatten(start_dim=1)
        x = self.projection_head(x)
        # L2-normalize so the prototype scores are cosine similarities.
        x = nn.functional.normalize(x, dim=1, p=2)
        return self.prototypes(x)

    def training_step(self, batch, batch_idx):
        # normalize the prototypes so they are on the unit sphere
        # (must happen before the forward passes below)
        self.prototypes.normalize()

        # the multi-crop dataloader returns a list of image crops where the
        # first two items are the high resolution crops and the rest are low
        # resolution crops
        multi_crops, _, _ = batch
        multi_crop_features = [self.forward(x) for x in multi_crops]

        # split list of crop features into high and low resolution
        high_resolution_features = multi_crop_features[:2]
        low_resolution_features = multi_crop_features[2:]

        # calculate the SwaV loss
        loss = self.criterion(high_resolution_features, low_resolution_features)

        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        optim = torch.optim.Adam(
            self.parameters(),
            lr=1e-3 * lr_factor,
            weight_decay=1e-6,
        )
        cosine_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [cosine_scheduler]
class DINOModel(BenchmarkModule):
    """DINO benchmark model: self-distillation between a student network and
    an EMA teacher over multi-crop views."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # create a ResNet backbone and remove the classification head
        resnet = torchvision.models.resnet18()
        feature_dim = list(resnet.children())[-1].in_features
        self.backbone = nn.Sequential(*list(resnet.children())[:-1])
        self.head = heads.DINOProjectionHead(
            feature_dim, 2048, 256, 2048, batch_norm=True
        )
        self.teacher_backbone = copy.deepcopy(self.backbone)
        # NOTE(review): the teacher head is freshly initialized rather than a
        # copy of the student head; it is pulled towards the student by the
        # EMA updates in training_step.
        self.teacher_head = heads.DINOProjectionHead(
            feature_dim, 2048, 256, 2048, batch_norm=True
        )

        utils.deactivate_requires_grad(self.teacher_backbone)
        utils.deactivate_requires_grad(self.teacher_head)

        self.criterion = DINOLoss(output_dim=2048)

    def forward(self, x):
        # Student branch.
        y = self.backbone(x).flatten(start_dim=1)
        z = self.head(y)
        return z

    def forward_teacher(self, x):
        # Teacher branch (frozen; updated only by EMA).
        y = self.teacher_backbone(x).flatten(start_dim=1)
        z = self.teacher_head(y)
        return z

    def training_step(self, batch, batch_idx):
        # EMA-update the teacher from the student before the forward passes.
        utils.update_momentum(self.backbone, self.teacher_backbone, m=0.99)
        utils.update_momentum(self.head, self.teacher_head, m=0.99)
        views, _, _ = batch
        views = [view.to(self.device) for view in views]
        # The teacher only sees the first two (global) crops; the student
        # sees all crops.
        global_views = views[:2]
        teacher_out = [self.forward_teacher(view) for view in global_views]
        student_out = [self.forward(view) for view in views]
        loss = self.criterion(teacher_out, student_out, epoch=self.current_epoch)
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        # Only the student parameters are optimized.
        param = list(self.backbone.parameters()) + list(self.head.parameters())
        optim = torch.optim.SGD(
            param,
            lr=6e-2 * lr_factor,
            momentum=0.9,
            weight_decay=5e-4,
        )
        cosine_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [cosine_scheduler]
class DCL(BenchmarkModule):
    """DCL benchmark model: SimCLR-style encoder and projection head trained
    with the decoupled contrastive loss."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # ResNet-18 trunk without its final classification layer.
        trunk = torchvision.models.resnet18()
        children = list(trunk.children())
        num_features = children[-1].in_features
        self.backbone = nn.Sequential(*children[:-1])
        self.projection_head = heads.SimCLRProjectionHead(
            num_features, num_features, 128
        )
        self.criterion = DCLLoss()

    def forward(self, x):
        features = self.backbone(x).flatten(start_dim=1)
        return self.projection_head(features)

    def training_step(self, batch, batch_index):
        (view_a, view_b), _, _ = batch
        loss = self.criterion(self.forward(view_a), self.forward(view_b))
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        optimizer = torch.optim.SGD(
            self.parameters(),
            lr=6e-2 * lr_factor,
            momentum=0.9,
            weight_decay=5e-4,
        )
        lr_schedule = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, max_epochs)
        return [optimizer], [lr_schedule]
class DCLW(BenchmarkModule):
    """DCLW benchmark model: same architecture as DCL but trained with the
    weighted variant of the decoupled contrastive loss."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # ResNet-18 trunk without its final classification layer.
        trunk = torchvision.models.resnet18()
        children = list(trunk.children())
        num_features = children[-1].in_features
        self.backbone = nn.Sequential(*children[:-1])
        self.projection_head = heads.SimCLRProjectionHead(
            num_features, num_features, 128
        )
        self.criterion = DCLWLoss()

    def forward(self, x):
        features = self.backbone(x).flatten(start_dim=1)
        return self.projection_head(features)

    def training_step(self, batch, batch_index):
        (view_a, view_b), _, _ = batch
        loss = self.criterion(self.forward(view_a), self.forward(view_b))
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        optimizer = torch.optim.SGD(
            self.parameters(),
            lr=6e-2 * lr_factor,
            momentum=0.9,
            weight_decay=5e-4,
        )
        lr_schedule = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, max_epochs)
        return [optimizer], [lr_schedule]
class MAEModel(BenchmarkModule):
    """MAE benchmark model: masked autoencoder on a ViT-B/32 backbone that
    reconstructs the pixel values of masked patches (MSE loss)."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)

        decoder_dim = 512
        vit = torchvision.models.vit_b_32(pretrained=False)

        # Longer warmup for the long (>= 800 epoch) schedule.
        self.warmup_epochs = 40 if max_epochs >= 800 else 20
        # Fraction of patch tokens masked out per image.
        self.mask_ratio = 0.75
        self.patch_size = vit.patch_size
        self.sequence_length = vit.seq_length
        # Learnable embedding inserted at masked positions for the decoder.
        self.mask_token = nn.Parameter(torch.zeros(1, 1, decoder_dim))
        self.backbone = masked_autoencoder.MAEBackbone.from_vit(vit)
        self.decoder = masked_autoencoder.MAEDecoder(
            seq_length=vit.seq_length,
            num_layers=1,
            num_heads=16,
            embed_input_dim=vit.hidden_dim,
            hidden_dim=decoder_dim,
            mlp_dim=decoder_dim * 4,
            out_dim=vit.patch_size**2 * 3,  # RGB pixel values of one patch
            dropout=0,
            attention_dropout=0,
        )
        self.criterion = nn.MSELoss()

    def forward_encoder(self, images, idx_keep=None):
        # Encode only the kept (unmasked) tokens.
        return self.backbone.encode(images, idx_keep)

    def forward_decoder(self, x_encoded, idx_keep, idx_mask):
        # build decoder input: start from mask tokens everywhere, then
        # scatter the encoded tokens back into their kept positions.
        batch_size = x_encoded.shape[0]
        x_decode = self.decoder.embed(x_encoded)
        x_masked = utils.repeat_token(
            self.mask_token, (batch_size, self.sequence_length)
        )
        x_masked = utils.set_at_index(x_masked, idx_keep, x_decode.type_as(x_masked))

        # decoder forward pass
        x_decoded = self.decoder.decode(x_masked)

        # predict pixel values for masked tokens
        x_pred = utils.get_at_index(x_decoded, idx_mask)
        x_pred = self.decoder.predict(x_pred)
        return x_pred

    def training_step(self, batch, batch_idx):
        images, _, _ = batch
        images = images[0]  # images is a list containing only one view
        batch_size = images.shape[0]
        # Randomly split token indices into kept and masked sets.
        idx_keep, idx_mask = utils.random_token_mask(
            size=(batch_size, self.sequence_length),
            mask_ratio=self.mask_ratio,
            device=images.device,
        )
        x_encoded = self.forward_encoder(images, idx_keep)
        x_pred = self.forward_decoder(x_encoded, idx_keep, idx_mask)

        # get image patches for masked tokens
        patches = utils.patchify(images, self.patch_size)
        # must adjust idx_mask for missing class token
        target = utils.get_at_index(patches, idx_mask - 1)

        loss = self.criterion(x_pred, target)
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        optim = torch.optim.AdamW(
            self.parameters(),
            lr=1.5e-4 * lr_factor,
            weight_decay=0.05,
            betas=(0.9, 0.95),
        )
        cosine_scheduler = scheduler.CosineWarmupScheduler(
            optim, self.warmup_epochs, max_epochs
        )
        return [optim], [cosine_scheduler]
class MSNModel(BenchmarkModule):
    """MSN benchmark model: masked Siamese network with a trained ViT-S/16
    anchor network and a frozen EMA target network, matched via prototypes."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)

        self.warmup_epochs = 15
        # ViT small configuration (ViT-S/16)
        self.mask_ratio = 0.15
        # Target network; frozen below and updated by EMA in training_step.
        self.backbone = masked_autoencoder.MAEBackbone(
            image_size=224,
            patch_size=16,
            num_layers=12,
            num_heads=6,
            hidden_dim=384,
            mlp_dim=384 * 4,
        )
        self.projection_head = heads.MSNProjectionHead(384)

        # Anchor network: the copy that is actually trained by gradients.
        self.anchor_backbone = copy.deepcopy(self.backbone)
        self.anchor_projection_head = copy.deepcopy(self.projection_head)

        utils.deactivate_requires_grad(self.backbone)
        utils.deactivate_requires_grad(self.projection_head)

        # 1024 prototypes of dimension 256, stored as a bare weight matrix.
        self.prototypes = nn.Linear(256, 1024, bias=False).weight
        self.criterion = MSNLoss(gather_distributed=gather_distributed)

    def training_step(self, batch, batch_idx):
        # EMA-update the frozen target network from the anchor network.
        utils.update_momentum(self.anchor_backbone, self.backbone, 0.996)
        utils.update_momentum(self.anchor_projection_head, self.projection_head, 0.996)

        views, _, _ = batch
        views = [view.to(self.device, non_blocking=True) for view in views]
        # View layout: [target view, anchor view, focal (small) views...].
        targets = views[0]
        anchors = views[1]
        anchors_focal = torch.concat(views[2:], dim=0)

        targets_out = self.backbone(targets)
        targets_out = self.projection_head(targets_out)
        anchors_out = self.encode_masked(anchors)
        anchors_focal_out = self.encode_masked(anchors_focal)
        anchors_out = torch.cat([anchors_out, anchors_focal_out], dim=0)

        loss = self.criterion(anchors_out, targets_out, self.prototypes.data)
        self.log("train_loss_ssl", loss)
        return loss

    def encode_masked(self, anchors):
        # Randomly drop `mask_ratio` of the patch tokens before encoding
        # with the anchor network.
        batch_size, _, _, width = anchors.shape
        seq_length = (width // self.anchor_backbone.patch_size) ** 2
        idx_keep, _ = utils.random_token_mask(
            size=(batch_size, seq_length),
            mask_ratio=self.mask_ratio,
            device=self.device,
        )
        out = self.anchor_backbone(anchors, idx_keep)
        return self.anchor_projection_head(out)

    def configure_optimizers(self):
        # Optimize the anchor network and the prototypes; the EMA target
        # network is excluded (frozen).
        params = [
            *list(self.anchor_backbone.parameters()),
            *list(self.anchor_projection_head.parameters()),
            self.prototypes,
        ]
        optim = torch.optim.AdamW(
            params=params,
            lr=1.5e-4 * lr_factor,
            weight_decay=0.05,
            betas=(0.9, 0.95),
        )
        cosine_scheduler = scheduler.CosineWarmupScheduler(
            optim, self.warmup_epochs, max_epochs
        )
        return [optim], [cosine_scheduler]
class PMSNModel(MSNModel):
    """PMSN benchmark model.

    Identical to :class:`MSNModel` (same ViT-S/16 anchor/target networks,
    token masking, prototypes and optimizer) except that it trains with the
    power-law-regularized PMSN loss instead of the plain MSN loss. The
    previous implementation duplicated the whole MSN class body verbatim;
    subclassing removes that duplication while keeping behavior identical.
    """

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # Replace the MSN criterion installed by the parent __init__;
        # everything else (backbones, heads, prototypes, training loop,
        # optimizer configuration) is inherited unchanged.
        self.criterion = PMSNLoss(gather_distributed=gather_distributed)
from sklearn.cluster import KMeans
class SMoGModel(BenchmarkModule):
    """SMoG benchmark model (https://arxiv.org/pdf/2207.06167.pdf).

    Encodings are assigned to one of ``n_groups`` group features; the group
    features are periodically re-initialized with k-means over a memory bank
    of past encodings.
    """

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # create a ResNet backbone and remove the classification head
        resnet = torchvision.models.resnet18()
        self.backbone = nn.Sequential(*list(resnet.children())[:-1])
        # create a model based on ResNet
        self.projection_head = heads.SMoGProjectionHead(512, 2048, 128)
        self.prediction_head = heads.SMoGPredictionHead(128, 2048, 128)
        # Momentum copies of backbone and projection head; they receive no
        # gradients and are updated via EMA in training_step.
        self.backbone_momentum = copy.deepcopy(self.backbone)
        self.projection_head_momentum = copy.deepcopy(self.projection_head)
        utils.deactivate_requires_grad(self.backbone_momentum)
        utils.deactivate_requires_grad(self.projection_head_momentum)
        # smog
        self.n_groups = 300
        memory_bank_size = 10000
        # Memory bank of past encodings used by _reset_group_features.
        self.memory_bank = memory_bank.MemoryBankModule(size=memory_bank_size)
        # create our loss
        group_features = torch.nn.functional.normalize(
            torch.rand(self.n_groups, 128), dim=1
        ).to(self.device)
        self.smog = heads.SMoGPrototypes(group_features=group_features, beta=0.99)
        self.criterion = nn.CrossEntropyLoss()

    def _cluster_features(self, features: torch.Tensor) -> torch.Tensor:
        # Run k-means on CPU and return the L2-normalized cluster centers.
        features = features.cpu().numpy()
        kmeans = KMeans(self.n_groups).fit(features)
        clustered = torch.from_numpy(kmeans.cluster_centers_).float()
        clustered = torch.nn.functional.normalize(clustered, dim=1)
        return clustered

    def _reset_group_features(self):
        # see https://arxiv.org/pdf/2207.06167.pdf Table 7b)
        features = self.memory_bank.bank
        group_features = self._cluster_features(features.t())
        self.smog.set_group_features(group_features)

    def _reset_momentum_weights(self):
        # see https://arxiv.org/pdf/2207.06167.pdf Table 7b)
        self.backbone_momentum = copy.deepcopy(self.backbone)
        self.projection_head_momentum = copy.deepcopy(self.projection_head)
        utils.deactivate_requires_grad(self.backbone_momentum)
        utils.deactivate_requires_grad(self.projection_head_momentum)

    def training_step(self, batch, batch_idx):
        if self.global_step > 0 and self.global_step % 300 == 0:
            # reset group features and weights every 300 iterations
            self._reset_group_features()
            self._reset_momentum_weights()
        else:
            # update momentum
            utils.update_momentum(self.backbone, self.backbone_momentum, 0.99)
            utils.update_momentum(
                self.projection_head, self.projection_head_momentum, 0.99
            )
        (x0, x1), _, _ = batch
        if batch_idx % 2:
            # swap batches every second iteration
            x0, x1 = x1, x0
        # Online branch: backbone -> projection -> prediction.
        x0_features = self.backbone(x0).flatten(start_dim=1)
        x0_encoded = self.projection_head(x0_features)
        x0_predicted = self.prediction_head(x0_encoded)
        # Momentum branch encodes the second view.
        x1_features = self.backbone_momentum(x1).flatten(start_dim=1)
        x1_encoded = self.projection_head_momentum(x1_features)
        # update group features and get group assignments
        assignments = self.smog.assign_groups(x1_encoded)
        group_features = self.smog.get_updated_group_features(x0_encoded)
        logits = self.smog(x0_predicted, group_features, temperature=0.1)
        self.smog.set_group_features(group_features)
        loss = self.criterion(logits, assignments)
        # use memory bank to periodically reset the group features with k-means
        self.memory_bank(x0_encoded, update=True)
        return loss

    def configure_optimizers(self):
        params = (
            list(self.backbone.parameters())
            + list(self.projection_head.parameters())
            + list(self.prediction_head.parameters())
        )
        optim = torch.optim.SGD(
            params,
            lr=0.01,
            momentum=0.9,
            weight_decay=1e-6,
        )
        cosine_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [cosine_scheduler]
class SimMIMModel(BenchmarkModule):
    """SimMIM benchmark model: masked image modeling with a ViT encoder and a
    single linear layer as decoder."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        vit = torchvision.models.vit_b_32(pretrained=False)
        self.warmup_epochs = 40 if max_epochs >= 800 else 20
        decoder_dim = vit.hidden_dim
        # Fraction of patch tokens replaced by the learnable mask token.
        self.mask_ratio = 0.75
        self.patch_size = vit.patch_size
        self.sequence_length = vit.seq_length
        self.mask_token = nn.Parameter(torch.zeros(1, 1, decoder_dim))
        # same backbone as MAE
        self.backbone = masked_autoencoder.MAEBackbone.from_vit(vit)
        # the decoder is a simple linear layer
        self.decoder = nn.Linear(vit.hidden_dim, vit.patch_size**2 * 3)
        # L1 loss as paper suggestion
        self.criterion = nn.L1Loss()

    def forward_encoder(self, images, batch_size, idx_mask):
        # pass all the tokens to the encoder, both masked and non masked ones
        tokens = self.backbone.images_to_tokens(images, prepend_class_token=True)
        tokens_masked = utils.mask_at_index(tokens, idx_mask, self.mask_token)
        return self.backbone.encoder(tokens_masked)

    def forward_decoder(self, x_encoded):
        # Predict per-token pixel values (patch_size**2 * 3 per token).
        return self.decoder(x_encoded)

    def training_step(self, batch, batch_idx):
        images, _, _ = batch
        images = images[0]  # images is a list containing only one view
        batch_size = images.shape[0]
        idx_keep, idx_mask = utils.random_token_mask(
            size=(batch_size, self.sequence_length),
            mask_ratio=self.mask_ratio,
            device=images.device,
        )
        # Encoding...
        x_encoded = self.forward_encoder(images, batch_size, idx_mask)
        # The loss is only computed on the masked tokens.
        x_encoded_masked = utils.get_at_index(x_encoded, idx_mask)
        # Decoding...
        x_out = self.forward_decoder(x_encoded_masked)
        # get image patches for masked tokens
        patches = utils.patchify(images, self.patch_size)
        # must adjust idx_mask for missing class token
        target = utils.get_at_index(patches, idx_mask - 1)
        loss = self.criterion(x_out, target)
        return loss

    def configure_optimizers(self):
        optim = torch.optim.AdamW(
            self.parameters(),
            lr=8e-4 * lr_factor,
            weight_decay=0.05,
            betas=(0.9, 0.999),
        )
        cosine_scheduler = scheduler.CosineWarmupScheduler(
            optim, self.warmup_epochs, max_epochs
        )
        return [optim], [cosine_scheduler]
class VICRegModel(BenchmarkModule):
    """VICReg benchmark model with a ResNet-18 encoder."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # ResNet-18 without its classification layer serves as the encoder.
        resnet = torchvision.models.resnet18()
        self.backbone = nn.Sequential(*list(resnet.children())[:-1])
        self.projection_head = heads.BarlowTwinsProjectionHead(512, 2048, 2048)
        self.criterion = VICRegLoss()
        self.warmup_epochs = 40 if max_epochs >= 800 else 20

    def forward(self, x):
        features = self.backbone(x).flatten(start_dim=1)
        return self.projection_head(features)

    def training_step(self, batch, batch_index):
        (view_a, view_b), _, _ = batch
        projection_a = self.forward(view_a)
        projection_b = self.forward(view_b)
        return self.criterion(projection_a, projection_b)

    def configure_optimizers(self):
        # Training diverges without LARS
        optim = LARS(
            self.parameters(),
            lr=0.3 * lr_factor,
            weight_decay=1e-4,
            momentum=0.9,
        )
        warmup_scheduler = scheduler.CosineWarmupScheduler(
            optim, self.warmup_epochs, max_epochs
        )
        return [optim], [warmup_scheduler]
class VICRegLModel(BenchmarkModule):
    """VICRegL benchmark model with both global and local projection heads."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # create a ResNet backbone and remove the classification head
        resnet = torchvision.models.resnet18()
        # The train_backbone variable is introduced in order to fit with the
        # structure of BenchmarkModule. During training, train_backbone is used
        # to extract local and global features. During evaluation, backbone is
        # used to evaluate global features.
        self.train_backbone = nn.Sequential(*list(resnet.children())[:-2])
        self.projection_head = heads.BarlowTwinsProjectionHead(512, 2048, 2048)
        self.local_projection_head = heads.VicRegLLocalProjectionHead(512, 128, 128)
        self.average_pool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
        self.criterion = VICRegLLoss(num_matches=(16, 4))
        # backbone = feature extractor + pooling; used by BenchmarkModule.
        self.backbone = nn.Sequential(self.train_backbone, self.average_pool)
        self.warmup_epochs = 20 if max_epochs >= 800 else 10

    def forward(self, x):
        """Returns a tuple (global projection, local projections)."""
        x = self.train_backbone(x)
        # Global branch: pooled features through the projection head.
        y = self.average_pool(x).flatten(start_dim=1)
        z = self.projection_head(y)
        # Local branch keeps the spatial map.
        y_local = x.permute(0, 2, 3, 1)  # (B, D, H, W) to (B, H, W, D)
        z_local = self.local_projection_head(y_local)
        return z, z_local

    def training_step(self, batch, batch_index):
        # The batch interleaves views and their location grids: the first
        # half are views, the second half the corresponding grids.
        views_and_grids = batch[0]
        views = views_and_grids[: len(views_and_grids) // 2]
        grids = views_and_grids[len(views_and_grids) // 2 :]
        features = [self.forward(view) for view in views]
        # First two views/grids are global, the rest are local crops.
        loss = self.criterion(
            global_view_features=features[:2],
            global_view_grids=grids[:2],
            local_view_features=features[2:],
            local_view_grids=grids[2:],
        )
        return loss

    def configure_optimizers(self):
        # Training diverges without LARS
        optim = LARS(
            self.parameters(),
            lr=0.3 * lr_factor,
            weight_decay=1e-4,
            momentum=0.9,
        )
        cosine_scheduler = scheduler.CosineWarmupScheduler(
            optim, self.warmup_epochs, max_epochs
        )
        return [optim], [cosine_scheduler]
class TiCoModel(BenchmarkModule):
    """TiCo benchmark model with a momentum-updated target network."""

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        resnet = torchvision.models.resnet18()
        self.backbone = nn.Sequential(*list(resnet.children())[:-1])
        self.projection_head = heads.TiCoProjectionHead(512, 1024, 256)
        # Momentum (target) copies are frozen and only updated via EMA.
        self.backbone_momentum = copy.deepcopy(self.backbone)
        self.projection_head_momentum = copy.deepcopy(self.projection_head)
        utils.deactivate_requires_grad(self.backbone_momentum)
        utils.deactivate_requires_grad(self.projection_head_momentum)
        self.criterion = TiCoLoss()
        self.warmup_epochs = 40 if max_epochs >= 800 else 20

    def forward(self, x):
        features = self.backbone(x).flatten(start_dim=1)
        return self.projection_head(features)

    def forward_momentum(self, x):
        features = self.backbone_momentum(x).flatten(start_dim=1)
        projections = self.projection_head_momentum(features)
        return projections.detach()

    def training_step(self, batch, batch_index):
        (view_online, view_target), _, _ = batch
        # Momentum follows a cosine schedule from 0.996 towards 1.
        momentum = scheduler.cosine_schedule(self.current_epoch, max_epochs, 0.996, 1)
        utils.update_momentum(self.backbone, self.backbone_momentum, m=momentum)
        utils.update_momentum(
            self.projection_head, self.projection_head_momentum, m=momentum
        )
        z_online = self.forward(view_online.to(self.device))
        z_target = self.forward_momentum(view_target.to(self.device))
        return self.criterion(z_online, z_target)

    def configure_optimizers(self):
        optim = torch.optim.SGD(
            self.parameters(),
            lr=0.3 * lr_factor,
            weight_decay=1e-4,
            momentum=0.9,
        )
        warmup_scheduler = scheduler.CosineWarmupScheduler(
            optim, self.warmup_epochs, max_epochs
        )
        return [optim], [warmup_scheduler]
class SwaVQueueModel(BenchmarkModule):
    """SwaV benchmark model that additionally keeps a queue of past features.

    Queued features provide extra samples for the swapped prediction loss
    once training has passed ``start_queue_at_epoch``.
    """

    def __init__(self, dataloader_kNN, num_classes):
        super().__init__(dataloader_kNN, num_classes)
        # create a ResNet backbone and remove the classification head
        resnet = torchvision.models.resnet18()
        feature_dim = list(resnet.children())[-1].in_features
        self.backbone = nn.Sequential(*list(resnet.children())[:-1])
        self.projection_head = heads.SwaVProjectionHead(feature_dim, 2048, 128)
        self.prototypes = heads.SwaVPrototypes(128, 3000, 1)
        # Queued features only enter the loss from this epoch onwards.
        self.start_queue_at_epoch = 15
        self.queues = nn.ModuleList(
            [memory_bank.MemoryBankModule(size=384) for _ in range(2)]
        )  # Queue size reduced in order to work with a smaller dataset
        self.criterion = SwaVLoss()

    def forward(self, x):
        x = self._subforward(x)
        return self.prototypes(x)

    def training_step(self, batch, batch_idx):
        batch_swav, _, _ = batch
        # First two views are the high resolution crops, the rest low res.
        high_resolution, low_resolution = batch_swav[:2], batch_swav[2:]
        self.prototypes.normalize()
        high_resolution_features = [self._subforward(x) for x in high_resolution]
        low_resolution_features = [self._subforward(x) for x in low_resolution]
        high_resolution_prototypes = [
            self.prototypes(x, self.current_epoch) for x in high_resolution_features
        ]
        low_resolution_prototypes = [
            self.prototypes(x, self.current_epoch) for x in low_resolution_features
        ]
        # May be None while the queue is still warming up.
        queue_prototypes = self._get_queue_prototypes(high_resolution_features)
        loss = self.criterion(
            high_resolution_prototypes, low_resolution_prototypes, queue_prototypes
        )
        return loss

    def _subforward(self, input):
        # Backbone features -> projection head -> L2 normalization.
        features = self.backbone(input).flatten(start_dim=1)
        features = self.projection_head(features)
        features = nn.functional.normalize(features, dim=1, p=2)
        return features

    @torch.no_grad()
    def _get_queue_prototypes(self, high_resolution_features):
        """Queues the given features and returns prototypes of the queued
        features, or None before ``start_queue_at_epoch`` is reached."""
        if len(high_resolution_features) != len(self.queues):
            raise ValueError(
                f"The number of queues ({len(self.queues)}) should be equal to the number of high "
                f"resolution inputs ({len(high_resolution_features)}). Set `n_queues` accordingly."
            )
        # Get the queue features
        queue_features = []
        for i in range(len(self.queues)):
            _, features = self.queues[i](high_resolution_features[i], update=True)
            # Queue features are in (num_ftrs X queue_length) shape, while the high res
            # features are in (batch_size X num_ftrs). Swap the axes for interoperability.
            features = torch.permute(features, (1, 0))
            queue_features.append(features)
        # If loss calculation with queue prototypes starts at a later epoch,
        # just queue the features and return None instead of queue prototypes.
        if (
            self.start_queue_at_epoch > 0
            and self.current_epoch < self.start_queue_at_epoch
        ):
            return None
        # Assign prototypes
        queue_prototypes = [
            self.prototypes(x, self.current_epoch) for x in queue_features
        ]
        return queue_prototypes

    def configure_optimizers(self):
        optim = torch.optim.Adam(
            self.parameters(),
            lr=1e-3 * lr_factor,
            weight_decay=1e-6,
        )
        cosine_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [cosine_scheduler]
# Benchmark models to train and evaluate in the loop below. Entries that are
# commented out are disabled by default because they require 224x224 inputs.
models = [
    BarlowTwinsModel,
    BYOLModel,
    DCL,
    DCLW,
    DINOModel,
    FastSiamModel,
    # MAEModel, # disabled by default because MAE uses larger images with size 224
    MSNModel,
    MocoModel,
    NNCLRModel,
    PMSNModel,
    SimCLRModel,
    # SimMIMModel, # disabled by default because SimMIM uses larger images with size 224
    SimSiamModel,
    SwaVModel,
    SwaVQueueModel,
    SMoGModel,
    TiCoModel,
    VICRegModel,
    VICRegLModel,
]
# Results are collected per model name: {model_name: [run dict, ...]}.
bench_results = dict()
experiment_version = None
# loop through configurations and train models
for BenchmarkModel in models:
    runs = []
    model_name = BenchmarkModel.__name__.replace("Model", "")
    for seed in range(n_runs):
        # Seed everything so each run is reproducible.
        pl.seed_everything(seed)
        dataset_train_ssl = create_dataset_train_ssl(BenchmarkModel)
        dataloader_train_ssl, dataloader_train_kNN, dataloader_test = get_data_loaders(
            batch_size=batch_size, dataset_train_ssl=dataset_train_ssl
        )
        benchmark_model = BenchmarkModel(dataloader_train_kNN, classes)
        # Save logs to: {CWD}/benchmark_logs/cifar10/{experiment_version}/{model_name}/
        # If multiple runs are specified a subdirectory for each run is created.
        sub_dir = model_name if n_runs <= 1 else f"{model_name}/run{seed}"
        logger = TensorBoardLogger(
            save_dir=os.path.join(logs_root_dir, "imagenette"),
            name="",
            sub_dir=sub_dir,
            version=experiment_version,
        )
        if experiment_version is None:
            # Save results of all models under same version directory
            experiment_version = logger.version
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            dirpath=os.path.join(logger.log_dir, "checkpoints")
        )
        trainer = pl.Trainer(
            max_epochs=max_epochs,
            devices=devices,
            accelerator=accelerator,
            default_root_dir=logs_root_dir,
            strategy=strategy,
            sync_batchnorm=sync_batchnorm,
            logger=logger,
            callbacks=[checkpoint_callback],
        )
        start = time.time()
        # NOTE(review): the test dataloader is passed as validation data —
        # presumably consumed by BenchmarkModule's kNN evaluation; confirm.
        trainer.fit(
            benchmark_model,
            train_dataloaders=dataloader_train_ssl,
            val_dataloaders=dataloader_test,
        )
        end = time.time()
        run = {
            "model": model_name,
            "batch_size": batch_size,
            "epochs": max_epochs,
            "max_accuracy": benchmark_model.max_accuracy,
            "runtime": end - start,
            "gpu_memory_usage": torch.cuda.max_memory_allocated(),
            "seed": seed,
        }
        runs.append(run)
        print(run)
        # delete model and trainer + free up cuda memory
        del benchmark_model
        del trainer
        torch.cuda.reset_peak_memory_stats()
        torch.cuda.empty_cache()
    bench_results[model_name] = runs
# print results table
header = (
    f"| {'Model':<13} | {'Batch Size':>10} | {'Epochs':>6} "
    f"| {'KNN Test Accuracy':>18} | {'Time':>10} | {'Peak GPU Usage':>14} |"
)
print("-" * len(header))
print(header)
print("-" * len(header))
for model, results in bench_results.items():
    # Aggregate across runs: mean runtime, per-run accuracies, max GPU usage.
    runtime = np.array([result["runtime"] for result in results])
    runtime = runtime.mean() / 60  # convert to min
    accuracy = np.array([result["max_accuracy"] for result in results])
    gpu_memory_usage = np.array([result["gpu_memory_usage"] for result in results])
    gpu_memory_usage = gpu_memory_usage.max() / (1024**3)  # convert to gbyte
    if len(accuracy) > 1:
        # Multiple runs: report mean and standard deviation.
        accuracy_msg = f"{accuracy.mean():>8.3f} +- {accuracy.std():>4.3f}"
    else:
        accuracy_msg = f"{accuracy.mean():>18.3f}"
    print(
        f"| {model:<13} | {batch_size:>10} | {max_epochs:>6} "
        f"| {accuracy_msg} | {runtime:>6.1f} Min "
        f"| {gpu_memory_usage:>8.1f} GByte |",
        flush=True,
    )
print("-" * len(header))
| 57,415 | 37.150166 | 122 | py |
lightly | lightly-master/docs/source/getting_started/code_examples/plot_image_augmentations.py | import glob
from PIL import Image
import lightly
# let's get all jpg filenames from a folder
glob_to_data = "/datasets/clothing-dataset/images/*.jpg"
fnames = glob.glob(glob_to_data)

# load the first two images using pillow
input_images = [Image.open(fname) for fname in fnames[:2]]

# create our collate function
collate_fn_simclr = lightly.data.SimCLRCollateFunction()

# plot the images
fig = lightly.utils.debug.plot_augmented_images(input_images, collate_fn_simclr)

# let's disable blur
# NOTE(review): this collate function is constructed with default arguments,
# identical to the one above — blur is NOT actually disabled here; confirm
# whether an argument (e.g. a blur probability of 0) was meant to be passed.
collate_fn_simclr_no_blur = lightly.data.SimCLRCollateFunction()
fig = lightly.utils.debug.plot_augmented_images(input_images, collate_fn_simclr_no_blur)

# we can also use the DINO collate function instead
collate_fn_dino = lightly.data.DINOCollateFunction()
fig = lightly.utils.debug.plot_augmented_images(input_images, collate_fn_dino)
| 836 | 30 | 88 | py |
lightly | lightly-master/docs/source/tutorials_source/package/Base-RCNN-FPN.yaml | MODEL:
META_ARCHITECTURE: "GeneralizedRCNN"
BACKBONE:
NAME: "build_resnet_fpn_backbone"
RESNETS:
OUT_FEATURES: ["res2", "res3", "res4", "res5"]
FPN:
IN_FEATURES: ["res2", "res3", "res4", "res5"]
ANCHOR_GENERATOR:
SIZES: [[32], [64], [128], [256], [512]] # One size for each in feature map
ASPECT_RATIOS: [[0.5, 1.0, 2.0]] # Three aspect ratios (same for all in feature maps)
RPN:
IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"]
PRE_NMS_TOPK_TRAIN: 2000 # Per FPN level
PRE_NMS_TOPK_TEST: 1000 # Per FPN level
# Detectron1 uses 2000 proposals per-batch,
# (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue)
# which is approximately 1000 proposals per-image since the default batch size for FPN is 2.
POST_NMS_TOPK_TRAIN: 1000
POST_NMS_TOPK_TEST: 1000
ROI_HEADS:
NAME: "StandardROIHeads"
IN_FEATURES: ["p2", "p3", "p4", "p5"]
ROI_BOX_HEAD:
NAME: "FastRCNNConvFCHead"
NUM_FC: 2
POOLER_RESOLUTION: 7
ROI_MASK_HEAD:
NAME: "MaskRCNNConvUpsampleHead"
NUM_CONV: 4
POOLER_RESOLUTION: 14
DATASETS:
TRAIN: ("coco_2017_train",)
TEST: ("coco_2017_val",)
SOLVER:
IMS_PER_BATCH: 16
BASE_LR: 0.02
STEPS: (60000, 80000)
MAX_ITER: 90000
INPUT:
MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
VERSION: 2
| 1,318 | 29.674419 | 96 | yaml |
lightly | lightly-master/docs/source/tutorials_source/package/tutorial_custom_augmentations.py | """
.. _lightly-custom-augmentation-5:
Tutorial 5: Custom Augmentations
==============================================
In this tutorial, we will train a model on chest X-ray images in a self-supervised manner.
In self-supervised learning, X-ray images can pose some problems: They are often more
than eight bits deep which makes them incompatible with certain standard torchvision
transforms such as, for example, random-resized cropping. Additionally, some augmentations
which are often used in self-supervised learning are ineffective on X-ray images.
For example, applying color jitter to an X-ray image with a single color channel
does not make sense.
We will show how to address these problems and how to train a ResNet-18 with MoCo
on a set of 16-bit X-ray images in TIFF format.
The original dataset this tutorial is based on can be found `on Kaggle <https://www.kaggle.com/c/vinbigdata-chest-xray-abnormalities-detection/overview>`_.
These images are in the DICOM format. For simplicity and efficiency reasons,
we randomly selected ~4000 images from the above dataset, resized them such that the
maximum of the width and height of each image is no larger than 512, and converted
them to the 16-bit TIFF format. To do so, we used ImageMagick which is preinstalled
on most Linux systems.
.. code::
mogrify -path path/to/new/dataset -resize 512x512 -format tiff "*.dicom"
"""
import copy
# %%
# Imports
# -------
#
# Import the Python frameworks we need for this tutorial.
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torchvision
from PIL import Image
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
from lightly.data import LightlyDataset
from lightly.loss import NTXentLoss
from lightly.models.modules.heads import MoCoProjectionHead
from lightly.models.utils import (
batch_shuffle,
batch_unshuffle,
deactivate_requires_grad,
update_momentum,
)
from lightly.transforms.multi_view_transform import MultiViewTransform
# %%
# Configuration
# -------------
# Let's set the configuration parameters for our experiments.
#
# We will use eight workers to fetch the data from disc and a batch size of 128.
# The input size of the images is set to 128. With these settings, the training
# requires 2.5GB of GPU memory.
# Experiment configuration (see the prose above for memory requirements).
num_workers = 8
batch_size = 128
input_size = 128  # side length the views are cropped/resized to
seed = 1
max_epochs = 50

# %%
# Let's set the seed for our experiments.
pl.seed_everything(seed)

# %%
# Set the path to our dataset.
path_to_data = "/datasets/vinbigdata/train_small"
# %%
# Setup custom data augmentations
# -------------------------------
#
# The key to working with 16-bit X-ray images is to convert them to 8-bit images
# which are compatible with the torchvision augmentations without creating harmful
# artifacts. A good way to do so, is to use histogram normalization as described in
# `this paper <https://arxiv.org/pdf/2101.04909.pdf>`_ about Covid-19 prognosis.
#
# Let's write an augmentation, which takes as input a numpy array with 16-bit input
# depth and returns a histogram normalized 8-bit PIL image.
class HistogramNormalize:
    """Performs histogram normalization on numpy array and returns 8-bit image.

    Code was taken and adapted from Facebook:
    https://github.com/facebookresearch/CovidPrognosis

    """

    def __init__(self, number_bins: int = 256):
        # Number of histogram bins used to estimate the intensity CDF.
        self.number_bins = number_bins

    def __call__(self, image: np.array) -> Image:
        """Maps the (e.g. 16-bit) input array through its intensity CDF and
        returns the equalized result as an 8-bit PIL image."""
        # Get the image histogram.
        image_histogram, bins = np.histogram(
            image.flatten(), self.number_bins, density=True
        )
        cdf = image_histogram.cumsum()  # cumulative distribution function
        cdf = 255 * cdf / cdf[-1]  # normalize
        # Use linear interpolation of cdf to find new pixel values.
        image_equalized = np.interp(image.flatten(), bins[:-1], cdf)
        # Cast to uint8: np.interp returns float64, for which PIL's
        # Image.fromarray has no mode mapping, and the documented contract
        # of this transform is an 8-bit image.
        image_equalized = image_equalized.reshape(image.shape).astype(np.uint8)
        return Image.fromarray(image_equalized)
# %%
# Since we can't use color jitter on X-ray images, let's replace it and add some
# Gaussian noise instead. It's easiest to apply this after the image has been
# converted to a PyTorch tensor.
class GaussianNoise:
    """Adds random Gaussian noise to a tensor.

    The noise level depends on the mean of the pixel values: an integer
    signal-to-noise ratio is drawn uniformly from {4, ..., 7} and the noise
    standard deviation is mean / snr.
    See https://arxiv.org/pdf/2101.04909.pdf for more information.
    """

    def __call__(self, sample: torch.Tensor) -> torch.Tensor:
        signal_level = sample.mean()
        snr = np.random.randint(low=4, high=8)
        noise_std = signal_level / snr
        perturbation = torch.normal(torch.zeros(sample.shape), noise_std)
        return sample + perturbation
# %%
# Now that we have implemented our custom augmentations, we can combine them
# with available augmentations from the torchvision library to get to the same
# set of augmentations as used in the aforementioned paper. Make sure, that
# the first augmentation is the histogram normalization, and that the Gaussian
# noise is applied after converting the image to a tensor.
#
# Note that we also transform the image from grayscale to RGB by simply repeating
# the single color channel three times. The reason for this is that our ResNet expects
# a three color channel input. This step can be skipped if a different backbone network
# is used.
# Compose the custom augmentations with available augmentations.
view_transform = torchvision.transforms.Compose(
    [
        # Histogram normalization must come first: it turns the 16-bit numpy
        # array into an 8-bit PIL image the torchvision transforms can handle.
        HistogramNormalize(),
        torchvision.transforms.Grayscale(num_output_channels=3),
        torchvision.transforms.RandomResizedCrop(size=input_size, scale=(0.2, 1.0)),
        torchvision.transforms.RandomHorizontalFlip(p=0.5),
        torchvision.transforms.RandomVerticalFlip(p=0.5),
        torchvision.transforms.GaussianBlur(21),
        torchvision.transforms.ToTensor(),
        # Gaussian noise operates on tensors, so it must follow ToTensor.
        GaussianNoise(),
    ]
)

# Create a multiview transform that returns two different augmentations of each image.
transform = MultiViewTransform(transforms=[view_transform, view_transform])
# %%
# Let's take a look at what our augmentation pipeline does to an image!
# We plot the original image on the left and two random augmentations on the
# right.
# Visualize the augmentation pipeline on a single example image.
example_image_name = "55e8e3db7309febee415515d06418171.tiff"
example_image_path = os.path.join(path_to_data, example_image_name)
example_image = np.array(Image.open(example_image_path))

# Torch transform returns a 3 x W x H image, we only show one color channel.
augmented_image_1 = view_transform(example_image).numpy()[0]
augmented_image_2 = view_transform(example_image).numpy()[0]

# Original on the left, two random augmentations on the right.
fig, axs = plt.subplots(1, 3)
axs[0].imshow(example_image)
axs[0].set_axis_off()
axs[0].set_title("Original Image")
axs[1].imshow(augmented_image_1)
axs[1].set_axis_off()
axs[2].imshow(augmented_image_2)
axs[2].set_axis_off()
# %%
# Setup dataset and dataloader
# ------------------------------
#
# We create a dataset which loads the images in the input directory. Since the
# input images are 16 bits deep, we need to overwrite the image loader such that
# it doesn't convert the images to RGB (and hence to 8-bit) automatically.
#
# .. note:: The `LightlyDataset` uses a torchvision dataset underneath, which in turn uses
# an image loader which transforms the input image to an 8-bit RGB image. If a 16-bit
# grayscale image is loaded that way, all pixel values above 255 are simply clamped.
# Therefore, we overwrite the default image loader with our custom one.
def tiff_loader(f):
    """Loads a 16-bit tiff image and returns it as a numpy array.

    Used to bypass the default loader's 8-bit RGB conversion.
    """
    with open(f, "rb") as file_handle:
        return np.array(Image.open(file_handle))
# Create the dataset with the custom transform and overwrite the image loader.
dataset_train = LightlyDataset(input_dir=path_to_data, transform=transform)
# Replace the default loader so 16-bit TIFFs are not clamped to 8-bit RGB.
dataset_train.dataset.loader = tiff_loader

# Setup the dataloader for training.
dataloader_train = torch.utils.data.DataLoader(
    dataset_train,
    batch_size=batch_size,
    shuffle=True,
    drop_last=True,
    num_workers=num_workers,
)
# %%
# Create the MoCo model
# -----------------------
# Using the building blocks provided by lightly we can write our MoCo model.
# We implement it as a PyTorch Lightning module. For the criterion, we use
# the NTXentLoss which should always be used with MoCo.
#
# MoCo also requires a memory bank - we set its size to 4096 which is approximately
# the size of the input dataset. The temperature parameter of the loss is set to 0.1.
# This smoothens the cross entropy term in the loss function.
#
# The choice of the optimizer is left to the user. Here, we go with simple stochastic
# gradient descent with momentum.
class MoCoModel(pl.LightningModule):
    """MoCo model with a ResNet-18 backbone, momentum network, and memory bank."""

    def __init__(self):
        super().__init__()
        # Create a ResNet backbone and remove the classification head.
        resnet = torchvision.models.resnet18()
        self.backbone = nn.Sequential(
            *list(resnet.children())[:-1],
        )
        # The backbone has output dimension 512 which also defines the size of
        # the hidden dimension. We select 128 for the output dimension.
        self.projection_head = MoCoProjectionHead(512, 512, 128)
        # Add the momentum network (frozen copies updated via EMA).
        self.backbone_momentum = copy.deepcopy(self.backbone)
        self.projection_head_momentum = copy.deepcopy(self.projection_head)
        deactivate_requires_grad(self.backbone_momentum)
        deactivate_requires_grad(self.projection_head_momentum)
        # Create the loss function with memory bank.
        self.criterion = NTXentLoss(temperature=0.1, memory_bank_size=4096)

    def training_step(self, batch, batch_idx):
        (x_q, x_k), _, _ = batch
        # Momentum update
        update_momentum(self.backbone, self.backbone_momentum, 0.99)
        update_momentum(self.projection_head, self.projection_head_momentum, 0.99)
        # Get the queries.
        q = self.backbone(x_q).flatten(start_dim=1)
        q = self.projection_head(q)
        # Get the keys: the key batch is shuffled before the momentum encoder
        # and unshuffled afterwards (MoCo's batch-shuffle trick).
        k, shuffle = batch_shuffle(x_k)
        k = self.backbone_momentum(k).flatten(start_dim=1)
        k = self.projection_head_momentum(k)
        k = batch_unshuffle(k, shuffle)
        loss = self.criterion(q, k)
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        # Use SGD optimizer with momentum and weight decay.
        optim = torch.optim.SGD(
            self.parameters(),
            lr=0.1,
            momentum=0.9,
            weight_decay=1e-4,
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
# %%
# Train MoCo with custom augmentations
# -------------------------------------
# Training the self-supervised model is now very easy. We can create a new
# MoCoModel instance and pass it to the PyTorch Lightning trainer.
model = MoCoModel()
# Train with 16-bit mixed precision on a single GPU.
trainer = pl.Trainer(
    max_epochs=max_epochs,
    devices=1,
    accelerator="gpu",
    precision=16,
)
trainer.fit(model, dataloader_train)
# %%
# Evaluate the results
# ------------------------
# It's always a good idea to evaluate how good the learned representations really
# are. How to do this depends on the available data and metdata. Luckily, in our case,
# we have annotations of critical findings on the X-ray images. We can use this information
# to see, whether images with similar annotations are grouped together.
#
# We start by getting a vector representation of each image in the dataset. For this,
# we create a new dataloader. This time, we can pass the transform directly to the dataset.
# test transforms differ from training transforms as they do not introduce
# additional noise
test_transforms = torchvision.transforms.Compose(
    [
        HistogramNormalize(),
        torchvision.transforms.Grayscale(num_output_channels=3),
        torchvision.transforms.Resize(input_size),
        torchvision.transforms.ToTensor(),
    ]
)

# Create the dataset and overwrite the image loader as before.
dataset_test = LightlyDataset(input_dir=path_to_data, transform=test_transforms)
dataset_test.dataset.loader = tiff_loader

# Create the test dataloader.
# batch_size=1 without shuffling keeps embeddings aligned with filenames.
dataloader_test = torch.utils.data.DataLoader(
    dataset_test, batch_size=1, shuffle=False, drop_last=False, num_workers=num_workers
)
# Next, we add a small helper function to generate embeddings of our images
def generate_embeddings(model, dataloader):
    """Computes a normalized embedding for every image in the dataloader.

    Returns a tuple (embeddings, filenames) with matching row order.
    """
    all_embeddings = []
    all_filenames = []
    with torch.no_grad():
        for images, _, batch_fnames in dataloader:
            images = images.to(model.device)
            features = model.backbone(images).flatten(start_dim=1)
            all_embeddings.append(features)
            all_filenames.extend(batch_fnames)
    stacked = torch.cat(all_embeddings, 0)
    return normalize(stacked), all_filenames
# Generate the embeddings (remember to put the model in eval mode).
model.eval()
# embeddings: one normalized row per image; fnames: filenames in row order.
embeddings, fnames = generate_embeddings(model, dataloader_test)
# %%
# Now, we can use the embeddings to search for nearest neighbors.
#
# We choose three example images. For each example image, we find 50 nearest neighbors.
# Then, we plot the critical findings in the example image (dark blue) and the distribution
# of the critical findings in the nearest neighbor images (light blue) as bar plots.
# Transform the original bounding box annotations to multiclass labels.
fnames = [fname.split(".")[0] for fname in fnames]
df = pandas.read_csv("/datasets/vinbigdata/train.csv")
classes = list(np.unique(df.class_name))
filenames = list(np.unique(df.image_id))

# Iterate over all bounding boxes and add a one-hot label if an image contains
# a bounding box of a given class, after that, the array "multilabels" will
# contain a row for every image in the input dataset and each row of the
# array contains a one-hot vector of critical findings for this image.
multilabels = np.zeros((len(dataset_test.get_filenames()), len(classes)))
# Precompute index lookups: calling list.index() per annotation row is
# O(n) each, and the csv contains one row per bounding box.
row_by_fname = {name: i for i, name in enumerate(fnames)}
col_by_class = {name: j for j, name in enumerate(classes)}
for filename, label in zip(df.image_id, df.class_name):
    # Annotations for images that are not part of the (smaller) embedded
    # dataset are skipped on purpose. This replaces the previous bare
    # `except Exception: pass`, which would also have hidden real bugs.
    row = row_by_fname.get(filename.split(".")[0])
    if row is not None:
        multilabels[row, col_by_class[label]] = 1.0
def plot_knn_multilabels(
    embeddings, multilabels, samples_idx, filenames, n_neighbors=50
):
    """For each sample index, plot its own multilabel vector (dark blue)
    next to the mean multilabel vector of its ``n_neighbors`` nearest
    neighbors (light blue) as a grouped bar chart.

    Args:
        embeddings: (num_images, dim) array used for the k-NN search.
        multilabels: (num_images, num_classes) one-hot findings matrix.
        samples_idx: iterable of row indices to visualize (one figure each).
        filenames: names used as plot titles, parallel to ``embeddings``.
        n_neighbors: number of neighbors queried per sample.

    NOTE(review): uses the module-level ``classes`` list for the x-tick
    labels rather than taking it as a parameter.
    """
    # Let's look at the nearest neighbors for some samples using the sklearn library.
    # kneighbors on the fit data includes the query itself among the neighbors.
    nbrs = NearestNeighbors(n_neighbors=n_neighbors).fit(embeddings)
    _, indices = nbrs.kneighbors(embeddings)
    # Position the bars: r1 for the sample itself, r2 shifted for the neighbors.
    bar_width = 0.4
    r1 = np.arange(multilabels.shape[1])
    r2 = r1 + bar_width
    # Loop through our randomly picked samples.
    for idx in samples_idx:
        fig = plt.figure()
        bars1 = multilabels[idx]
        # Mean over the neighbors = fraction of neighbors with each finding.
        bars2 = np.mean(multilabels[indices[idx]], axis=0)
        plt.title(filenames[idx])
        plt.bar(r1, bars1, color="steelblue", edgecolor="black", width=bar_width)
        plt.bar(r2, bars2, color="lightsteelblue", edgecolor="black", width=bar_width)
        # Center the class labels between each pair of bars.
        plt.xticks(0.5 * (r1 + r2), classes, rotation=90)
        plt.tight_layout()
# Plot the distribution of the multilabels of the k nearest neighbors of
# the three example images at indices 4111, 3340, and 1796.
k = 20
plot_knn_multilabels(embeddings, multilabels, [4111, 3340, 1796], fnames, n_neighbors=k)
| 15,483 | 34.513761 | 155 | py |
lightly | lightly-master/docs/source/tutorials_source/package/tutorial_moco_memory_bank.py | # -*- coding: utf-8 -*-
"""
.. _lightly-moco-tutorial-2:
Tutorial 2: Train MoCo on CIFAR-10
==============================================
In this tutorial, we will train a model based on the MoCo Paper
`Momentum Contrast for Unsupervised Visual Representation Learning <https://arxiv.org/abs/1911.05722>`_.
When training self-supervised models using contrastive loss we
usually face one big problem. To get good results, we need
many negative examples for the contrastive loss to work. Therefore,
we need a large batch size. However, not everyone has access to a cluster
full of GPUs or TPUs. To solve this problem, alternative approaches have been developed.
Some of them use a memory bank to store old negative examples we can query
to compensate for the smaller batch size. MoCo takes this approach
one step further by including a momentum encoder.
We use the **CIFAR-10** dataset for this tutorial.
In this tutorial you will learn:
- How to use lightly to load a dataset and train a model
- How to create a MoCo model with a memory bank
- How to use the pre-trained model after self-supervised learning for a
transfer learning task
"""
# %%
# Imports
# -------
#
# Import the Python frameworks we need for this tutorial.
# Make sure you have lightly installed.
#
# .. code-block:: console
#
# pip install lightly
import copy
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torchvision
from lightly.data import LightlyDataset
from lightly.loss import NTXentLoss
from lightly.models import ResNetGenerator
from lightly.models.modules.heads import MoCoProjectionHead
from lightly.models.utils import (
batch_shuffle,
batch_unshuffle,
deactivate_requires_grad,
update_momentum,
)
from lightly.transforms import MoCoV2Transform, utils
# %%
# Configuration
# -------------
#
# We set some configuration parameters for our experiment.
# Feel free to change them and analyze the effect.
#
# The default configuration uses a batch size of 512. This requires around 6.4GB
# of GPU memory.
# When training for 100 epochs you should achieve around 73% test set accuracy.
# When training for 200 epochs accuracy increases to about 80%.
num_workers = 8
batch_size = 512
memory_bank_size = 4096
seed = 1
max_epochs = 100
# %%
# Replace the path with the location of your CIFAR-10 dataset.
# We assume we have a train folder with subfolders
# for each class and .png images inside.
#
# You can download `CIFAR-10 in folders from Kaggle
# <https://www.kaggle.com/swaroopkml/cifar10-pngs-in-folders>`_.
# The dataset structure should be like this:
# cifar10/train/
# L airplane/
# L 10008_airplane.png
# L ...
# L automobile/
# L bird/
# L cat/
# L deer/
# L dog/
# L frog/
# L horse/
# L ship/
# L truck/
path_to_train = "/datasets/cifar10/train/"
path_to_test = "/datasets/cifar10/test/"
# %%
# Let's set the seed to ensure reproducibility of the experiments
pl.seed_everything(seed)
# %%
# Setup data augmentations and loaders
# ------------------------------------
#
# We start with our data preprocessing pipeline. We can implement augmentations
# from the MoCo paper using the transforms provided by lightly.
# Images from the CIFAR-10 dataset have a resolution of 32x32 pixels. Let's use
# this resolution to train our model.
#
# .. note:: We could use a higher input resolution to train our model. However,
# since the original resolution of CIFAR-10 images is low there is no real value
# in increasing the resolution. A higher resolution results in higher memory
# consumption and to compensate for that we would need to reduce the batch size.
# disable blur because we're working with tiny images
transform = MoCoV2Transform(
input_size=32,
gaussian_blur=0.0,
)
# %%
# We don't want any augmentation for our test data. Therefore,
# we create custom, torchvision based data transformations.
# Let's ensure the size is correct and we normalize the data in
# the same way as we do with the training data.
# Augmentations typically used to train on cifar-10
train_classifier_transforms = torchvision.transforms.Compose(
[
torchvision.transforms.RandomCrop(32, padding=4),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
mean=utils.IMAGENET_NORMALIZE["mean"],
std=utils.IMAGENET_NORMALIZE["std"],
),
]
)
# No additional augmentations for the test set
test_transforms = torchvision.transforms.Compose(
[
torchvision.transforms.Resize((32, 32)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
mean=utils.IMAGENET_NORMALIZE["mean"],
std=utils.IMAGENET_NORMALIZE["std"],
),
]
)
# We use the moco augmentations for training moco
dataset_train_moco = LightlyDataset(input_dir=path_to_train, transform=transform)
# Since we also train a linear classifier on the pre-trained moco model we
# reuse the test augmentations here (MoCo augmentations are very strong and
# usually reduce accuracy of models which are not used for contrastive learning.
# Our linear layer will be trained using cross entropy loss and labels provided
# by the dataset. Therefore we chose light augmentations.)
dataset_train_classifier = LightlyDataset(
input_dir=path_to_train, transform=train_classifier_transforms
)
dataset_test = LightlyDataset(input_dir=path_to_test, transform=test_transforms)
# %%
# Create the dataloaders to load and preprocess the data
# in the background.
dataloader_train_moco = torch.utils.data.DataLoader(
dataset_train_moco,
batch_size=batch_size,
shuffle=True,
drop_last=True,
num_workers=num_workers,
)
dataloader_train_classifier = torch.utils.data.DataLoader(
dataset_train_classifier,
batch_size=batch_size,
shuffle=True,
drop_last=True,
num_workers=num_workers,
)
dataloader_test = torch.utils.data.DataLoader(
dataset_test,
batch_size=batch_size,
shuffle=False,
drop_last=False,
num_workers=num_workers,
)
# %%
# Create the MoCo Lightning Module
# --------------------------------
# Now we create our MoCo model. We use PyTorch Lightning to train
# our model. We follow the specification of the lightning module.
# In this example we set the number of features for the hidden dimension to 512.
# The momentum for the Momentum Encoder is set to 0.99 (default is 0.999) since
# other reports show that this works better for Cifar-10.
#
# For the backbone we use the lightly variant of a resnet-18. You can use another model following
# our `playground to use custom backbones <https://colab.research.google.com/drive/1ubepXnpANiWOSmq80e-mqAxjLx53m-zu?usp=sharing>`_.
#
# .. note:: We use a split batch norm to simulate multi-gpu behaviour. Combined
# with the use of batch shuffling, this prevents the model from communicating
# through the batch norm layers.
class MocoModel(pl.LightningModule):
    """MoCo with a momentum (key) encoder and a memory-bank NTXent loss."""

    def __init__(self):
        super().__init__()

        # Backbone: lightly ResNet-18 without its classification layer.
        # num_splits=8 enables split batch norm, emulating the per-GPU
        # statistics of the original multi-GPU MoCo setup.
        resnet = ResNetGenerator("resnet-18", 1, num_splits=8)
        self.backbone = nn.Sequential(
            *list(resnet.children())[:-1],
            nn.AdaptiveAvgPool2d(1),
        )
        self.projection_head = MoCoProjectionHead(512, 512, 128)

        # Momentum encoder: an EMA copy of the online encoder that
        # receives no gradients.
        self.backbone_momentum = copy.deepcopy(self.backbone)
        self.projection_head_momentum = copy.deepcopy(self.projection_head)
        deactivate_requires_grad(self.backbone_momentum)
        deactivate_requires_grad(self.projection_head_momentum)

        # Contrastive loss backed by a queue of negative examples.
        self.criterion = NTXentLoss(temperature=0.1, memory_bank_size=memory_bank_size)

    def training_step(self, batch, batch_idx):
        (query_img, key_img), _, _ = batch

        # EMA update of the key encoder before computing this step's keys.
        update_momentum(self.backbone, self.backbone_momentum, 0.99)
        update_momentum(self.projection_head, self.projection_head_momentum, 0.99)

        # Queries come from the gradient-carrying online encoder.
        query = self.projection_head(self.backbone(query_img).flatten(start_dim=1))

        # Keys are computed with batch shuffling so the split batch norm
        # cannot leak information between the two views.
        shuffled, shuffle_idx = batch_shuffle(key_img)
        key = self.projection_head_momentum(
            self.backbone_momentum(shuffled).flatten(start_dim=1)
        )
        key = batch_unshuffle(key, shuffle_idx)

        loss = self.criterion(query, key)
        self.log("train_loss_ssl", loss)
        return loss

    def on_train_epoch_end(self):
        self.custom_histogram_weights()

    def custom_histogram_weights(self):
        # Log one tensorboard histogram per parameter; useful for debugging.
        for name, params in self.named_parameters():
            self.logger.experiment.add_histogram(name, params, self.current_epoch)

    def configure_optimizers(self):
        optim = torch.optim.SGD(
            self.parameters(),
            lr=6e-2,
            momentum=0.9,
            weight_decay=5e-4,
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
# %%
# Create the Classifier Lightning Module
# --------------------------------------
# We create a linear classifier using the features we extract using MoCo
# and train it on the dataset
class Classifier(pl.LightningModule):
    """Linear evaluation head trained on top of a frozen, pre-trained backbone."""

    def __init__(self, backbone):
        super().__init__()
        # Use the pretrained ResNet backbone as a fixed feature extractor.
        self.backbone = backbone
        deactivate_requires_grad(backbone)
        # Only this linear layer is trained (512 features -> 10 classes).
        self.fc = nn.Linear(512, 10)
        self.criterion = nn.CrossEntropyLoss()
        # (num_samples, num_correct) tuples collected over validation batches.
        self.validation_step_outputs = []

    def forward(self, x):
        features = self.backbone(x).flatten(start_dim=1)
        return self.fc(features)

    def training_step(self, batch, batch_idx):
        images, targets, _ = batch
        loss = self.criterion(self.forward(images), targets)
        self.log("train_loss_fc", loss)
        return loss

    def on_train_epoch_end(self):
        self.custom_histogram_weights()

    def custom_histogram_weights(self):
        # Log one tensorboard histogram per parameter; useful for debugging.
        for name, params in self.named_parameters():
            self.logger.experiment.add_histogram(name, params, self.current_epoch)

    def validation_step(self, batch, batch_idx):
        images, targets, _ = batch
        probs = torch.nn.functional.softmax(self.forward(images), dim=1)
        # Count correct top-1 predictions in this batch.
        _, predicted = torch.max(probs, 1)
        num = predicted.shape[0]
        correct = (predicted == targets).float().sum()
        self.validation_step_outputs.append((num, correct))
        return num, correct

    def on_validation_epoch_end(self):
        # Aggregate per-batch counts into a single top-1 accuracy.
        if self.validation_step_outputs:
            nums, corrects = zip(*self.validation_step_outputs)
            acc = sum(corrects) / sum(nums)
            self.log("val_acc", acc, on_epoch=True, prog_bar=True)
            self.validation_step_outputs.clear()

    def configure_optimizers(self):
        optim = torch.optim.SGD(self.fc.parameters(), lr=30.0)
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
# %%
# Train the MoCo model
# --------------------
#
# We can instantiate the model and train it using the
# lightning trainer.
# Instantiate MoCo and pre-train it (self-supervised, no labels used).
model = MocoModel()
trainer = pl.Trainer(max_epochs=max_epochs, devices=1, accelerator="gpu")
trainer.fit(model, dataloader_train_moco)
# %%
# Train the Classifier
# eval() freezes the (split) batch-norm statistics of the pretrained backbone.
model.eval()
classifier = Classifier(model.backbone)
# Fresh trainer: the linear head is trained with labels and validated on the test set.
trainer = pl.Trainer(max_epochs=max_epochs, devices=1, accelerator="gpu")
trainer.fit(classifier, dataloader_train_classifier, dataloader_test)
# %%
# Checkout the tensorboard logs while the model is training.
#
# Run `tensorboard --logdir lightning_logs/` to start tensorboard
#
# .. note:: If you run the code on a remote machine you can't just
# access the tensorboard logs. You need to forward the port.
# You can do this by using an editor such as Visual Studio Code
# which has a port forwarding functionality (make sure
# the remote extensions are installed and are connected with your machine).
#
# Or you can use a shell command similar to this one to forward port
# 6006 from your remote machine to your local machine:
#
# `ssh username:host -N -L localhost:6006:localhost:6006`
# %%
# Next Steps
# ------------
#
# Interested in exploring other self-supervised models? Check out our other
# tutorials:
#
# - :ref:`lightly-simclr-tutorial-3`
# - :ref:`lightly-simsiam-tutorial-4`
# - :ref:`lightly-custom-augmentation-5`
# - :ref:`lightly-detectron-tutorial-6`
#
| 13,053 | 31.472637 | 132 | py |
lightly | lightly-master/docs/source/tutorials_source/package/tutorial_pretrain_detectron2.py | """
.. _lightly-detectron-tutorial-6:
.. role:: bash(code)
:language: bash
Tutorial 6: Pre-train a Detectron2 Backbone with Lightly
==========================================================
In this tutorial we show how you can do self-supervised pre-training of a
`Detectron2 <https://github.com/facebookresearch/detectron2>`_ backbone with lightly.
The focus of this tutorial is on how to get and store a pre-trained ResNet50
backbone of the popular Detectron2 framework. If you want to learn more about
self-supervised learning in general, go check out the following tutorials:
- :ref:`lightly-moco-tutorial-2`
- :ref:`lightly-simclr-tutorial-3`:
- :ref:`lightly-simsiam-tutorial-4`
What you will learn:
- How to retrieve a Detectron2 ResNet50 backbone for pre-training
- How to do self-supervised learning with the backbone
- How to store the backbone in a checkpoint file which can be used by Detectron2
Introduction
----------------
For many tasks in computer vision it can be beneficial to pre-train a neural network
on a domain-specific dataset prior to finetuning it. For example, a retail detection
network can be pre-trained with self-supervised learning on a large retail detection
dataset. This way the neural network learns to extract relevant features from the images
without requiring any annotations at all. As a consequence, it's possible to finetune
the network with only a handful of annotated images. This tutorial will guide you
through the steps to pre-train a detection backbone from the popular
`Detectron2 <https://github.com/facebookresearch/detectron2>`_ framework.
Prerequisites:
----------------
For the purpose of this tutorial you will need to:
- Install Lightly: Follow the `instructions <https://docs.lightly.ai/getting_started/install.html>`__.
- Install Detectron2: Follow the `instructions <https://detectron2.readthedocs.io/en/latest/tutorials/install.html>`__.
- Download a dataset for pre-training (we will use the `Freiburg Groceries Dataset <https://github.com/PhilJd/freiburg_groceries_dataset>`_ dataset). You can download it by cloning the Github repository and running `download_dataset.py`. Alternatively, you can use this `download link <http://aisdatasets.informatik.uni-freiburg.de/freiburg_groceries_dataset/freiburg_groceries_dataset.tar.gz>`_
.. note::
The `Freiburg Groceries Dataset <https://github.com/PhilJd/freiburg_groceries_dataset>`_ consists of 5000 256x256 RGB images of 25 food classes.
Images show one or multiple instances of grocery products in shelves or similar scenarios.
Finally, you will need the Detectron2 configuration files. They are available `here <https://github.com/facebookresearch/detectron2/tree/main/configs>`_.
In this tutorial we use a Faster RCNN with a feature pyramid network (FPN), so make sure you have the relevant file (Base-RCNN-FPN.yaml) in your directory.
"""
# %%
# Imports
# -------
#
# Import the Python frameworks we need for this tutorial.
import torch
from detectron2 import config, modeling
from detectron2.checkpoint import DetectionCheckpointer
from lightly.data import LightlyDataset
from lightly.loss import NTXentLoss
from lightly.models.modules import SimCLRProjectionHead
from lightly.transforms import SimCLRTransform
# %%
# Configuration
# -------------
# Let's set the configuration parameters for our experiments.
#
# We use a batch size of 512 and an input size of 128 in order to fit everything
# on the available amount of memory on our GPU (16GB). The number of features
# is set to the default output size of the ResNet50 backbone.
#
# We only train for 5 epochs because the focus of this tutorial is on the
# integration with Detectron2.
num_workers = 8
batch_size = 512
input_size = 128
num_ftrs = 2048
seed = 1
max_epochs = 5
# use cuda if possible
device = "cuda" if torch.cuda.is_available() else "cpu"
# %%
# You might have downloaded the dataset somewhere else or are using a different one.
# Set the path to the dataset accordingly. Additionally, make sure to set the
# path to the config file of the Detectron2 model you want to use.
# We will be using an RCNN with a feature pyramid network (FPN).
data_path = "/datasets/freiburg_groceries_dataset/images"
cfg_path = "./Base-RCNN-FPN.yaml"
# %%
# Initialize the Detectron2 Model
# --------------------------------
#
# The output of the Detectron2 ResNet50 backbone is a dictionary with the keys
# `res1` through `res5` (see the `documentation <https://detectron2.readthedocs.io/en/latest/modules/modeling.html#detectron2.modeling.ResNet>`_).
# The keys correspond to the different stages of the ResNet. In this tutorial, we are only
# interested in the high-level abstractions from the last layer, `res5`. Therefore,
# we have to add an additional layer which picks the right output from the dictionary.
class SelectStage(torch.nn.Module):
    """Picks a single named stage out of a backbone's output mapping.

    The Detectron2 ResNet returns a dict such as ``{"res1": ..., "res5": ...}``;
    this module forwards only the entry chosen at construction time.
    """

    def __init__(self, stage: str = "res5"):
        super().__init__()
        self.stage = stage

    def forward(self, features):
        # features maps stage name -> feature tensor.
        selected = features[self.stage]
        return selected
# %%
# Let's load the config file and make some adjustments to ensure smooth training.
cfg = config.get_cfg()
cfg.merge_from_file(cfg_path)
# use cuda if possible
cfg.MODEL.DEVICE = device
# randomly initialize network
cfg.MODEL.WEIGHTS = ""
# detectron2 uses BGR by default but pytorch/torchvision use RGB
cfg.INPUT.FORMAT = "RGB"
# %%
# Next, we can build the Detectron2 model and extract the ResNet50 backbone as
# follows:
detmodel = modeling.build_model(cfg)
simclr_backbone = torch.nn.Sequential(
detmodel.backbone.bottom_up,
SelectStage("res5"),
# res5 has shape bsz x 2048 x 4 x 4
torch.nn.AdaptiveAvgPool2d(1),
).to(device)
# %%
#
#
# .. note::
#
# The Detectron2 ResNet is missing the average pooling layer used to get a tensor of shape bsz x 2048.
# Therefore, we add an average pooling as in the `PyTorch ResNet <https://github.com/pytorch/pytorch/blob/1022443168b5fad55bbd03d087abf574c9d2e9df/benchmarks/functional_autograd_benchmark/torchvision_models.py#L147>`_.
#
# %%
# Finally, let's build SimCLR around the backbone as shown in the other
# tutorials. For this, we only require an additional projection head.
projection_head = SimCLRProjectionHead(
input_dim=num_ftrs,
hidden_dim=num_ftrs,
output_dim=128,
).to(device)
# %%
# Setup data augmentations and loaders
# ------------------------------------
#
# We start by defining the augmentations which should be used for training.
# We use the same ones as in the SimCLR paper but change the input size and
# minimum scale of the random crop to adjust to our dataset.
#
# We don't go into detail here about using the optimal augmentations.
# You can learn more about the different augmentations and learned invariances
# here: :ref:`lightly-advanced`.
transform = SimCLRTransform(input_size=input_size)
dataset_train_simclr = LightlyDataset(input_dir=data_path, transform=transform)
dataloader_train_simclr = torch.utils.data.DataLoader(
dataset_train_simclr,
batch_size=batch_size,
shuffle=True,
drop_last=True,
num_workers=num_workers,
)
# %%
# Self-supervised pre-training
# -----------------------------
# Now all we need to do is define a loss and optimizer and start training!
criterion = NTXentLoss()
optimizer = torch.optim.Adam(
list(simclr_backbone.parameters()) + list(projection_head.parameters()),
lr=1e-4,
)
# Plain PyTorch SimCLR loop: two augmented views per image, NTXent loss.
for e in range(max_epochs):
    mean_loss = 0.0
    for (x0, x1), _, _ in dataloader_train_simclr:
        # Move both views of the batch to the training device.
        x0 = x0.to(device)
        x1 = x1.to(device)
        # Embed each view: backbone features -> flatten -> projection head.
        y0 = projection_head(simclr_backbone(x0).flatten(start_dim=1))
        y1 = projection_head(simclr_backbone(x1).flatten(start_dim=1))
        # backpropagation
        loss = criterion(y0, y1)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        # update average loss (running mean over the epoch's batches)
        mean_loss += loss.detach().cpu().item() / len(dataloader_train_simclr)
    print(f"[Epoch {e:2d}] Mean Loss = {mean_loss:.2f}")
# %%
# Storing the checkpoint
# -----------------------
# Now, we can use the pre-trained backbone from the Detectron2 model. The code
# below shows how to save it as a Detectron2 checkpoint called `my_model.pth`.
# get the first module from the backbone (i.e. the detectron2 ResNet)
# backbone:
# L ResNet50
# L SelectStage
# L AdaptiveAvgPool2d
detmodel.backbone.bottom_up = simclr_backbone[0]
checkpointer = DetectionCheckpointer(detmodel, save_dir="./")
checkpointer.save("my_model")
# %%
# Finetuning with Detectron2
# ---------------------------
#
# The checkpoint from above can now be used by any Detectron2 script. For example,
# you can use the `train_net.py` script in the Detectron2 `tools`:
#
#
# %%
# .. code-block:: none
#
# python train_net.py --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \
# MODEL.WEIGHTS path/to/my_model.pth \
# MODEL.PIXEL_MEAN 123.675,116.280,103.530 \
# MODEL.PIXEL_STD 58.395,57.120,57.375 \
# INPUT.FORMAT RGB
#
# %%
#
# The :py:class:`~lightly.transforms.simclr.SimCLRTransform` applies an ImageNet
# normalization of the input images by default. Therefore, we have to normalize
# the input images at training time, too. Since Detectron2 uses an input space
# in the range 0 - 255, we use the numbers above.
#
# %%
#
# .. note::
#
# Since the model was pre-trained with images in the RGB input format, it's
# necessary to set the permute the order of the pixel mean, and pixel std as shown above.
# %%
# Next Steps
# ------------
#
#
# Want to learn more about our self-supervised models and how to choose
# augmentations properly? Check out our other tutorials:
#
# - :ref:`lightly-moco-tutorial-2`
# - :ref:`lightly-simclr-tutorial-3`
# - :ref:`lightly-simsiam-tutorial-4`
# - :ref:`lightly-detectron-tutorial-6`
#
| 9,875 | 33.055172 | 395 | py |
lightly | lightly-master/docs/source/tutorials_source/package/tutorial_simclr_clothing.py | """
.. _lightly-simclr-tutorial-3:
Tutorial 3: Train SimCLR on Clothing
==============================================
In this tutorial, we will train a SimCLR model using lightly. The model,
augmentations and training procedure is from
`A Simple Framework for Contrastive Learning of Visual Representations <https://arxiv.org/abs/2002.05709>`_.
The paper explores a rather simple training procedure for contrastive learning.
Since we use the typical contrastive learning loss based on NCE the method
greatly benefits from having larger batch sizes. In this example, we use a batch
size of 256 and paired with the input resolution per image of 64x64 pixels and
a resnet-18 model this example requires 16GB of GPU memory.
We use the
`clothing dataset from Alex Grigorev <https://github.com/alexeygrigorev/clothing-dataset>`_
for this tutorial.
In this tutorial you will learn:
- How to create a SimCLR model
- How to generate image representations
- How different augmentations impact the learned representations
"""
# %%
# Imports
# -------
#
# Import the Python frameworks we need for this tutorial.
import os
import matplotlib.pyplot as plt
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torchvision
from PIL import Image
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
from lightly.data import LightlyDataset
from lightly.transforms import SimCLRTransform, utils
# %%
# Configuration
# -------------
#
# We set some configuration parameters for our experiment.
# Feel free to change them and analyze the effect.
#
# The default configuration with a batch size of 256 and input resolution of 128
# requires 6GB of GPU memory.
num_workers = 8
batch_size = 256
seed = 1
max_epochs = 20
input_size = 128
num_ftrs = 32
# %%
# Let's set the seed for our experiments
pl.seed_everything(seed)
# %%
# Make sure `path_to_data` points to the downloaded clothing dataset.
# You can download it using
# `git clone https://github.com/alexeygrigorev/clothing-dataset.git`
path_to_data = "/datasets/clothing-dataset/images"
# %%
# Setup data augmentations and loaders
# ------------------------------------
#
# The images from the dataset have been taken from above when the clothing was
# on a table, bed or floor. Therefore, we can make use of additional augmentations
# such as vertical flip or random rotation (90 degrees).
# By adding these augmentations we learn our model invariance regarding the
# orientation of the clothing piece. E.g. we don't care if a shirt is upside down
# but more about the structure which makes it a shirt.
#
# You can learn more about the different augmentations and learned invariances
# here: :ref:`lightly-advanced`.
transform = SimCLRTransform(input_size=input_size, vf_prob=0.5, rr_prob=0.5)
# We create a torchvision transformation for embedding the dataset after
# training
test_transform = torchvision.transforms.Compose(
[
torchvision.transforms.Resize((input_size, input_size)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
mean=utils.IMAGENET_NORMALIZE["mean"],
std=utils.IMAGENET_NORMALIZE["std"],
),
]
)
dataset_train_simclr = LightlyDataset(input_dir=path_to_data, transform=transform)
dataset_test = LightlyDataset(input_dir=path_to_data, transform=test_transform)
dataloader_train_simclr = torch.utils.data.DataLoader(
dataset_train_simclr,
batch_size=batch_size,
shuffle=True,
drop_last=True,
num_workers=num_workers,
)
dataloader_test = torch.utils.data.DataLoader(
dataset_test,
batch_size=batch_size,
shuffle=False,
drop_last=False,
num_workers=num_workers,
)
# %%
# Create the SimCLR Model
# -----------------------
# Now we create the SimCLR model. We implement it as a PyTorch Lightning Module
# and use a ResNet-18 backbone from Torchvision. Lightly provides implementations
# of the SimCLR projection head and loss function in the `SimCLRProjectionHead`
# and `NTXentLoss` classes. We can simply import them and combine the building
# blocks in the module.
from lightly.loss import NTXentLoss
from lightly.models.modules.heads import SimCLRProjectionHead
class SimCLRModel(pl.LightningModule):
    """SimCLR: torchvision ResNet-18 backbone + projection head + NTXent loss."""

    def __init__(self):
        super().__init__()
        # Strip the final fully-connected layer; keep everything up to pooling.
        resnet = torchvision.models.resnet18()
        self.backbone = nn.Sequential(*list(resnet.children())[:-1])
        feature_dim = resnet.fc.in_features
        self.projection_head = SimCLRProjectionHead(feature_dim, feature_dim, 128)
        self.criterion = NTXentLoss()

    def forward(self, x):
        """Map images to the projected embeddings used by the contrastive loss."""
        features = self.backbone(x).flatten(start_dim=1)
        return self.projection_head(features)

    def training_step(self, batch, batch_idx):
        (view_a, view_b), _, _ = batch
        loss = self.criterion(self.forward(view_a), self.forward(view_b))
        self.log("train_loss_ssl", loss)
        return loss

    def configure_optimizers(self):
        optim = torch.optim.SGD(
            self.parameters(), lr=6e-2, momentum=0.9, weight_decay=5e-4
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
        return [optim], [scheduler]
# %%
# Train the module using the PyTorch Lightning Trainer on a single GPU.
model = SimCLRModel()
trainer = pl.Trainer(max_epochs=max_epochs, devices=1, accelerator="gpu")
trainer.fit(model, dataloader_train_simclr)
# %%
# Next we create a helper function to generate embeddings
# from our test images using the model we just trained.
# Note that only the backbone is needed to generate embeddings,
# the projection head is only required for the training.
# Make sure to put the model into eval mode for this part!
def generate_embeddings(model, dataloader):
    """Embed every image in ``dataloader`` with the model's backbone.

    Only the backbone is used (no projection head). Returns L2-normalized
    embeddings together with the matching list of filenames.
    """
    chunks = []
    names = []
    with torch.no_grad():
        for images, _, batch_names in dataloader:
            features = model.backbone(images.to(model.device))
            chunks.append(features.flatten(start_dim=1))
            names.extend(batch_names)
    # Normalize rows so nearest-neighbor search behaves like cosine similarity.
    return normalize(torch.cat(chunks, 0)), names
model.eval()
embeddings, filenames = generate_embeddings(model, dataloader_test)
# %%
# Visualize Nearest Neighbors
# ----------------------------
# Let's look at the trained embedding and visualize the nearest neighbors for
# a few random samples.
#
# We create some helper functions to simplify the work
def get_image_as_np_array(filename: str):
    """Load the image at ``filename`` and return its pixels as a numpy array."""
    return np.asarray(Image.open(filename))
def plot_knn_examples(embeddings, filenames, n_neighbors=3, num_examples=6):
    """Show ``num_examples`` random query images, each in a row with its
    nearest neighbors; subplot titles give the embedding distance.
    """
    # Fit sklearn's k-NN index on the embeddings and query every embedding;
    # the query itself is returned among its own neighbors (distance 0).
    knn = NearestNeighbors(n_neighbors=n_neighbors).fit(embeddings)
    distances, indices = knn.kneighbors(embeddings)
    # Pick distinct random query rows.
    samples_idx = np.random.choice(len(indices), size=num_examples, replace=False)
    for idx in samples_idx:
        fig = plt.figure()
        neighbors = indices[idx]
        for col, neighbor_idx in enumerate(neighbors):
            ax = fig.add_subplot(1, len(neighbors), col + 1)
            # Resolve the neighbor's filename relative to the dataset root.
            image_path = os.path.join(path_to_data, filenames[neighbor_idx])
            plt.imshow(get_image_as_np_array(image_path))
            ax.set_title(f"d={distances[idx][col]:.3f}")
            plt.axis("off")
# %%
# Let's do the plot of the images. The leftmost image is the query image whereas
# the ones next to it on the same row are the nearest neighbors.
# In the title we see the distance of the neighbor.
plot_knn_examples(embeddings, filenames)
# %%
# Color Invariance
# ---------------------
# Let's train again without color augmentation. This will force our model to
# respect the colors in the images.
# Set color jitter and gray scale probability to 0
new_transform = SimCLRTransform(
input_size=input_size, vf_prob=0.5, rr_prob=0.5, cj_prob=0.0, random_gray_scale=0.0
)
# let's update the transform on the training dataset
dataset_train_simclr.transform = new_transform
# then train a new model
model = SimCLRModel()
trainer = pl.Trainer(max_epochs=max_epochs, devices=1, accelerator="gpu")
trainer.fit(model, dataloader_train_simclr)
# and generate again embeddings from the test set
model.eval()
embeddings, filenames = generate_embeddings(model, dataloader_test)
# %%
# other example
plot_knn_examples(embeddings, filenames)
# %%
# What's next?
# You could use the pre-trained model and train a classifier on top.
pretrained_resnet_backbone = model.backbone
# you can also store the backbone and use it in another code
state_dict = {"resnet18_parameters": pretrained_resnet_backbone.state_dict()}
torch.save(state_dict, "model.pth")
# %%
# THIS COULD BE IN A NEW FILE (e.g. inference.py)
#
# Make sure you place the `model.pth` file in the same folder as this code
# load the model in a new file for inference
resnet18_new = torchvision.models.resnet18()
# note that we need to create exactly the same backbone in order to load the weights
backbone_new = nn.Sequential(*list(resnet18_new.children())[:-1])
ckpt = torch.load("model.pth")
backbone_new.load_state_dict(ckpt["resnet18_parameters"])
# %%
# Next Steps
# ------------
#
# Interested in exploring other self-supervised models? Check out our other
# tutorials:
#
# - :ref:`lightly-moco-tutorial-2`
# - :ref:`lightly-simsiam-tutorial-4`
# - :ref:`lightly-custom-augmentation-5`
# - :ref:`lightly-detectron-tutorial-6`
#
| 10,155 | 30.638629 | 108 | py |
lightly | lightly-master/docs/source/tutorials_source/package/tutorial_simsiam_esa.py | """
.. _lightly-simsiam-tutorial-4:
Tutorial 4: Train SimSiam on Satellite Images
==============================================
In this tutorial we will train a SimSiam model in old-school PyTorch style on a
set of satellite images of Italy. We will showcase how the generated embeddings
can be used for exploration and better understanding of the raw data.
You can read up on the model in the paper
`Exploring Simple Siamese Representation Learning <https://arxiv.org/abs/2011.10566>`_.
We will be using a dataset of satellite images from ESAs Sentinel-2 satellite over Italy.
If you're interested, you can get your own data from the `Copernicus Open Access Hub <https://scihub.copernicus.eu/>`_.
The original images have been cropped into smaller tiles due to their immense size and
the dataset has been balanced based on a simple clustering of the mean RGB color values
to prevent a surplus of images of the sea.
In this tutorial you will learn:
- How to work with the SimSiam model
- How to do self-supervised learning using PyTorch
- How to check whether your embeddings have collapsed
"""
# %%
# Imports
# -------
#
# Import the Python frameworks we need for this tutorial.
import math
import numpy as np
import torch
import torch.nn as nn
import torchvision
from lightly.data import LightlyDataset
from lightly.loss import NegativeCosineSimilarity
from lightly.models.modules.heads import SimSiamPredictionHead, SimSiamProjectionHead
from lightly.transforms import SimCLRTransform, utils
# %%
# Configuration
# -------------
#
# We set some configuration parameters for our experiment.
#
# The default configuration with a batch size and input resolution of 256
# requires 16GB of GPU memory.
num_workers = 8
batch_size = 128
seed = 1
epochs = 50
input_size = 256
# dimension of the embeddings
num_ftrs = 512
# dimension of the output of the prediction and projection heads
out_dim = proj_hidden_dim = 512
# the prediction head uses a bottleneck architecture
pred_hidden_dim = 128
# %%
# Let's set the seed for our experiments and the path to our data
# seed torch and numpy with the configured `seed` so runs are reproducible
# (fix: the seeding previously hard-coded 0, leaving the `seed` variable unused)
torch.manual_seed(seed)
np.random.seed(seed)
# set the path to the dataset
path_to_data = "/datasets/sentinel-2-italy-v1/"
# %%
# Setup data augmentations and loaders
# ------------------------------------
# Since we're working on satellite images, it makes sense to use horizontal and
# vertical flips as well as random rotation transformations. We apply weak color
# jitter to learn an invariance of the model with respect to slight changes in
# the color of the water.
#
# define the augmentations for self-supervised learning
# SimCLRTransform produces two randomly augmented views of every input image
transform = SimCLRTransform(
    input_size=input_size,
    # require invariance to flips and rotations
    hf_prob=0.5,
    vf_prob=0.5,
    rr_prob=0.5,
    # satellite images are all taken from the same height
    # so we use only slight random cropping
    min_scale=0.5,
    # use a weak color jitter for invariance w.r.t small color changes
    cj_prob=0.2,
    cj_bright=0.1,
    cj_contrast=0.1,
    cj_hue=0.1,
    cj_sat=0.1,
)
# create a lightly dataset for training with augmentations
dataset_train_simsiam = LightlyDataset(input_dir=path_to_data, transform=transform)
# create a dataloader for training
# drop_last=True keeps every batch at full size, which the loss/statistics assume
dataloader_train_simsiam = torch.utils.data.DataLoader(
    dataset_train_simsiam,
    batch_size=batch_size,
    shuffle=True,
    drop_last=True,
    num_workers=num_workers,
)
# create a torchvision transformation for embedding the dataset after training
# here, we resize the images to match the input size during training and apply
# a normalization of the color channel based on statistics from imagenet
test_transforms = torchvision.transforms.Compose(
    [
        torchvision.transforms.Resize((input_size, input_size)),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(
            mean=utils.IMAGENET_NORMALIZE["mean"],
            std=utils.IMAGENET_NORMALIZE["std"],
        ),
    ]
)
# create a lightly dataset for embedding (no augmentations, deterministic)
dataset_test = LightlyDataset(input_dir=path_to_data, transform=test_transforms)
# create a dataloader for embedding
# shuffle=False and drop_last=False so every image is embedded exactly once
# and the embedding order matches the filename order collected later
dataloader_test = torch.utils.data.DataLoader(
    dataset_test,
    batch_size=batch_size,
    shuffle=False,
    drop_last=False,
    num_workers=num_workers,
)
# %%
# Create the SimSiam model
# ------------------------
#
# Create a ResNet backbone and remove the classification head
class SimSiam(nn.Module):
    """SimSiam network: a backbone followed by a projection and a prediction head.

    The forward pass returns the (detached) projection and the prediction for a
    batch; the stop-gradient on the projection is the key trick of SimSiam.
    """

    def __init__(self, backbone, num_ftrs, proj_hidden_dim, pred_hidden_dim, out_dim):
        super().__init__()
        self.backbone = backbone
        self.projection_head = SimSiamProjectionHead(num_ftrs, proj_hidden_dim, out_dim)
        self.prediction_head = SimSiamPredictionHead(out_dim, pred_hidden_dim, out_dim)

    def forward(self, x):
        """Return (projection z with stop-gradient, prediction p) for batch x."""
        # backbone features, flattened to (batch, num_ftrs)
        features = self.backbone(x).flatten(start_dim=1)
        projection = self.projection_head(features)
        prediction = self.prediction_head(projection)
        # stop-gradient: gradients only flow through the prediction branch
        return projection.detach(), prediction
# we create a resnet18 backbone for this tutorial
# NOTE(review): `torchvision.models.resnet18()` without a weights/pretrained
# argument is randomly initialized, i.e. NOT pretrained, despite what the
# original comment claimed — pass weights explicitly for a pretrained start
resnet = torchvision.models.resnet18()
# drop the final fully-connected classifier; the remaining layers end in the
# average pool, so flattening later yields num_ftrs-dimensional features
backbone = nn.Sequential(*list(resnet.children())[:-1])
model = SimSiam(backbone, num_ftrs, proj_hidden_dim, pred_hidden_dim, out_dim)
# %%
# SimSiam uses a symmetric negative cosine similarity loss and does therefore
# not require any negative samples. We build a criterion and an optimizer.
# SimSiam uses a symmetric negative cosine similarity loss
criterion = NegativeCosineSimilarity()
# scale the learning rate linearly with the batch size (base 0.05 at batch 256)
lr = 0.05 * batch_size / 256
# use SGD with momentum and weight decay
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)
# %%
# Train SimSiam
# --------------------
#
# To train the SimSiam model, you can use a classic PyTorch training loop:
# For every epoch, iterate over all batches in the training data, extract
# the two transforms of every image, pass them through the model, and calculate
# the loss. Then, simply update the weights with the optimizer. Don't forget to
# reset the gradients!
#
# Since SimSiam doesn't require negative samples, it is a good idea to check
# whether the outputs of the model have collapsed into a single direction. For
# this we can simply check the standard deviation of the L2 normalized output
# vectors. If it is close to one divided by the square root of the output
# dimension, everything is fine (you can read
# up on this idea `here <https://arxiv.org/abs/2011.10566>`_).
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
# running (exponential moving) averages of loss and output std across batches
avg_loss = 0.0
avg_output_std = 0.0
for e in range(epochs):
    for (x0, x1), _, _ in dataloader_train_simsiam:
        # move images to the gpu
        x0 = x0.to(device)
        x1 = x1.to(device)
        # run the model on both transforms of the images
        # we get projections (z0 and z1) and
        # predictions (p0 and p1) as output
        # (z0/z1 are detached inside the model, so gradients only flow
        # through the prediction branch)
        z0, p0 = model(x0)
        z1, p1 = model(x1)
        # apply the symmetric negative cosine similarity
        # and run backpropagation
        loss = 0.5 * (criterion(z0, p1) + criterion(z1, p0))
        loss.backward()
        optimizer.step()
        # zero the gradients after the step, ready for the next batch
        optimizer.zero_grad()
        # calculate the per-dimension standard deviation of the outputs
        # we can use this later to check whether the embeddings are collapsing
        output = p0.detach()
        output = torch.nn.functional.normalize(output, dim=1)
        output_std = torch.std(output, 0)
        output_std = output_std.mean()
        # use moving averages to track the loss and standard deviation
        w = 0.9
        avg_loss = w * avg_loss + (1 - w) * loss.item()
        avg_output_std = w * avg_output_std + (1 - w) * output_std.item()
    # the level of collapse is large if the standard deviation of the l2
    # normalized output is much smaller than 1 / sqrt(dim)
    collapse_level = max(0.0, 1 - math.sqrt(out_dim) * avg_output_std)
    # print intermediate results
    print(
        f"[Epoch {e:3d}] "
        f"Loss = {avg_loss:.2f} | "
        f"Collapse Level: {collapse_level:.2f} / 1.00"
    )
# %%
# To embed the images in the dataset we simply iterate over the test dataloader
# and feed the images to the model backbone. Make sure to disable gradients for
# this part.
embeddings = []
filenames = []
# disable gradients for faster calculations
model.eval()
with torch.no_grad():
    for i, (x, _, fnames) in enumerate(dataloader_test):
        # move the images to the gpu
        x = x.to(device)
        # embed the images with the trained backbone (projection and
        # prediction heads are not used for embedding)
        y = model.backbone(x).flatten(start_dim=1)
        # store the embeddings and filenames in lists
        # (embeddings stay on the device until the final .cpu() below)
        embeddings.append(y)
        filenames = filenames + list(fnames)
# concatenate the embeddings and convert to numpy
# row order matches `filenames` because the test loader does not shuffle
embeddings = torch.cat(embeddings, dim=0)
embeddings = embeddings.cpu().numpy()
# %%
# Scatter Plot and Nearest Neighbors
# ----------------------------------
# Now that we have the embeddings, we can visualize the data with a scatter plot.
# Further down, we also check out the nearest neighbors of a few example images.
#
# As a first step, we make a few additional imports.
# for plotting
import os
import matplotlib.offsetbox as osb
import matplotlib.pyplot as plt
# for resizing images to thumbnails
import torchvision.transforms.functional as functional
from matplotlib import rcParams as rcp
from PIL import Image
# for clustering and 2d representations
from sklearn import random_projection
# %%
# Then, we project the embeddings to two dimensions with a random Gaussian
# projection and rescale them to fit in the [0, 1] square.
# (note: despite the original text mentioning UMAP, the code below uses
# sklearn's GaussianRandomProjection)
#
# for the scatter plot we want to transform the images to a two-dimensional
# vector space using a random Gaussian projection
projection = random_projection.GaussianRandomProjection(n_components=2)
embeddings_2d = projection.fit_transform(embeddings)
# normalize the embeddings to fit in the [0, 1] square
# (per-dimension min/max scaling; assumes M != m in every dimension)
M = np.max(embeddings_2d, axis=0)
m = np.min(embeddings_2d, axis=0)
embeddings_2d = (embeddings_2d - m) / (M - m)
# %%
# Let's start with a nice scatter plot of our dataset! The helper function
# below will create one.
def get_scatter_plot_with_thumbnails():
    """Creates a scatter plot with image overlays."""
    # set up the figure and a single subplot
    fig = plt.figure()
    fig.suptitle("Scatter Plot of the Sentinel-2 Dataset")
    ax = fig.add_subplot(1, 1, 1)
    # visit the images in random order and keep only those that are
    # sufficiently far from every already-selected image in 2d space
    selected_idx = []
    occupied = np.array([[1.0, 1.0]])
    order = [i for i in range(embeddings_2d.shape[0])]
    np.random.shuffle(order)
    for candidate in order:
        sq_dists = np.sum((embeddings_2d[candidate] - occupied) ** 2, 1)
        if np.min(sq_dists) < 2e-3:
            # too close to an image we already show — skip it
            continue
        occupied = np.r_[occupied, [embeddings_2d[candidate]]]
        selected_idx.append(candidate)
    # overlay a thumbnail of each selected image at its 2d coordinates
    for idx in selected_idx:
        thumbnail_size = int(rcp["figure.figsize"][0] * 2.0)
        image_path = os.path.join(path_to_data, filenames[idx])
        thumbnail = Image.open(image_path)
        thumbnail = functional.resize(thumbnail, thumbnail_size)
        artist = osb.AnnotationBbox(
            osb.OffsetImage(np.array(thumbnail), cmap=plt.cm.gray_r),
            embeddings_2d[idx],
            pad=0.2,
        )
        ax.add_artist(artist)
    # force a square-looking plot regardless of the data range
    ax.set_aspect(1.0 / ax.get_data_ratio(), adjustable="box")


# get a scatter plot with thumbnail overlays
get_scatter_plot_with_thumbnails()
# %%
# Next, we plot example images and their nearest neighbors (calculated from the
# embeddings generated above). This is a very simple approach to find more images
# of a certain type where a few examples are already available. For example,
# when a subset of the data is already labelled and one class of images is clearly
# underrepresented, one can easily query more images of this class from the
# unlabelled dataset.
#
# Let's get to work! The plots are shown below.
# hand-picked query images, one or two per visual category (water, land,
# clouds); used below to demonstrate nearest-neighbor retrieval
example_images = [
    "S2B_MSIL1C_20200526T101559_N0209_R065_T31TGE/tile_00154.png",  # water 1
    "S2B_MSIL1C_20200526T101559_N0209_R065_T32SLJ/tile_00527.png",  # water 2
    "S2B_MSIL1C_20200526T101559_N0209_R065_T32TNL/tile_00556.png",  # land
    "S2B_MSIL1C_20200526T101559_N0209_R065_T31SGD/tile_01731.png",  # clouds 1
    "S2B_MSIL1C_20200526T101559_N0209_R065_T32SMG/tile_00238.png",  # clouds 2
]
def get_image_as_np_array(filename: str):
    """Loads the image with filename and returns it as a numpy array.

    Uses a context manager so the underlying file handle is closed
    deterministically (PIL keeps the file open lazily otherwise).
    """
    with Image.open(filename) as img:
        # np.asarray forces the pixel data to load before the file closes
        return np.asarray(img)
def get_image_as_np_array_with_frame(filename: str, w: int = 5):
    """Returns an image as a numpy array with a black frame of width w.

    Args:
        filename: path of the image to load (must decode to an H x W x 3 array).
        w: frame width in pixels; must be > 0 (the slice below uses -w).
    """
    img = get_image_as_np_array(filename)
    ny, nx, _ = img.shape
    # allocate the framed image directly as uint8 (avoids the previous
    # float64 allocation followed by an astype copy)
    framed_img = np.zeros((ny + 2 * w, nx + 2 * w, 3), dtype=np.uint8)
    # put the original image in the middle, leaving a black border of width w
    framed_img[w:-w, w:-w] = img
    return framed_img
def plot_nearest_neighbors_3x3(example_image: str, i: int):
    """Plots the example image and its eight nearest neighbors."""
    n_subplots = 9
    # set up the figure for a 3x3 grid of neighbors
    fig = plt.figure()
    fig.suptitle(f"Nearest Neighbor Plot {i + 1}")
    # squared euclidean distance from the query embedding to all embeddings
    query_idx = filenames.index(example_image)
    deltas = embeddings - embeddings[query_idx]
    sq_dists = np.power(deltas, 2).sum(-1).squeeze()
    # the n_subplots smallest distances; index 0 is the query image itself
    neighbor_indices = np.argsort(sq_dists)[:n_subplots]
    for position, neighbor_idx in enumerate(neighbor_indices):
        ax = fig.add_subplot(3, 3, position + 1)
        fname = os.path.join(path_to_data, filenames[neighbor_idx])
        if position == 0:
            # highlight the query image with a title and a black frame
            ax.set_title("Example Image")
            plt.imshow(get_image_as_np_array_with_frame(fname))
        else:
            plt.imshow(get_image_as_np_array(fname))
        plt.axis("off")


# show example images for each cluster
for i, example_image in enumerate(example_images):
    plot_nearest_neighbors_3x3(example_image, i)
# %%
# Next Steps
# ------------
#
# Interested in exploring other self-supervised models? Check out our other
# tutorials:
#
# - :ref:`lightly-moco-tutorial-2`
# - :ref:`lightly-simclr-tutorial-3`
# - :ref:`lightly-custom-augmentation-5`
# - :ref:`lightly-detectron-tutorial-6`
#
| 14,744 | 31.839644 | 118 | py |
lightly | lightly-master/docs/source/tutorials_source/platform/tutorial_label_studio_export.py | """
.. _lightly-tutorial-export-labelstudio:
Tutorial 10: Export to LabelStudio
=============================================
This tutorial shows how you can easily label all images of a tag from Lightly
using the open-source data labeling tool `LabelStudio <https://labelstud.io>`_.
What you will learn
--------------------
* Export a tag from Lightly in the `LabelStudio format <https://labelstud.io/guide/tasks.html#Basic-Label-Studio-JSON-format>`_.
* Import the tag into LabelStudio.
Requirements
------------
You have a dataset in the `Lightly Platform <https://app.lightly.ai>`_
and optionally already chosen a subset of it and created a tag for it.
Now you want to label all images of this tag using LabelStudio.
If you have not created your own dataset yet, you can use any dataset
(e.g. the playground dataset) or follow the `docs <https://docs.lightly.ai>`_ to create one.
Launch LabelStudio
------------------
Follow the documentation to `install and start LabelStudio <https://labelstud.io/guide/index.html#Quick-start>`_.
Then create a new project and click on import. Now you should be in the
import screen.
.. figure:: ../../tutorials_source/platform/images/tutorial_export_labelstudio/labelstudio_import_dialog.jpg
:align: center
:alt: Import dialog of LabelStudio.
Export from Lightly in the LabelStudio format
---------------------------------------------
- Now open your dataset in the `Lightly Platform <https://app.lightly.ai>`_.
- Select the tag you want to export at the top. By default, the initial tag is already chosen.
- Navigate to the *Download* page to see the different download options.
- Within *Export Reduced Dataset*, select *LabelStudio Tasks* from the dropdown \
of the list of supported export formats. Specify an expiration duration \
giving you enough time to label all images. \
The tasks include a url pointing to the real images, thus allowing everyone \
with the link to access the images. This is needed for LabelStudio to access the \
images without needing to login.
- After clicking the button 'Export to LabelStudio Tasks', they are downloaded \
as a single json file to your PC.
Import the tasks into LabelStudio
---------------------------------
Now head back to LabelStudio and import the file you just
downloaded. Either per drag-n-drop or browse your local files. Then finish
the import.
.. figure:: ../../tutorials_source/platform/images/tutorial_export_labelstudio/labelstudio_imported_file.jpg
:align: center
:alt: Imported file into LabelStudio.
Start labeling
--------------
Now you can start labeling your images! To see them, you might need
to change the type of the image column to 'img'.
.. figure:: ../../tutorials_source/platform/images/tutorial_export_labelstudio/labelstudio_import_finished.jpg
:align: center
:alt: LabelStudio tasks fully imported and showing images.
"""
| 2,887 | 37 | 128 | py |