repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
Few-shot-WSI | Few-shot-WSI-master/configs/base.py | train_cfg = {}
test_cfg = {}
optimizer_config = dict() # grad_clip, coalesce, bucket_size_mb
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
dist_params = dict(backend='nccl')
cudnn_benchmark = True
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
prefetch = False
| 433 | 20.7 | 64 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/extraction/extract_feats_PAIP_test.py | _base_ = '../base.py'
# model settings
model = dict(
type='Extractor',
pretrained=None,
backbone=dict(
type='ResNet',
depth=18,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='BN')))
# dataset settings
data_source_cfg = dict(type='ImageList')
data_root = 'data/PAIP19/data'
data_list = 'data/PAIP19/meta/paip_test.txt'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
extract_pipeline = [
dict(type='Resize', size=(224, 224)),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg)
]
data = dict(
imgs_per_gpu=256,
workers_per_gpu=5,
extract=dict(
type='ExtractDataset',
data_source=dict(
list_file=data_list, root=data_root, **data_source_cfg),
pipeline=extract_pipeline),
)
| 842 | 23.085714 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/extraction/extract_feats_NCT_train.py | _base_ = '../base.py'
# model settings
model = dict(
type='Extractor',
pretrained=None,
backbone=dict(
type='ResNet',
depth=18,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='BN')))
# dataset settings
data_source_cfg = dict(type='ImageList')
data_root = 'data/NCT/data'
data_list = 'data/NCT/meta/train.txt'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
extract_pipeline = [
dict(type='Resize', size=(224, 224)),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg)
]
data = dict(
imgs_per_gpu=256,
workers_per_gpu=5,
extract=dict(
type='ExtractDataset',
data_source=dict(
list_file=data_list, root=data_root, **data_source_cfg),
pipeline=extract_pipeline),
)
| 832 | 22.8 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/extraction/extract_feats_LC.py | _base_ = '../base.py'
# model settings
model = dict(
type='Extractor',
pretrained=None,
backbone=dict(
type='ResNet',
depth=18,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='BN')))
# dataset settings
data_source_cfg = dict(type='ImageList')
data_root = 'data/LC25000/data'
data_list = 'data/LC25000/meta/img_list.txt'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
extract_pipeline = [
dict(type='Resize', size=(224, 224)),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg)
]
data = dict(
imgs_per_gpu=256,
workers_per_gpu=5,
extract=dict(
type='ExtractDataset',
data_source=dict(
list_file=data_list, root=data_root, **data_source_cfg),
pipeline=extract_pipeline),
)
| 843 | 23.114286 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/extraction/extract_feats_PAIP_train.py | _base_ = '../base.py'
# model settings
model = dict(
type='Extractor',
pretrained=None,
backbone=dict(
type='ResNet',
depth=18,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='BN')))
# dataset settings
data_source_cfg = dict(type='ImageList')
data_root = 'data/PAIP19/data'
data_list = 'data/PAIP19/meta/paip_train.txt'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
extract_pipeline = [
dict(type='Resize', size=(224, 224)),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg)
]
data = dict(
imgs_per_gpu=256,
workers_per_gpu=5,
extract=dict(
type='ExtractDataset',
data_source=dict(
list_file=data_list, root=data_root, **data_source_cfg),
pipeline=extract_pipeline),
)
| 843 | 23.114286 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/extraction/extract_feats_NCT_aug.py | _base_ = '../base.py'
# model settings
model = dict(
type='Extractor',
pretrained=None,
backbone=dict(
type='ResNet',
depth=18,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='BN')))
# dataset settings
data_source_cfg = dict(type='ImageList')
data_root = 'data/NCT/data'
data_list = 'data/NCT/meta/train.txt'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
extract_pipeline = [
dict(type='RandomResizedCrop', size=224, scale=(0.8, 1.)),
dict(type='RandomHorizontalFlip'),
dict(
type='RandomAppliedTrans',
transforms=[
dict(
type='ColorJitter',
brightness=0.4,
contrast=0.4,
saturation=0.4,
hue=0.2)
],
p=0.8),
dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)
]
data = dict(
imgs_per_gpu=256,
workers_per_gpu=5,
extract=dict(
type='ExtractDataset',
data_source=dict(
list_file=data_list, root=data_root, **data_source_cfg),
pipeline=extract_pipeline),
)
| 1,153 | 24.086957 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/extraction/extract_feats_NCT_test.py | _base_ = '../base.py'
# model settings
model = dict(
type='Extractor',
pretrained=None,
backbone=dict(
type='ResNet',
depth=18,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='BN')))
# dataset settings
data_source_cfg = dict(type='ImageList')
data_root = 'data/NCT/data'
data_list = 'data/NCT/meta/test.txt'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
extract_pipeline = [
dict(type='Resize', size=(224, 224)),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg)
]
data = dict(
imgs_per_gpu=256,
workers_per_gpu=5,
extract=dict(
type='ExtractDataset',
data_source=dict(
list_file=data_list, root=data_root, **data_source_cfg),
pipeline=extract_pipeline),
)
| 831 | 22.771429 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/extraction/r18_extract.py | _base_ = '../base.py'
# model settings
model = dict(
type='Extractor',
pretrained=None,
backbone=dict(
type='ResNet',
depth=18,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='BN')))
# dataset settings
data_source_cfg = dict(type='ImageList')
data_root = 'data/NCT/data'
data_list = 'data/NCT/meta/img_list.txt'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
extract_pipeline = [
dict(type='Resize', size=(224, 224)),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg)]
data = dict(
imgs_per_gpu=256,
workers_per_gpu=5,
extract=dict(
type='ExtractDataset',
data_source=dict(
list_file=data_list, root=data_root, **data_source_cfg),
pipeline=extract_pipeline),
)
| 829 | 24.151515 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/classification/nct/r18_bs512_ep100_wo_4.py | _base_ = '../../base.py'
# model settings
model = dict(
type='Classification',
pretrained=None,
backbone=dict(
type='ResNet',
depth=18,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='BN')),
head=dict(
type='ClsHead', with_avg_pool=True, in_channels=512,
num_classes=9)
)
# dataset settings
data_source_cfg = dict(type='ImageList')
data_root = 'data/NCT/data'
dataset_type = 'ClassificationDataset'
data_train_list = 'data/NCT/meta/wo_4_train_labeled.txt'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_pipeline = [
dict(type='RandomResizedCrop', size=224),
dict(type='RandomHorizontalFlip'),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
test_pipeline = [
dict(type='Resize', size=224),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
batch_size=512
data = dict(
imgs_per_gpu=batch_size//4,
workers_per_gpu=5,
train=dict(
type=dataset_type,
data_source=dict(
list_file=data_train_list,
root=data_root,
**data_source_cfg),
pipeline=train_pipeline),
)
prefetch=False
# optimizer
optimizer = dict(type='SGD', lr=0.5, momentum=0.9, weight_decay=0.)
# learning policy
lr_config = dict(policy='step', step=[30, 60, 90])
checkpoint_config = dict(interval=100)
# runtime settings
total_epochs = 100 | 1,454 | 25.454545 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/classification/nct/r18_bs512_ep100_all.py | _base_ = '../../base.py'
# model settings
model = dict(
type='Classification',
pretrained=None,
backbone=dict(
type='ResNet',
depth=18,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='BN')),
head=dict(
type='ClsHead', with_avg_pool=True, in_channels=512,
num_classes=9)
)
# dataset settings
data_source_cfg = dict(type='ImageList')
data_root = 'data/NCT/data'
dataset_type = 'ClassificationDataset'
data_train_list = 'data/NCT/meta/train_labeled.txt'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_pipeline = [
dict(type='RandomResizedCrop', size=224),
dict(type='RandomHorizontalFlip'),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
test_pipeline = [
dict(type='Resize', size=224),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
batch_size=512
data = dict(
imgs_per_gpu=batch_size//4,
workers_per_gpu=5,
train=dict(
type=dataset_type,
data_source=dict(
list_file=data_train_list,
root=data_root,
**data_source_cfg),
pipeline=train_pipeline),
)
prefetch=False
# optimizer
optimizer = dict(type='SGD', lr=0.5, momentum=0.9, weight_decay=0.)
# learning policy
lr_config = dict(policy='step', step=[30, 60, 90])
checkpoint_config = dict(interval=100)
# runtime settings
total_epochs = 100 | 1,449 | 25.363636 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/classification/nct/r18_bs512_ep100_wo_3.py | _base_ = '../../base.py'
# model settings
model = dict(
type='Classification',
pretrained=None,
backbone=dict(
type='ResNet',
depth=18,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='BN')),
head=dict(
type='ClsHead', with_avg_pool=True, in_channels=512,
num_classes=9)
)
# dataset settings
data_source_cfg = dict(type='ImageList')
data_root = 'data/NCT/data'
dataset_type = 'ClassificationDataset'
data_train_list = 'data/NCT/meta/wo_3_train_labeled.txt'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_pipeline = [
dict(type='RandomResizedCrop', size=224),
dict(type='RandomHorizontalFlip'),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
test_pipeline = [
dict(type='Resize', size=224),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
batch_size=512
data = dict(
imgs_per_gpu=batch_size//4,
workers_per_gpu=5,
train=dict(
type=dataset_type,
data_source=dict(
list_file=data_train_list,
root=data_root,
**data_source_cfg),
pipeline=train_pipeline),
)
prefetch=False
# optimizer
optimizer = dict(type='SGD', lr=0.5, momentum=0.9, weight_decay=0.)
# learning policy
lr_config = dict(policy='step', step=[30, 60, 90])
checkpoint_config = dict(interval=100)
# runtime settings
total_epochs = 100 | 1,454 | 25.454545 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/classification/nct/r18_bs512_ep100_wo_7.py | _base_ = '../../base.py'
# model settings
model = dict(
type='Classification',
pretrained=None,
backbone=dict(
type='ResNet',
depth=18,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='BN')),
head=dict(
type='ClsHead', with_avg_pool=True, in_channels=512,
num_classes=9)
)
# dataset settings
data_source_cfg = dict(type='ImageList')
data_root = 'data/NCT/data'
dataset_type = 'ClassificationDataset'
data_train_list = 'data/NCT/meta/wo_7_train_labeled.txt'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_pipeline = [
dict(type='RandomResizedCrop', size=224),
dict(type='RandomHorizontalFlip'),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
test_pipeline = [
dict(type='Resize', size=224),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
batch_size=512
data = dict(
imgs_per_gpu=batch_size//4,
workers_per_gpu=5,
train=dict(
type=dataset_type,
data_source=dict(
list_file=data_train_list,
root=data_root,
**data_source_cfg),
pipeline=train_pipeline),
)
prefetch=False
# optimizer
optimizer = dict(type='SGD', lr=0.5, momentum=0.9, weight_decay=0.)
# learning policy
lr_config = dict(policy='step', step=[30, 60, 90])
checkpoint_config = dict(interval=100)
# runtime settings
total_epochs = 100 | 1,454 | 25.454545 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/classification/nct/r18_bs512_ep100_wo_0.py | _base_ = '../../base.py'
# model settings
model = dict(
type='Classification',
pretrained=None,
backbone=dict(
type='ResNet',
depth=18,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='BN')),
head=dict(
type='ClsHead', with_avg_pool=True, in_channels=512,
num_classes=9)
)
# dataset settings
data_source_cfg = dict(type='ImageList')
data_root = 'data/NCT/data'
dataset_type = 'ClassificationDataset'
data_train_list = 'data/NCT/meta/wo_0_train_labeled.txt'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_pipeline = [
dict(type='RandomResizedCrop', size=224),
dict(type='RandomHorizontalFlip'),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
test_pipeline = [
dict(type='Resize', size=224),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
batch_size=512
data = dict(
imgs_per_gpu=batch_size//4,
workers_per_gpu=5,
train=dict(
type=dataset_type,
data_source=dict(
list_file=data_train_list,
root=data_root,
**data_source_cfg),
pipeline=train_pipeline),
)
prefetch=False
# optimizer
optimizer = dict(type='SGD', lr=0.5, momentum=0.9, weight_decay=0.)
# learning policy
lr_config = dict(policy='step', step=[30, 60, 90])
checkpoint_config = dict(interval=100)
# runtime settings
total_epochs = 100 | 1,454 | 25.454545 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/classification/nct/r18_bs512_ep100_wo_6.py | _base_ = '../../base.py'
# model settings
model = dict(
type='Classification',
pretrained=None,
backbone=dict(
type='ResNet',
depth=18,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='BN')),
head=dict(
type='ClsHead', with_avg_pool=True, in_channels=512,
num_classes=9)
)
# dataset settings
data_source_cfg = dict(type='ImageList')
data_root = 'data/NCT/data'
dataset_type = 'ClassificationDataset'
data_train_list = 'data/NCT/meta/wo_6_train_labeled.txt'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_pipeline = [
dict(type='RandomResizedCrop', size=224),
dict(type='RandomHorizontalFlip'),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
test_pipeline = [
dict(type='Resize', size=224),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
batch_size=512
data = dict(
imgs_per_gpu=batch_size//4,
workers_per_gpu=5,
train=dict(
type=dataset_type,
data_source=dict(
list_file=data_train_list,
root=data_root,
**data_source_cfg),
pipeline=train_pipeline),
)
prefetch=False
# optimizer
optimizer = dict(type='SGD', lr=0.5, momentum=0.9, weight_decay=0.)
# learning policy
lr_config = dict(policy='step', step=[30, 60, 90])
checkpoint_config = dict(interval=100)
# runtime settings
total_epochs = 100 | 1,454 | 25.454545 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/classification/nct/r18_bs512_ep100_wo_2.py | _base_ = '../../base.py'
# model settings
model = dict(
type='Classification',
pretrained=None,
backbone=dict(
type='ResNet',
depth=18,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='BN')),
head=dict(
type='ClsHead', with_avg_pool=True, in_channels=512,
num_classes=9)
)
# dataset settings
data_source_cfg = dict(type='ImageList')
data_root = 'data/NCT/data'
dataset_type = 'ClassificationDataset'
data_train_list = 'data/NCT/meta/wo_2_train_labeled.txt'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_pipeline = [
dict(type='RandomResizedCrop', size=224),
dict(type='RandomHorizontalFlip'),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
test_pipeline = [
dict(type='Resize', size=224),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
batch_size=512
data = dict(
imgs_per_gpu=batch_size//4,
workers_per_gpu=5,
train=dict(
type=dataset_type,
data_source=dict(
list_file=data_train_list,
root=data_root,
**data_source_cfg),
pipeline=train_pipeline),
)
prefetch=False
# optimizer
optimizer = dict(type='SGD', lr=0.5, momentum=0.9, weight_decay=0.)
# learning policy
lr_config = dict(policy='step', step=[30, 60, 90])
checkpoint_config = dict(interval=100)
# runtime settings
total_epochs = 100 | 1,454 | 25.454545 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/classification/nct/r18_bs512_ep100_wo_1.py | _base_ = '../../base.py'
# model settings
model = dict(
type='Classification',
pretrained=None,
backbone=dict(
type='ResNet',
depth=18,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='BN')),
head=dict(
type='ClsHead', with_avg_pool=True, in_channels=512,
num_classes=9)
)
# dataset settings
data_source_cfg = dict(type='ImageList')
data_root = 'data/NCT/data'
dataset_type = 'ClassificationDataset'
data_train_list = 'data/NCT/meta/wo_1_train_labeled.txt'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_pipeline = [
dict(type='RandomResizedCrop', size=224),
dict(type='RandomHorizontalFlip'),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
test_pipeline = [
dict(type='Resize', size=224),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
batch_size=512
data = dict(
imgs_per_gpu=batch_size//4,
workers_per_gpu=5,
train=dict(
type=dataset_type,
data_source=dict(
list_file=data_train_list,
root=data_root,
**data_source_cfg),
pipeline=train_pipeline),
)
prefetch=False
# optimizer
optimizer = dict(type='SGD', lr=0.5, momentum=0.9, weight_decay=0.)
# learning policy
lr_config = dict(policy='step', step=[30, 60, 90])
checkpoint_config = dict(interval=100)
# runtime settings
total_epochs = 100 | 1,454 | 25.454545 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/classification/nct/r18_bs512_ep100_wo_78.py | _base_ = '../../base.py'
# model settings
model = dict(
type='Classification',
pretrained=None,
backbone=dict(
type='ResNet',
depth=18,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='BN')),
head=dict(
type='ClsHead', with_avg_pool=True, in_channels=512,
num_classes=9)
)
# dataset settings
data_source_cfg = dict(type='ImageList')
data_root = 'data/NCT/data'
dataset_type = 'ClassificationDataset'
data_train_list = 'data/NCT/meta/wo_78_train_labeled.txt'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_pipeline = [
dict(type='RandomResizedCrop', size=224),
dict(type='RandomHorizontalFlip'),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
test_pipeline = [
dict(type='Resize', size=224),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
batch_size=512
data = dict(
imgs_per_gpu=batch_size//4,
workers_per_gpu=5,
train=dict(
type=dataset_type,
data_source=dict(
list_file=data_train_list,
root=data_root,
**data_source_cfg),
pipeline=train_pipeline),
)
prefetch=False
# optimizer
optimizer = dict(type='SGD', lr=0.5, momentum=0.9, weight_decay=0.)
# learning policy
lr_config = dict(policy='step', step=[30, 60, 90])
checkpoint_config = dict(interval=100)
# runtime settings
total_epochs = 100 | 1,455 | 25.472727 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/classification/nct/r18_bs512_ep100_wo_8.py | _base_ = '../../base.py'
# model settings
model = dict(
type='Classification',
pretrained=None,
backbone=dict(
type='ResNet',
depth=18,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='BN')),
head=dict(
type='ClsHead', with_avg_pool=True, in_channels=512,
num_classes=9)
)
# dataset settings
data_source_cfg = dict(type='ImageList')
data_root = 'data/NCT/data'
dataset_type = 'ClassificationDataset'
data_train_list = 'data/NCT/meta/wo_8_train_labeled.txt'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_pipeline = [
dict(type='RandomResizedCrop', size=224),
dict(type='RandomHorizontalFlip'),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
test_pipeline = [
dict(type='Resize', size=224),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
batch_size=512
data = dict(
imgs_per_gpu=batch_size//4,
workers_per_gpu=5,
train=dict(
type=dataset_type,
data_source=dict(
list_file=data_train_list,
root=data_root,
**data_source_cfg),
pipeline=train_pipeline),
)
prefetch=False
# optimizer
optimizer = dict(type='SGD', lr=0.5, momentum=0.9, weight_decay=0.)
# learning policy
lr_config = dict(policy='step', step=[30, 60, 90])
checkpoint_config = dict(interval=100)
# runtime settings
total_epochs = 100 | 1,454 | 25.454545 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/classification/nct/r18_bs512_ep100_wo_5.py | _base_ = '../../base.py'
# model settings
model = dict(
type='Classification',
pretrained=None,
backbone=dict(
type='ResNet',
depth=18,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='BN')),
head=dict(
type='ClsHead', with_avg_pool=True, in_channels=512,
num_classes=9)
)
# dataset settings
data_source_cfg = dict(type='ImageList')
data_root = 'data/NCT/data'
dataset_type = 'ClassificationDataset'
data_train_list = 'data/NCT/meta/wo_5_train_labeled.txt'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_pipeline = [
dict(type='RandomResizedCrop', size=224),
dict(type='RandomHorizontalFlip'),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
test_pipeline = [
dict(type='Resize', size=224),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
batch_size=512
data = dict(
imgs_per_gpu=batch_size//4,
workers_per_gpu=5,
train=dict(
type=dataset_type,
data_source=dict(
list_file=data_train_list,
root=data_root,
**data_source_cfg),
pipeline=train_pipeline),
)
prefetch=False
# optimizer
optimizer = dict(type='SGD', lr=0.5, momentum=0.9, weight_decay=0.)
# learning policy
lr_config = dict(policy='step', step=[30, 60, 90])
checkpoint_config = dict(interval=100)
# runtime settings
total_epochs = 100 | 1,454 | 25.454545 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/wsi_selfsup/moco_v3/r18_bs256_ep200_wo_8.py | _base_ = '../../base.py'
# model settings
model = dict(
type='MOCOv3',
base_momentum=0.996,
backbone=dict(
type='ResNet',
depth=18,
in_channels=3,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='BN')),
projector=dict(
type='NonLinearNeckSimCLR',
in_channels=512,
hid_channels=1024,
out_channels=256,
num_layers=3,
sync_bn=True,
with_bias=False,
with_last_bn=True,
with_avg_pool=True),
predictor=dict(
type='NonLinearNeckSimCLR',
in_channels=256,
hid_channels=1024,
out_channels=256,
num_layers=2,
sync_bn=True,
with_bias=False,
with_last_bn=True,
with_avg_pool=False),
temperature=1,
)
# dataset settings
data_source_cfg = dict(type='ImageList')
data_train_list = 'data/NCT/meta/wo_8_train.txt'
data_train_root = 'data/NCT/data'
dataset_type = 'ContrastiveDataset'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_pipeline = [
dict(type='RandomResizedCrop', size=224, scale=(0.2, 1.)),
dict(type='RandomHorizontalFlip'),
dict(
type='RandomAppliedTrans',
transforms=[
dict(
type='ColorJitter',
brightness=0.4,
contrast=0.4,
saturation=0.4,
hue=0.4)
],
p=0.8),
dict(type='RandomGrayscale', p=0.2),
dict(
type='RandomAppliedTrans',
transforms=[
dict(
type='GaussianBlur',
sigma_min=0.1,
sigma_max=2.0)
],
p=0.5),
dict(type='RandomAppliedTrans',
transforms=[dict(type='Solarization')], p=0.2)
]
test_pipeline = [
dict(type='Resize', size=224),
]
# prefetch
prefetch = True
if not prefetch:
train_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)])
batch_size =256
data = dict(
imgs_per_gpu=batch_size//8, # total 32*8=256
#imgs_per_gpu=16,
workers_per_gpu=5,
train=dict(
type=dataset_type,
data_source=dict(
list_file=data_train_list, root=data_train_root,
**data_source_cfg),
pipeline=train_pipeline,
prefetch=prefetch),
)
# additional hooks
custom_hooks = [dict(type='BYOLHook', end_momentum=1.)]
# optimizer
optimizer = dict(type='LARS', lr=0.3, weight_decay=1.5e-6, momentum=0.9,)
# lr schedule
lr_config = dict(policy='CosineAnnealing', min_lr=0.)
checkpoint_config = dict(interval=50)
# runtime settings
total_epochs = 200
| 2,665 | 23.458716 | 90 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/wsi_selfsup/moco_v3/r18_bs256_ep200_wo_4.py | _base_ = '../../base.py'
# model settings
model = dict(
type='MOCOv3',
base_momentum=0.996,
backbone=dict(
type='ResNet',
depth=18,
in_channels=3,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='BN')),
projector=dict(
type='NonLinearNeckSimCLR',
in_channels=512,
hid_channels=1024,
out_channels=256,
num_layers=3,
sync_bn=True,
with_bias=False,
with_last_bn=True,
with_avg_pool=True),
predictor=dict(
type='NonLinearNeckSimCLR',
in_channels=256,
hid_channels=1024,
out_channels=256,
num_layers=2,
sync_bn=True,
with_bias=False,
with_last_bn=True,
with_avg_pool=False),
temperature=1,
)
# dataset settings
data_source_cfg = dict(type='ImageList')
data_train_list = 'data/NCT/meta/wo_4_train.txt'
data_train_root = 'data/NCT/data'
dataset_type = 'ContrastiveDataset'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_pipeline = [
dict(type='RandomResizedCrop', size=224, scale=(0.2, 1.)),
dict(type='RandomHorizontalFlip'),
dict(
type='RandomAppliedTrans',
transforms=[
dict(
type='ColorJitter',
brightness=0.4,
contrast=0.4,
saturation=0.4,
hue=0.4)
],
p=0.8),
dict(type='RandomGrayscale', p=0.2),
dict(
type='RandomAppliedTrans',
transforms=[
dict(
type='GaussianBlur',
sigma_min=0.1,
sigma_max=2.0)
],
p=0.5),
dict(type='RandomAppliedTrans',
transforms=[dict(type='Solarization')], p=0.2)
]
test_pipeline = [
dict(type='Resize', size=224),
]
# prefetch
prefetch = True
if not prefetch:
train_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)])
batch_size =256
data = dict(
imgs_per_gpu=batch_size//8, # total 32*8=256
#imgs_per_gpu=16,
workers_per_gpu=5,
train=dict(
type=dataset_type,
data_source=dict(
list_file=data_train_list, root=data_train_root,
**data_source_cfg),
pipeline=train_pipeline,
prefetch=prefetch),
)
# additional hooks
custom_hooks = [dict(type='BYOLHook', end_momentum=1.)]
# optimizer
optimizer = dict(type='LARS', lr=0.3, weight_decay=1.5e-6, momentum=0.9,)
# lr schedule
lr_config = dict(policy='CosineAnnealing', min_lr=0.)
checkpoint_config = dict(interval=50)
# runtime settings
total_epochs = 200
| 2,665 | 23.458716 | 90 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/wsi_selfsup/moco_v3/r18_bs256_ep200_wo_5.py | _base_ = '../../base.py'
# model settings
model = dict(
type='MOCOv3',
base_momentum=0.996,
backbone=dict(
type='ResNet',
depth=18,
in_channels=3,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='BN')),
projector=dict(
type='NonLinearNeckSimCLR',
in_channels=512,
hid_channels=1024,
out_channels=256,
num_layers=3,
sync_bn=True,
with_bias=False,
with_last_bn=True,
with_avg_pool=True),
predictor=dict(
type='NonLinearNeckSimCLR',
in_channels=256,
hid_channels=1024,
out_channels=256,
num_layers=2,
sync_bn=True,
with_bias=False,
with_last_bn=True,
with_avg_pool=False),
temperature=1,
)
# dataset settings
data_source_cfg = dict(type='ImageList')
data_train_list = 'data/NCT/meta/wo_5_train.txt'
data_train_root = 'data/NCT/data'
dataset_type = 'ContrastiveDataset'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_pipeline = [
dict(type='RandomResizedCrop', size=224, scale=(0.2, 1.)),
dict(type='RandomHorizontalFlip'),
dict(
type='RandomAppliedTrans',
transforms=[
dict(
type='ColorJitter',
brightness=0.4,
contrast=0.4,
saturation=0.4,
hue=0.4)
],
p=0.8),
dict(type='RandomGrayscale', p=0.2),
dict(
type='RandomAppliedTrans',
transforms=[
dict(
type='GaussianBlur',
sigma_min=0.1,
sigma_max=2.0)
],
p=0.5),
dict(type='RandomAppliedTrans',
transforms=[dict(type='Solarization')], p=0.2)
]
test_pipeline = [
dict(type='Resize', size=224),
]
# prefetch
prefetch = True
if not prefetch:
train_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)])
batch_size =256
data = dict(
imgs_per_gpu=batch_size//8, # total 32*8=256
#imgs_per_gpu=16,
workers_per_gpu=5,
train=dict(
type=dataset_type,
data_source=dict(
list_file=data_train_list, root=data_train_root,
**data_source_cfg),
pipeline=train_pipeline,
prefetch=prefetch),
)
# additional hooks
custom_hooks = [dict(type='BYOLHook', end_momentum=1.)]
# optimizer
optimizer = dict(type='LARS', lr=0.3, weight_decay=1.5e-6, momentum=0.9,)
# lr schedule
lr_config = dict(policy='CosineAnnealing', min_lr=0.)
checkpoint_config = dict(interval=50)
# runtime settings
total_epochs = 200
| 2,665 | 23.458716 | 90 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/wsi_selfsup/moco_v3/r18_bs256_ep200_wo_78.py | _base_ = '../../base.py'
# model settings
model = dict(
type='MOCOv3',
base_momentum=0.996,
backbone=dict(
type='ResNet',
depth=18,
in_channels=3,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='BN')),
projector=dict(
type='NonLinearNeckSimCLR',
in_channels=512,
hid_channels=1024,
out_channels=256,
num_layers=3,
sync_bn=True,
with_bias=False,
with_last_bn=True,
with_avg_pool=True),
predictor=dict(
type='NonLinearNeckSimCLR',
in_channels=256,
hid_channels=1024,
out_channels=256,
num_layers=2,
sync_bn=True,
with_bias=False,
with_last_bn=True,
with_avg_pool=False),
temperature=1,
)
# dataset settings
data_source_cfg = dict(type='ImageList')
data_train_list = 'data/NCT/meta/wo_78_train.txt'
data_train_root = 'data/NCT/data'
dataset_type = 'ContrastiveDataset'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_pipeline = [
dict(type='RandomResizedCrop', size=224, scale=(0.2, 1.)),
dict(type='RandomHorizontalFlip'),
dict(
type='RandomAppliedTrans',
transforms=[
dict(
type='ColorJitter',
brightness=0.4,
contrast=0.4,
saturation=0.4,
hue=0.4)
],
p=0.8),
dict(type='RandomGrayscale', p=0.2),
dict(
type='RandomAppliedTrans',
transforms=[
dict(
type='GaussianBlur',
sigma_min=0.1,
sigma_max=2.0)
],
p=0.5),
dict(type='RandomAppliedTrans',
transforms=[dict(type='Solarization')], p=0.2)
]
test_pipeline = [
dict(type='Resize', size=224),
]
# prefetch
prefetch = True
if not prefetch:
train_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)])
batch_size =256
data = dict(
imgs_per_gpu=batch_size//8, # total 32*8=256
#imgs_per_gpu=16,
workers_per_gpu=5,
train=dict(
type=dataset_type,
data_source=dict(
list_file=data_train_list, root=data_train_root,
**data_source_cfg),
pipeline=train_pipeline,
prefetch=prefetch),
)
# additional hooks
custom_hooks = [dict(type='BYOLHook', end_momentum=1.)]
# optimizer
optimizer = dict(type='LARS', lr=0.3, weight_decay=1.5e-6, momentum=0.9,)
# lr schedule
lr_config = dict(policy='CosineAnnealing', min_lr=0.)
checkpoint_config = dict(interval=50)
# runtime settings
total_epochs = 200
| 2,666 | 23.46789 | 90 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/wsi_selfsup/moco_v3/r18_bs256_ep200_wo_2.py | _base_ = '../../base.py'
# MoCo v3 self-supervised pre-training: ResNet-18 backbone, LARS, 200 epochs.
# Trains on NCT patches with one tissue class held out (see data_train_list).
# model settings
model = dict(
    type='MOCOv3',
    base_momentum=0.996,
    backbone=dict(
        type='ResNet',
        depth=18,
        in_channels=3,
        out_indices=[4],  # 0: conv-1, x: stage-x
        norm_cfg=dict(type='BN')),
    projector=dict(
        type='NonLinearNeckSimCLR',
        in_channels=512,
        hid_channels=1024,
        out_channels=256,
        num_layers=3,
        sync_bn=True,
        with_bias=False,
        with_last_bn=True,
        with_avg_pool=True),
    predictor=dict(
        type='NonLinearNeckSimCLR',
        in_channels=256,
        hid_channels=1024,
        out_channels=256,
        num_layers=2,
        sync_bn=True,
        with_bias=False,
        with_last_bn=True,
        with_avg_pool=False),
    temperature=1,
    )
# dataset settings
data_source_cfg = dict(type='ImageList')
# training list with tissue class 2 excluded (held-out class experiment)
data_train_list = 'data/NCT/meta/wo_2_train.txt'
data_train_root = 'data/NCT/data'
dataset_type = 'ContrastiveDataset'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# two-view contrastive augmentation pipeline
train_pipeline = [
    dict(type='RandomResizedCrop', size=224, scale=(0.2, 1.)),
    dict(type='RandomHorizontalFlip'),
    dict(
        type='RandomAppliedTrans',
        transforms=[
            dict(
                type='ColorJitter',
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
                hue=0.4)
        ],
        p=0.8),
    dict(type='RandomGrayscale', p=0.2),
    dict(
        type='RandomAppliedTrans',
        transforms=[
            dict(
                type='GaussianBlur',
                sigma_min=0.1,
                sigma_max=2.0)
        ],
        p=0.5),
    dict(type='RandomAppliedTrans',
         transforms=[dict(type='Solarization')], p=0.2)
]
# defined for reference only; no val/test dataset is configured below
test_pipeline = [
    dict(type='Resize', size=224),
]
# prefetch
prefetch = True
# ToTensor/Normalize are appended only when prefetch is off
# (presumably handled by the prefetcher otherwise -- TODO confirm)
if not prefetch:
    train_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)])
batch_size =256
data = dict(
    imgs_per_gpu=batch_size//8,  # total 32*8=256
    #imgs_per_gpu=16,
    workers_per_gpu=5,
    train=dict(
        type=dataset_type,
        data_source=dict(
            list_file=data_train_list, root=data_train_root,
            **data_source_cfg),
        pipeline=train_pipeline,
        prefetch=prefetch),
    )
# additional hooks
# momentum-schedule hook: drives the EMA momentum towards end_momentum
custom_hooks = [dict(type='BYOLHook', end_momentum=1.)]
# optimizer
optimizer = dict(type='LARS', lr=0.3, weight_decay=1.5e-6, momentum=0.9,)
# lr schedule
lr_config = dict(policy='CosineAnnealing', min_lr=0.)
checkpoint_config = dict(interval=50)
# runtime settings
total_epochs = 200
| 2,665 | 23.458716 | 90 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/wsi_selfsup/moco_v3/r18_bs256_ep200_wo_6.py | _base_ = '../../base.py'
# MoCo v3 self-supervised pre-training: ResNet-18 backbone, LARS, 200 epochs.
# Trains on NCT patches with one tissue class held out (see data_train_list).
# model settings
model = dict(
    type='MOCOv3',
    base_momentum=0.996,
    backbone=dict(
        type='ResNet',
        depth=18,
        in_channels=3,
        out_indices=[4],  # 0: conv-1, x: stage-x
        norm_cfg=dict(type='BN')),
    projector=dict(
        type='NonLinearNeckSimCLR',
        in_channels=512,
        hid_channels=1024,
        out_channels=256,
        num_layers=3,
        sync_bn=True,
        with_bias=False,
        with_last_bn=True,
        with_avg_pool=True),
    predictor=dict(
        type='NonLinearNeckSimCLR',
        in_channels=256,
        hid_channels=1024,
        out_channels=256,
        num_layers=2,
        sync_bn=True,
        with_bias=False,
        with_last_bn=True,
        with_avg_pool=False),
    temperature=1,
    )
# dataset settings
data_source_cfg = dict(type='ImageList')
# training list with tissue class 6 excluded (held-out class experiment)
data_train_list = 'data/NCT/meta/wo_6_train.txt'
data_train_root = 'data/NCT/data'
dataset_type = 'ContrastiveDataset'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# two-view contrastive augmentation pipeline
train_pipeline = [
    dict(type='RandomResizedCrop', size=224, scale=(0.2, 1.)),
    dict(type='RandomHorizontalFlip'),
    dict(
        type='RandomAppliedTrans',
        transforms=[
            dict(
                type='ColorJitter',
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
                hue=0.4)
        ],
        p=0.8),
    dict(type='RandomGrayscale', p=0.2),
    dict(
        type='RandomAppliedTrans',
        transforms=[
            dict(
                type='GaussianBlur',
                sigma_min=0.1,
                sigma_max=2.0)
        ],
        p=0.5),
    dict(type='RandomAppliedTrans',
         transforms=[dict(type='Solarization')], p=0.2)
]
# defined for reference only; no val/test dataset is configured below
test_pipeline = [
    dict(type='Resize', size=224),
]
# prefetch
prefetch = True
# ToTensor/Normalize are appended only when prefetch is off
# (presumably handled by the prefetcher otherwise -- TODO confirm)
if not prefetch:
    train_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)])
batch_size =256
data = dict(
    imgs_per_gpu=batch_size//8,  # total 32*8=256
    #imgs_per_gpu=16,
    workers_per_gpu=5,
    train=dict(
        type=dataset_type,
        data_source=dict(
            list_file=data_train_list, root=data_train_root,
            **data_source_cfg),
        pipeline=train_pipeline,
        prefetch=prefetch),
    )
# additional hooks
# momentum-schedule hook: drives the EMA momentum towards end_momentum
custom_hooks = [dict(type='BYOLHook', end_momentum=1.)]
# optimizer
optimizer = dict(type='LARS', lr=0.3, weight_decay=1.5e-6, momentum=0.9,)
# lr schedule
lr_config = dict(policy='CosineAnnealing', min_lr=0.)
checkpoint_config = dict(interval=50)
# runtime settings
total_epochs = 200
| 2,665 | 23.458716 | 90 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/wsi_selfsup/moco_v3/r18_bs256_ep200_wo_0.py | _base_ = '../../base.py'
# MoCo v3 self-supervised pre-training: ResNet-18 backbone, LARS, 200 epochs.
# Trains on NCT patches with one tissue class held out (see data_train_list).
# model settings
model = dict(
    type='MOCOv3',
    base_momentum=0.996,
    backbone=dict(
        type='ResNet',
        depth=18,
        in_channels=3,
        out_indices=[4],  # 0: conv-1, x: stage-x
        norm_cfg=dict(type='BN')),
    projector=dict(
        type='NonLinearNeckSimCLR',
        in_channels=512,
        hid_channels=1024,
        out_channels=256,
        num_layers=3,
        sync_bn=True,
        with_bias=False,
        with_last_bn=True,
        with_avg_pool=True),
    predictor=dict(
        type='NonLinearNeckSimCLR',
        in_channels=256,
        hid_channels=1024,
        out_channels=256,
        num_layers=2,
        sync_bn=True,
        with_bias=False,
        with_last_bn=True,
        with_avg_pool=False),
    temperature=1,
    )
# dataset settings
data_source_cfg = dict(type='ImageList')
# training list with tissue class 0 excluded (held-out class experiment)
data_train_list = 'data/NCT/meta/wo_0_train.txt'
data_train_root = 'data/NCT/data'
dataset_type = 'ContrastiveDataset'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# two-view contrastive augmentation pipeline
train_pipeline = [
    dict(type='RandomResizedCrop', size=224, scale=(0.2, 1.)),
    dict(type='RandomHorizontalFlip'),
    dict(
        type='RandomAppliedTrans',
        transforms=[
            dict(
                type='ColorJitter',
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
                hue=0.4)
        ],
        p=0.8),
    dict(type='RandomGrayscale', p=0.2),
    dict(
        type='RandomAppliedTrans',
        transforms=[
            dict(
                type='GaussianBlur',
                sigma_min=0.1,
                sigma_max=2.0)
        ],
        p=0.5),
    dict(type='RandomAppliedTrans',
         transforms=[dict(type='Solarization')], p=0.2)
]
# defined for reference only; no val/test dataset is configured below
test_pipeline = [
    dict(type='Resize', size=224),
]
# prefetch
prefetch = True
# ToTensor/Normalize are appended only when prefetch is off
# (presumably handled by the prefetcher otherwise -- TODO confirm)
if not prefetch:
    train_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)])
batch_size =256
data = dict(
    imgs_per_gpu=batch_size//8,  # total 32*8=256
    #imgs_per_gpu=16,
    workers_per_gpu=5,
    train=dict(
        type=dataset_type,
        data_source=dict(
            list_file=data_train_list, root=data_train_root,
            **data_source_cfg),
        pipeline=train_pipeline,
        prefetch=prefetch),
    )
# additional hooks
# momentum-schedule hook: drives the EMA momentum towards end_momentum
custom_hooks = [dict(type='BYOLHook', end_momentum=1.)]
# optimizer
optimizer = dict(type='LARS', lr=0.3, weight_decay=1.5e-6, momentum=0.9,)
# lr schedule
lr_config = dict(policy='CosineAnnealing', min_lr=0.)
checkpoint_config = dict(interval=50)
# runtime settings
total_epochs = 200
| 2,665 | 23.458716 | 90 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/wsi_selfsup/moco_v3/r18_bs256_ep200_all.py | _base_ = '../../base.py'
# MoCo v3 self-supervised pre-training: ResNet-18 backbone, LARS, 200 epochs.
# Trains on the full NCT patch training set (no class held out).
# model settings
model = dict(
    type='MOCOv3',
    base_momentum=0.996,
    backbone=dict(
        type='ResNet',
        depth=18,
        in_channels=3,
        out_indices=[4],  # 0: conv-1, x: stage-x
        norm_cfg=dict(type='BN')),
    projector=dict(
        type='NonLinearNeckSimCLR',
        in_channels=512,
        hid_channels=1024,
        out_channels=256,
        num_layers=3,
        sync_bn=True,
        with_bias=False,
        with_last_bn=True,
        with_avg_pool=True),
    predictor=dict(
        type='NonLinearNeckSimCLR',
        in_channels=256,
        hid_channels=1024,
        out_channels=256,
        num_layers=2,
        sync_bn=True,
        with_bias=False,
        with_last_bn=True,
        with_avg_pool=False),
    temperature=1,
    )
# dataset settings
data_source_cfg = dict(type='ImageList')
# full training list (all tissue classes)
data_train_list = 'data/NCT/meta/train.txt'
data_train_root = 'data/NCT/data'
dataset_type = 'ContrastiveDataset'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# two-view contrastive augmentation pipeline
train_pipeline = [
    dict(type='RandomResizedCrop', size=224, scale=(0.2, 1.)),
    dict(type='RandomHorizontalFlip'),
    dict(
        type='RandomAppliedTrans',
        transforms=[
            dict(
                type='ColorJitter',
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
                hue=0.4)
        ],
        p=0.8),
    dict(type='RandomGrayscale', p=0.2),
    dict(
        type='RandomAppliedTrans',
        transforms=[
            dict(
                type='GaussianBlur',
                sigma_min=0.1,
                sigma_max=2.0)
        ],
        p=0.5),
    dict(type='RandomAppliedTrans',
         transforms=[dict(type='Solarization')], p=0.2)
]
# defined for reference only; no val/test dataset is configured below
test_pipeline = [
    dict(type='Resize', size=224),
]
# prefetch
prefetch = True
# ToTensor/Normalize are appended only when prefetch is off
# (presumably handled by the prefetcher otherwise -- TODO confirm)
if not prefetch:
    train_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)])
batch_size =256
data = dict(
    imgs_per_gpu=batch_size//8,  # total 32*8=256
    #imgs_per_gpu=16,
    workers_per_gpu=5,
    train=dict(
        type=dataset_type,
        data_source=dict(
            list_file=data_train_list, root=data_train_root,
            **data_source_cfg),
        pipeline=train_pipeline,
        prefetch=prefetch),
    )
# additional hooks
# momentum-schedule hook: drives the EMA momentum towards end_momentum
custom_hooks = [dict(type='BYOLHook', end_momentum=1.)]
# optimizer
optimizer = dict(type='LARS', lr=0.3, weight_decay=1.5e-6, momentum=0.9,)
# lr schedule
lr_config = dict(policy='CosineAnnealing', min_lr=0.)
checkpoint_config = dict(interval=50)
# runtime settings
total_epochs = 200
| 2,660 | 23.412844 | 90 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/wsi_selfsup/moco_v3/r18_bs256_ep200_wo_7.py | _base_ = '../../base.py'
# MoCo v3 self-supervised pre-training: ResNet-18 backbone, LARS, 200 epochs.
# Trains on NCT patches with one tissue class held out (see data_train_list).
# model settings
model = dict(
    type='MOCOv3',
    base_momentum=0.996,
    backbone=dict(
        type='ResNet',
        depth=18,
        in_channels=3,
        out_indices=[4],  # 0: conv-1, x: stage-x
        norm_cfg=dict(type='BN')),
    projector=dict(
        type='NonLinearNeckSimCLR',
        in_channels=512,
        hid_channels=1024,
        out_channels=256,
        num_layers=3,
        sync_bn=True,
        with_bias=False,
        with_last_bn=True,
        with_avg_pool=True),
    predictor=dict(
        type='NonLinearNeckSimCLR',
        in_channels=256,
        hid_channels=1024,
        out_channels=256,
        num_layers=2,
        sync_bn=True,
        with_bias=False,
        with_last_bn=True,
        with_avg_pool=False),
    temperature=1,
    )
# dataset settings
data_source_cfg = dict(type='ImageList')
# training list with tissue class 7 excluded (held-out class experiment)
data_train_list = 'data/NCT/meta/wo_7_train.txt'
data_train_root = 'data/NCT/data'
dataset_type = 'ContrastiveDataset'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# two-view contrastive augmentation pipeline
train_pipeline = [
    dict(type='RandomResizedCrop', size=224, scale=(0.2, 1.)),
    dict(type='RandomHorizontalFlip'),
    dict(
        type='RandomAppliedTrans',
        transforms=[
            dict(
                type='ColorJitter',
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
                hue=0.4)
        ],
        p=0.8),
    dict(type='RandomGrayscale', p=0.2),
    dict(
        type='RandomAppliedTrans',
        transforms=[
            dict(
                type='GaussianBlur',
                sigma_min=0.1,
                sigma_max=2.0)
        ],
        p=0.5),
    dict(type='RandomAppliedTrans',
         transforms=[dict(type='Solarization')], p=0.2)
]
# defined for reference only; no val/test dataset is configured below
test_pipeline = [
    dict(type='Resize', size=224),
]
# prefetch
prefetch = True
# ToTensor/Normalize are appended only when prefetch is off
# (presumably handled by the prefetcher otherwise -- TODO confirm)
if not prefetch:
    train_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)])
batch_size =256
data = dict(
    imgs_per_gpu=batch_size//8,  # total 32*8=256
    #imgs_per_gpu=16,
    workers_per_gpu=5,
    train=dict(
        type=dataset_type,
        data_source=dict(
            list_file=data_train_list, root=data_train_root,
            **data_source_cfg),
        pipeline=train_pipeline,
        prefetch=prefetch),
    )
# additional hooks
# momentum-schedule hook: drives the EMA momentum towards end_momentum
custom_hooks = [dict(type='BYOLHook', end_momentum=1.)]
# optimizer
optimizer = dict(type='LARS', lr=0.3, weight_decay=1.5e-6, momentum=0.9,)
# lr schedule
lr_config = dict(policy='CosineAnnealing', min_lr=0.)
checkpoint_config = dict(interval=50)
# runtime settings
total_epochs = 200
| 2,665 | 23.458716 | 90 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/wsi_selfsup/moco_v3/r18_bs256_ep200_wo_3.py | _base_ = '../../base.py'
# MoCo v3 self-supervised pre-training: ResNet-18 backbone, LARS, 200 epochs.
# Trains on NCT patches with one tissue class held out (see data_train_list).
# model settings
model = dict(
    type='MOCOv3',
    base_momentum=0.996,
    backbone=dict(
        type='ResNet',
        depth=18,
        in_channels=3,
        out_indices=[4],  # 0: conv-1, x: stage-x
        norm_cfg=dict(type='BN')),
    projector=dict(
        type='NonLinearNeckSimCLR',
        in_channels=512,
        hid_channels=1024,
        out_channels=256,
        num_layers=3,
        sync_bn=True,
        with_bias=False,
        with_last_bn=True,
        with_avg_pool=True),
    predictor=dict(
        type='NonLinearNeckSimCLR',
        in_channels=256,
        hid_channels=1024,
        out_channels=256,
        num_layers=2,
        sync_bn=True,
        with_bias=False,
        with_last_bn=True,
        with_avg_pool=False),
    temperature=1,
    )
# dataset settings
data_source_cfg = dict(type='ImageList')
# training list with tissue class 3 excluded (held-out class experiment)
data_train_list = 'data/NCT/meta/wo_3_train.txt'
data_train_root = 'data/NCT/data'
dataset_type = 'ContrastiveDataset'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# two-view contrastive augmentation pipeline
train_pipeline = [
    dict(type='RandomResizedCrop', size=224, scale=(0.2, 1.)),
    dict(type='RandomHorizontalFlip'),
    dict(
        type='RandomAppliedTrans',
        transforms=[
            dict(
                type='ColorJitter',
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
                hue=0.4)
        ],
        p=0.8),
    dict(type='RandomGrayscale', p=0.2),
    dict(
        type='RandomAppliedTrans',
        transforms=[
            dict(
                type='GaussianBlur',
                sigma_min=0.1,
                sigma_max=2.0)
        ],
        p=0.5),
    dict(type='RandomAppliedTrans',
         transforms=[dict(type='Solarization')], p=0.2)
]
# defined for reference only; no val/test dataset is configured below
test_pipeline = [
    dict(type='Resize', size=224),
]
# prefetch
prefetch = True
# ToTensor/Normalize are appended only when prefetch is off
# (presumably handled by the prefetcher otherwise -- TODO confirm)
if not prefetch:
    train_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)])
batch_size =256
data = dict(
    imgs_per_gpu=batch_size//8,  # total 32*8=256
    #imgs_per_gpu=16,
    workers_per_gpu=5,
    train=dict(
        type=dataset_type,
        data_source=dict(
            list_file=data_train_list, root=data_train_root,
            **data_source_cfg),
        pipeline=train_pipeline,
        prefetch=prefetch),
    )
# additional hooks
# momentum-schedule hook: drives the EMA momentum towards end_momentum
custom_hooks = [dict(type='BYOLHook', end_momentum=1.)]
# optimizer
optimizer = dict(type='LARS', lr=0.3, weight_decay=1.5e-6, momentum=0.9,)
# lr schedule
lr_config = dict(policy='CosineAnnealing', min_lr=0.)
checkpoint_config = dict(interval=50)
# runtime settings
total_epochs = 200
| 2,665 | 23.458716 | 90 | py |
Few-shot-WSI | Few-shot-WSI-master/configs/wsi_selfsup/moco_v3/r18_bs256_ep200_wo_1.py | _base_ = '../../base.py'
# MoCo v3 self-supervised pre-training: ResNet-18 backbone, LARS, 200 epochs.
# Trains on NCT patches with one tissue class held out (see data_train_list).
# model settings
model = dict(
    type='MOCOv3',
    base_momentum=0.996,
    backbone=dict(
        type='ResNet',
        depth=18,
        in_channels=3,
        out_indices=[4],  # 0: conv-1, x: stage-x
        norm_cfg=dict(type='BN')),
    projector=dict(
        type='NonLinearNeckSimCLR',
        in_channels=512,
        hid_channels=1024,
        out_channels=256,
        num_layers=3,
        sync_bn=True,
        with_bias=False,
        with_last_bn=True,
        with_avg_pool=True),
    predictor=dict(
        type='NonLinearNeckSimCLR',
        in_channels=256,
        hid_channels=1024,
        out_channels=256,
        num_layers=2,
        sync_bn=True,
        with_bias=False,
        with_last_bn=True,
        with_avg_pool=False),
    temperature=1,
    )
# dataset settings
data_source_cfg = dict(type='ImageList')
# training list with tissue class 1 excluded (held-out class experiment)
data_train_list = 'data/NCT/meta/wo_1_train.txt'
data_train_root = 'data/NCT/data'
dataset_type = 'ContrastiveDataset'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# two-view contrastive augmentation pipeline
train_pipeline = [
    dict(type='RandomResizedCrop', size=224, scale=(0.2, 1.)),
    dict(type='RandomHorizontalFlip'),
    dict(
        type='RandomAppliedTrans',
        transforms=[
            dict(
                type='ColorJitter',
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
                hue=0.4)
        ],
        p=0.8),
    dict(type='RandomGrayscale', p=0.2),
    dict(
        type='RandomAppliedTrans',
        transforms=[
            dict(
                type='GaussianBlur',
                sigma_min=0.1,
                sigma_max=2.0)
        ],
        p=0.5),
    dict(type='RandomAppliedTrans',
         transforms=[dict(type='Solarization')], p=0.2)
]
# defined for reference only; no val/test dataset is configured below
test_pipeline = [
    dict(type='Resize', size=224),
]
# prefetch
prefetch = True
# ToTensor/Normalize are appended only when prefetch is off
# (presumably handled by the prefetcher otherwise -- TODO confirm)
if not prefetch:
    train_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)])
batch_size =256
data = dict(
    imgs_per_gpu=batch_size//8,  # total 32*8=256
    #imgs_per_gpu=16,
    workers_per_gpu=5,
    train=dict(
        type=dataset_type,
        data_source=dict(
            list_file=data_train_list, root=data_train_root,
            **data_source_cfg),
        pipeline=train_pipeline,
        prefetch=prefetch),
    )
# additional hooks
# momentum-schedule hook: drives the EMA momentum towards end_momentum
custom_hooks = [dict(type='BYOLHook', end_momentum=1.)]
# optimizer
optimizer = dict(type='LARS', lr=0.3, weight_decay=1.5e-6, momentum=0.9,)
# lr schedule
lr_config = dict(policy='CosineAnnealing', min_lr=0.)
checkpoint_config = dict(interval=50)
# runtime settings
total_epochs = 200
| 2,665 | 23.458716 | 90 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/__init__.py | from .version import __version__, short_version
# Explicit public API of the top-level package: version helpers only.
__all__ = ['__version__', 'short_version']
| 92 | 22.25 | 47 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/apis/__init__.py | from .train import get_root_logger, set_random_seed, train_model
| 65 | 32 | 64 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/apis/train.py | import random
import re
from collections import OrderedDict
import numpy as np
import torch
import torch.distributed as dist
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import DistSamplerSeedHook, Runner, obj_from_dict
from openselfsup.datasets import build_dataloader
from openselfsup.hooks import build_hook, DistOptimizerHook
from openselfsup.utils import get_root_logger, optimizers, print_log
# apex provides optional mixed-precision support (see _dist_train); training
# falls back to fp32 when it is absent.
try:
    import apex
except ImportError:
    # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
    # and any real error raised while importing apex
    print('apex is not installed')
def set_random_seed(seed, deterministic=False):
    """Seed every RNG used during training (python, numpy, torch, CUDA).
    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
            Default: False.
    """
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed,
                    torch.cuda.manual_seed_all):
        seed_fn(seed)
    if deterministic:
        # trade speed for run-to-run reproducibility in CUDNN
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def parse_losses(losses):
    """Aggregate a dict of loss tensors into a total loss and scalar logs.
    Args:
        losses (dict): values are tensors or lists of tensors; every entry
            whose key contains 'loss' contributes to the total.
    Returns:
        tuple: (total loss tensor, OrderedDict of scalar log values).
    """
    log_vars = OrderedDict()
    for name, value in losses.items():
        if isinstance(value, torch.Tensor):
            log_vars[name] = value.mean()
        elif isinstance(value, list):
            log_vars[name] = sum(item.mean() for item in value)
        else:
            raise TypeError(
                '{} is not a tensor or list of tensors'.format(name))
    # total loss = sum of every entry whose key mentions 'loss'
    loss = sum(v for k, v in log_vars.items() if 'loss' in k)
    log_vars['loss'] = loss
    for name in log_vars:
        value = log_vars[name]
        # average across workers when running distributed
        if dist.is_available() and dist.is_initialized():
            value = value.data.clone()
            dist.all_reduce(value.div_(dist.get_world_size()))
        log_vars[name] = value.item()
    return loss, log_vars
def batch_processor(model, data, train_mode):
    """Process a data batch.
    This method is required as an argument of Runner, which defines how to
    process a data batch and obtain proper outputs. The first 3 arguments of
    batch_processor are fixed by the Runner interface.
    Args:
        model (nn.Module): A PyTorch model.
        data (dict): The data batch in a dict.
        train_mode (bool): Training mode or not (unused by this model).
    Returns:
        dict: A dict containing losses and log vars.
    """
    loss, log_vars = parse_losses(model(**data))
    return dict(
        loss=loss, log_vars=log_vars, num_samples=len(data['img'].data))
def train_model(model,
                dataset,
                cfg,
                distributed=False,
                timestamp=None,
                meta=None):
    """Entry point: dispatch to the distributed or single-machine trainer."""
    logger = get_root_logger(cfg.log_level)
    trainer = _dist_train if distributed else _non_dist_train
    trainer(model, dataset, cfg, logger=logger, timestamp=timestamp, meta=meta)
def build_optimizer(model, optimizer_cfg):
    r"""Build optimizer from configs.
    Args:
        model (:obj:`nn.Module`): The model with parameters to be optimized.
        optimizer_cfg (dict): The config dict of the optimizer.
            Positional fields are:
                - type: class name of the optimizer.
                - lr: base learning rate.
            Optional fields are:
                - any arguments of the corresponding optimizer type, e.g.,
                  weight_decay, momentum, etc.
                - paramwise_options: a dict with regular expression as keys
                  to match parameter names and a dict containing options as
                  values. Options include 6 fields: lr, lr_mult, momentum,
                  momentum_mult, weight_decay, weight_decay_mult.
    Returns:
        torch.optim.Optimizer: The initialized optimizer.
    Example:
        >>> model = torch.nn.modules.Conv1d(1, 1, 1)
        >>> paramwise_options = {
        >>>     '(bn|gn)(\d+)?.(weight|bias)': dict(weight_decay_mult=0.1),
        >>>     '\Ahead.': dict(lr_mult=10, momentum=0)}
        >>> optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9,
        >>>                      weight_decay=0.0001,
        >>>                      paramwise_options=paramwise_options)
        >>> optimizer = build_optimizer(model, optimizer_cfg)
    """
    # unwrap (Distributed)DataParallel so named_parameters match the regexes
    if hasattr(model, 'module'):
        model = model.module
    optimizer_cfg = optimizer_cfg.copy()
    paramwise_options = optimizer_cfg.pop('paramwise_options', None)
    # if no paramwise option is specified, just use the global setting
    if paramwise_options is None:
        return obj_from_dict(optimizer_cfg, optimizers,
                             dict(params=model.parameters()))
    else:
        assert isinstance(paramwise_options, dict)
        params = []
        for name, param in model.named_parameters():
            param_group = {'params': [param]}
            # frozen parameters get a bare group with no special options
            if not param.requires_grad:
                params.append(param_group)
                continue
            for regexp, options in paramwise_options.items():
                if re.search(regexp, name):
                    for key, value in options.items():
                        if key.endswith('_mult'): # is a multiplier
                            key = key[:-5]
                            assert key in optimizer_cfg, \
                                "{} not in optimizer_cfg".format(key)
                            value = optimizer_cfg[key] * value
                        param_group[key] = value
                        # log from a single process only
                        if not dist.is_initialized() or dist.get_rank() == 0:
                            print_log('paramwise_options -- {}: {}={}'.format(
                                name, key, value))
            # otherwise use the global settings
            params.append(param_group)
        optimizer_cls = getattr(optimizers, optimizer_cfg.pop('type'))
        return optimizer_cls(params, **optimizer_cfg)
def _dist_train(model, dataset, cfg, logger=None, timestamp=None, meta=None):
    """Distributed training loop: builds loaders, wraps the model in DDP,
    registers hooks and runs the mmcv Runner.
    """
    # prepare data loaders
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    data_loaders = [
        build_dataloader(
            ds,
            cfg.data.imgs_per_gpu,
            cfg.data.workers_per_gpu,
            dist=True,
            shuffle=True,
            replace=getattr(cfg.data, 'sampling_replace', False),
            seed=cfg.seed,
            drop_last=getattr(cfg.data, 'drop_last', False),
            prefetch=cfg.prefetch,
            img_norm_cfg=cfg.img_norm_cfg) for ds in dataset
    ]
    optimizer = build_optimizer(model, cfg.optimizer)
    # optional apex mixed precision (opt level O1); requires apex installed
    if 'use_fp16' in cfg and cfg.use_fp16:
        model, optimizer = apex.amp.initialize(model.cuda(), optimizer, opt_level="O1")
        print_log('**** Initializing mixed precision done. ****')
    # put model on gpus
    model = MMDistributedDataParallel(
        model if next(model.parameters()).is_cuda else model.cuda(),
        device_ids=[torch.cuda.current_device()],
        broadcast_buffers=False)
    # build runner
    runner = Runner(
        model,
        batch_processor,
        optimizer,
        cfg.work_dir,
        logger=logger,
        meta=meta)
    # an ugly walkaround to make the .log and .log.json filenames the same
    runner.timestamp = timestamp
    optimizer_config = DistOptimizerHook(**cfg.optimizer_config)
    # register hooks
    runner.register_training_hooks(cfg.lr_config, optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)
    runner.register_hook(DistSamplerSeedHook())
    # register custom hooks
    for hook in cfg.get('custom_hooks', ()):
        if hook.type == 'DeepClusterHook':
            # DeepClusterHook needs the loaders to recompute pseudo labels
            common_params = dict(dist_mode=True, data_loaders=data_loaders)
        else:
            common_params = dict(dist_mode=True)
        runner.register_hook(build_hook(hook, common_params))
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
def _non_dist_train(model,
                    dataset,
                    cfg,
                    validate=False,
                    logger=None,
                    timestamp=None,
                    meta=None):
    """Single-machine (DataParallel) training loop.
    NOTE(review): the `validate` argument is accepted but never used here.
    """
    # prepare data loaders
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    data_loaders = [
        build_dataloader(
            ds,
            cfg.data.imgs_per_gpu,
            cfg.data.workers_per_gpu,
            cfg.gpus,
            dist=False,
            shuffle=True,
            replace=getattr(cfg.data, 'sampling_replace', False),
            seed=cfg.seed,
            drop_last=getattr(cfg.data, 'drop_last', False),
            prefetch=cfg.prefetch,
            img_norm_cfg=cfg.img_norm_cfg) for ds in dataset
    ]
    # mixed precision is only wired up for the distributed path
    if 'use_fp16' in cfg and cfg.use_fp16 == True:
        raise NotImplementedError('apex do not support non_dist_train!')
    # put model on gpus
    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)
    runner = Runner(
        model,
        batch_processor,
        optimizer,
        cfg.work_dir,
        logger=logger,
        meta=meta)
    # an ugly walkaround to make the .log and .log.json filenames the same
    runner.timestamp = timestamp
    optimizer_config = cfg.optimizer_config
    runner.register_training_hooks(cfg.lr_config, optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)
    # register custom hooks
    for hook in cfg.get('custom_hooks', ()):
        if hook.type == 'DeepClusterHook':
            # DeepClusterHook needs the loaders to recompute pseudo labels
            common_params = dict(dist_mode=False, data_loaders=data_loaders)
        else:
            common_params = dict(dist_mode=False)
        runner.register_hook(build_hook(hook, common_params))
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
| 10,378 | 34.913495 | 87 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/third_party/clustering.py | # This file is modified from
# https://github.com/facebookresearch/deepcluster/blob/master/clustering.py
import time
import numpy as np
import faiss
import torch
from scipy.sparse import csr_matrix
__all__ = ['Kmeans', 'PIC']
def preprocess_features(npdata, pca):
    """Preprocess an array of features.
    Args:
        npdata (np.array N * ndim): float32 features to preprocess
        pca (int): dim of output; -1 skips the PCA-whitening step
    Returns:
        np.array of dim N * pca: data PCA-reduced, whitened and L2-normalized
    """
    _, ndim = npdata.shape
    assert npdata.dtype == np.float32
    if np.isnan(npdata).any():
        raise Exception("nan occurs")
    if pca != -1:
        # PCA with whitening via faiss (eigen_power=-0.5)
        print("\nPCA from dim {} to dim {}".format(ndim, pca))
        mat = faiss.PCAMatrix(ndim, pca, eigen_power=-0.5)
        mat.train(npdata)
        assert mat.is_trained
        npdata = mat.apply_py(npdata)
    nan_mask = np.isnan(npdata)
    if nan_mask.any():
        # tolerate a tiny fraction of NaNs produced by the projection
        percent = nan_mask.sum().item() / float(np.size(npdata)) * 100
        if percent > 0.1:
            raise Exception(
                "More than 0.1% nan occurs after pca, percent: {}%".format(
                    percent))
        npdata[nan_mask] = 0.
    # L2-normalize every row (epsilon guards all-zero rows)
    norms = np.linalg.norm(npdata, axis=1)
    return npdata / (norms[:, np.newaxis] + 1e-10)
def make_graph(xb, nnn):
    """Builds a graph of nearest neighbors.
    Args:
        xb (np.array): data
        nnn (int): number of nearest neighbors
    Returns:
        list: for each data the list of ids to its nnn nearest neighbors
        list: for each data the list of distances to its nnn NN
    """
    _, dim = xb.shape
    # we need only a StandardGpuResources per GPU
    res = faiss.StandardGpuResources()
    # brute-force L2 index on the last visible GPU
    flat_config = faiss.GpuIndexFlatConfig()
    flat_config.device = int(torch.cuda.device_count()) - 1
    index = faiss.GpuIndexFlatL2(res, dim, flat_config)
    index.add(xb)
    # nnn + 1 because every point retrieves itself as its first neighbor
    dists, ids = index.search(xb, nnn + 1)
    return ids, dists
def run_kmeans(x, nmb_clusters, verbose=False, seed=None):
    """Runs kmeans on 1 GPU.
    Args:
        x: data (N x d, float32)
        nmb_clusters (int): number of clusters
        verbose (bool): print the k-means loss evolution
        seed (int, optional): faiss clustering seed; random when None
    Returns:
        tuple: (list of assigned cluster id per sample, final loss, centroids)
    """
    n_data, d = x.shape
    # faiss implementation of k-means
    clus = faiss.Clustering(d, nmb_clusters)
    # Change faiss seed at each k-means so that the randomly picked
    # initialization centroids do not correspond to the same feature ids
    # from an epoch to another.
    if seed is not None:
        clus.seed = seed
    else:
        clus.seed = np.random.randint(1234)
    clus.niter = 20
    clus.max_points_per_centroid = 10000000
    res = faiss.StandardGpuResources()
    flat_config = faiss.GpuIndexFlatConfig()
    flat_config.useFloat16 = False
    flat_config.device = 0  # k-means always runs on GPU 0
    index = faiss.GpuIndexFlatL2(res, d, flat_config)
    # perform the training
    clus.train(x, index)
    _, I = index.search(x, 1)  # nearest centroid per sample
    losses = faiss.vector_to_array(clus.obj)
    centroids = faiss.vector_to_array(clus.centroids).reshape(nmb_clusters,d)
    if verbose:
        print('k-means loss evolution: {0}'.format(losses))
    return [int(n[0]) for n in I], losses[-1], centroids
def arrange_clustering(images_lists):
    """Recover the per-image pseudo label array from per-cluster image lists."""
    pairs = [(img_idx, cluster)
             for cluster, members in enumerate(images_lists)
             for img_idx in members]
    # order by original image index so position i holds image i's label
    pairs.sort(key=lambda pair: pair[0])
    return np.asarray([label for _, label in pairs])
class Kmeans:
    """Thin wrapper around faiss GPU k-means with PCA preprocessing.
    Args:
        k (int): number of clusters.
        pca_dim (int): PCA output dimension passed to preprocess_features.
    """

    def __init__(self, k, pca_dim=256):
        self.k = k
        self.pca_dim = pca_dim

    def cluster(self, feat, verbose=False, seed=None):
        """Performs k-means clustering.
        Args:
            feat (np.array N * dim): data to cluster
        Returns:
            float: final k-means loss.
        """
        start = time.time()
        # PCA-reducing, whitening and L2-normalization
        xb = preprocess_features(feat, self.pca_dim)
        # cluster the data
        assignments, loss, centroids = run_kmeans(xb, self.k, verbose, seed=seed)
        self.centroids = centroids
        self.labels = np.array(assignments)
        if verbose:
            print('k-means time: {0:.0f} s'.format(time.time() - start))
        return loss
def make_adjacencyW(I, D, sigma):
    """Create adjacency matrix with a Gaussian kernel.
    Args:
        I (numpy array): for each vertex the ids to its nnn linked vertices
                         + first column of identity.
        D (numpy array): for each data the l2 distances to its nnn linked
                         vertices + first column of zeros.
        sigma (float): Bandwith of the Gaussian kernel.
    Returns:
        csr_matrix: affinity matrix of the graph.
    """
    V, cols = I.shape
    k = cols - 1
    # drop the first (identity) column before flattening
    col_indices = I[:, 1:].reshape(1, -1)
    row_ptr = k * np.arange(V + 1)
    # Gaussian kernel on the distances, identity column removed
    weights = np.exp(-D / sigma**2)
    data = weights[:, 1:].reshape(1, -1)
    return csr_matrix((data[0], col_indices[0], row_ptr), shape=(V, V))
def run_pic(I, D, sigma, alpha):
    """Run the Power Iteration Clustering algorithm.
    Args:
        I (np.array): neighbor ids per sample (first column = self).
        D (np.array): matching L2 distances (first column = zeros).
        sigma (float): bandwidth of the Gaussian kernel.
        alpha (float): restart/regularization weight of the power iteration.
    Returns:
        list of int: cluster assignment for every sample.
    """
    a = make_adjacencyW(I, D, sigma)
    graph = a + a.transpose()
    nim = graph.shape[0]
    W = graph
    # power iterations, starting from the uniform distribution
    v = (np.ones(nim) / nim).astype('float32')
    for _ in range(200):
        vnext = W.transpose().dot(v)
        vnext = alpha * vnext + (1 - alpha) / nim
        # L1 normalize
        vnext /= vnext.sum()
        v = vnext
    # cleanup vs the original: removed dead locals (cgraph, duplicate t0, dt)
    # and replaced the in-loop `if i == 199` extraction with a single call
    # after the fixed 200 iterations -- same behavior, clearer flow
    clust = find_maxima_cluster(W, v)
    return [int(c) for c in clust]
def find_maxima_cluster(W, v):
    """Assign each node to the cluster of its reachable local maximum.
    Each node points to the neighbor with the largest positive weighted gain
    W[i, j] * (v[j] - v[i]); self-pointing nodes (local maxima) seed clusters
    and every other node inherits the cluster of the maximum it walks to.
    Args:
        W (csr_matrix): symmetric affinity matrix.
        v (np.array): stationary vector from the power iteration.
    Returns:
        np.array: cluster id per node.
    """
    n, m = W.shape
    assert (n == m)
    assign = np.zeros(n)
    # for each node
    pointers = list(range(n))
    for i in range(n):
        best_vi = 0
        l0 = W.indptr[i]
        l1 = W.indptr[i + 1]
        for l in range(l0, l1):
            j = W.indices[l]
            vi = W.data[l] * (v[j] - v[i])
            if vi > best_vi:
                # BUG FIX: best_vi was never updated, so the pointer ended on
                # the LAST positive-gain neighbor instead of the best one
                best_vi = vi
                pointers[i] = j
    n_clus = 0
    cluster_ids = -1 * np.ones(n)
    for i in range(n):
        if pointers[i] == i:
            cluster_ids[i] = n_clus
            n_clus = n_clus + 1
    for i in range(n):
        # go from pointers to pointers starting from i until reached a local optim
        current_node = i
        while pointers[current_node] != current_node:
            current_node = pointers[current_node]
        assign[i] = cluster_ids[current_node]
        assert (assign[i] >= 0)
    return assign
class PIC():
    """Class to perform Power Iteration Clustering on a graph of nearest neighbors.
    Args:
        args: for consistency with k-means init
        sigma (float): bandwith of the Gaussian kernel (default 0.2)
        nnn (int): number of nearest neighbors (default 5)
        alpha (float): parameter in PIC (default 0.001)
        distribute_singletons (bool): If True, reassign each singleton to
                                      the cluster of its closest non
                                      singleton nearest neighbors (up to nnn
                                      nearest neighbors).
        pca_dim (int): PCA output dimension for preprocessing.
    Attributes:
        images_lists (list of list): for each cluster, the list of image indexes
                                     belonging to this cluster
        labels (np.array): cluster id per image, filled by cluster().
    """

    def __init__(self,
                 args=None,
                 sigma=0.2,
                 nnn=5,
                 alpha=0.001,
                 distribute_singletons=True,
                 pca_dim=256):
        self.sigma = sigma
        self.alpha = alpha
        self.nnn = nnn
        self.distribute_singletons = distribute_singletons
        self.pca_dim = pca_dim

    def cluster(self, data, verbose=False):
        """Cluster `data`; fills self.images_lists and self.labels, returns 0."""
        end = time.time()
        # preprocess the data
        xb = preprocess_features(data, self.pca_dim)
        # construct nnn graph
        I, D = make_graph(xb, self.nnn)
        # run PIC
        clust = run_pic(I, D, self.sigma, self.alpha)
        images_lists = {}
        for h in set(clust):
            images_lists[h] = []
        # BUG FIX: the loop variable used to be named `data`, shadowing the
        # input array and making `data.shape[0]` below fail on an int
        for idx, c in enumerate(clust):
            images_lists[c].append(idx)
        # allocate singletons to clusters of their closest NN not singleton
        if self.distribute_singletons:
            clust_NN = {}
            for i in images_lists:
                # if singleton
                if len(images_lists[i]) == 1:
                    s = images_lists[i][0]
                    # for NN
                    for n in I[s, 1:]:
                        # if NN is not a singleton
                        if not len(images_lists[clust[n]]) == 1:
                            clust_NN[s] = n
                            break
            for s in clust_NN:
                del images_lists[clust[s]]
                clust[s] = clust[clust_NN[s]]
                images_lists[clust[s]].append(s)
        self.images_lists = []
        # BUG FIX: `np.int` was removed from NumPy (>=1.24); builtin int is
        # the documented replacement
        self.labels = -1 * np.ones((data.shape[0], ), dtype=int)
        for i, c in enumerate(images_lists):
            self.images_lists.append(images_lists[c])
            self.labels[images_lists[c]] = i
        assert np.all(self.labels != -1)
        if verbose:
            print('pic time: {0:.0f} s'.format(time.time() - end))
        return 0
| 9,576 | 29.5 | 84 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/classification.py | import numpy as np
import torch.nn as nn
from openselfsup.utils import print_log
from . import builder
from .registry import MODELS
from .utils import Sobel
@MODELS.register_module
class Classification(nn.Module):
    """Simple image classification.
    Args:
        backbone (dict): Config dict for module of backbone ConvNet.
        with_sobel (bool): Whether to apply a Sobel filter on images. Default: False.
        head (dict): Config dict for module of loss functions. Default: None.
        pretrained (str, optional): Path to pre-trained weights. Default: None.
    """

    def __init__(self,
                 backbone,
                 with_sobel=False,
                 head=None,
                 pretrained=None):
        super(Classification, self).__init__()
        self.with_sobel = with_sobel
        if with_sobel:
            self.sobel_layer = Sobel()
        self.backbone = builder.build_backbone(backbone)
        if head is not None:
            self.head = builder.build_head(head)
        self.init_weights(pretrained=pretrained)

    def init_weights(self, pretrained=None):
        """Initialize the weights of model.
        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Default: None.
        """
        if pretrained is not None:
            print_log('load model from: {}'.format(pretrained), logger='root')
        self.backbone.init_weights(pretrained=pretrained)
        self.head.init_weights()

    def forward_backbone(self, img):
        """Forward backbone.
        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
        Returns:
            tuple[Tensor]: backbone outputs.
        """
        if self.with_sobel:
            img = self.sobel_layer(img)
        x = self.backbone(img)
        return x

    def forward_train(self, img, gt_label, **kwargs):
        """Forward computation during training.
        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            gt_label (Tensor): Ground-truth labels.
            kwargs: Any keyword arguments to be used to forward.
        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        x = self.forward_backbone(img)
        outs = self.head(x)
        loss_inputs = (outs, gt_label)
        losses = self.head.loss(*loss_inputs)
        return losses

    def forward_test(self, img, **kwargs):
        """Forward computation during testing.
        Returns:
            dict[str, Tensor]: head outputs moved to CPU, keyed 'head{i}'.
        """
        x = self.forward_backbone(img)  # tuple
        outs = self.head(x)
        keys = ['head{}'.format(i) for i in range(len(outs))]
        out_tensors = [out.cpu() for out in outs]  # NxC
        return dict(zip(keys, out_tensors))

    def aug_test(self, imgs):
        """Test-time augmentation is not implemented for this model."""
        # BUG FIX: `raise NotImplemented` raised a TypeError because
        # NotImplemented is not an exception class; the unreachable
        # averaging code that followed the raise has been removed.
        raise NotImplementedError

    def forward(self, img, mode='train', **kwargs):
        """Dispatch to the train / test / extract forward paths."""
        if mode == 'train':
            return self.forward_train(img, **kwargs)
        elif mode == 'test':
            return self.forward_test(img, **kwargs)
        elif mode == 'extract':
            return self.forward_backbone(img)
        else:
            raise Exception("No such mode: {}".format(mode))
| 3,370 | 31.413462 | 85 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/simclr.py | import torch
import torch.nn as nn
from openselfsup.utils import print_log
from . import builder
from .registry import MODELS
from .utils import GatherLayer
@MODELS.register_module
class SimCLR(nn.Module):
    """SimCLR.

    Implementation of "A Simple Framework for Contrastive Learning
    of Visual Representations (https://arxiv.org/abs/2002.05709)".

    Args:
        backbone (dict): Config dict for module of backbone ConvNet.
        neck (dict): Config dict for module of deep features to compact feature vectors.
            Default: None.
        head (dict): Config dict for module of loss functions. Default: None.
        pretrained (str, optional): Path to pre-trained weights. Default: None.
    """

    def __init__(self, backbone, neck=None, head=None, pretrained=None):
        super(SimCLR, self).__init__()
        self.backbone = builder.build_backbone(backbone)
        self.neck = builder.build_neck(neck)
        self.head = builder.build_head(head)
        self.init_weights(pretrained=pretrained)

    @staticmethod
    def _create_buffer(N):
        # Build index/mask buffers for a (2N)x(2N) similarity matrix of a
        # batch of N samples with two augmented views each.
        # mask: 1 everywhere except the diagonal (drops self-similarity).
        mask = 1 - torch.eye(N * 2, dtype=torch.uint8).cuda()
        # pos_ind: for each row i, the column of its augmented counterpart
        # AFTER the diagonal has been removed (each row shrinks to 2N-1
        # columns, shifting indices), i.e. column 2*floor(i/2).
        pos_ind = (torch.arange(N * 2).cuda(),
                   2 * torch.arange(N, dtype=torch.long).unsqueeze(1).repeat(
                       1, 2).view(-1, 1).squeeze().cuda())
        # neg_mask: all entries of the diagonal-removed matrix except the
        # positive pair -> the 2N-2 negatives per row.
        neg_mask = torch.ones((N * 2, N * 2 - 1), dtype=torch.uint8).cuda()
        neg_mask[pos_ind] = 0
        return mask, pos_ind, neg_mask

    def init_weights(self, pretrained=None):
        """Initialize the weights of model.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Default: None.
        """
        if pretrained is not None:
            print_log('load model from: {}'.format(pretrained), logger='root')
        self.backbone.init_weights(pretrained=pretrained)
        self.neck.init_weights(init_linear='kaiming')

    def forward_backbone(self, img):
        """Forward backbone.

        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.

        Returns:
            tuple[Tensor]: backbone outputs.
        """
        x = self.backbone(img)
        return x

    def forward_train(self, img, **kwargs):
        """Forward computation during training.

        Args:
            img (Tensor): Input of two concatenated images of shape (N, 2, C, H, W).
                Typically these should be mean centered and std scaled.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        assert img.dim() == 5, \
            "Input must have 5 dims, got: {}".format(img.dim())
        # Interleave the two views: (N, 2, C, H, W) -> (2n, C, H, W), so
        # consecutive rows 2i, 2i+1 are the two views of sample i.
        img = img.reshape(
            img.size(0) * 2, img.size(2), img.size(3), img.size(4))
        x = self.forward_backbone(img)  # 2n
        z = self.neck(x)[0]  # (2n)xd
        # L2-normalize projections (epsilon guards against zero vectors).
        z = z / (torch.norm(z, p=2, dim=1, keepdim=True) + 1e-10)
        # Gather projections from all GPUs; GatherLayer keeps gradients
        # flowing (unlike torch.distributed.all_gather).
        z = torch.cat(GatherLayer.apply(z), dim=0)  # (2N)xd
        assert z.size(0) % 2 == 0
        N = z.size(0) // 2
        # Cosine-similarity matrix between all 2N projections.
        s = torch.matmul(z, z.permute(1, 0))  # (2N)x(2N)
        mask, pos_ind, neg_mask = self._create_buffer(N)
        # remove diagonal, (2N)x(2N-1)
        s = torch.masked_select(s, mask == 1).reshape(s.size(0), -1)
        positive = s[pos_ind].unsqueeze(1)  # (2N)x1
        # select negative, (2N)x(2N-2)
        negative = torch.masked_select(s, neg_mask == 1).reshape(s.size(0), -1)
        losses = self.head(positive, negative)
        return losses

    def forward_test(self, img, **kwargs):
        # Testing is not defined for this pretext task.
        pass

    def forward(self, img, mode='train', **kwargs):
        """Dispatch to the train / test / feature-extraction forward path."""
        if mode == 'train':
            return self.forward_train(img, **kwargs)
        elif mode == 'test':
            return self.forward_test(img, **kwargs)
        elif mode == 'extract':
            return self.forward_backbone(img)
        else:
            raise Exception("No such mode: {}".format(mode))
| 3,961 | 35.018182 | 88 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/rotation_pred.py | import torch
import torch.nn as nn
from openselfsup.utils import print_log
from . import builder
from .registry import MODELS
@MODELS.register_module
class RotationPred(nn.Module):
    """Rotation prediction pretext task.

    Implementation of "Unsupervised Representation Learning
    by Predicting Image Rotations (https://arxiv.org/abs/1803.07728)".

    Args:
        backbone (dict): Config dict for module of backbone ConvNet.
        head (dict): Config dict for module of loss functions. Default: None.
        pretrained (str, optional): Path to pre-trained weights. Default: None.
    """

    def __init__(self, backbone, head=None, pretrained=None):
        super(RotationPred, self).__init__()
        self.backbone = builder.build_backbone(backbone)
        if head is not None:
            self.head = builder.build_head(head)
        self.init_weights(pretrained=pretrained)

    def init_weights(self, pretrained=None):
        """Initialize model weights, optionally loading `pretrained` into
        the backbone.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Default: None.
        """
        if pretrained is not None:
            print_log('load model from: {}'.format(pretrained), logger='root')
        self.backbone.init_weights(pretrained=pretrained)
        self.head.init_weights(init_linear='kaiming')

    def forward_backbone(self, img):
        """Extract backbone features.

        Args:
            img (Tensor): Input images of shape (N, C, H, W), typically
                mean centered and std scaled.

        Returns:
            tuple[Tensor]: backbone outputs.
        """
        return self.backbone(img)

    def forward_train(self, img, rot_label, **kwargs):
        """Compute rotation-classification losses for one training batch.

        Args:
            img (Tensor): Input images of shape (N, C, H, W).
            rot_label (Tensor): Rotation class labels.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        feats = self.forward_backbone(img)
        preds = self.head(feats)
        return self.head.loss(preds, rot_label)

    def forward_test(self, img, **kwargs):
        """Return per-head predictions as CPU tensors keyed 'head0', ..."""
        preds = self.head(self.forward_backbone(img))
        return {'head{}'.format(i): p.cpu() for i, p in enumerate(preds)}

    def forward(self, img, rot_label=None, mode='train', **kwargs):
        """Dispatch to the requested forward path, flattening the per-image
        rotation group (4 rotated copies) into the batch dimension first."""
        if mode != "extract" and img.dim() == 5:  # Nx4xCxHxW
            assert rot_label.dim() == 2  # Nx4
            n, r, c, h, w = img.size()
            img = img.view(n * r, c, h, w)  # (4N)xCxHxW
            rot_label = torch.flatten(rot_label)  # (4N)
        if mode == 'train':
            return self.forward_train(img, rot_label, **kwargs)
        if mode == 'test':
            return self.forward_test(img, **kwargs)
        if mode == 'extract':
            return self.forward_backbone(img)
        raise Exception("No such mode: {}".format(mode))
| 3,294 | 33.684211 | 79 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/deepcluster.py | import numpy as np
import torch
import torch.nn as nn
from openselfsup.utils import print_log
from . import builder
from .registry import MODELS
from .utils import Sobel
@MODELS.register_module
class DeepCluster(nn.Module):
    """DeepCluster.

    Implementation of "Deep Clustering for Unsupervised Learning
    of Visual Features (https://arxiv.org/abs/1807.05520)".

    Args:
        backbone (dict): Config dict for module of backbone ConvNet.
        with_sobel (bool): Whether to apply a Sobel filter on images. Default: False.
        neck (dict): Config dict for module of deep features to compact feature vectors.
            Default: None.
        head (dict): Config dict for module of loss functions. Default: None.
        pretrained (str, optional): Path to pre-trained weights. Default: None.
    """

    def __init__(self,
                 backbone,
                 with_sobel=False,
                 neck=None,
                 head=None,
                 pretrained=None):
        super(DeepCluster, self).__init__()
        self.with_sobel = with_sobel
        if with_sobel:
            self.sobel_layer = Sobel()
        self.backbone = builder.build_backbone(backbone)
        self.neck = builder.build_neck(neck)
        if head is not None:
            self.head = builder.build_head(head)
        self.init_weights(pretrained=pretrained)
        # reweight: per-class loss weights start uniform and are refreshed
        # by set_reweight() from the latest cluster assignments.
        self.num_classes = head.num_classes
        self.loss_weight = torch.ones((self.num_classes, ),
                                      dtype=torch.float32).cuda()
        self.loss_weight /= self.loss_weight.sum()

    def init_weights(self, pretrained=None):
        """Initialize the weights of model.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Default: None.
        """
        if pretrained is not None:
            print_log('load model from: {}'.format(pretrained), logger='root')
        self.backbone.init_weights(pretrained=pretrained)
        self.neck.init_weights(init_linear='kaiming')
        self.head.init_weights(init_linear='normal')

    def forward_backbone(self, img):
        """Forward backbone.

        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.

        Returns:
            tuple[Tensor]: backbone outputs.
        """
        if self.with_sobel:
            img = self.sobel_layer(img)
        x = self.backbone(img)
        return x

    def forward_train(self, img, pseudo_label, **kwargs):
        """Forward computation during training.

        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            pseudo_label (Tensor): Label assignments (cluster ids).
            kwargs: Any keyword arguments to be used to forward.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        x = self.forward_backbone(img)
        assert len(x) == 1
        feature = self.neck(x)
        outs = self.head(feature)
        loss_inputs = (outs, pseudo_label)
        losses = self.head.loss(*loss_inputs)
        return losses

    def forward_test(self, img, **kwargs):
        # NOTE(review): unlike forward_train, backbone features are fed to
        # the head directly, without passing through the neck — confirm
        # this asymmetry is intended.
        x = self.forward_backbone(img)  # tuple
        outs = self.head(x)
        keys = ['head{}'.format(i) for i in range(len(outs))]
        out_tensors = [out.cpu() for out in outs]  # NxC
        return dict(zip(keys, out_tensors))

    def forward(self, img, mode='train', **kwargs):
        """Dispatch to the train / test / feature-extraction forward path."""
        if mode == 'train':
            return self.forward_train(img, **kwargs)
        elif mode == 'test':
            return self.forward_test(img, **kwargs)
        elif mode == 'extract':
            return self.forward_backbone(img)
        else:
            raise Exception("No such mode: {}".format(mode))

    def set_reweight(self, labels, reweight_pow=0.5):
        """Loss re-weighting.

        Re-weighting the loss according to the number of samples in each class.

        Args:
            labels (numpy.ndarray): Label assignments.
            reweight_pow (float): The power of re-weighting. Default: 0.5.
        """
        hist = np.bincount(
            labels, minlength=self.num_classes).astype(np.float32)
        # Inverse-frequency weights, damped by reweight_pow and normalized
        # to sum to 1 (epsilon avoids division by zero for empty clusters).
        inv_hist = (1. / (hist + 1e-10))**reweight_pow
        weight = inv_hist / inv_hist.sum()
        self.loss_weight.copy_(torch.from_numpy(weight))
        # Rebuild the criterion so the new class weights take effect.
        self.head.criterion = nn.CrossEntropyLoss(weight=self.loss_weight)
| 4,526 | 33.557252 | 88 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/relative_loc.py | import torch
import torch.nn as nn
from openselfsup.utils import print_log
from . import builder
from .registry import MODELS
@MODELS.register_module
class RelativeLoc(nn.Module):
    """Relative patch location pretext task.

    Implementation of "Unsupervised Visual Representation Learning
    by Context Prediction (https://arxiv.org/abs/1505.05192)".

    Args:
        backbone (dict): Config dict for module of backbone ConvNet.
        neck (dict): Config dict for module of deep features to compact
            feature vectors. Default: None.
        head (dict): Config dict for module of loss functions. Default: None.
        pretrained (str, optional): Path to pre-trained weights. Default: None.
    """

    def __init__(self, backbone, neck=None, head=None, pretrained=None):
        super(RelativeLoc, self).__init__()
        self.backbone = builder.build_backbone(backbone)
        if neck is not None:
            self.neck = builder.build_neck(neck)
        if head is not None:
            self.head = builder.build_head(head)
        self.init_weights(pretrained=pretrained)

    def init_weights(self, pretrained=None):
        """Initialize model weights, optionally loading `pretrained` into
        the backbone.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Default: None.
        """
        if pretrained is not None:
            print_log('load model from: {}'.format(pretrained), logger='root')
        self.backbone.init_weights(pretrained=pretrained)
        self.neck.init_weights(init_linear='normal')
        self.head.init_weights(init_linear='normal', std=0.005)

    def forward_backbone(self, img):
        """Extract backbone features.

        Args:
            img (Tensor): Input images of shape (N, C, H, W), typically
                mean centered and std scaled.

        Returns:
            tuple[Tensor]: backbone outputs.
        """
        return self.backbone(img)

    def _pair_features(self, img):
        """Split the channel-stacked patch pair, encode each half, and fuse
        the concatenated features through the neck."""
        img1, img2 = torch.chunk(img, 2, dim=1)
        feats1 = self.forward_backbone(img1)  # tuple
        feats2 = self.forward_backbone(img2)  # tuple
        fused = (torch.cat((feats1[0], feats2[0]), dim=1),)
        return self.neck(fused)

    def forward_train(self, img, patch_label, **kwargs):
        """Compute relative-location losses for one training batch.

        Args:
            img (Tensor): Channel-stacked patch pairs of shape (N, 2C, H, W).
            patch_label (Tensor): Relative patch location labels.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        preds = self.head(self._pair_features(img))
        return self.head.loss(preds, patch_label)

    def forward_test(self, img, **kwargs):
        """Return per-head predictions as CPU tensors keyed 'head0', ..."""
        preds = self.head(self._pair_features(img))
        return {'head{}'.format(i): p.cpu() for i, p in enumerate(preds)}

    def forward(self, img, patch_label=None, mode='train', **kwargs):
        """Dispatch to the requested forward path, flattening the 8 patch
        pairs per image into the batch dimension first."""
        if mode != "extract" and img.dim() == 5:  # Nx8x(2C)xHxW
            assert patch_label.dim() == 2  # Nx8
            n, p, c, h, w = img.size()
            img = img.view(n * p, c, h, w)  # (8N)x(2C)xHxW
            patch_label = torch.flatten(patch_label)  # (8N)
        if mode == 'train':
            return self.forward_train(img, patch_label, **kwargs)
        if mode == 'test':
            return self.forward_test(img, **kwargs)
        if mode == 'extract':
            return self.forward_backbone(img)
        raise Exception("No such mode: {}".format(mode))
| 3,948 | 35.564815 | 88 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/moco.py | import torch
import torch.nn as nn
from openselfsup.utils import print_log
from . import builder
from .registry import MODELS
@MODELS.register_module
class MOCO(nn.Module):
    """MOCO.

    Implementation of "Momentum Contrast for Unsupervised Visual
    Representation Learning (https://arxiv.org/abs/1911.05722)".
    Part of the code is borrowed from:
    "https://github.com/facebookresearch/moco/blob/master/moco/builder.py".

    Args:
        backbone (dict): Config dict for module of backbone ConvNet.
        neck (dict): Config dict for module of deep features to compact feature vectors.
            Default: None.
        head (dict): Config dict for module of loss functions. Default: None.
        pretrained (str, optional): Path to pre-trained weights. Default: None.
        queue_len (int): Number of negative keys maintained in the queue.
            Default: 65536.
        feat_dim (int): Dimension of compact feature vectors. Default: 128.
        momentum (float): Momentum coefficient for the momentum-updated encoder.
            Default: 0.999.
    """

    def __init__(self,
                 backbone,
                 neck=None,
                 head=None,
                 pretrained=None,
                 queue_len=65536,
                 feat_dim=128,
                 momentum=0.999,
                 **kwargs):
        super(MOCO, self).__init__()
        # Query encoder (gradient-trained) and key encoder (momentum copy).
        self.encoder_q = nn.Sequential(
            builder.build_backbone(backbone), builder.build_neck(neck))
        self.encoder_k = nn.Sequential(
            builder.build_backbone(backbone), builder.build_neck(neck))
        self.backbone = self.encoder_q[0]
        # The key encoder is only ever updated via momentum, never by SGD.
        for param in self.encoder_k.parameters():
            param.requires_grad = False
        self.head = builder.build_head(head)
        self.init_weights(pretrained=pretrained)
        self.queue_len = queue_len
        self.momentum = momentum
        # create the queue of negative keys (feat_dim x queue_len, column-
        # normalized) plus a pointer buffer for ring-buffer updates.
        self.register_buffer("queue", torch.randn(feat_dim, queue_len))
        self.queue = nn.functional.normalize(self.queue, dim=0)
        self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))

    def init_weights(self, pretrained=None):
        """Initialize the weights of model.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Default: None.
        """
        if pretrained is not None:
            print_log('load model from: {}'.format(pretrained), logger='root')
        self.encoder_q[0].init_weights(pretrained=pretrained)
        self.encoder_q[1].init_weights(init_linear='kaiming')
        # Key encoder starts as an exact copy of the query encoder.
        for param_q, param_k in zip(self.encoder_q.parameters(),
                                    self.encoder_k.parameters()):
            param_k.data.copy_(param_q.data)

    @torch.no_grad()
    def _momentum_update_key_encoder(self):
        """Momentum update of the key encoder."""
        for param_q, param_k in zip(self.encoder_q.parameters(),
                                    self.encoder_k.parameters()):
            param_k.data = param_k.data * self.momentum + \
                           param_q.data * (1. - self.momentum)

    @torch.no_grad()
    def _dequeue_and_enqueue(self, keys):
        """Update queue."""
        # gather keys before updating queue
        keys = concat_all_gather(keys)
        batch_size = keys.shape[0]
        ptr = int(self.queue_ptr)
        assert self.queue_len % batch_size == 0  # for simplicity
        # replace the keys at ptr (dequeue and enqueue)
        self.queue[:, ptr:ptr + batch_size] = keys.transpose(0, 1)
        ptr = (ptr + batch_size) % self.queue_len  # move pointer
        self.queue_ptr[0] = ptr

    @torch.no_grad()
    def _batch_shuffle_ddp(self, x):
        """Batch shuffle, for making use of BatchNorm.

        *** Only support DistributedDataParallel (DDP) model. ***
        """
        # gather from all gpus
        batch_size_this = x.shape[0]
        x_gather = concat_all_gather(x)
        batch_size_all = x_gather.shape[0]
        num_gpus = batch_size_all // batch_size_this
        # random shuffle index
        idx_shuffle = torch.randperm(batch_size_all).cuda()
        # broadcast to all gpus (so every rank applies the same permutation)
        torch.distributed.broadcast(idx_shuffle, src=0)
        # index for restoring
        idx_unshuffle = torch.argsort(idx_shuffle)
        # shuffled index for this gpu
        gpu_idx = torch.distributed.get_rank()
        idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]
        return x_gather[idx_this], idx_unshuffle

    @torch.no_grad()
    def _batch_unshuffle_ddp(self, x, idx_unshuffle):
        """Undo batch shuffle.

        *** Only support DistributedDataParallel (DDP) model. ***
        """
        # gather from all gpus
        batch_size_this = x.shape[0]
        x_gather = concat_all_gather(x)
        batch_size_all = x_gather.shape[0]
        num_gpus = batch_size_all // batch_size_this
        # restored index for this gpu
        gpu_idx = torch.distributed.get_rank()
        idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]
        return x_gather[idx_this]

    def forward_train(self, img, **kwargs):
        """Forward computation during training.

        Args:
            img (Tensor): Input of two concatenated images of shape (N, 2, C, H, W).
                Typically these should be mean centered and std scaled.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        assert img.dim() == 5, \
            "Input must have 5 dims, got: {}".format(img.dim())
        im_q = img[:, 0, ...].contiguous()
        im_k = img[:, 1, ...].contiguous()
        # compute query features
        q = self.encoder_q(im_q)[0]  # queries: NxC
        q = nn.functional.normalize(q, dim=1)
        # compute key features
        with torch.no_grad():  # no gradient to keys
            self._momentum_update_key_encoder()  # update the key encoder
            # shuffle for making use of BN (avoids intra-batch leakage)
            im_k, idx_unshuffle = self._batch_shuffle_ddp(im_k)
            k = self.encoder_k(im_k)[0]  # keys: NxC
            k = nn.functional.normalize(k, dim=1)
            # undo shuffle
            k = self._batch_unshuffle_ddp(k, idx_unshuffle)
        # compute logits
        # Einstein sum is more intuitive
        # positive logits: Nx1
        l_pos = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1)
        # negative logits: NxK (against the queued keys)
        l_neg = torch.einsum('nc,ck->nk', [q, self.queue.clone().detach()])
        losses = self.head(l_pos, l_neg)
        # Enqueue the fresh keys after computing the loss.
        self._dequeue_and_enqueue(k)
        return losses

    def forward_test(self, img, **kwargs):
        # Testing is not defined for this pretext task.
        pass

    def forward(self, img, mode='train', **kwargs):
        """Dispatch to the train / test / feature-extraction forward path."""
        if mode == 'train':
            return self.forward_train(img, **kwargs)
        elif mode == 'test':
            return self.forward_test(img, **kwargs)
        elif mode == 'extract':
            return self.backbone(img)
        else:
            raise Exception("No such mode: {}".format(mode))
# utils
@torch.no_grad()
def concat_all_gather(tensor):
    """Performs all_gather operation on the provided tensors.

    Returns the concatenation of `tensor` from every rank along dim 0.

    *** Warning ***: torch.distributed.all_gather has no gradient, so the
    result is detached from the autograd graph.
    """
    # Pre-allocate one receive buffer per rank.
    tensors_gather = [
        torch.ones_like(tensor)
        for _ in range(torch.distributed.get_world_size())
    ]
    torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
    output = torch.cat(tensors_gather, dim=0)
    return output
| 7,486 | 33.187215 | 88 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/moco_v3.py | import torch
import torch.nn as nn
from openselfsup.utils import print_log
from . import builder
from .registry import MODELS
import torch.nn.functional as F
@MODELS.register_module
class MOCOv3(nn.Module):
    """MoCo v3-style contrastive model with a momentum key encoder.

    The query encoder is backbone + projector + predictor; the key encoder
    is a momentum-updated copy of backbone + projector. Training computes a
    symmetrized InfoNCE loss between the two augmented views, using the
    other samples in the (gathered) batch as in-batch negatives.

    Args:
        backbone (dict): Config dict for the backbone ConvNet.
        projector (dict): Config dict for the projection neck. Default: None.
        predictor (dict): Config dict for the prediction neck, applied on
            the query side only. Default: None.
        base_momentum (float): Momentum coefficient for updating the key
            encoder. Default: 0.999.
        temperature (float): Softmax temperature of the contrastive loss.
            Default: 1.
    """

    def __init__(self,
                 backbone,
                 projector=None,
                 predictor=None,
                 base_momentum=0.999,
                 temperature=1,
                 **kwargs):
        super(MOCOv3, self).__init__()
        self.encoder_q = nn.Sequential(
            builder.build_backbone(backbone),
            builder.build_neck(projector),
            builder.build_neck(predictor))
        self.encoder_k = nn.Sequential(
            builder.build_backbone(backbone),
            builder.build_neck(projector))
        self.backbone = self.encoder_q[0]
        self.base_momentum = base_momentum
        self.momentum = base_momentum
        self.criterion = nn.CrossEntropyLoss()
        self.temperature = temperature
        # Key encoder starts as a copy of the query encoder and is never
        # updated by gradients, only via momentum_update().
        for param_q, param_k in zip(self.encoder_q.parameters(),
                                    self.encoder_k.parameters()):
            param_k.data.copy_(param_q.data)
            param_k.requires_grad = False

    @torch.no_grad()
    def _momentum_update_key_encoder(self):
        """Momentum update of the key encoder."""
        for param_q, param_k in zip(self.encoder_q.parameters(),
                                    self.encoder_k.parameters()):
            param_k.data = param_k.data * self.momentum + \
                           param_q.data * (1. - self.momentum)

    @torch.no_grad()
    def momentum_update(self):
        """Public hook for the training loop to refresh the key encoder."""
        self._momentum_update_key_encoder()

    def forward_train(self, img, **kwargs):
        """Compute the symmetrized contrastive loss over two views.

        Args:
            img (Tensor): Two concatenated views, shape (N, 2, C, H, W).

        Returns:
            dict[str, Tensor]: Dictionary with the single key 'loss'.
        """
        assert img.dim() == 5, \
            "Input must have 5 dims, got: {}".format(img.dim())
        x1, x2 = img[:, 0, ...].contiguous(), img[:, 1, ...].contiguous()
        # Query features (with predictor), L2-normalized.
        q1, q2 = self.encoder_q(x1)[0], self.encoder_q(x2)[0]  # queries: NxC
        q1, q2 = F.normalize(q1), F.normalize(q2)
        # Key features from the momentum encoder; no gradients.
        with torch.no_grad():
            k1, k2 = self.encoder_k(x1)[0], self.encoder_k(x2)[0]
            k1, k2 = F.normalize(k1), F.normalize(k2)
        # Row i's positive is key i; all other keys are negatives.
        labels = torch.arange(len(k1)).cuda()
        logits1, logits2 = q1 @ k2.T, q2 @ k1.T
        loss = 2 * self.temperature \
               * (self.criterion(logits1/self.temperature, labels)
                  + self.criterion(logits2/self.temperature, labels))
        return dict(loss=loss)

    def forward_test(self, img, **kwargs):
        """Return globally pooled last-stage backbone features on CPU."""
        backbone_feats = self.backbone(img)
        # Generalized from the original hard-coded `avg_pool2d(x, 7)`:
        # adaptive pooling gives identical results for 7x7 feature maps
        # (e.g. 224x224 inputs through a ResNet) and also supports other
        # input resolutions instead of crashing or silently truncating.
        last_layer_feat = F.adaptive_avg_pool2d(backbone_feats[-1], 1)
        last_layer_feat = last_layer_feat.view(last_layer_feat.size(0), -1)
        return dict(backbone=last_layer_feat.cpu())

    def forward(self, img, mode='train', **kwargs):
        """Dispatch to the train / test / feature-extraction forward path
        ('test' and 'extract' share the same pooled-feature path)."""
        if mode == 'train':
            return self.forward_train(img, **kwargs)
        elif mode == 'test':
            return self.forward_test(img, **kwargs)
        elif mode == 'extract':
            return self.forward_test(img, **kwargs)
        else:
            raise Exception("No such mode: {}".format(mode))
| 3,177 | 33.923077 | 77 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/registry.py | from openselfsup.utils import Registry
# One registry per component category; each maps a string type name to the
# class that implements it, so components can be built from config dicts
# (e.g. dict(type='ResNet', ...)).
MODELS = Registry('model')
BACKBONES = Registry('backbone')
NECKS = Registry('neck')
HEADS = Registry('head')
MEMORIES = Registry('memory')
LOSSES = Registry('loss')
| 206 | 22 | 38 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/extractor.py | import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import cv2
import math
from sklearn.cluster import KMeans
from openselfsup.utils import print_log
from . import builder
from .registry import MODELS
from .utils import Sobel
### For visualization.
@MODELS.register_module
class Extractor(nn.Module):
    """Feature extractor / visualization helper (backbone only, no head).

    Besides plain feature extraction, it can render per-layer CAM-style
    attention maps and k-means cluster maps of local features for
    qualitative inspection.
    """

    # Running counter of processed images across forward calls. NOTE: it is
    # a class attribute, but `self.img_id += 1` below rebinds it as an
    # instance attribute on first increment.
    img_id = 0

    def __init__(self,
                 backbone,
                 pretrained=None):
        super(Extractor, self).__init__()
        self.backbone = builder.build_backbone(backbone)
        self.avgpool = nn.AdaptiveAvgPool2d((1,1))
        self.init_weights(pretrained=pretrained)

    def init_weights(self, pretrained=None):
        # Optionally load pretrained weights into the backbone.
        if pretrained is not None:
            print_log('load model from: {}'.format(pretrained), logger='root')
        self.backbone.init_weights(pretrained=pretrained)

    def forward(self, img, mode='extract', **kwargs):
        # Dispatch on mode string; visualization modes write images / return
        # debug data rather than features.
        if mode == 'extract':
            return self.forward_extract(img)
        elif mode == 'forward_backbone':
            return self.forward_backbone(img)
        elif mode == 'multi_layer_map':
            return self.forward_multi_layer_visulization(img)
        elif mode == 'multi_layer_map_tmp':
            return self.forward_multi_layer_visulization_tmp(img)
        else:
            raise Exception("No such mode: {}".format(mode))

    def forward_extract(self, img, **kwargs):
        # Global-average-pooled, L2-normalized features of the last backbone
        # stage, returned on CPU.
        backbone_feats = self.backbone(img)
        backbone_feats = self.avgpool(backbone_feats[-1])
        backbone_feats = backbone_feats.view(backbone_feats.size(0), -1)
        backbone_feats = F.normalize(backbone_feats, p=2, dim=1)
        return dict(backbone=backbone_feats.cpu())

    def forward_backbone(self, img, **kwargs):
        # Raw (multi-stage) backbone feature maps.
        backbone_feats = self.backbone(img)
        return backbone_feats

    def forward_multi_layer_visulization(self, img, **kwargs):
        # Render, for each backbone stage, attention maps and cluster maps
        # side by side, and write one composite JPEG per input image.
        # NOTE(review): `out_dir` is a placeholder and must be set to a real
        # path before use; `exit()` below terminates the whole process once
        # the selected ids are exhausted.
        backbone_feats = self.backbone(img)
        batch_img = img.cpu()
        out_dir = 'path to saving dir'
        size_upsample = (448, 448)
        # Undo ImageNet-style normalization to recover displayable pixels.
        mean = np.array([0.485, 0.456, 0.406])*255
        std = np.array([0.229, 0.224, 0.225])*255
        for i in range(3):
            batch_img[:,i,...] = batch_img[:,i,...] * std[i] + mean[i]
        batch_img = np.uint8(batch_img).transpose(0,2,3,1)
        selected_ids = np.arange(200)
        for b in range(len(batch_img)):
            multi_resuts = []
            for x in backbone_feats:
                if self.img_id not in selected_ids: # only save these two
                    continue
                global_x = self.avgpool(x).view(x.size(0), -1)
                global_x = F.normalize(global_x, p=2, dim=1)
                x = F.normalize(x, p=2, dim=1) # B, C, H, W
                patch = x[b].permute(1,2,0) # H, W, C
                patch = patch.view(-1, patch.size(-1))
                # Feature maps are assumed square: H*W local vectors.
                patch_size = int(math.sqrt(patch.size(0)))
                attention_map = self.get_cam(global_feat=global_x[b],
                                            local_feats=patch,
                                            img=batch_img[b],
                                            patch_size=patch_size,
                                            size_upsample=size_upsample)
                cluster_map = self.get_clustered_local_feats(
                    local_feats=patch, img=batch_img[b], patch_size=patch_size, size_upsample=size_upsample
                )
                multi_resuts.append(cv2.hconcat([*attention_map, *cluster_map]))
            final_img = cv2.vconcat(multi_resuts)
            if self.img_id in selected_ids:
                cv2.imwrite(f'{out_dir}/{self.img_id}.jpg', final_img)
                print(f'\n saving to {out_dir}/{self.img_id}.jpg')
            self.img_id+=1
            if self.img_id > selected_ids[-1]:
                exit()

    @staticmethod
    def get_cam(global_feat, local_feats, img, patch_size, size_upsample=(448,448)):
        # Class-activation-style maps: similarity of each local feature to
        # the global (pooled) feature, rendered both with absolute values
        # and min-max normalized, overlaid on the source image.
        absolute_cam = (local_feats @ global_feat.unsqueeze(1)).view(-1)
        normalized_cam = absolute_cam.clone()
        absolute_cam *= 255
        absolute_cam = np.uint8(absolute_cam.view(patch_size,-1).cpu().numpy())
        absolute_cam = cv2.resize(absolute_cam, size_upsample)
        absolute_cam = cv2.applyColorMap(absolute_cam, cv2.COLORMAP_JET)
        normalized_cam = (normalized_cam - normalized_cam.min())/(normalized_cam.max() - normalized_cam.min())
        normalized_cam *= 255
        normalized_cam = np.uint8(normalized_cam.view(patch_size,-1).cpu().numpy())
        normalized_cam = cv2.resize(normalized_cam, size_upsample)
        normalized_cam = cv2.applyColorMap(normalized_cam, cv2.COLORMAP_JET)
        _img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        white = [255,255,255]
        # Blend heatmaps with the image and add white borders for tiling.
        absolute_cam = absolute_cam * 0.4 + _img * 0.6
        normalized_cam = normalized_cam * 0.4 + _img * 0.6
        src_img = cv2.copyMakeBorder(np.uint8(_img),10, 10, 10, 10,cv2.BORDER_CONSTANT,value=white)
        absolute_cam = cv2.copyMakeBorder(np.uint8(absolute_cam),10, 10, 10, 10,cv2.BORDER_CONSTANT,value=white)
        normalized_cam = cv2.copyMakeBorder(np.uint8(normalized_cam),10, 10, 10, 10,cv2.BORDER_CONSTANT,value=white)
        attention_map = [src_img, absolute_cam, normalized_cam]
        return attention_map

    @staticmethod
    def get_clustered_local_feats(local_feats, img, patch_size, size_upsample=(448,448), num_clusters=[2,4,6]):
        # K-means cluster maps of local features (one per k), color-coded
        # and blended with the source image.
        white = [255,255,255]
        _img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        local_feats = np.ascontiguousarray(local_feats.cpu().numpy())
        cluster_results = []
        for k in num_clusters:
            kmeans = KMeans(n_clusters=k, random_state=0).fit(local_feats)
            assignments = np.reshape(kmeans.labels_, (patch_size, patch_size))
            cluster_map = cv2.applyColorMap(np.uint8(assignments/k * 255), cv2.COLORMAP_RAINBOW)
            cluster_map = cv2.resize(cluster_map, size_upsample, interpolation=cv2.INTER_NEAREST)
            cluster_result = cluster_map * 0.4 + _img * 0.6
            cluster_result = cv2.copyMakeBorder(np.uint8(cluster_result),10, 10, 10, 10,cv2.BORDER_CONSTANT,value=white )
            cluster_results.append(cluster_result)
        return cluster_results

    def forward_multi_layer_visulization_tmp(self, img, **kwargs):
        # Ad-hoc variant: for a few hand-picked image ids, return the raw
        # per-layer k-means assignments plus the de-normalized image instead
        # of writing files. NOTE(review): `novel_dict` is unused here and
        # `exit()` kills the process — debug-only code.
        backbone_feats = self.backbone(img)
        batch_img = img.cpu()
        novel_dict = {
            0 : 'colon_aca',
            1 : 'colon_benign',
            2 : 'lung_aca',
            3 : 'lung_benign',
            4 : 'lung_scc',
        }
        size_upsample = (448, 448)
        mean = np.array([0.485, 0.456, 0.406])*255
        std = np.array([0.229, 0.224, 0.225])*255
        for i in range(3):
            batch_img[:,i,...] = batch_img[:,i,...] * std[i] + mean[i]
        batch_img = np.uint8(batch_img).transpose(0,2,3,1)
        # selected_ids = [62, 74, 113, 119, 154]
        selected_ids = [154]
        for b in range(len(batch_img)):
            multi_resuts = []
            print(self.img_id)
            for x in backbone_feats:
                if self.img_id not in selected_ids: # only save these two
                    continue
                global_x = self.avgpool(x).view(x.size(0), -1)
                global_x = F.normalize(global_x, p=2, dim=1)
                x = F.normalize(x, p=2, dim=1) # B, C, H, W
                patch = x[b].permute(1,2,0) # H, W, C
                patch = patch.view(-1, patch.size(-1))
                patch_size = int(math.sqrt(patch.size(0)))
                assignments_list = self.get_clustered_local_feats_tmp(
                    local_feats=patch, img=batch_img[b], patch_size=patch_size, size_upsample=size_upsample
                )
                multi_resuts.append(assignments_list)
            if self.img_id in selected_ids:
                print(self.img_id, 'now in it')
                self.img_id+=1
                return multi_resuts, batch_img[b]
            self.img_id+=1
            if self.img_id > selected_ids[-1]:
                exit()
            # break

    @staticmethod
    def get_clustered_local_feats_tmp(local_feats, img, patch_size, size_upsample=(448,448), num_clusters=[2,4,6]):
        # Same clustering as get_clustered_local_feats but returns the raw
        # label arrays instead of rendered images. NOTE(review): `white`,
        # `_img`, `patch_size` and `size_upsample` are unused here.
        white = [255,255,255]
        _img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        local_feats = np.ascontiguousarray(local_feats.cpu().numpy())
        assignment_list = []
        for k in num_clusters:
            kmeans = KMeans(n_clusters=k, random_state=0).fit(local_feats)
            assignment_list.append(kmeans.labels_)
        return assignment_list | 8,632 | 41.318627 | 121 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/npid.py | import torch
import torch.nn as nn
from openselfsup.utils import print_log
from . import builder
from .registry import MODELS
@MODELS.register_module
class NPID(nn.Module):
    """NPID.

    Implementation of "Unsupervised Feature Learning via Non-parametric
    Instance Discrimination (https://arxiv.org/abs/1805.01978)".

    Args:
        backbone (dict): Config dict for module of backbone ConvNet.
        neck (dict): Config dict for module of deep features to compact feature vectors.
            Default: None.
        head (dict): Config dict for module of loss functions. Default: None.
        memory_bank (dict): Config dict for module of memory banks. Default: None.
        neg_num (int): Number of negative samples for each image. Default: 65536.
        ensure_neg (bool): If False, there is a small probability
            that negative samples contain positive ones. Default: False.
        pretrained (str, optional): Path to pre-trained weights. Default: None.
    """

    def __init__(self,
                 backbone,
                 neck=None,
                 head=None,
                 memory_bank=None,
                 neg_num=65536,
                 ensure_neg=False,
                 pretrained=None):
        super(NPID, self).__init__()
        self.backbone = builder.build_backbone(backbone)
        self.neck = builder.build_neck(neck)
        self.head = builder.build_head(head)
        self.memory_bank = builder.build_memory(memory_bank)
        self.init_weights(pretrained=pretrained)
        self.neg_num = neg_num
        self.ensure_neg = ensure_neg

    def init_weights(self, pretrained=None):
        """Initialize the weights of model.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Default: None.
        """
        if pretrained is not None:
            print_log('load model from: {}'.format(pretrained), logger='root')
        self.backbone.init_weights(pretrained=pretrained)
        self.neck.init_weights(init_linear='kaiming')

    def forward_backbone(self, img):
        """Forward backbone.

        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.

        Returns:
            tuple[Tensor]: backbone outputs.
        """
        x = self.backbone(img)
        return x

    def forward_train(self, img, idx, **kwargs):
        """Forward computation during training.

        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            idx (Tensor): Index corresponding to each image.
            kwargs: Any keyword arguments to be used to forward.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        x = self.forward_backbone(img)
        idx = idx.cuda()
        feature = self.neck(x)[0]
        feature = nn.functional.normalize(feature)  # BxC
        bs, feat_dim = feature.shape[:2]
        # Sample neg_num negative indices per image from the memory bank.
        neg_idx = self.memory_bank.multinomial.draw(bs * self.neg_num)
        if self.ensure_neg:
            # Re-draw any sampled index that collides with the positive
            # index until none remain.
            neg_idx = neg_idx.view(bs, -1)
            while True:
                wrong = (neg_idx == idx.view(-1, 1))
                if wrong.sum().item() > 0:
                    neg_idx[wrong] = self.memory_bank.multinomial.draw(
                        wrong.sum().item())
                else:
                    break
            neg_idx = neg_idx.flatten()
        # Look up positive (the image's own stored feature) and negative
        # features from the memory bank.
        pos_feat = torch.index_select(self.memory_bank.feature_bank, 0,
                                      idx)  # BXC
        neg_feat = torch.index_select(self.memory_bank.feature_bank, 0,
                                      neg_idx).view(bs, self.neg_num,
                                                    feat_dim)  # BxKxC
        pos_logits = torch.einsum('nc,nc->n',
                                  [pos_feat, feature]).unsqueeze(-1)
        neg_logits = torch.bmm(neg_feat, feature.unsqueeze(2)).squeeze(2)
        losses = self.head(pos_logits, neg_logits)
        # update memory bank with the freshly computed features
        with torch.no_grad():
            self.memory_bank.update(idx, feature.detach())
        return losses

    def forward_test(self, img, **kwargs):
        # Testing is not defined for this pretext task.
        pass

    def forward(self, img, mode='train', **kwargs):
        """Dispatch to the train / test / feature-extraction forward path."""
        if mode == 'train':
            return self.forward_train(img, **kwargs)
        elif mode == 'test':
            return self.forward_test(img, **kwargs)
        elif mode == 'extract':
            return self.forward_backbone(img)
        else:
            raise Exception("No such mode: {}".format(mode))
| 4,658 | 34.564885 | 88 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/byol.py | import torch
import torch.nn as nn
from openselfsup.utils import print_log
from . import builder
from .registry import MODELS
@MODELS.register_module
class BYOL(nn.Module):
    """BYOL.
    Implementation of "Bootstrap Your Own Latent: A New Approach to
    Self-Supervised Learning (https://arxiv.org/abs/2006.07733)".
    An online network is trained to predict the output of a slowly-moving
    (EMA) target network on a different augmentation of the same image.
    Args:
        backbone (dict): Config dict for module of backbone ConvNet.
        neck (dict): Config dict for module of deep features to compact feature vectors.
            Default: None.
        head (dict): Config dict for module of loss functions. Default: None.
        pretrained (str, optional): Path to pre-trained weights. Default: None.
        base_momentum (float): The base momentum coefficient for the target network.
            Default: 0.996.
    """
    def __init__(self,
                 backbone,
                 neck=None,
                 head=None,
                 pretrained=None,
                 base_momentum=0.996,
                 **kwargs):
        super(BYOL, self).__init__()
        # online and target nets share the same architecture: backbone + neck
        self.online_net = nn.Sequential(
            builder.build_backbone(backbone), builder.build_neck(neck))
        self.target_net = nn.Sequential(
            builder.build_backbone(backbone), builder.build_neck(neck))
        # expose the online backbone for 'extract' mode below
        self.backbone = self.online_net[0]
        # the target net is only updated via EMA, never by gradients
        for param in self.target_net.parameters():
            param.requires_grad = False
        self.head = builder.build_head(head)
        self.init_weights(pretrained=pretrained)
        self.base_momentum = base_momentum
        # current momentum; typically annealed externally from base_momentum
        self.momentum = base_momentum
    def init_weights(self, pretrained=None):
        """Initialize the weights of model.
        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Default: None.
        """
        if pretrained is not None:
            print_log('load model from: {}'.format(pretrained), logger='root')
        self.online_net[0].init_weights(pretrained=pretrained) # backbone
        self.online_net[1].init_weights(init_linear='kaiming') # projection
        # start the target net as an exact copy of the online net
        for param_ol, param_tgt in zip(self.online_net.parameters(),
                                       self.target_net.parameters()):
            param_tgt.data.copy_(param_ol.data)
        # init the predictor in the head
        self.head.init_weights()
    @torch.no_grad()
    def _momentum_update(self):
        """Momentum update of the target network."""
        # EMA: target = m * target + (1 - m) * online
        for param_ol, param_tgt in zip(self.online_net.parameters(),
                                       self.target_net.parameters()):
            param_tgt.data = param_tgt.data * self.momentum + \
                param_ol.data * (1. - self.momentum)
    @torch.no_grad()
    def momentum_update(self):
        # public hook called by the training loop after each optimizer step
        self._momentum_update()
    def forward_train(self, img, **kwargs):
        """Forward computation during training.
        Args:
            img (Tensor): Input of two concatenated images of shape (N, 2, C, H, W).
                Typically these should be mean centered and std scaled.
        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        assert img.dim() == 5, \
            "Input must have 5 dims, got: {}".format(img.dim())
        img_v1 = img[:, 0, ...].contiguous()
        img_v2 = img[:, 1, ...].contiguous()
        # compute query features
        proj_online_v1 = self.online_net(img_v1)[0]
        proj_online_v2 = self.online_net(img_v2)[0]
        with torch.no_grad():
            proj_target_v1 = self.target_net(img_v1)[0].clone().detach()
            proj_target_v2 = self.target_net(img_v2)[0].clone().detach()
        # symmetric loss: each view serves once as online input, once as target
        loss = self.head(proj_online_v1, proj_target_v2)['loss'] + \
            self.head(proj_online_v2, proj_target_v1)['loss']
        return dict(loss=loss)
    def forward_test(self, img, **kwargs):
        # testing is not implemented for BYOL
        pass
    def forward(self, img, mode='train', **kwargs):
        if mode == 'train':
            return self.forward_train(img, **kwargs)
        elif mode == 'test':
            return self.forward_test(img, **kwargs)
        elif mode == 'extract':
            return self.backbone(img)
        else:
            raise Exception("No such mode: {}".format(mode))
| 4,225 | 36.070175 | 88 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/__init__.py | from .backbones import * # noqa: F401,F403
from .builder import (build_backbone, build_model, build_head, build_loss)
from .byol import BYOL
from .heads import *
from .classification import Classification
from .deepcluster import DeepCluster
from .odc import ODC
from .necks import *
from .npid import NPID
from .memories import *
from .moco import MOCO
from .registry import (BACKBONES, MODELS, NECKS, MEMORIES, HEADS, LOSSES)
from .rotation_pred import RotationPred
from .relative_loc import RelativeLoc
from .simclr import SimCLR
from .moco_v3 import MOCOv3
from .extractor import Extractor | 594 | 34 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/builder.py | from torch import nn
from openselfsup.utils import build_from_cfg
from .registry import (BACKBONES, MODELS, NECKS, HEADS, MEMORIES, LOSSES)
def build(cfg, registry, default_args=None):
    """Build a module, or a sequential stack of modules.

    Args:
        cfg (dict, list[dict]): The config of modules, it is either a dict
            or a list of configs. A list is built into an ``nn.Sequential``
            in the given order.
        registry (:obj:`Registry`): A registry the module belongs to.
        default_args (dict, optional): Default arguments to build the module.
            Default: None.

    Returns:
        nn.Module: A built nn module.
    """
    if not isinstance(cfg, list):
        return build_from_cfg(cfg, registry, default_args)
    built = (build_from_cfg(c, registry, default_args) for c in cfg)
    return nn.Sequential(*built)
def build_backbone(cfg):
    """Build backbone.

    Args:
        cfg (dict): Config dict of the backbone.

    Returns:
        nn.Module: The constructed backbone module.
    """
    return build(cfg, BACKBONES)
def build_neck(cfg):
    """Build neck.

    Args:
        cfg (dict): Config dict of the neck.

    Returns:
        nn.Module: The constructed neck module.
    """
    return build(cfg, NECKS)
def build_memory(cfg):
    """Build memory.

    Args:
        cfg (dict): Config dict of the memory bank.

    Returns:
        nn.Module: The constructed memory module.
    """
    return build(cfg, MEMORIES)
def build_head(cfg):
    """Build head.

    Args:
        cfg (dict): Config dict of the head.

    Returns:
        nn.Module: The constructed head module.
    """
    return build(cfg, HEADS)
def build_loss(cfg):
    """Build loss.

    Args:
        cfg (dict): Config dict of the loss.

    Returns:
        nn.Module: The constructed loss module.
    """
    return build(cfg, LOSSES)
def build_model(cfg):
    """Build model.

    Args:
        cfg (dict): Config dict of the full model (algorithm).

    Returns:
        nn.Module: The constructed model.
    """
    return build(cfg, MODELS)
| 1,274 | 21.368421 | 77 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/odc.py | import numpy as np
import torch
import torch.nn as nn
from openselfsup.utils import print_log
from . import builder
from .registry import MODELS
from .utils import Sobel
@MODELS.register_module
class ODC(nn.Module):
    """ODC.
    Official implementation of
    "Online Deep Clustering for Unsupervised Representation Learning
    (https://arxiv.org/abs/2006.10645)".
    Pseudo-labels are maintained online in a memory bank and used as
    classification targets; the label assignments evolve during training.
    Args:
        backbone (dict): Config dict for module of backbone ConvNet.
        with_sobel (bool): Whether to apply a Sobel filter on images. Default: False.
        neck (dict): Config dict for module of deep features to compact feature vectors.
            Default: None.
        head (dict): Config dict for module of loss functions. Default: None.
        memory_bank (dict): Module of memory banks. Default: None.
        pretrained (str, optional): Path to pre-trained weights. Default: None.
    """
    def __init__(self,
                 backbone,
                 with_sobel=False,
                 neck=None,
                 head=None,
                 memory_bank=None,
                 pretrained=None):
        super(ODC, self).__init__()
        self.with_sobel = with_sobel
        if with_sobel:
            self.sobel_layer = Sobel()
        self.backbone = builder.build_backbone(backbone)
        self.neck = builder.build_neck(neck)
        if head is not None:
            self.head = builder.build_head(head)
        if memory_bank is not None:
            self.memory_bank = builder.build_memory(memory_bank)
        self.init_weights(pretrained=pretrained)
        # set reweight tensors
        # per-class loss weights, start uniform; updated by set_reweight()
        self.num_classes = head.num_classes
        self.loss_weight = torch.ones((self.num_classes, ),
                                      dtype=torch.float32).cuda()
        self.loss_weight /= self.loss_weight.sum()
    def init_weights(self, pretrained=None):
        """Initialize the weights of model.
        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Default: None.
        """
        if pretrained is not None:
            print_log('load model from: {}'.format(pretrained), logger='root')
        self.backbone.init_weights(pretrained=pretrained)
        self.neck.init_weights(init_linear='kaiming')
        self.head.init_weights(init_linear='normal')
    def forward_backbone(self, img):
        """Forward backbone.
        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
        Returns:
            tuple[Tensor]: backbone outputs.
        """
        if self.with_sobel:
            img = self.sobel_layer(img)
        x = self.backbone(img)
        return x
    def forward_train(self, img, idx, **kwargs):
        """Forward computation during training.
        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            idx (Tensor): Index corresponding to each image.
            kwargs: Any keyword arguments to be used to forward.
        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        # forward & backward
        x = self.forward_backbone(img)
        feature = self.neck(x)
        outs = self.head(feature)
        # classification targets are the current pseudo-labels in the bank
        if self.memory_bank.label_bank.is_cuda:
            loss_inputs = (outs, self.memory_bank.label_bank[idx])
        else:
            loss_inputs = (outs, self.memory_bank.label_bank[idx.cpu()].cuda())
        losses = self.head.loss(*loss_inputs)
        # update samples memory
        # change_ratio: fraction of this batch whose pseudo-label changed
        change_ratio = self.memory_bank.update_samples_memory(
            idx, feature[0].detach())
        losses['change_ratio'] = change_ratio
        return losses
    def forward_test(self, img, **kwargs):
        x = self.forward_backbone(img)  # tuple
        outs = self.head(x)
        keys = ['head{}'.format(i) for i in range(len(outs))]
        out_tensors = [out.cpu() for out in outs]  # NxC
        return dict(zip(keys, out_tensors))
    def forward(self, img, mode='train', **kwargs):
        if mode == 'train':
            return self.forward_train(img, **kwargs)
        elif mode == 'test':
            return self.forward_test(img, **kwargs)
        elif mode == 'extract':
            return self.forward_backbone(img)
        else:
            raise Exception("No such mode: {}".format(mode))
    def set_reweight(self, labels=None, reweight_pow=0.5):
        """Loss re-weighting.
        Re-weighting the loss according to the number of samples in each class.
        Args:
            labels (numpy.ndarray): Label assignments. Default: None.
            reweight_pow (float): The power of re-weighting. Default: 0.5.
        """
        if labels is None:
            # default to the current pseudo-labels stored in the memory bank
            if self.memory_bank.label_bank.is_cuda:
                labels = self.memory_bank.label_bank.cpu().numpy()
            else:
                labels = self.memory_bank.label_bank.numpy()
        hist = np.bincount(
            labels, minlength=self.num_classes).astype(np.float32)
        # inverse-frequency weights, smoothed by reweight_pow and normalized
        inv_hist = (1. / (hist + 1e-5))**reweight_pow
        weight = inv_hist / inv_hist.sum()
        self.loss_weight.copy_(torch.from_numpy(weight))
        # rebuild the criterion so the new weights take effect immediately
        self.head.criterion = nn.CrossEntropyLoss(weight=self.loss_weight)
| 5,322 | 34.966216 | 88 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/necks.py | import torch
import torch.nn as nn
from packaging import version
from mmcv.cnn import kaiming_init, normal_init
from .registry import NECKS
from .utils import build_norm_layer
def _init_weights(module, init_linear='normal', std=0.01, bias=0.):
    """Initialize Linear and normalization layers inside ``module``.

    Linear layers get either a normal or kaiming init; norm layers are
    reset to weight=1, bias=0 (when affine parameters exist).
    """
    assert init_linear in ['normal', 'kaiming'], \
        "Undefined init_linear: {}".format(init_linear)
    norm_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.GroupNorm,
                  nn.SyncBatchNorm)
    for layer in module.modules():
        if isinstance(layer, nn.Linear):
            if init_linear == 'normal':
                normal_init(layer, std=std, bias=bias)
            else:
                kaiming_init(layer, mode='fan_in', nonlinearity='relu')
        elif isinstance(layer, norm_types):
            if layer.weight is not None:
                nn.init.constant_(layer.weight, 1)
            if layer.bias is not None:
                nn.init.constant_(layer.bias, 0)
@NECKS.register_module
class LinearNeck(nn.Module):
    """Linear neck: a single fully-connected projection.

    Optionally applies global average pooling before the projection.
    """

    def __init__(self, in_channels, out_channels, with_avg_pool=True):
        super().__init__()
        self.with_avg_pool = with_avg_pool
        if with_avg_pool:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(in_channels, out_channels)

    def init_weights(self, init_linear='normal'):
        _init_weights(self, init_linear)

    def forward(self, x):
        assert len(x) == 1
        feat = x[0]
        if self.with_avg_pool:
            feat = self.avgpool(feat)
        flat = feat.view(feat.size(0), -1)
        return [self.fc(flat)]
@NECKS.register_module
class RelativeLocNeck(nn.Module):
    """Relative patch location neck: fc-bn-relu-dropout.

    Consumes the concatenated features of two patches (hence the
    ``in_channels * 2`` input width) before the location classifier.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 sync_bn=False,
                 with_avg_pool=True):
        super().__init__()
        self.with_avg_pool = with_avg_pool
        # SyncBN in pytorch<1.4.0 only accepts 4D input; remember whether the
        # 2D features must be expanded to NxCx1x1 before the BN call.
        self.expand_for_syncbn = \
            version.parse(torch.__version__) < version.parse("1.4.0")
        if with_avg_pool:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(in_channels * 2, out_channels)
        if sync_bn:
            self.bn = build_norm_layer(
                dict(type='SyncBN', momentum=0.003), out_channels)[1]
        else:
            self.bn = nn.BatchNorm1d(out_channels, momentum=0.003)
        self.relu = nn.ReLU(inplace=True)
        self.drop = nn.Dropout()
        self.sync_bn = sync_bn

    def init_weights(self, init_linear='normal'):
        _init_weights(self, init_linear, std=0.005, bias=0.1)

    def _forward_syncbn(self, module, x):
        assert x.dim() == 2
        if not self.expand_for_syncbn:
            return module(x)
        # Fake a 4D tensor for old SyncBN, then squeeze back to 2D.
        return module(x[:, :, None, None]).flatten(1)

    def forward(self, x):
        assert len(x) == 1
        feat = x[0]
        if self.with_avg_pool:
            feat = self.avgpool(feat)
        feat = self.fc(feat.view(feat.size(0), -1))
        if self.sync_bn:
            feat = self._forward_syncbn(self.bn, feat)
        else:
            feat = self.bn(feat)
        return [self.drop(self.relu(feat))]
@NECKS.register_module
class NonLinearNeckV0(nn.Module):
    """The non-linear neck used in ODC: fc-bn-relu-dropout-fc-relu."""

    def __init__(self,
                 in_channels,
                 hid_channels,
                 out_channels,
                 sync_bn=False,
                 with_avg_pool=True):
        super().__init__()
        self.with_avg_pool = with_avg_pool
        # SyncBN in pytorch<1.4.0 only accepts 4D input; remember whether the
        # 2D features must be expanded to NxCx1x1 before the BN call.
        self.expand_for_syncbn = \
            version.parse(torch.__version__) < version.parse("1.4.0")
        if with_avg_pool:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc0 = nn.Linear(in_channels, hid_channels)
        if sync_bn:
            self.bn0 = build_norm_layer(
                dict(type='SyncBN', momentum=0.001, affine=False),
                hid_channels)[1]
        else:
            self.bn0 = nn.BatchNorm1d(
                hid_channels, momentum=0.001, affine=False)
        self.fc1 = nn.Linear(hid_channels, out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.drop = nn.Dropout()
        self.sync_bn = sync_bn

    def init_weights(self, init_linear='normal'):
        _init_weights(self, init_linear)

    def _forward_syncbn(self, module, x):
        assert x.dim() == 2
        if not self.expand_for_syncbn:
            return module(x)
        # Fake a 4D tensor for old SyncBN, then squeeze back to 2D.
        return module(x[:, :, None, None]).flatten(1)

    def forward(self, x):
        assert len(x) == 1
        feat = x[0]
        if self.with_avg_pool:
            feat = self.avgpool(feat)
        feat = self.fc0(feat.view(feat.size(0), -1))
        feat = self._forward_syncbn(self.bn0, feat) if self.sync_bn \
            else self.bn0(feat)
        feat = self.drop(self.relu(feat))
        return [self.relu(self.fc1(feat))]
@NECKS.register_module
class NonLinearNeckV1(nn.Module):
    """MoCo v2 projection neck: fc-relu-fc."""

    def __init__(self,
                 in_channels,
                 hid_channels,
                 out_channels,
                 with_avg_pool=True):
        super().__init__()
        self.with_avg_pool = with_avg_pool
        if with_avg_pool:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.mlp = nn.Sequential(
            nn.Linear(in_channels, hid_channels),
            nn.ReLU(inplace=True),
            nn.Linear(hid_channels, out_channels),
        )

    def init_weights(self, init_linear='normal'):
        _init_weights(self, init_linear)

    def forward(self, x):
        assert len(x) == 1
        feat = x[0]
        if self.with_avg_pool:
            feat = self.avgpool(feat)
        flat = feat.view(feat.size(0), -1)
        return [self.mlp(flat)]
@NECKS.register_module
class NonLinearNeckV2(nn.Module):
    """BYOL projection neck: fc-bn-relu-fc."""

    def __init__(self,
                 in_channels,
                 hid_channels,
                 out_channels,
                 with_avg_pool=True):
        super().__init__()
        self.with_avg_pool = with_avg_pool
        if with_avg_pool:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.mlp = nn.Sequential(
            nn.Linear(in_channels, hid_channels),
            nn.BatchNorm1d(hid_channels),
            nn.ReLU(inplace=True),
            nn.Linear(hid_channels, out_channels),
        )

    def init_weights(self, init_linear='normal'):
        _init_weights(self, init_linear)

    def forward(self, x):
        assert len(x) == 1, "Got: {}".format(len(x))
        feat = x[0]
        if self.with_avg_pool:
            feat = self.avgpool(feat)
        flat = feat.view(feat.size(0), -1)
        return [self.mlp(flat)]
@NECKS.register_module
class NonLinearNeckSimCLR(nn.Module):
    """SimCLR non-linear neck.
    Structure: fc(no_bias)-bn(has_bias)-[relu-fc(no_bias)-bn(no_bias)].
    The substructures in [] can be repeated. For the SimCLR default setting,
    the repeat time is 1.
    However, PyTorch does not support to specify (weight=True, bias=False).
    It only support \"affine\" including the weight and bias. Hence, the
    second BatchNorm has bias in this implementation. This is different from
    the official implementation of SimCLR.
    Since SyncBatchNorm in pytorch<1.4.0 does not support 2D input, the input is
    expanded to 4D with shape: (N,C,1,1). Not sure if this workaround
    has no bugs. See the pull request here:
    https://github.com/pytorch/pytorch/pull/29626.
    Args:
        num_layers (int): Number of fc layers, it is 2 in the SimCLR default setting.
    """
    def __init__(self,
                 in_channels,
                 hid_channels,
                 out_channels,
                 num_layers=2,
                 sync_bn=True,
                 with_bias=False,
                 with_last_bn=True,
                 with_avg_pool=True):
        super(NonLinearNeckSimCLR, self).__init__()
        self.sync_bn = sync_bn
        self.with_last_bn = with_last_bn
        self.with_avg_pool = with_avg_pool
        if with_avg_pool:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # SyncBN in pytorch<1.4.0 only accepts 4D input (see class docstring)
        if version.parse(torch.__version__) < version.parse("1.4.0"):
            self.expand_for_syncbn = True
        else:
            self.expand_for_syncbn = False
        self.relu = nn.ReLU(inplace=True)
        self.fc0 = nn.Linear(in_channels, hid_channels, bias=with_bias)
        if sync_bn:
            _, self.bn0 = build_norm_layer(
                dict(type='SyncBN'), hid_channels)
        else:
            self.bn0 = nn.BatchNorm1d(hid_channels)
        # Remaining fc/bn pairs are created dynamically via add_module so the
        # depth follows num_layers; their names are recorded in fc_names /
        # bn_names for lookup in forward(). A None entry in bn_names means
        # the corresponding fc layer has no BN (last layer, with_last_bn off).
        self.fc_names = []
        self.bn_names = []
        for i in range(1, num_layers):
            this_channels = out_channels if i == num_layers - 1 \
                else hid_channels
            self.add_module(
                "fc{}".format(i),
                nn.Linear(hid_channels, this_channels, bias=with_bias))
            self.fc_names.append("fc{}".format(i))
            if i != num_layers - 1 or self.with_last_bn:
                if sync_bn:
                    self.add_module(
                        "bn{}".format(i),
                        build_norm_layer(dict(type='SyncBN'), this_channels)[1])
                else:
                    self.add_module(
                        "bn{}".format(i),
                        nn.BatchNorm1d(this_channels))
                self.bn_names.append("bn{}".format(i))
            else:
                self.bn_names.append(None)
    def init_weights(self, init_linear='normal'):
        _init_weights(self, init_linear)
    def _forward_syncbn(self, module, x):
        """Apply a (Sync)BN module to 2D input, expanding to 4D if needed."""
        assert x.dim() == 2
        if self.expand_for_syncbn:
            x = module(x.unsqueeze(-1).unsqueeze(-1)).squeeze(-1).squeeze(-1)
        else:
            x = module(x)
        return x
    def forward(self, x):
        assert len(x) == 1
        x = x[0]
        if self.with_avg_pool:
            x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        # first fc-bn pair is stored as explicit attributes (fc0/bn0)
        x = self.fc0(x)
        if self.sync_bn:
            x = self._forward_syncbn(self.bn0, x)
        else:
            x = self.bn0(x)
        # remaining pairs are looked up by the names recorded in __init__
        for fc_name, bn_name in zip(self.fc_names, self.bn_names):
            fc = getattr(self, fc_name)
            x = self.relu(x)
            x = fc(x)
            if bn_name is not None:
                bn = getattr(self, bn_name)
                if self.sync_bn:
                    x = self._forward_syncbn(bn, x)
                else:
                    x = bn(x)
        return [x]
@NECKS.register_module
class AvgPoolNeck(nn.Module):
    """Global average pooling neck (no learnable parameters)."""

    def __init__(self):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))

    def init_weights(self, **kwargs):
        # nothing to initialize
        pass

    def forward(self, x):
        assert len(x) == 1
        pooled = self.avg_pool(x[0])
        return [pooled]
| 11,269 | 30.836158 | 85 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/memories/simple_memory.py | import torch
import torch.nn as nn
import torch.distributed as dist
from mmcv.runner import get_dist_info
from openselfsup.utils import AliasMethod
from ..registry import MEMORIES
@MEMORIES.register_module
class SimpleMemory(nn.Module):
    """Simple memory bank for NPID.
    Stores one L2-normalized feature vector per training image and updates
    entries with a momentum blend. Requires CUDA and (for _gather) an
    initialized torch.distributed process group.
    Args:
        length (int): Number of features stored in the memory bank.
        feat_dim (int): Dimension of stored features.
        momentum (float): Momentum coefficient for updating features.
    """
    def __init__(self, length, feat_dim, momentum, **kwargs):
        super(SimpleMemory, self).__init__()
        self.rank, self.num_replicas = get_dist_info()
        # bank starts as random unit vectors
        self.feature_bank = torch.randn(length, feat_dim).cuda()
        self.feature_bank = nn.functional.normalize(self.feature_bank)
        self.momentum = momentum
        # uniform alias-method sampler over bank indices (negative sampling)
        self.multinomial = AliasMethod(torch.ones(length))
        self.multinomial.cuda()
    def update(self, ind, feature):
        """Update features in memory bank.
        Args:
            ind (Tensor): Indices for the batch of features.
            feature (Tensor): Batch of features.
        """
        feature_norm = nn.functional.normalize(feature)
        # gather across replicas so every rank applies the full global batch
        ind, feature_norm = self._gather(ind, feature_norm)
        feature_old = self.feature_bank[ind, ...]
        # momentum blend, then re-normalize so bank entries stay unit length
        feature_new = (1 - self.momentum) * feature_old + \
            self.momentum * feature_norm
        feature_new_norm = nn.functional.normalize(feature_new)
        self.feature_bank[ind, ...] = feature_new_norm
    def _gather(self, ind, feature):
        """Gather indices and features.
        Args:
            ind (Tensor): Indices for the batch of features.
            feature (Tensor): Batch of features.
        Returns:
            Tensor: Gathered indices.
            Tensor: Gathered features.
        """
        ind_gathered = [
            torch.ones_like(ind).cuda() for _ in range(self.num_replicas)
        ]
        feature_gathered = [
            torch.ones_like(feature).cuda() for _ in range(self.num_replicas)
        ]
        dist.all_gather(ind_gathered, ind)
        dist.all_gather(feature_gathered, feature)
        ind_gathered = torch.cat(ind_gathered, dim=0)
        feature_gathered = torch.cat(feature_gathered, dim=0)
        return ind_gathered, feature_gathered
| 2,305 | 33.939394 | 77 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/memories/odc_memory.py | import numpy as np
from sklearn.cluster import KMeans
import torch
import torch.nn as nn
import torch.distributed as dist
from mmcv.runner import get_dist_info
from ..registry import MEMORIES
@MEMORIES.register_module
class ODCMemory(nn.Module):
    """Memory modules for ODC.
    Holds per-sample features (rank 0 only), per-sample pseudo-labels
    (replicated on every rank, on CPU) and cluster centroids (replicated on
    every rank, on GPU, kept in sync via broadcast from rank 0).
    Args:
        length (int): Number of features stored in samples memory.
        feat_dim (int): Dimension of stored features.
        momentum (float): Momentum coefficient for updating features.
        num_classes (int): Number of clusters.
        min_cluster (int): Minimal cluster size.
    """
    def __init__(self, length, feat_dim, momentum, num_classes, min_cluster,
                 **kwargs):
        super(ODCMemory, self).__init__()
        self.rank, self.num_replicas = get_dist_info()
        # the (large) feature bank lives only on rank 0, on CPU
        if self.rank == 0:
            self.feature_bank = torch.zeros((length, feat_dim),
                                            dtype=torch.float32)
        self.label_bank = torch.zeros((length, ), dtype=torch.long)
        self.centroids = torch.zeros((num_classes, feat_dim),
                                     dtype=torch.float32).cuda()
        # 2-way k-means used to split the largest cluster when re-filling
        # emptied clusters (see _partition_max_cluster)
        self.kmeans = KMeans(n_clusters=2, random_state=0, max_iter=20)
        self.feat_dim = feat_dim
        self.initialized = False
        self.momentum = momentum
        self.num_classes = num_classes
        self.min_cluster = min_cluster
        self.debug = kwargs.get('debug', False)
    def init_memory(self, feature, label):
        """Initialize memory modules."""
        self.initialized = True
        self.label_bank.copy_(torch.from_numpy(label).long())
        # make sure no empty clusters
        assert (np.bincount(label, minlength=self.num_classes) != 0).all()
        if self.rank == 0:
            # L2-normalize features before storing them in the bank
            feature /= (np.linalg.norm(feature, axis=1).reshape(-1, 1) + 1e-10)
            self.feature_bank.copy_(torch.from_numpy(feature))
            centroids = self._compute_centroids()
            self.centroids.copy_(centroids)
        dist.broadcast(self.centroids, 0)
    def _compute_centroids_ind(self, cinds):
        """Compute a few centroids."""
        assert self.rank == 0
        num = len(cinds)
        centroids = torch.zeros((num, self.feat_dim), dtype=torch.float32)
        for i, c in enumerate(cinds):
            ind = np.where(self.label_bank.numpy() == c)[0]
            centroids[i, :] = self.feature_bank[ind, :].mean(dim=0)
        return centroids
    def _compute_centroids(self):
        """Compute all non-empty centroids."""
        assert self.rank == 0
        # sort labels once, then walk contiguous runs of equal labels to
        # average each cluster's features in a single pass
        l = self.label_bank.numpy()
        argl = np.argsort(l)
        sortl = l[argl]
        diff_pos = np.where(sortl[1:] - sortl[:-1] != 0)[0] + 1
        start = np.insert(diff_pos, 0, 0)
        end = np.insert(diff_pos, len(diff_pos), len(l))
        class_start = sortl[start]
        # keep empty class centroids unchanged
        centroids = self.centroids.cpu().clone()
        for i, st, ed in zip(class_start, start, end):
            centroids[i, :] = self.feature_bank[argl[st:ed], :].mean(dim=0)
        return centroids
    def _gather(self, ind, feature):
        """Gather indices and features."""
        # all_gather requires pre-allocated output tensors, one per replica
        ind_gathered = [
            torch.ones_like(ind).cuda() for _ in range(self.num_replicas)
        ]
        feature_gathered = [
            torch.ones_like(feature).cuda() for _ in range(self.num_replicas)
        ]
        dist.all_gather(ind_gathered, ind)
        dist.all_gather(feature_gathered, feature)
        ind_gathered = torch.cat(ind_gathered, dim=0)
        feature_gathered = torch.cat(feature_gathered, dim=0)
        return ind_gathered, feature_gathered
    def update_samples_memory(self, ind, feature):
        """Update samples memory."""
        assert self.initialized
        feature_norm = feature / (feature.norm(dim=1).view(-1, 1) + 1e-10
                                  )  # normalize
        ind, feature_norm = self._gather(
            ind, feature_norm)  # ind: (N*w), feature: (N*w)xk, cuda tensor
        ind = ind.cpu()
        if self.rank == 0:
            # momentum-blend into the bank, then re-normalize
            feature_old = self.feature_bank[ind, ...].cuda()
            feature_new = (1 - self.momentum) * feature_old + \
                self.momentum * feature_norm
            feature_norm = feature_new / (
                feature_new.norm(dim=1).view(-1, 1) + 1e-10)
            self.feature_bank[ind, ...] = feature_norm.cpu()
        dist.barrier()
        # broadcast the blended features so all ranks assign the same labels
        dist.broadcast(feature_norm, 0)
        # compute new labels
        similarity_to_centroids = torch.mm(self.centroids,
                                           feature_norm.permute(1, 0))  # CxN
        newlabel = similarity_to_centroids.argmax(dim=0)  # cuda tensor
        newlabel_cpu = newlabel.cpu()
        # fraction of this batch whose pseudo-label changed
        change_ratio = (newlabel_cpu !=
                        self.label_bank[ind]).sum().float().cuda() \
            / float(newlabel_cpu.shape[0])
        self.label_bank[ind] = newlabel_cpu.clone()  # copy to cpu
        return change_ratio
    def deal_with_small_clusters(self):
        """Deal with small clusters."""
        # check empty class
        hist = np.bincount(self.label_bank.numpy(), minlength=self.num_classes)
        small_clusters = np.where(hist < self.min_cluster)[0].tolist()
        if self.debug and self.rank == 0:
            print("mincluster: {}, num of small class: {}".format(
                hist.min(), len(small_clusters)))
        if len(small_clusters) == 0:
            return
        # re-assign samples in small clusters to make them empty
        for s in small_clusters:
            ind = np.where(self.label_bank.numpy() == s)[0]
            if len(ind) > 0:
                # candidate clusters: everything that is not itself small
                inclusion = torch.from_numpy(
                    np.setdiff1d(
                        np.arange(self.num_classes),
                        np.array(small_clusters),
                        assume_unique=True)).cuda()
                if self.rank == 0:
                    # rank 0 picks the nearest candidate centroid per sample
                    target_ind = torch.mm(
                        self.centroids[inclusion, :],
                        self.feature_bank[ind, :].cuda().permute(
                            1, 0)).argmax(dim=0)
                    target = inclusion[target_ind]
                else:
                    # zeros on other ranks; all_reduce sums in rank 0's result
                    target = torch.zeros((ind.shape[0], ),
                                         dtype=torch.int64).cuda()
                dist.all_reduce(target)
                self.label_bank[ind] = torch.from_numpy(target.cpu().numpy())
        # deal with empty cluster
        self._redirect_empty_clusters(small_clusters)
    def update_centroids_memory(self, cinds=None):
        """Update centroids memory."""
        if self.rank == 0:
            if self.debug:
                print("updating centroids ...")
            if cinds is None:
                # refresh all centroids
                center = self._compute_centroids()
                self.centroids.copy_(center)
            else:
                # refresh only the requested cluster indices
                center = self._compute_centroids_ind(cinds)
                self.centroids[
                    torch.LongTensor(cinds).cuda(), :] = center.cuda()
        dist.broadcast(self.centroids, 0)
    def _partition_max_cluster(self, max_cluster):
        """Partition the largest cluster into two sub-clusters."""
        assert self.rank == 0
        max_cluster_inds = np.where(self.label_bank == max_cluster)[0]
        assert len(max_cluster_inds) >= 2
        max_cluster_features = self.feature_bank[max_cluster_inds, :]
        if np.any(np.isnan(max_cluster_features.numpy())):
            raise Exception("Has nan in features.")
        # 2-way k-means split of the largest cluster
        kmeans_ret = self.kmeans.fit(max_cluster_features)
        sub_cluster1_ind = max_cluster_inds[kmeans_ret.labels_ == 0]
        sub_cluster2_ind = max_cluster_inds[kmeans_ret.labels_ == 1]
        if not (len(sub_cluster1_ind) > 0 and len(sub_cluster2_ind) > 0):
            # degenerate split: fall back to a random halving
            print(
                "Warning: kmeans partition fails, resort to random partition.")
            sub_cluster1_ind = np.random.choice(
                max_cluster_inds, len(max_cluster_inds) // 2, replace=False)
            sub_cluster2_ind = np.setdiff1d(
                max_cluster_inds, sub_cluster1_ind, assume_unique=True)
        return sub_cluster1_ind, sub_cluster2_ind
    def _redirect_empty_clusters(self, empty_clusters):
        """Re-direct empty clusters."""
        # for each empty cluster: split the current largest cluster in two
        # and hand one half over to the empty cluster
        for e in empty_clusters:
            assert (self.label_bank != e).all().item(), \
                "Cluster #{} is not an empty cluster.".format(e)
            max_cluster = np.bincount(
                self.label_bank, minlength=self.num_classes).argmax().item()
            # gather partitioning indices
            if self.rank == 0:
                sub_cluster1_ind, sub_cluster2_ind = self._partition_max_cluster(
                    max_cluster)
                size1 = torch.LongTensor([len(sub_cluster1_ind)]).cuda()
                size2 = torch.LongTensor([len(sub_cluster2_ind)]).cuda()
                sub_cluster1_ind_tensor = torch.from_numpy(
                    sub_cluster1_ind).long().cuda()
                sub_cluster2_ind_tensor = torch.from_numpy(
                    sub_cluster2_ind).long().cuda()
            else:
                size1 = torch.LongTensor([0]).cuda()
                size2 = torch.LongTensor([0]).cuda()
            # share the partition sizes first so non-zero ranks can allocate
            # receive buffers of the right length before the broadcasts
            dist.all_reduce(size1)
            dist.all_reduce(size2)
            if self.rank != 0:
                sub_cluster1_ind_tensor = torch.zeros(
                    (size1, ), dtype=torch.int64).cuda()
                sub_cluster2_ind_tensor = torch.zeros(
                    (size2, ), dtype=torch.int64).cuda()
            dist.broadcast(sub_cluster1_ind_tensor, 0)
            dist.broadcast(sub_cluster2_ind_tensor, 0)
            if self.rank != 0:
                sub_cluster1_ind = sub_cluster1_ind_tensor.cpu().numpy()
                sub_cluster2_ind = sub_cluster2_ind_tensor.cpu().numpy()
            # reassign samples in partition #2 to the empty class
            self.label_bank[sub_cluster2_ind] = e
            # update centroids of max_cluster and e
            self.update_centroids_memory([max_cluster, e])
| 10,441 | 43.623932 | 81 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/memories/__init__.py | from .odc_memory import ODCMemory
from .simple_memory import SimpleMemory
| 74 | 24 | 39 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/utils/multi_pooling.py | import torch.nn as nn
class MultiPooling(nn.Module):
    """Pooling layers for features coming from multiple backbone depths.

    One pooling layer is created per entry in ``in_indices``; each pools the
    feature map of the corresponding backbone stage.
    """

    POOL_PARAMS = {
        'resnet50': [
            dict(kernel_size=10, stride=10, padding=4),
            dict(kernel_size=16, stride=8, padding=0),
            dict(kernel_size=13, stride=5, padding=0),
            dict(kernel_size=8, stride=3, padding=0),
            dict(kernel_size=6, stride=1, padding=0)
        ]
    }
    POOL_SIZES = {'resnet50': [12, 6, 4, 3, 2]}
    POOL_DIMS = {'resnet50': [9216, 9216, 8192, 9216, 8192]}

    def __init__(self,
                 pool_type='adaptive',
                 in_indices=(0, ),
                 backbone='resnet50'):
        super().__init__()
        assert pool_type in ['adaptive', 'specified']
        if pool_type == 'adaptive':
            sizes = self.POOL_SIZES[backbone]
            self.pools = nn.ModuleList(
                [nn.AdaptiveAvgPool2d(sizes[i]) for i in in_indices])
        else:
            params = self.POOL_PARAMS[backbone]
            self.pools = nn.ModuleList(
                [nn.AvgPool2d(**params[i]) for i in in_indices])

    def forward(self, x):
        assert isinstance(x, (list, tuple))
        return [pool(feat) for pool, feat in zip(self.pools, x)]
| 1,280 | 31.846154 | 66 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/utils/norm.py | import torch.nn as nn
norm_cfg = {
    # format: layer_type: (abbreviation, module)
    'BN': ('bn', nn.BatchNorm2d),
    'SyncBN': ('bn', nn.SyncBatchNorm),
    'GN': ('gn', nn.GroupNorm),
    # and potentially 'SN'
}


def build_norm_layer(cfg, num_features, postfix=''):
    """Build normalization layer.

    Args:
        cfg (dict): cfg should contain:
            type (str): identify norm layer type.
            layer args: args needed to instantiate a norm layer.
            requires_grad (bool): [optional] whether stop gradient updates
        num_features (int): number of channels from input.
        postfix (int, str): appended into norm abbreviation to
            create named layer.

    Returns:
        name (str): abbreviation + postfix
        layer (nn.Module): created norm layer

    Raises:
        KeyError: if ``cfg['type']`` is not a key of ``norm_cfg``.
    """
    assert isinstance(cfg, dict) and 'type' in cfg
    cfg_ = cfg.copy()

    layer_type = cfg_.pop('type')
    if layer_type not in norm_cfg:
        raise KeyError('Unrecognized norm type {}'.format(layer_type))
    abbr, norm_layer = norm_cfg[layer_type]
    if norm_layer is None:
        raise NotImplementedError

    assert isinstance(postfix, (int, str))
    name = abbr + str(postfix)

    requires_grad = cfg_.pop('requires_grad', True)
    cfg_.setdefault('eps', 1e-5)
    if layer_type != 'GN':
        layer = norm_layer(num_features, **cfg_)
        # `_specify_ddp_gpu_num` is a private hook that was removed in
        # recent PyTorch releases; guard it so building a SyncBN layer
        # does not raise AttributeError on newer versions.
        if layer_type == 'SyncBN' and hasattr(layer, '_specify_ddp_gpu_num'):
            layer._specify_ddp_gpu_num(1)
    else:
        # GroupNorm takes num_channels (plus the mandatory num_groups)
        assert 'num_groups' in cfg_
        layer = norm_layer(num_channels=num_features, **cfg_)

    # freeze (or keep trainable) all affine parameters as requested
    for param in layer.parameters():
        param.requires_grad = requires_grad

    return name, layer
| 1,684 | 29.089286 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/utils/scale.py | import torch
import torch.nn as nn
class Scale(nn.Module):
    """Multiply the input by a single learnable scalar."""

    def __init__(self, scale=1.0):
        super(Scale, self).__init__()
        init_value = torch.tensor(scale, dtype=torch.float)
        self.scale = nn.Parameter(init_value)

    def forward(self, x):
        return self.scale * x
| 305 | 20.857143 | 73 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/utils/sobel.py | import torch
import torch.nn as nn
class Sobel(nn.Module):
    """Fixed (non-trainable) grayscale + Sobel edge filter.

    Maps a 3-channel image to a 2-channel map of horizontal/vertical
    gradients; all parameters are frozen.
    """

    def __init__(self):
        super(Sobel, self).__init__()
        to_gray = nn.Conv2d(3, 1, kernel_size=1, stride=1, padding=0)
        to_gray.weight.data.fill_(1.0 / 3.0)
        to_gray.bias.data.zero_()
        edge = nn.Conv2d(1, 2, kernel_size=3, stride=1, padding=1)
        # Channel 0: horizontal gradient; channel 1: vertical gradient.
        grad_x = torch.FloatTensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]])
        grad_y = torch.FloatTensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
        edge.weight.data[0, 0].copy_(grad_x)
        edge.weight.data[1, 0].copy_(grad_y)
        edge.bias.data.zero_()
        self.sobel = nn.Sequential(to_gray, edge)
        for param in self.sobel.parameters():
            param.requires_grad = False

    def forward(self, x):
        return self.sobel(x)
| 840 | 32.64 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/utils/conv_ws.py | import torch.nn as nn
import torch.nn.functional as F
def conv_ws_2d(input,
               weight,
               bias=None,
               stride=1,
               padding=0,
               dilation=1,
               groups=1,
               eps=1e-5):
    """2d convolution with Weight Standardization.

    Each output filter is normalized to zero mean and unit std (``eps``
    added for numerical stability) before the convolution is applied.
    """
    num_filters = weight.size(0)
    flat = weight.view(num_filters, -1)
    mean = flat.mean(dim=1, keepdim=True).view(num_filters, 1, 1, 1)
    std = flat.std(dim=1, keepdim=True).view(num_filters, 1, 1, 1)
    standardized = (weight - mean) / (std + eps)
    return F.conv2d(input, standardized, bias, stride, padding, dilation,
                    groups)


class ConvWS2d(nn.Conv2d):
    """``nn.Conv2d`` drop-in that standardizes its weight on every forward."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 eps=1e-5):
        super(ConvWS2d, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        self.eps = eps

    def forward(self, x):
        return conv_ws_2d(x, self.weight, self.bias, self.stride,
                          self.padding, self.dilation, self.groups, self.eps)
| 1,335 | 27.425532 | 79 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/utils/conv_module.py | import warnings
import torch.nn as nn
from mmcv.cnn import constant_init, kaiming_init
from .conv_ws import ConvWS2d
from .norm import build_norm_layer
conv_cfg = {
'Conv': nn.Conv2d,
'ConvWS': ConvWS2d,
}
def build_conv_layer(cfg, *args, **kwargs):
    """Build a convolution layer from a config dict.

    Args:
        cfg (None | dict): when None, a plain ``nn.Conv2d`` is built;
            otherwise must contain ``type`` (a key of ``conv_cfg``), and the
            remaining entries are forwarded to the conv constructor.
        *args, **kwargs: positional/keyword args for the conv constructor.

    Returns:
        nn.Module: the created conv layer.
    """
    if cfg is None:
        cfg_ = dict(type='Conv')
    else:
        assert isinstance(cfg, dict) and 'type' in cfg
        cfg_ = cfg.copy()
    layer_type = cfg_.pop('type')
    if layer_type not in conv_cfg:
        raise KeyError('Unrecognized norm type {}'.format(layer_type))
    conv_layer = conv_cfg[layer_type]
    return conv_layer(*args, **kwargs, **cfg_)
class ConvModule(nn.Module):
    """A conv block that contains conv/norm/activation layers.

    Args:
        in_channels (int): Same as nn.Conv2d.
        out_channels (int): Same as nn.Conv2d.
        kernel_size (int or tuple[int]): Same as nn.Conv2d.
        stride (int or tuple[int]): Same as nn.Conv2d.
        padding (int or tuple[int]): Same as nn.Conv2d.
        dilation (int or tuple[int]): Same as nn.Conv2d.
        groups (int): Same as nn.Conv2d.
        bias (bool or str): If specified as `auto`, it will be decided by the
            norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
            False.
        conv_cfg (dict): Config dict for convolution layer.
        norm_cfg (dict): Config dict for normalization layer.
        activation (str or None): Activation type, "ReLU" by default.
        inplace (bool): Whether to use inplace mode for activation.
        order (tuple[str]): The order of conv/norm/activation layers. It is a
            sequence of "conv", "norm" and "act". Examples are
            ("conv", "norm", "act") and ("act", "conv", "norm").
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias='auto',
                 conv_cfg=None,
                 norm_cfg=None,
                 activation='relu',
                 inplace=True,
                 order=('conv', 'norm', 'act')):
        super(ConvModule, self).__init__()
        assert conv_cfg is None or isinstance(conv_cfg, dict)
        assert norm_cfg is None or isinstance(norm_cfg, dict)
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.activation = activation
        self.inplace = inplace
        self.order = order
        # `order` must be a permutation of exactly these three layer names.
        assert isinstance(self.order, tuple) and len(self.order) == 3
        assert set(order) == set(['conv', 'norm', 'act'])
        self.with_norm = norm_cfg is not None
        self.with_activation = activation is not None
        # if the conv layer is before a norm layer, bias is unnecessary.
        if bias == 'auto':
            bias = False if self.with_norm else True
        self.with_bias = bias
        if self.with_norm and self.with_bias:
            warnings.warn('ConvModule has norm and bias at the same time')
        # build convolution layer
        self.conv = build_conv_layer(
            conv_cfg,
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        # export the attributes of self.conv to a higher level for convenience
        self.in_channels = self.conv.in_channels
        self.out_channels = self.conv.out_channels
        self.kernel_size = self.conv.kernel_size
        self.stride = self.conv.stride
        self.padding = self.conv.padding
        self.dilation = self.conv.dilation
        self.transposed = self.conv.transposed
        self.output_padding = self.conv.output_padding
        self.groups = self.conv.groups
        # build normalization layers
        if self.with_norm:
            # norm layer is after conv layer
            if order.index('norm') > order.index('conv'):
                norm_channels = out_channels
            else:
                # norm runs before conv, so it normalizes the *input* channels
                norm_channels = in_channels
            self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels)
            # registered under the generated name (e.g. 'bn'); accessed via
            # the `norm` property below
            self.add_module(self.norm_name, norm)
        # build activation layer
        if self.with_activation:
            # TODO: introduce `act_cfg` and supports more activation layers
            if self.activation not in ['relu']:
                raise ValueError('{} is currently not supported.'.format(
                    self.activation))
            if self.activation == 'relu':
                self.activate = nn.ReLU(inplace=inplace)
        # Use msra init by default
        self.init_weights()

    @property
    def norm(self):
        # the norm layer registered under the generated name in __init__
        return getattr(self, self.norm_name)

    def init_weights(self):
        # MSRA (kaiming) init for the conv; constant init for the norm.
        nonlinearity = 'relu' if self.activation is None else self.activation
        kaiming_init(self.conv, mode='fan_in', nonlinearity=nonlinearity)
        if self.with_norm:
            constant_init(self.norm, 1, bias=0)

    def forward(self, x, activate=True, norm=True):
        # Apply the three layers in the configured order; `activate`/`norm`
        # let callers skip those layers for a single forward pass.
        for layer in self.order:
            if layer == 'conv':
                x = self.conv(x)
            elif layer == 'norm' and norm and self.with_norm:
                x = self.norm(x)
            elif layer == 'act' and activate and self.with_activation:
                x = self.activate(x)
        return x
| 5,723 | 33.902439 | 78 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/utils/accuracy.py | import torch.nn as nn
def accuracy(pred, target, topk=1):
    """Compute top-k accuracy (as a percentage) of predictions.

    Args:
        pred (Tensor): NxC prediction scores.
        target (Tensor): N ground-truth class indices.
        topk (int | tuple[int]): which k value(s) to evaluate.

    Returns:
        Tensor | list[Tensor]: a single 1-element tensor if ``topk`` is an
            int, otherwise one 1-element tensor per requested k.
    """
    assert isinstance(topk, (int, tuple))
    if isinstance(topk, int):
        topk = (topk, )
        return_single = True
    else:
        return_single = False

    maxk = max(topk)
    _, pred_label = pred.topk(maxk, dim=1)
    pred_label = pred_label.t()
    correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))

    res = []
    for k in topk:
        # `correct[:k]` is a slice of a transposed tensor and therefore
        # non-contiguous; `.view(-1)` raises a RuntimeError on recent
        # PyTorch, so use `.reshape(-1)` instead.
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / pred.size(0)))
    return res[0] if return_single else res
class Accuracy(nn.Module):
    """Module wrapper around the functional :func:`accuracy`."""

    def __init__(self, topk=(1, )):
        super(Accuracy, self).__init__()
        # k value(s) forwarded to `accuracy` on every call
        self.topk = topk

    def forward(self, pred, target):
        return accuracy(pred, target, self.topk)
| 801 | 24.0625 | 69 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/utils/gather_layer.py | import torch
import torch.distributed as dist
class GatherLayer(torch.autograd.Function):
    """Gather tensors from all process, supporting backward propagation.
    """

    @staticmethod
    def forward(ctx, input):
        # `dist.all_gather` is not differentiable, so the local input is
        # saved and a matching gradient is reconstructed in `backward`.
        ctx.save_for_backward(input)
        output = [torch.zeros_like(input) \
            for _ in range(dist.get_world_size())]
        dist.all_gather(output, input)
        # one tensor per rank, in rank order
        return tuple(output)

    @staticmethod
    def backward(ctx, *grads):
        input, = ctx.saved_tensors
        grad_out = torch.zeros_like(input)
        # Only the gradient of this rank's own gathered slice flows back to
        # its local input; gradients for other ranks' slices are dropped.
        grad_out[:] = grads[dist.get_rank()]
        return grad_out
| 618 | 25.913043 | 72 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/utils/__init__.py | from .accuracy import Accuracy, accuracy
from .conv_module import ConvModule, build_conv_layer
from .conv_ws import ConvWS2d, conv_ws_2d
from .gather_layer import GatherLayer
from .multi_pooling import MultiPooling
from .norm import build_norm_layer
from .scale import Scale
#from .weight_init import (bias_init_with_prob, kaiming_init, normal_init,
# uniform_init, xavier_init)
from .sobel import Sobel
#__all__ = [
# 'conv_ws_2d', 'ConvWS2d', 'build_conv_layer', 'ConvModule',
# 'build_norm_layer', 'xavier_init', 'normal_init', 'uniform_init',
# 'kaiming_init', 'bias_init_with_prob', 'Scale', 'Sobel'
#]
| 643 | 36.882353 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/backbones/resnet.py | import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import constant_init, kaiming_init
from mmcv.runner import load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm
from openselfsup.utils import get_root_logger
from ..registry import BACKBONES
from ..utils import build_conv_layer, build_norm_layer
class BasicBlock(nn.Module):
    """Basic residual block (two 3x3 convs) used by ResNet-18/34.

    Computes ``relu(conv-bn-relu-conv-bn(x) + identity)``; the identity
    path is ``downsample(x)`` when given (needed whenever the stride or
    channel count changes).
    """

    expansion = 1  # output channels == planes for the basic block

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN')):
        super(BasicBlock, self).__init__()
        # Norm layers are registered via add_module under generated names
        # and retrieved through the norm1/norm2 properties below.
        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            3,
            stride=stride,
            padding=dilation,
            dilation=dilation,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(
            conv_cfg, planes, planes, 3, padding=1, bias=False)
        self.add_module(self.norm2_name, norm2)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        # gradient checkpointing is not supported for the basic block
        assert not with_cp

    @property
    def norm1(self):
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        return getattr(self, self.norm2_name)

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.norm1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.norm2(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out
class Bottleneck(nn.Module):
    """Bottleneck residual block (1x1 -> 3x3 -> 1x1) used by ResNet-50+.

    The last 1x1 conv expands the channel count to ``planes * expansion``.
    """

    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN')):
        """Bottleneck block for ResNet.

        If style is "pytorch", the stride-two layer is the 3x3 conv layer,
        if it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__()
        assert style in ['pytorch', 'caffe']
        self.inplanes = inplanes
        self.planes = planes
        self.stride = stride
        self.dilation = dilation
        self.style = style
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # place the stride on conv2 (pytorch style) or conv1 (caffe style)
        if self.style == 'pytorch':
            self.conv1_stride = 1
            self.conv2_stride = stride
        else:
            self.conv1_stride = stride
            self.conv2_stride = 1
        # Norm layers are registered under generated names via add_module
        # and retrieved through the norm1/norm2/norm3 properties below.
        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            norm_cfg, planes * self.expansion, postfix=3)
        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(
            conv_cfg,
            planes,
            planes,
            kernel_size=3,
            stride=self.conv2_stride,
            padding=dilation,
            dilation=dilation,
            bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            conv_cfg,
            planes,
            planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

    @property
    def norm1(self):
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        return getattr(self, self.norm2_name)

    @property
    def norm3(self):
        return getattr(self, self.norm3_name)

    def forward(self, x):

        def _inner_forward(x):
            identity = x

            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)

            out = self.conv2(out)
            out = self.norm2(out)
            out = self.relu(out)

            out = self.conv3(out)
            out = self.norm3(out)

            if self.downsample is not None:
                identity = self.downsample(x)

            out += identity

            return out

        # gradient checkpointing trades compute for memory during training
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        out = self.relu(out)

        return out
def make_res_layer(block,
                   inplanes,
                   planes,
                   blocks,
                   stride=1,
                   dilation=1,
                   style='pytorch',
                   with_cp=False,
                   conv_cfg=None,
                   norm_cfg=dict(type='BN')):
    """Stack ``blocks`` residual blocks into a single ResNet stage.

    The first block carries the stage stride and, whenever the stride or
    channel count changes, a downsample branch (1x1 conv + norm) for the
    identity path; the remaining blocks run at stride 1 with matching
    channels.
    """
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            build_conv_layer(
                conv_cfg,
                inplanes,
                planes * block.expansion,
                kernel_size=1,
                stride=stride,
                bias=False),
            build_norm_layer(norm_cfg, planes * block.expansion)[1],
        )

    layers = [
        block(
            inplanes=inplanes,
            planes=planes,
            stride=stride,
            dilation=dilation,
            downsample=downsample,
            style=style,
            with_cp=with_cp,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg)
    ]
    inplanes = planes * block.expansion
    layers.extend(
        block(
            inplanes=inplanes,
            planes=planes,
            stride=1,
            dilation=dilation,
            style=style,
            with_cp=with_cp,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg) for _ in range(1, blocks))
    return nn.Sequential(*layers)
@BACKBONES.register_module
class ResNet(nn.Module):
    """ResNet backbone.

    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        in_channels (int): Number of input image channels. Normally 3.
        num_stages (int): Resnet stages, normally 4.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
            layer is the 3x3 conv layer, otherwise the stride-two layer is
            the first 1x1 conv layer.
        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
            -1 means not freezing any parameters.
        norm_cfg (dict): dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        zero_init_residual (bool): whether to use zero init for last norm layer
            in resblocks to let them behave as identity.

    Example:
        >>> from openselfsup.models import ResNet
        >>> import torch
        >>> self = ResNet(depth=18)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 64, 8, 8)
        (1, 128, 4, 4)
        (1, 256, 2, 2)
        (1, 512, 1, 1)
    """

    # depth -> (block class, number of blocks per stage)
    arch_settings = {
        18: (BasicBlock, (2, 2, 2, 2)),
        34: (BasicBlock, (3, 4, 6, 3)),
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self,
                 depth,
                 in_channels=3,
                 num_stages=4,
                 strides=(1, 2, 2, 2),
                 dilations=(1, 1, 1, 1),
                 out_indices=(0, 1, 2, 3, 4),
                 style='pytorch',
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 norm_eval=False,
                 with_cp=False,
                 zero_init_residual=False):
        super(ResNet, self).__init__()
        if depth not in self.arch_settings:
            raise KeyError('invalid depth {} for resnet'.format(depth))
        self.depth = depth
        self.num_stages = num_stages
        assert num_stages >= 1 and num_stages <= 4
        self.strides = strides
        self.dilations = dilations
        assert len(strides) == len(dilations) == num_stages
        # index 0 is the stem (post-relu, pre-maxpool) output; indices
        # 1..num_stages are the residual stage outputs
        self.out_indices = out_indices
        assert max(out_indices) < num_stages + 1
        self.style = style
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.with_cp = with_cp
        self.norm_eval = norm_eval
        self.zero_init_residual = zero_init_residual
        self.block, stage_blocks = self.arch_settings[depth]
        self.stage_blocks = stage_blocks[:num_stages]
        self.inplanes = 64
        self._make_stem_layer(in_channels)
        self.res_layers = []
        for i, num_blocks in enumerate(self.stage_blocks):
            stride = strides[i]
            dilation = dilations[i]
            # channel width doubles each stage: 64, 128, 256, 512
            planes = 64 * 2**i
            res_layer = make_res_layer(
                self.block,
                self.inplanes,
                planes,
                num_blocks,
                stride=stride,
                dilation=dilation,
                style=self.style,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg)
            self.inplanes = planes * self.block.expansion
            layer_name = 'layer{}'.format(i + 1)
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)
        self._freeze_stages()
        # channel count of the deepest stage's output feature map
        self.feat_dim = self.block.expansion * 64 * 2**(
            len(self.stage_blocks) - 1)

    @property
    def norm1(self):
        return getattr(self, self.norm1_name)

    def _make_stem_layer(self, in_channels):
        # 7x7/2 conv + norm + relu + 3x3/2 maxpool stem
        self.conv1 = build_conv_layer(
            self.conv_cfg,
            in_channels,
            64,
            kernel_size=7,
            stride=2,
            padding=3,
            bias=False)
        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
        self.add_module(self.norm1_name, norm1)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def _freeze_stages(self):
        # Put the stem and the first `frozen_stages` residual stages into
        # eval mode and stop their gradients.
        if self.frozen_stages >= 0:
            self.norm1.eval()
            for m in [self.conv1, self.norm1]:
                for param in m.parameters():
                    param.requires_grad = False

        for i in range(1, self.frozen_stages + 1):
            m = getattr(self, 'layer{}'.format(i))
            m.eval()
            for param in m.parameters():
                param.requires_grad = False

    def init_weights(self, pretrained=None):
        # Load a checkpoint when a path is given; otherwise kaiming/constant
        # init, optionally zero-initializing each block's last norm so
        # residual blocks start as identity mappings.
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=True, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m, mode='fan_in', nonlinearity='relu')
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)

            if self.zero_init_residual:
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        outs = []
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu(x)  # r50: 64x128x128
        if 0 in self.out_indices:
            outs.append(x)
        x = self.maxpool(x)  # r50: 64x56x56
        for i, layer_name in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x)
            if i + 1 in self.out_indices:
                outs.append(x)
        # r50: 1-256x56x56; 2-512x28x28; 3-1024x14x14; 4-2048x7x7
        return tuple(outs)

    def train(self, mode=True):
        # Re-apply stage freezing and (optionally) keep all BN layers in
        # eval mode whenever the module is switched to train mode.
        super(ResNet, self).train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                # trick: eval have effect on BatchNorm only
                if isinstance(m, _BatchNorm):
                    m.eval()
| 13,648 | 30.74186 | 79 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/backbones/resnext.py | import math
import torch.nn as nn
from ..registry import BACKBONES
from ..utils import build_conv_layer, build_norm_layer
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet
class Bottleneck(_Bottleneck):
    """ResNeXt bottleneck: grouped 3x3 conv with widened channels."""

    def __init__(self, inplanes, planes, groups=1, base_width=4, **kwargs):
        """Bottleneck block for ResNeXt.

        If style is "pytorch", the stride-two layer is the 3x3 conv layer,
        if it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)

        # ResNeXt width rule: planes * (base_width / 64) * groups
        if groups == 1:
            width = self.planes
        else:
            width = math.floor(self.planes * (base_width / 64)) * groups

        # Re-create conv1/conv2 and norms with the widened channel count,
        # overriding what the parent __init__ built.
        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, width, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(
            self.norm_cfg, width, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            self.norm_cfg, self.planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            self.conv_cfg,
            self.inplanes,
            width,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        self.with_modulated_dcn = False
        # NOTE(review): `self.with_dcn` / `self.dcn` are read here, but the
        # parent Bottleneck in resnet.py (as visible in this repo) never
        # sets them — presumably this file was copied from an mmdet-style
        # ResNet with DCN support; confirm before instantiating.
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(
                self.conv_cfg,
                width,
                width,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation,
                groups=groups,
                bias=False)
        else:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(
                self.dcn,
                width,
                width,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation,
                groups=groups,
                bias=False)

        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            self.conv_cfg,
            width,
            self.planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)
def make_res_layer(block,
                   inplanes,
                   planes,
                   blocks,
                   stride=1,
                   dilation=1,
                   groups=1,
                   base_width=4,
                   style='pytorch',
                   with_cp=False,
                   conv_cfg=None,
                   norm_cfg=dict(type='BN'),
                   dcn=None,
                   gcb=None):
    """Stack ``blocks`` ResNeXt bottleneck blocks into one stage.

    The first block carries the stage stride and, when the stride or
    channel count changes, a downsample branch (1x1 conv + norm) for the
    identity path; the remaining blocks run at stride 1.
    """
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            build_conv_layer(
                conv_cfg,
                inplanes,
                planes * block.expansion,
                kernel_size=1,
                stride=stride,
                bias=False),
            build_norm_layer(norm_cfg, planes * block.expansion)[1],
        )

    layers = []
    # first block: applies the stride and (possibly) the downsample branch
    layers.append(
        block(
            inplanes=inplanes,
            planes=planes,
            stride=stride,
            dilation=dilation,
            downsample=downsample,
            groups=groups,
            base_width=base_width,
            style=style,
            with_cp=with_cp,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            dcn=dcn,
            gcb=gcb))
    inplanes = planes * block.expansion
    for i in range(1, blocks):
        layers.append(
            block(
                inplanes=inplanes,
                planes=planes,
                stride=1,
                dilation=dilation,
                groups=groups,
                base_width=base_width,
                style=style,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                dcn=dcn,
                gcb=gcb))

    return nn.Sequential(*layers)
@BACKBONES.register_module
class ResNeXt(ResNet):
    """ResNeXt backbone.

    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        in_channels (int): Number of input image channels. Normally 3.
        num_stages (int): Resnet stages, normally 4.
        groups (int): Group of resnext.
        base_width (int): Base width of resnext.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
            layer is the 3x3 conv layer, otherwise the stride-two layer is
            the first 1x1 conv layer.
        frozen_stages (int): Stages to be frozen (all param fixed). -1 means
            not freezing any parameters.
        norm_cfg (dict): dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        zero_init_residual (bool): whether to use zero init for last norm layer
            in resblocks to let them behave as identity.

    Example:
        >>> from openselfsup.models import ResNeXt
        >>> import torch
        >>> self = ResNeXt(depth=50)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 256, 8, 8)
        (1, 512, 4, 4)
        (1, 1024, 2, 2)
        (1, 2048, 1, 1)
    """

    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self, groups=1, base_width=4, **kwargs):
        # The parent __init__ already built the stages; they are rebuilt
        # below with grouped/widened ResNeXt blocks.
        super(ResNeXt, self).__init__(**kwargs)
        self.groups = groups
        self.base_width = base_width
        self.inplanes = 64
        self.res_layers = []
        for i, num_blocks in enumerate(self.stage_blocks):
            stride = self.strides[i]
            dilation = self.dilations[i]
            # NOTE(review): `self.dcn`, `self.stage_with_dcn`, `self.gcb` and
            # `self.stage_with_gcb` are not set by the ResNet base class
            # visible in this repo — instantiating this class as-is would
            # raise AttributeError; presumably copied from mmdet. Confirm.
            dcn = self.dcn if self.stage_with_dcn[i] else None
            gcb = self.gcb if self.stage_with_gcb[i] else None
            planes = 64 * 2**i
            res_layer = make_res_layer(
                self.block,
                self.inplanes,
                planes,
                num_blocks,
                stride=stride,
                dilation=dilation,
                groups=self.groups,
                base_width=self.base_width,
                style=self.style,
                with_cp=self.with_cp,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                dcn=dcn,
                gcb=gcb)
            self.inplanes = planes * self.block.expansion
            layer_name = 'layer{}'.format(i + 1)
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)

        self._freeze_stages()
| 7,594 | 33.058296 | 79 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/backbones/__init__.py | from .resnet import ResNet, make_res_layer
| 43 | 21 | 42 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/heads/contrastive_head.py | import torch
import torch.nn as nn
from ..registry import HEADS
@HEADS.register_module
class ContrastiveHead(nn.Module):
    """Head for contrastive learning.

    Args:
        temperature (float): The temperature hyper-parameter that
            controls the concentration level of the distribution.
            Default: 0.1.
    """

    def __init__(self, temperature=0.1):
        super(ContrastiveHead, self).__init__()
        self.criterion = nn.CrossEntropyLoss()
        self.temperature = temperature

    def forward(self, pos, neg):
        """Forward head.

        Args:
            pos (Tensor): Nx1 positive similarity.
            neg (Tensor): Nxk negative similarity.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        N = pos.size(0)
        logits = torch.cat((pos, neg), dim=1)
        logits /= self.temperature
        # The positive sits in column 0, so the target class is 0 for every
        # row. Create labels on the inputs' device instead of hard-coding
        # `.cuda()`, so the head also works on CPU and always lands on the
        # correct GPU in multi-device setups.
        labels = torch.zeros((N, ), dtype=torch.long, device=pos.device)
        losses = dict()
        losses['loss'] = self.criterion(logits, labels)
        return losses
| 1,053 | 26.025641 | 65 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/heads/cls_head.py | import torch.nn as nn
from mmcv.cnn import kaiming_init, normal_init
from ..utils import accuracy
from ..registry import HEADS
@HEADS.register_module
class ClsHead(nn.Module):
    """Simplest classifier head, with only one fc layer.

    Args:
        with_avg_pool (bool): whether to global-average-pool a 4-D feature
            map before the fc layer.
        in_channels (int): input feature dimension.
        num_classes (int): number of output classes.
    """

    def __init__(self,
                 with_avg_pool=False,
                 in_channels=2048,
                 num_classes=1000):
        super(ClsHead, self).__init__()
        self.with_avg_pool = with_avg_pool
        self.in_channels = in_channels
        self.num_classes = num_classes
        self.criterion = nn.CrossEntropyLoss()
        if with_avg_pool:
            self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc_cls = nn.Linear(in_channels, num_classes)

    def init_weights(self, init_linear='normal', std=0.01, bias=0.):
        assert init_linear in ['normal', 'kaiming'], \
            "Undefined init_linear: {}".format(init_linear)
        for module in self.modules():
            if isinstance(module, nn.Linear):
                if init_linear == 'normal':
                    normal_init(module, std=std, bias=bias)
                else:
                    kaiming_init(module, mode='fan_in', nonlinearity='relu')
            elif isinstance(module,
                            (nn.BatchNorm2d, nn.GroupNorm, nn.SyncBatchNorm)):
                if module.weight is not None:
                    nn.init.constant_(module.weight, 1)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        # x is a single-element list/tuple of features (backbone output)
        assert isinstance(x, (tuple, list)) and len(x) == 1
        feat = x[0]
        if self.with_avg_pool:
            assert feat.dim() == 4, \
                "Tensor must has 4 dims, got: {}".format(feat.dim())
            feat = self.avg_pool(feat)
        flat = feat.view(feat.size(0), -1)
        return [self.fc_cls(flat)]

    def loss(self, cls_score, labels):
        assert isinstance(cls_score, (tuple, list)) and len(cls_score) == 1
        losses = dict()
        losses['loss'] = self.criterion(cls_score[0], labels)
        losses['acc'] = accuracy(cls_score[0], labels)
        return losses
| 2,119 | 33.754098 | 78 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/heads/__init__.py | from .contrastive_head import ContrastiveHead
from .cls_head import ClsHead
from .latent_pred_head import LatentPredictHead
from .multi_cls_head import MultiClsHead
| 165 | 32.2 | 47 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/heads/multi_cls_head.py | import torch.nn as nn
from ..utils import accuracy
from ..registry import HEADS
from ..utils import build_norm_layer, MultiPooling
@HEADS.register_module
class MultiClsHead(nn.Module):
    """Multiple classifier heads.

    Attaches one linear classifier per backbone depth listed in
    ``in_indices``, each preceded by pooling (and optionally a norm layer);
    optionally adds one more classifier on the unpooled last feature map.
    """

    # channel count of each resnet50 output (stem + 4 stages)
    FEAT_CHANNELS = {'resnet50': [64, 256, 512, 1024, 2048]}
    # flattened size of the unpooled last feature map (2048 x 7 x 7)
    FEAT_LAST_UNPOOL = {'resnet50': 2048 * 7 * 7}

    def __init__(self,
                 pool_type='adaptive',
                 in_indices=(0, ),
                 with_last_layer_unpool=False,
                 backbone='resnet50',
                 norm_cfg=dict(type='BN'),
                 num_classes=1000):
        super(MultiClsHead, self).__init__()
        assert norm_cfg['type'] in ['BN', 'SyncBN', 'GN', 'null']
        self.with_last_layer_unpool = with_last_layer_unpool
        # 'null' disables per-depth normalization entirely
        self.with_norm = norm_cfg['type'] != 'null'

        self.criterion = nn.CrossEntropyLoss()

        self.multi_pooling = MultiPooling(pool_type, in_indices, backbone)

        if self.with_norm:
            self.norms = nn.ModuleList([
                build_norm_layer(norm_cfg, self.FEAT_CHANNELS[backbone][l])[1]
                for l in in_indices
            ])

        # one linear classifier per selected depth
        self.fcs = nn.ModuleList([
            nn.Linear(self.multi_pooling.POOL_DIMS[backbone][l], num_classes)
            for l in in_indices
        ])
        if with_last_layer_unpool:
            self.fcs.append(
                nn.Linear(self.FEAT_LAST_UNPOOL[backbone], num_classes))

    def init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m,
                            (nn.BatchNorm2d, nn.GroupNorm, nn.SyncBatchNorm)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def forward(self, x):
        # x: one feature map per entry of in_indices (plus the last feature
        # map at x[-1] when with_last_layer_unpool is set)
        assert isinstance(x, (list, tuple))
        if self.with_last_layer_unpool:
            last_x = x[-1]
        x = self.multi_pooling(x)
        if self.with_norm:
            x = [n(xx) for n, xx in zip(self.norms, x)]
        if self.with_last_layer_unpool:
            x.append(last_x)
        x = [xx.view(xx.size(0), -1) for xx in x]
        x = [fc(xx) for fc, xx in zip(self.fcs, x)]
        return x

    def loss(self, cls_score, labels):
        # one loss/accuracy entry per classifier, numbered from 1
        losses = dict()
        for i, s in enumerate(cls_score):
            # keys must contain "loss"
            losses['loss.{}'.format(i + 1)] = self.criterion(s, labels)
            losses['acc.{}'.format(i + 1)] = accuracy(s, labels)
        return losses
| 2,682 | 32.962025 | 78 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/heads/latent_pred_head.py | import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from ..registry import HEADS
from .. import builder
@HEADS.register_module
class LatentPredictHead(nn.Module):
    """Head that predicts target latents from input latents (BYOL-style).
    """

    def __init__(self, predictor, size_average=True):
        super(LatentPredictHead, self).__init__()
        # `predictor` is a neck config dict built into a module by the
        # project's builder (presumably an MLP; confirm against the config).
        self.predictor = builder.build_neck(predictor)
        self.size_average = size_average

    def init_weights(self, init_linear='normal'):
        self.predictor.init_weights(init_linear=init_linear)

    def forward(self, input, target):
        """Forward head.

        Args:
            input (Tensor): NxC input features.
            target (Tensor): NxC target features.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        pred = self.predictor([input])[0]
        pred_norm = nn.functional.normalize(pred, dim=1)
        target_norm = nn.functional.normalize(target, dim=1)
        # negative cosine similarity scaled by -2, summed over the batch
        # (equivalent to normalized MSE up to an additive constant)
        loss = -2 * (pred_norm * target_norm).sum()
        if self.size_average:
            loss /= input.size(0)
        return dict(loss=loss)
@HEADS.register_module
class LatentClsHead(nn.Module):
    """Head that classifies input latents against pseudo-labels derived
    from the target latents.
    """

    def __init__(self, predictor):
        super(LatentClsHead, self).__init__()
        # `predictor` is a config-like object exposing `in_channels` and
        # `num_classes`; a plain linear classifier is built from it.
        self.predictor = nn.Linear(predictor.in_channels,
                                   predictor.num_classes)
        self.criterion = nn.CrossEntropyLoss()

    def init_weights(self, init_linear='normal'):
        normal_init(self.predictor, std=0.01)

    def forward(self, input, target):
        """Forward head.

        Args:
            input (Tensor): NxC input features.
            target (Tensor): NxC target features.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        pred = self.predictor(input)
        with torch.no_grad():
            # pseudo-label: the class the same classifier assigns to the
            # (gradient-free) target features
            label = torch.argmax(self.predictor(target), dim=1).detach()
        return dict(loss=self.criterion(pred, label))
| 2,048 | 28.695652 | 72 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/base.py | from abc import ABCMeta, abstractmethod
import torch
from torch.utils.data import Dataset
from openselfsup.utils import print_log, build_from_cfg
from torchvision.transforms import Compose
from .registry import DATASETS, PIPELINES
from .builder import build_datasource
class BaseDataset(Dataset, metaclass=ABCMeta):
    """Base dataset.

    Args:
        data_source (dict): Data source defined in
            `openselfsup.datasets.data_sources`.
        pipeline (list[dict]): A list of dict, where each element represents
            an operation defined in `openselfsup.datasets.pipelines`.
        prefetch (bool): Flag stored for subclasses that decide how samples
            are converted to tensors.
    """

    def __init__(self, data_source, pipeline, prefetch=False):
        self.data_source = build_datasource(data_source)
        transforms = [build_from_cfg(cfg, PIPELINES) for cfg in pipeline]
        self.pipeline = Compose(transforms)
        self.prefetch = prefetch

    def __len__(self):
        return self.data_source.get_length()

    @abstractmethod
    def __getitem__(self, idx):
        pass

    @abstractmethod
    def evaluate(self, scores, keyword, logger=None, **kwargs):
        pass
| 1,105 | 26.65 | 76 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/classification.py | import torch
from openselfsup.utils import print_log
from .registry import DATASETS
from .base import BaseDataset
from .utils import to_numpy
@DATASETS.register_module
class ClassificationDataset(BaseDataset):
    """Dataset for classification.
    """

    def __init__(self, data_source, pipeline, prefetch=False):
        super(ClassificationDataset, self).__init__(data_source, pipeline,
                                                    prefetch)

    def __getitem__(self, idx):
        img, target = self.data_source.get_sample(idx)
        img = self.pipeline(img)
        if self.prefetch:
            img = torch.from_numpy(to_numpy(img))
        return dict(img=img, gt_label=target)

    def evaluate(self, scores, keyword, logger=None, topk=(1, 5)):
        """Compute top-k accuracies of `scores` against the source labels."""
        results = {}
        target = torch.LongTensor(self.data_source.labels)
        assert scores.size(0) == target.size(0), \
            "Inconsistent length for results and labels, {} vs {}".format(
                scores.size(0), target.size(0))
        total = scores.size(0)
        _, pred = scores.topk(max(topk), dim=1, largest=True, sorted=True)
        ranked = pred.t()
        # KxN boolean matrix: row k marks whether the (k+1)-th guess is right.
        hits = ranked.eq(target.view(1, -1).expand_as(ranked))
        for k in topk:
            num_hits = hits[:k].reshape(-1).float().sum(0).item()
            acc = num_hits * 100.0 / total
            results["{}_top{}".format(keyword, k)] = acc
            if logger is not None and logger != 'silent':
                print_log(
                    "{}_top{}: {:.03f}".format(keyword, k, acc),
                    logger=logger)
        return results
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/extraction.py | from .registry import DATASETS
from .base import BaseDataset
@DATASETS.register_module
class ExtractDataset(BaseDataset):
    """Dataset for feature extraction.

    Yields unlabeled images only; evaluation is intentionally unsupported.
    """

    def __init__(self, data_source, pipeline):
        super(ExtractDataset, self).__init__(data_source, pipeline)

    def __getitem__(self, idx):
        img = self.data_source.get_sample(idx)
        img = self.pipeline(img)
        return dict(img=img)

    def evaluate(self, scores, keyword, logger=None):
        # Fix: `raise NotImplemented` raises a TypeError at runtime because
        # NotImplemented is a constant, not an exception class.
        raise NotImplementedError
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/rotation_pred.py | import torch
from PIL import Image
from .registry import DATASETS
from .base import BaseDataset
def rotate(img):
    """Rotate input image with 0, 90, 180, and 270 degrees.

    Args:
        img (Tensor): input image of shape (C, H, W).

    Returns:
        list[Tensor]: A list of four rotated images.
    """
    rot90 = torch.flip(img.transpose(1, 2), [1])
    rot180 = torch.flip(img, [1, 2])
    rot270 = torch.flip(img, [1]).transpose(1, 2)
    return [img, rot90, rot180, rot270]
@DATASETS.register_module
class RotationPredDataset(BaseDataset):
    """Dataset for rotation prediction.

    Each sample is the same image stacked under four rotations
    (0/90/180/270 degrees) with rotation labels 0-3.
    """

    def __init__(self, data_source, pipeline):
        super(RotationPredDataset, self).__init__(data_source, pipeline)

    def __getitem__(self, idx):
        img = self.data_source.get_sample(idx)
        assert isinstance(img, Image.Image), \
            'The output from the data source must be an Image, got: {}. \
            Please ensure that the list file does not contain labels.'.format(
            type(img))
        img = self.pipeline(img)
        img = torch.stack(rotate(img), dim=0)
        rotation_labels = torch.LongTensor([0, 1, 2, 3])
        return dict(img=img, rot_label=rotation_labels)

    def evaluate(self, scores, keyword, logger=None):
        # Fix: `raise NotImplemented` raises a TypeError at runtime because
        # NotImplemented is a constant, not an exception class.
        raise NotImplementedError
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/deepcluster.py | from PIL import Image
from .registry import DATASETS
from .base import BaseDataset
@DATASETS.register_module
class DeepClusterDataset(BaseDataset):
    """Dataset for DC and ODC.

    Cluster assignments start at -1 and are overwritten each epoch via
    `assign_labels`.
    """

    def __init__(self, data_source, pipeline):
        super(DeepClusterDataset, self).__init__(data_source, pipeline)
        # init clustering labels
        self.labels = [-1 for _ in range(self.data_source.get_length())]

    def __getitem__(self, idx):
        img = self.data_source.get_sample(idx)
        assert isinstance(img, Image.Image), \
            'The output from the data source must be an Image, got: {}. \
            Please ensure that the list file does not contain labels.'.format(
            type(img))
        label = self.labels[idx]
        img = self.pipeline(img)
        return dict(img=img, pseudo_label=label, idx=idx)

    def assign_labels(self, labels):
        """Replace the stored pseudo labels with a fresh assignment."""
        # Fix: corrected typos in the assertion message ("lenght"/"asigned").
        assert len(self.labels) == len(labels), \
            "Inconsistent length of assigned labels, \
            {} vs {}".format(len(self.labels), len(labels))
        self.labels = labels[:]

    def evaluate(self, scores, keyword, logger=None):
        # Fix: `raise NotImplemented` raises a TypeError at runtime because
        # NotImplemented is a constant, not an exception class.
        raise NotImplementedError
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/relative_loc.py | from openselfsup.utils import build_from_cfg
import torch
from PIL import Image
from torchvision.transforms import Compose, RandomCrop
import torchvision.transforms.functional as TF
from .registry import DATASETS, PIPELINES
from .base import BaseDataset
def image_to_patches(img):
    """Crop split_per_side x split_per_side patches from input image.

    Args:
        img (PIL Image): input image.

    Returns:
        list[PIL Image]: A list of cropped patches.
    """
    split_per_side = 3  # split of patches per image side
    patch_jitter = 21  # jitter of each patch from each grid
    # Fix: PIL's Image.size is (width, height), not (height, width); the
    # original unpacking swapped the two, which mis-sized the grid for
    # non-square inputs (square inputs were unaffected).
    w, h = img.size
    h_grid = h // split_per_side
    w_grid = w // split_per_side
    h_patch = h_grid - patch_jitter
    w_patch = w_grid - patch_jitter
    assert h_patch > 0 and w_patch > 0
    patches = []
    for i in range(split_per_side):
        for j in range(split_per_side):
            # TF.crop takes (top, left, height, width).
            p = TF.crop(img, i * h_grid, j * w_grid, h_grid, w_grid)
            p = RandomCrop((h_patch, w_patch))(p)
            patches.append(p)
    return patches
@DATASETS.register_module
class RelativeLocDataset(BaseDataset):
    """Dataset for relative patch location.

    Crops 9 jittered patches per image and pairs each surrounding patch with
    the center one; labels 0-7 index the surrounding positions.
    """

    def __init__(self, data_source, pipeline, format_pipeline):
        super(RelativeLocDataset, self).__init__(data_source, pipeline)
        format_pipeline = [build_from_cfg(p, PIPELINES) for p in format_pipeline]
        self.format_pipeline = Compose(format_pipeline)

    def __getitem__(self, idx):
        img = self.data_source.get_sample(idx)
        assert isinstance(img, Image.Image), \
            'The output from the data source must be an Image, got: {}. \
            Please ensure that the list file does not contain labels.'.format(
            type(img))
        img = self.pipeline(img)
        patches = image_to_patches(img)
        patches = [self.format_pipeline(p) for p in patches]
        # create a list of patch pairs
        # Fix: replaced a list comprehension used only for its side effect
        # with an explicit loop.
        perms = []
        for i in range(9):
            if i != 4:
                perms.append(torch.cat((patches[i], patches[4]), dim=0))
        # create corresponding labels for patch pairs
        patch_labels = torch.LongTensor([0, 1, 2, 3, 4, 5, 6, 7])
        return dict(img=torch.stack(perms), patch_label=patch_labels)  # 8(2C)HW, 8

    def evaluate(self, scores, keyword, logger=None):
        # Fix: `raise NotImplemented` raises a TypeError at runtime because
        # NotImplemented is a constant, not an exception class.
        raise NotImplementedError
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/registry.py | from openselfsup.utils import Registry
# Global registries for the datasets package. Implementations attach
# themselves to the matching registry via its `register_module` decorator.
DATASOURCES = Registry('datasource')
DATASETS = Registry('dataset')
PIPELINES = Registry('pipeline')
| 141 | 22.666667 | 38 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/utils.py | import numpy as np
def to_numpy(pil_img):
    """Convert a PIL image (or array-like) into a uint8 CHW numpy array.

    Grayscale inputs gain a trailing channel axis before the transpose.
    """
    arr = np.array(pil_img, dtype=np.uint8)
    if arr.ndim < 3:
        arr = np.expand_dims(arr, axis=-1)
    return np.transpose(arr, (2, 0, 1))  # HWC to CHW
| 232 | 22.3 | 49 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/dataset_wrappers.py | import numpy as np
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
from .registry import DATASETS
@DATASETS.register_module
class ConcatDataset(_ConcatDataset):
    """A wrapper of concatenated dataset.

    Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but also
    concatenates the group flags (used for image aspect-ratio grouping).

    Args:
        datasets (list[:obj:`Dataset`]): A list of datasets.
    """

    def __init__(self, datasets):
        super(ConcatDataset, self).__init__(datasets)
        self.CLASSES = datasets[0].CLASSES
        if hasattr(datasets[0], 'flag'):
            self.flag = np.concatenate([ds.flag for ds in datasets])
@DATASETS.register_module
class RepeatDataset(object):
    """A wrapper of repeated dataset.

    The wrapped dataset appears `times` larger than the original; indexing
    wraps around modulo the original length. Useful when data loading time
    is long but the dataset is small, to reduce the overhead between epochs.

    Args:
        dataset (:obj:`Dataset`): The dataset to be repeated.
        times (int): Repeat times.
    """

    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times
        self.CLASSES = dataset.CLASSES
        if hasattr(self.dataset, 'flag'):
            self.flag = np.tile(self.dataset.flag, times)
        self._ori_len = len(self.dataset)

    def __getitem__(self, idx):
        return self.dataset[idx % self._ori_len]

    def __len__(self):
        return self._ori_len * self.times
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/npid.py | from PIL import Image
from .registry import DATASETS
from .base import BaseDataset
@DATASETS.register_module
class NPIDDataset(BaseDataset):
    """Dataset for NPID.

    Returns each augmented image together with its index (used as the
    instance-discrimination label).
    """

    def __init__(self, data_source, pipeline):
        super(NPIDDataset, self).__init__(data_source, pipeline)

    def __getitem__(self, idx):
        img = self.data_source.get_sample(idx)
        assert isinstance(img, Image.Image), \
            'The output from the data source must be an Image, got: {}. \
            Please ensure that the list file does not contain labels.'.format(
            type(img))
        img = self.pipeline(img)
        return dict(img=img, idx=idx)

    def evaluate(self, scores, keyword, logger=None):
        # Fix: `raise NotImplemented` raises a TypeError at runtime because
        # NotImplemented is a constant, not an exception class.
        raise NotImplementedError
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/byol.py | import torch
from torch.utils.data import Dataset
from openselfsup.utils import build_from_cfg
from torchvision.transforms import Compose
from .registry import DATASETS, PIPELINES
from .builder import build_datasource
from .utils import to_numpy
@DATASETS.register_module
class BYOLDataset(Dataset):
    """Dataset for BYOL: two independently augmented views per image.

    Args:
        data_source (dict): Config of the underlying data source.
        pipeline1 (list[dict]): Augmentation pipeline for the first view.
        pipeline2 (list[dict]): Augmentation pipeline for the second view.
        prefetch (bool): If True, convert pipeline outputs to uint8 tensors
            via `to_numpy` instead of relying on the pipeline's conversion.
    """

    def __init__(self, data_source, pipeline1, pipeline2, prefetch=False):
        self.data_source = build_datasource(data_source)
        pipeline1 = [build_from_cfg(p, PIPELINES) for p in pipeline1]
        self.pipeline1 = Compose(pipeline1)
        pipeline2 = [build_from_cfg(p, PIPELINES) for p in pipeline2]
        self.pipeline2 = Compose(pipeline2)
        self.prefetch = prefetch

    def __len__(self):
        return self.data_source.get_length()

    def __getitem__(self, idx):
        img = self.data_source.get_sample(idx)
        img1 = self.pipeline1(img)
        img2 = self.pipeline2(img)
        if self.prefetch:
            img1 = torch.from_numpy(to_numpy(img1))
            img2 = torch.from_numpy(to_numpy(img2))
        # Stack the two views along a new leading dim: 2xCxHxW.
        img_cat = torch.cat((img1.unsqueeze(0), img2.unsqueeze(0)), dim=0)
        return dict(img=img_cat)

    def evaluate(self, scores, keyword, logger=None, **kwargs):
        # Fix: `raise NotImplemented` raises a TypeError at runtime because
        # NotImplemented is a constant, not an exception class.
        raise NotImplementedError
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/__init__.py | from .builder import build_dataset
from .byol import BYOLDataset
from .data_sources import *
from .pipelines import *
from .classification import ClassificationDataset
from .deepcluster import DeepClusterDataset
from .extraction import ExtractDataset
from .npid import NPIDDataset
from .rotation_pred import RotationPredDataset
from .relative_loc import RelativeLocDataset
from .contrastive import ContrastiveDataset
from .dataset_wrappers import ConcatDataset, RepeatDataset
from .loader import DistributedGroupSampler, GroupSampler, build_dataloader
from .registry import DATASETS
| 583 | 37.933333 | 75 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/contrastive.py | import torch
from PIL import Image
from .registry import DATASETS
from .base import BaseDataset
from .utils import to_numpy
@DATASETS.register_module
class ContrastiveDataset(BaseDataset):
    """Dataset for contrastive learning methods that forward
    two views of the image at a time (MoCo, SimCLR).
    """

    def __init__(self, data_source, pipeline, prefetch=False):
        # Force the data source to return unlabeled images.
        data_source['return_label'] = False
        super(ContrastiveDataset, self).__init__(data_source, pipeline, prefetch)

    def __getitem__(self, idx):
        img = self.data_source.get_sample(idx)
        assert isinstance(img, Image.Image), \
            'The output from the data source must be an Image, got: {}. \
            Please ensure that the list file does not contain labels.'.format(
            type(img))
        img1 = self.pipeline(img)
        img2 = self.pipeline(img)
        if self.prefetch:
            img1 = torch.from_numpy(to_numpy(img1))
            img2 = torch.from_numpy(to_numpy(img2))
        # Stack the two views along a new leading dim: 2xCxHxW.
        img_cat = torch.cat((img1.unsqueeze(0), img2.unsqueeze(0)), dim=0)
        return dict(img=img_cat)

    def evaluate(self, scores, keyword, logger=None, **kwargs):
        # Fix: `raise NotImplemented` raises a TypeError at runtime because
        # NotImplemented is a constant, not an exception class.
        raise NotImplementedError
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/builder.py | import copy
from openselfsup.utils import build_from_cfg
from .dataset_wrappers import ConcatDataset, RepeatDataset
from .registry import DATASETS, DATASOURCES
def _concat_dataset(cfg, default_args=None):
    """Build one dataset per entry in cfg['ann_file'] and concatenate them.

    Per-index prefixes (img/seg/proposal) are applied when given as lists.
    """
    ann_files = cfg['ann_file']
    img_prefixes = cfg.get('img_prefix', None)
    seg_prefixes = cfg.get('seg_prefix', None)
    proposal_files = cfg.get('proposal_file', None)

    datasets = []
    for idx in range(len(ann_files)):
        sub_cfg = copy.deepcopy(cfg)
        sub_cfg['ann_file'] = ann_files[idx]
        if isinstance(img_prefixes, (list, tuple)):
            sub_cfg['img_prefix'] = img_prefixes[idx]
        if isinstance(seg_prefixes, (list, tuple)):
            sub_cfg['seg_prefix'] = seg_prefixes[idx]
        if isinstance(proposal_files, (list, tuple)):
            sub_cfg['proposal_file'] = proposal_files[idx]
        datasets.append(build_dataset(sub_cfg, default_args))

    return ConcatDataset(datasets)
def build_dataset(cfg, default_args=None):
    """Build a dataset from a config dict (or list of them).

    Lists become a ConcatDataset; type 'RepeatDataset' wraps its inner
    dataset; everything else is resolved through the DATASETS registry.
    """
    if isinstance(cfg, (list, tuple)):
        return ConcatDataset([build_dataset(c, default_args) for c in cfg])
    if cfg['type'] == 'RepeatDataset':
        return RepeatDataset(
            build_dataset(cfg['dataset'], default_args), cfg['times'])
    return build_from_cfg(cfg, DATASETS, default_args)
def build_datasource(cfg):
    """Build a data source object from its config via the DATASOURCES registry."""
    return build_from_cfg(cfg, DATASOURCES)
| 1,441 | 31.772727 | 78 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/data_sources/image_list.py | import os
from PIL import Image
from ..registry import DATASOURCES
from .utils import McLoader
@DATASOURCES.register_module
class ImageList(object):
    """Data source backed by a plain text list of image paths.

    Each line of `list_file` is either "<relative_path>" or
    "<relative_path> <int_label>"; the format is inferred from the first
    line. Images can optionally be fetched through memcached.
    """

    def __init__(self, root, list_file, memcached=False, mclient_path=None, return_label=True):
        with open(list_file, 'r') as list_f:
            entries = list_f.readlines()
        # Two whitespace-separated fields on the first line => labeled list.
        self.has_labels = len(entries[0].split()) == 2
        self.return_label = return_label
        if self.has_labels:
            pairs = [entry.strip().split() for entry in entries]
            self.fns, self.labels = zip(*pairs)
            self.labels = [int(lbl) for lbl in self.labels]
        else:
            # assert self.return_label is False
            self.fns = [entry.strip() for entry in entries]
        self.fns = [os.path.join(root, fn) for fn in self.fns]
        self.memcached = memcached
        self.mclient_path = mclient_path
        self.initialized = False

    def _init_memcached(self):
        # Lazily create the memcached client in the worker process.
        if not self.initialized:
            assert self.mclient_path is not None
            self.mc_loader = McLoader(self.mclient_path)
            self.initialized = True

    def get_length(self):
        return len(self.fns)

    def get_sample(self, idx):
        if self.memcached:
            self._init_memcached()
            img = self.mc_loader(self.fns[idx])
        else:
            img = Image.open(self.fns[idx])
        img = img.convert('RGB')
        if self.has_labels and self.return_label:
            return img, self.labels[idx]
        return img
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/data_sources/utils.py | import io
from PIL import Image
try:
import mc
except ImportError as E:
pass
def pil_loader(img_str):
    """Decode raw image bytes into a PIL Image."""
    return Image.open(io.BytesIO(img_str))
class McLoader(object):
    """Image loader backed by a memcached client.

    Args:
        mclient_path (str): Directory containing `server_list.conf` and
            `client.conf` for the memcached client.
    """

    def __init__(self, mclient_path):
        assert mclient_path is not None, \
            "Please specify 'data_mclient_path' in the config."
        self.mclient_path = mclient_path
        server_list_config_file = "{}/server_list.conf".format(
            self.mclient_path)
        client_config_file = "{}/client.conf".format(self.mclient_path)
        self.mclient = mc.MemcachedClient.GetInstance(server_list_config_file,
                                                      client_config_file)

    def __call__(self, fn):
        """Fetch the image stored under key `fn`; returns None on failure."""
        try:
            img_value = mc.pyvector()
            self.mclient.Get(fn, img_value)
            img_value_str = mc.ConvertBuffer(img_value)
            img = pil_loader(img_value_str)
        # Fix: narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt; the best-effort behavior (log and
        # return None) is preserved for ordinary errors.
        except Exception:
            print('Read image failed ({})'.format(fn))
            return None
        else:
            return img
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/data_sources/__init__.py | from .cifar import Cifar10, Cifar100
from .image_list import ImageList
from .imagenet import ImageNet
from .places205 import Places205
| 135 | 26.2 | 36 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/data_sources/cifar.py | from abc import ABCMeta, abstractmethod
from PIL import Image
from torchvision.datasets import CIFAR10, CIFAR100
from ..registry import DATASOURCES
class Cifar(metaclass=ABCMeta):
    """Abstract CIFAR data source.

    Subclasses implement `set_cifar` to attach a dataset object exposing
    `.data` (HWC RGB arrays) and `.targets` as `self.cifar`.
    """

    CLASSES = None

    def __init__(self, root, split, return_label=True):
        assert split in ['train', 'test']
        self.root = root
        self.split = split
        self.return_label = return_label
        self.cifar = None
        self.set_cifar()
        self.labels = self.cifar.targets

    @abstractmethod
    def set_cifar(self):
        pass

    def get_length(self):
        return len(self.cifar)

    def get_sample(self, idx):
        # img: HWC, RGB
        img = Image.fromarray(self.cifar.data[idx])
        if self.return_label:
            return img, self.labels[idx]
        return img
@DATASOURCES.register_module
class Cifar10(Cifar):
    """CIFAR-10 data source; expects the dataset already downloaded in `root`."""

    CLASSES = [
        'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog',
        'horse', 'ship', 'truck'
    ]

    def __init__(self, root, split, return_label=True):
        super().__init__(root, split, return_label)

    def set_cifar(self):
        try:
            self.cifar = CIFAR10(
                root=self.root, train=self.split == 'train', download=False)
        # Fix: narrowed from a bare `except:` (which also swallowed
        # SystemExit/KeyboardInterrupt) and chained the original cause so
        # the real failure is not hidden.
        except Exception as e:
            raise Exception("Please download CIFAR10 manually, \
                in case of downloading the dataset parallelly \
                that may corrupt the dataset.") from e
@DATASOURCES.register_module
class Cifar100(Cifar):
    """CIFAR-100 data source; expects the dataset already downloaded in `root`."""

    def __init__(self, root, split, return_label=True):
        super().__init__(root, split, return_label)

    def set_cifar(self):
        try:
            self.cifar = CIFAR100(
                root=self.root, train=self.split == 'train', download=False)
        # Fix: narrowed from a bare `except:` and chained the cause; the
        # message also wrongly said "CIFAR10" in this CIFAR-100 class.
        except Exception as e:
            raise Exception("Please download CIFAR100 manually, \
                in case of downloading the dataset parallelly \
                that may corrupt the dataset.") from e
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/data_sources/imagenet.py | from ..registry import DATASOURCES
from .image_list import ImageList
@DATASOURCES.register_module
class ImageNet(ImageList):
def __init__(self, root, list_file, memcached, mclient_path, return_label=True, *args, **kwargs):
super(ImageNet, self).__init__(
root, list_file, memcached, mclient_path, return_label)
| 338 | 29.818182 | 101 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/data_sources/places205.py | from ..registry import DATASOURCES
from .image_list import ImageList
@DATASOURCES.register_module
class Places205(ImageList):
def __init__(self, root, list_file, memcached, mclient_path, return_label=True, *args, **kwargs):
super(Places205, self).__init__(
root, list_file, memcached, mclient_path, return_label)
| 340 | 30 | 101 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/loader/sampler.py | from __future__ import division
import math
import numpy as np
import torch
from mmcv.runner import get_dist_info
from torch.utils.data import DistributedSampler as _DistributedSampler
from torch.utils.data import Sampler
class DistributedSampler(_DistributedSampler):
    """Distributed sampler with optional with-replacement shuffling and
    support for class-uniform index generation (used by DeepCluster/ODC).
    """

    def __init__(self,
                 dataset,
                 num_replicas=None,
                 rank=None,
                 shuffle=True,
                 replace=False):
        super().__init__(dataset, num_replicas=num_replicas, rank=rank)
        self.shuffle = shuffle
        self.replace = replace
        # Set by set_uniform_indices(); suppresses regular shuffling for
        # exactly one epoch.
        self.unif_sampling_flag = False

    def __iter__(self):
        # deterministically shuffle based on epoch
        if not self.unif_sampling_flag:
            self.generate_new_list()
        else:
            self.unif_sampling_flag = False
        return iter(self.indices[self.rank * self.num_samples:(self.rank + 1) *
                                 self.num_samples])

    def generate_new_list(self):
        """Build the per-epoch index list, seeded by self.epoch."""
        if self.shuffle:
            g = torch.Generator()
            g.manual_seed(self.epoch)
            if self.replace:
                indices = torch.randint(
                    low=0,
                    high=len(self.dataset),
                    size=(len(self.dataset), ),
                    generator=g).tolist()
            else:
                indices = torch.randperm(
                    len(self.dataset), generator=g).tolist()
        else:
            indices = torch.arange(len(self.dataset)).tolist()

        # add extra samples to make it evenly divisible
        indices += indices[:(self.total_size - len(indices))]
        assert len(indices) == self.total_size
        self.indices = indices

    def set_uniform_indices(self, labels, num_classes):
        """Resample indices so each class contributes roughly equally."""
        self.unif_sampling_flag = True
        assert self.shuffle, "Using uniform sampling, the indices must be shuffled."
        np.random.seed(self.epoch)
        assert (len(labels) == len(self.dataset))
        N = len(labels)
        size_per_label = int(N / num_classes) + 1
        indices = []
        images_lists = [[] for i in range(num_classes)]
        for i, l in enumerate(labels):
            images_lists[l].append(i)
        for i, l in enumerate(images_lists):
            if len(l) == 0:
                continue
            indices.extend(
                np.random.choice(
                    l, size_per_label, replace=(len(l) <= size_per_label)))
        indices = np.array(indices)
        np.random.shuffle(indices)
        # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented replacement (same behavior).
        indices = indices[:N].astype(int).tolist()

        # add extra samples to make it evenly divisible
        assert len(indices) <= self.total_size, \
            "{} vs {}".format(len(indices), self.total_size)
        indices += indices[:(self.total_size - len(indices))]
        assert len(indices) == self.total_size, \
            "{} vs {}".format(len(indices), self.total_size)
        self.indices = indices
class GroupSampler(Sampler):
    """Sampler that yields indices grouped by the dataset's `flag` array.

    Each group is shuffled independently and padded (by random re-draws)
    up to a multiple of `samples_per_gpu`; the resulting batches are then
    emitted in random order.
    """

    def __init__(self, dataset, samples_per_gpu=1):
        assert hasattr(dataset, 'flag')
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.flag = dataset.flag.astype(np.int64)
        self.group_sizes = np.bincount(self.flag)
        # Total length after padding every group up to a batch multiple.
        self.num_samples = 0
        for group_size in self.group_sizes:
            self.num_samples += int(
                np.ceil(group_size / self.samples_per_gpu)) * self.samples_per_gpu

    def __iter__(self):
        per_group = []
        for group_id, group_size in enumerate(self.group_sizes):
            if group_size == 0:
                continue
            members = np.where(self.flag == group_id)[0]
            assert len(members) == group_size
            np.random.shuffle(members)
            pad = int(np.ceil(group_size / self.samples_per_gpu)
                      ) * self.samples_per_gpu - len(members)
            members = np.concatenate(
                [members, np.random.choice(members, pad)])
            per_group.append(members)
        flat = np.concatenate(per_group)
        # Shuffle at batch granularity so each batch stays within one group.
        batches = [
            flat[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]
            for i in np.random.permutation(
                range(len(flat) // self.samples_per_gpu))
        ]
        flat = np.concatenate(batches)
        flat = flat.astype(np.int64).tolist()
        assert len(flat) == self.num_samples
        return iter(flat)

    def __len__(self):
        return self.num_samples
class DistributedGroupSampler(Sampler):
    """Sampler that restricts data loading to a subset of the dataset.

    It is especially useful in conjunction with
    :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
    process can pass a DistributedSampler instance as a DataLoader sampler,
    and load a subset of the original dataset that is exclusive to it.

    Indices are grouped by the dataset's `flag` array; every group is padded
    so that each rank receives whole batches drawn from a single group.

    .. note::
        Dataset is assumed to be of constant size.

    Arguments:
        dataset: Dataset used for sampling. Must expose a `flag` array.
        samples_per_gpu (int): Batch size per GPU; groups are padded to a
            multiple of it.
        num_replicas (optional): Number of processes participating in
            distributed training.
        rank (optional): Rank of the current process within num_replicas.
    """

    def __init__(self,
                 dataset,
                 samples_per_gpu=1,
                 num_replicas=None,
                 rank=None):
        _rank, _num_replicas = get_dist_info()
        if num_replicas is None:
            num_replicas = _num_replicas
        if rank is None:
            rank = _rank
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0

        assert hasattr(self.dataset, 'flag')
        self.flag = self.dataset.flag
        self.group_sizes = np.bincount(self.flag)

        # Per-rank sample count: every group is rounded up so that each rank
        # gets an integral number of batches from every group.
        self.num_samples = 0
        for i, j in enumerate(self.group_sizes):
            self.num_samples += int(
                math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /
                          self.num_replicas)) * self.samples_per_gpu
        self.total_size = self.num_samples * self.num_replicas

    def __iter__(self):
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch)

        indices = []
        for i, size in enumerate(self.group_sizes):
            if size > 0:
                indice = np.where(self.flag == i)[0]
                assert len(indice) == size
                # Shuffle this group's indices with the epoch-seeded generator.
                indice = indice[list(torch.randperm(int(size),
                                                    generator=g))].tolist()
                extra = int(
                    math.ceil(
                        size * 1.0 / self.samples_per_gpu / self.num_replicas)
                ) * self.samples_per_gpu * self.num_replicas - len(indice)
                # pad indice: repeat the shuffled group until `extra` slots
                # are filled, so the total is divisible across ranks/batches.
                tmp = indice.copy()
                for _ in range(extra // size):
                    indice.extend(tmp)
                indice.extend(tmp[:extra % size])
                indices.extend(indice)

        assert len(indices) == self.total_size

        # Reorder at batch granularity (keeps each batch within one group).
        indices = [
            indices[j] for i in list(
                torch.randperm(
                    len(indices) // self.samples_per_gpu, generator=g))
            for j in range(i * self.samples_per_gpu, (i + 1) *
                           self.samples_per_gpu)
        ]

        # subsample: take this rank's contiguous slice.
        offset = self.num_samples * self.rank
        indices = indices[offset:offset + self.num_samples]
        assert len(indices) == self.num_samples

        return iter(indices)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        # Changing the epoch reseeds the shuffle in the next __iter__.
        self.epoch = epoch
class DistributedGivenIterationSampler(Sampler):
    """Sampler yielding a fixed total of total_iter * batch_size samples per
    rank, resumable from `last_iter`.

    The full shuffled index list is generated identically on every rank
    (fixed seed 0) and each rank takes its own contiguous slice.
    """

    def __init__(self,
                 dataset,
                 total_iter,
                 batch_size,
                 num_replicas=None,
                 rank=None,
                 last_iter=-1):
        # NOTE(review): the num_replicas/rank parameters are ignored; the
        # values always come from get_dist_info() — confirm this is intended.
        rank, world_size = get_dist_info()
        assert rank < world_size
        self.dataset = dataset
        self.total_iter = total_iter
        self.batch_size = batch_size
        self.world_size = world_size
        self.rank = rank
        self.last_iter = last_iter

        self.total_size = self.total_iter * self.batch_size
        self.indices = self.gen_new_list()

    def __iter__(self):
        # Skip the portion already consumed before a resume.
        return iter(self.indices[(self.last_iter + 1) * self.batch_size:])

    def set_uniform_indices(self, labels, num_classes):
        """Regenerate indices so every class is (roughly) equally frequent."""
        np.random.seed(0)
        assert (len(labels) == len(self.dataset))
        N = len(labels)
        size_per_label = int(N / num_classes) + 1
        indices = []
        images_lists = [[] for i in range(num_classes)]
        for i, l in enumerate(labels):
            images_lists[l].append(i)
        for i, l in enumerate(images_lists):
            if len(l) == 0:
                continue
            indices.extend(
                np.random.choice(
                    l, size_per_label, replace=(len(l) <= size_per_label)))
        indices = np.array(indices)
        np.random.shuffle(indices)
        # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented replacement (same behavior).
        indices = indices[:N].astype(int)
        # repeat
        all_size = self.total_size * self.world_size
        indices = indices[:all_size]
        num_repeat = (all_size - 1) // indices.shape[0] + 1
        indices = np.tile(indices, num_repeat)
        indices = indices[:all_size]
        np.random.shuffle(indices)
        # slice
        beg = self.total_size * self.rank
        indices = indices[beg:beg + self.total_size]
        assert len(indices) == self.total_size
        # set
        self.indices = indices

    def gen_new_list(self):
        """Build this rank's index slice from a globally-seeded shuffle."""
        # each process shuffle all list with same seed, and pick one piece according to rank
        np.random.seed(0)
        all_size = self.total_size * self.world_size
        indices = np.arange(len(self.dataset))
        indices = indices[:all_size]
        num_repeat = (all_size - 1) // indices.shape[0] + 1
        indices = np.tile(indices, num_repeat)
        indices = indices[:all_size]
        np.random.shuffle(indices)
        beg = self.total_size * self.rank
        indices = indices[beg:beg + self.total_size]
        assert len(indices) == self.total_size
        return indices

    def __len__(self):
        # note here we do not take last iter into consideration, since __len__
        # should only be used for displaying, the correct remaining size is
        # handled by dataloader
        #return self.total_size - (self.last_iter+1)*self.batch_size
        return self.total_size

    def set_epoch(self, epoch):
        # Deliberately a no-op: the schedule is iteration-based and fixed
        # at construction time.
        pass
| 10,628 | 34.079208 | 92 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/loader/build_loader.py | import platform
import random
import torch
from functools import partial
import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from torch.utils.data import DataLoader
#from .sampler import DistributedGroupSampler, DistributedSampler, GroupSampler
from .sampler import DistributedSampler, DistributedGivenIterationSampler
from torch.utils.data import RandomSampler
if platform.system() != 'Windows':
    # https://github.com/pytorch/pytorch/issues/973
    # DataLoader workers communicate over file descriptors; raise the soft
    # RLIMIT_NOFILE to 4096 (hard limit unchanged) to avoid "too many open
    # files" errors. `resource` is POSIX-only, hence the platform guard.
    import resource
    rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
def build_dataloader(dataset,
                     imgs_per_gpu,
                     workers_per_gpu,
                     num_gpus=1,
                     dist=True,
                     shuffle=True,
                     replace=False,
                     seed=None,
                     **kwargs):
    """Build PyTorch DataLoader.

    In distributed training, each GPU/process has a dataloader.
    In non-distributed training, there is only one dataloader for all GPUs.

    Args:
        dataset (Dataset): A PyTorch dataset.
        imgs_per_gpu (int): Number of images on each GPU, i.e., batch size of
            each GPU.
        workers_per_gpu (int): How many subprocesses to use for data loading
            for each GPU.
        num_gpus (int): Number of GPUs. Only used in non-distributed training.
        dist (bool): Distributed training/test or not. Default: True.
        shuffle (bool): Whether to shuffle the data at every epoch.
            Default: True.
        replace (bool): Replace or not in random shuffle.
            It works on when shuffle is True.
        seed (int | None): When set, enables the worker init function.
        kwargs: any keyword argument to be used to initialize DataLoader.
            May include `prefetch` and `img_norm_cfg` to wrap the loader in
            a PrefetchLoader.

    Returns:
        DataLoader: A PyTorch dataloader.
    """
    if dist:
        rank, world_size = get_dist_info()
        sampler = DistributedSampler(
            dataset, world_size, rank, shuffle=shuffle, replace=replace)
        batch_size = imgs_per_gpu
        num_workers = workers_per_gpu
    else:
        if replace:
            # Fix: `raise NotImplemented` raised a TypeError because
            # NotImplemented is a constant, not an exception class.
            raise NotImplementedError
        sampler = RandomSampler(
            dataset) if shuffle else None  # TODO: set replace
        batch_size = num_gpus * imgs_per_gpu
        num_workers = num_gpus * workers_per_gpu

    if kwargs.get('prefetch') is not None:
        prefetch = kwargs.pop('prefetch')
        img_norm_cfg = kwargs.pop('img_norm_cfg')
    else:
        prefetch = False

    # NOTE(review): DataLoader calls worker_init_fn(worker_id), so workers
    # are effectively seeded with their worker id and `seed` itself is never
    # forwarded — confirm whether this is the intended behavior.
    data_loader = DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=imgs_per_gpu),
        pin_memory=False,
        worker_init_fn=worker_init_fn if seed is not None else None,
        **kwargs)

    if prefetch:
        data_loader = PrefetchLoader(data_loader, img_norm_cfg['mean'], img_norm_cfg['std'])

    return data_loader
def worker_init_fn(seed):
    """Seed numpy and the stdlib RNG inside a DataLoader worker.

    NOTE(review): torch's DataLoader calls worker_init_fn(worker_id), so the
    value received here is the worker id, not the `seed` passed to
    build_dataloader — confirm whether per-worker-id seeding is intended.
    """
    np.random.seed(seed)
    random.seed(seed)
class PrefetchLoader:
    """
    A data loader wrapper for prefetching data

    Wraps an existing DataLoader and overlaps the host-to-device copy (and
    mean/std normalization) of the next batch with work on the current one,
    using a dedicated CUDA stream. `mean`/`std` are given in [0, 1] and are
    scaled by 255 here, so inputs are expected in the 0-255 pixel range.
    """

    def __init__(self, loader, mean, std):
        self.loader = loader
        self._mean = mean
        self._std = std

    def __iter__(self):
        stream = torch.cuda.Stream()
        first = True
        # Per-channel stats shaped (1, 3, 1, 1) for broadcasting over NCHW.
        self.mean = torch.tensor([x * 255 for x in self._mean]).cuda().view(1, 3, 1, 1)
        self.std = torch.tensor([x * 255 for x in self._std]).cuda().view(1, 3, 1, 1)
        # One-batch lookahead: batch i is yielded while batch i+1 is copied
        # and normalized on the side stream. NOTE: `input` shadows the
        # builtin of the same name.
        # NOTE(review): an empty underlying loader would reach the trailing
        # `yield input` with `input` unbound — confirm loaders are never empty.
        for next_input_dict in self.loader:
            with torch.cuda.stream(stream):
                data = next_input_dict['img'].cuda(non_blocking=True)
                next_input_dict['img'] = data.float().sub_(self.mean).div_(self.std)
            if not first:
                yield input
            else:
                first = False
            # Default stream waits for the side-stream copy before the
            # just-prepared batch is handed out on the next iteration.
            torch.cuda.current_stream().wait_stream(stream)
            input = next_input_dict
        yield input

    def __len__(self):
        return len(self.loader)

    @property
    def sampler(self):
        return self.loader.sampler

    @property
    def dataset(self):
        return self.loader.dataset | 4,179 | 30.428571 | 92 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/loader/__init__.py | from .build_loader import build_dataloader
from .sampler import DistributedGroupSampler, GroupSampler, DistributedGivenIterationSampler
__all__ = [
'GroupSampler', 'DistributedGroupSampler', 'build_dataloader',
'DistributedGivenIterationSampler'
]
| 257 | 31.25 | 92 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/pipelines/__init__.py | from .transforms import *
| 26 | 12.5 | 25 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/pipelines/transforms.py | import cv2
import inspect
import numpy as np
from PIL import Image, ImageFilter
import torch
from torchvision import transforms as _transforms
from openselfsup.utils import build_from_cfg
from ..registry import PIPELINES
# register all existing transforms in torchvision
# GaussianBlur is excluded so that the custom GaussianBlur defined below
# (with a configurable sigma range) is the one registered under that name.
_EXCLUDED_TRANSFORMS = ['GaussianBlur']
for m in inspect.getmembers(_transforms, inspect.isclass):
    if m[0] not in _EXCLUDED_TRANSFORMS:
        PIPELINES.register_module(m[1])
@PIPELINES.register_module
class RandomAppliedTrans(object):
    """Apply a composed list of transformations at random with probability p.

    Args:
        transforms (list[dict]): Config dicts of the transformations to build.
        p (float): Probability of applying the composed transformations.
    """

    def __init__(self, transforms, p=0.5):
        built = [build_from_cfg(cfg, PIPELINES) for cfg in transforms]
        self.trans = _transforms.RandomApply(built, p=p)

    def __call__(self, img):
        return self.trans(img)

    def __repr__(self):
        return self.__class__.__name__
# custom transforms
@PIPELINES.register_module
class Lighting(object):
    """Lighting noise (AlexNet-style PCA-based noise).

    Adds a random linear combination of the ImageNet RGB principal
    components to every pixel: the per-channel scaling ``alpha`` is drawn
    from N(0, alphastd) and combined with the eigenvalues/eigenvectors
    below.

    Args:
        alphastd (float): Standard deviation of the normal distribution the
            per-channel scaling factors are sampled from. ``0`` disables the
            augmentation. Default: 0.1 (the value previously hard-coded).
    """
    _IMAGENET_PCA = {
        'eigval':
        torch.Tensor([0.2175, 0.0188, 0.0045]),
        'eigvec':
        torch.Tensor([
            [-0.5675, 0.7192, 0.4009],
            [-0.5808, -0.0045, -0.8140],
            [-0.5836, -0.6948, 0.4203],
        ])
    }
    def __init__(self, alphastd=0.1):
        self.alphastd = alphastd
        self.eigval = self._IMAGENET_PCA['eigval']
        self.eigvec = self._IMAGENET_PCA['eigvec']
    def __call__(self, img):
        """Apply lighting noise to a CHW image tensor and return the result."""
        assert isinstance(img, torch.Tensor), \
            "Expect torch.Tensor, got {}".format(type(img))
        if self.alphastd == 0:
            # Degenerate case: no noise requested.
            return img
        alpha = img.new().resize_(3).normal_(0, self.alphastd)
        # rgb[c] = sum_i eigvec[c, i] * alpha[i] * eigval[i]
        rgb = self.eigvec.type_as(img).clone()\
            .mul(alpha.view(1, 3).expand(3, 3))\
            .mul(self.eigval.view(1, 3).expand(3, 3))\
            .sum(1).squeeze()
        # Broadcast the per-channel offset over the spatial dimensions.
        return img.add(rgb.view(3, 1, 1).expand_as(img))
    def __repr__(self):
        repr_str = self.__class__.__name__
        return repr_str
@PIPELINES.register_module
class GaussianBlur(object):
    """Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709.

    The blur radius is sampled uniformly from [sigma_min, sigma_max] per call.
    """

    def __init__(self, sigma_min, sigma_max):
        self.sigma_min = sigma_min
        self.sigma_max = sigma_max

    def __call__(self, img):
        radius = np.random.uniform(self.sigma_min, self.sigma_max)
        return img.filter(ImageFilter.GaussianBlur(radius=radius))

    def __repr__(self):
        return self.__class__.__name__
@PIPELINES.register_module
class Solarization(object):
    """Solarization augmentation in BYOL https://arxiv.org/abs/2006.07733.

    Pixels below ``threshold`` are kept; pixels at or above it are inverted
    (replaced by ``255 - value``).
    """

    def __init__(self, threshold=128):
        self.threshold = threshold

    def __call__(self, img):
        arr = np.array(img)
        arr = np.where(arr < self.threshold, arr, 255 - arr)
        return Image.fromarray(arr.astype(np.uint8))

    def __repr__(self):
        return self.__class__.__name__
| 3,142 | 26.330435 | 80 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/hooks/optimizer_hook.py | from mmcv.runner import OptimizerHook
# apex is optional: it is only needed for mixed-precision training
# (``use_fp16=True`` in DistOptimizerHook below). Catch ImportError
# specifically instead of a bare ``except:``, which would also swallow
# SystemExit/KeyboardInterrupt and unrelated failures.
try:
    import apex
except ImportError:
    print('apex is not installed')
class DistOptimizerHook(OptimizerHook):
    """Optimizer hook for distributed training.

    Supports gradient accumulation over ``update_interval`` iterations and
    optional mixed-precision loss scaling through apex.
    """

    def __init__(self, update_interval=1, grad_clip=None, coalesce=True, bucket_size_mb=-1, use_fp16=False):
        self.update_interval = update_interval
        self.grad_clip = grad_clip
        self.coalesce = coalesce
        self.bucket_size_mb = bucket_size_mb
        self.use_fp16 = use_fp16

    def before_run(self, runner):
        # Start from clean gradients.
        runner.optimizer.zero_grad()

    def after_train_iter(self, runner):
        # Scale the loss so gradients accumulated over the interval average out.
        runner.outputs['loss'] /= self.update_interval
        loss = runner.outputs['loss']
        if self.use_fp16:
            with apex.amp.scale_loss(loss, runner.optimizer) as scaled:
                scaled.backward()
        else:
            loss.backward()
        # Only step (and reset) every `update_interval` iterations.
        if self.every_n_iters(runner, self.update_interval):
            if self.grad_clip is not None:
                self.clip_grads(runner.model.parameters())
            runner.optimizer.step()
            runner.optimizer.zero_grad()
| 1,125 | 34.1875 | 108 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/hooks/byol_hook.py | from math import cos, pi
from mmcv.runner import Hook
from mmcv.parallel import is_module_wrapper
from .registry import HOOKS
@HOOKS.register_module
class BYOLHook(Hook):
    """Hook for BYOL.

    Adjusts the target-network momentum on a cosine schedule:
        m = 1 - (1 - m_0) * (cos(pi * k / K) + 1) / 2,
    with k the current step and K the total number of steps.

    Args:
        end_momentum (float): The final momentum coefficient
            for the target network. Default: 1.
        update_interval (int): Apply the schedule/update every this
            many iterations. Default: 1.
    """

    def __init__(self, end_momentum=1., update_interval=1, **kwargs):
        self.end_momentum = end_momentum
        self.update_interval = update_interval

    def before_train_iter(self, runner):
        model = runner.model.module
        assert hasattr(model, 'momentum'), \
            "The runner must have attribute \"momentum\" in BYOLHook."
        assert hasattr(model, 'base_momentum'), \
            "The runner must have attribute \"base_momentum\" in BYOLHook."
        if not self.every_n_iters(runner, self.update_interval):
            return
        # Cosine schedule from base_momentum up to end_momentum.
        progress = runner.iter / float(runner.max_iters)
        gap = self.end_momentum - model.base_momentum
        model.momentum = self.end_momentum - gap * (cos(pi * progress) + 1) / 2

    def after_train_iter(self, runner):
        if self.every_n_iters(runner, self.update_interval):
            # Unwrap DDP-style wrappers before triggering the EMA update.
            if is_module_wrapper(runner.model):
                target = runner.model.module
            else:
                target = runner.model
            target.momentum_update()
| 1,590 | 35.159091 | 75 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/hooks/registry.py | from openselfsup.utils import Registry
HOOKS = Registry('hook')
| 65 | 15.5 | 38 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/hooks/extractor.py | import torch.nn as nn
from torch.utils.data import Dataset
from openselfsup.utils import nondist_forward_collect, dist_forward_collect
class Extractor(object):
    """Feature extractor.
    Args:
        dataset (Dataset | dict): A PyTorch dataset or dict that indicates
            the dataset.
        imgs_per_gpu (int): Number of images on each GPU, i.e., batch size of
            each GPU.
        workers_per_gpu (int): How many subprocesses to use for data loading
            for each GPU.
        dist_mode (bool): Use distributed extraction or not. Default: False.
    """
    def __init__(self,
                 dataset,
                 imgs_per_gpu,
                 workers_per_gpu,
                 dist_mode=False):
        # Deferred import, presumably to avoid a circular dependency between
        # openselfsup.hooks and openselfsup.datasets -- confirm before moving
        # it to module level.
        from openselfsup import datasets
        if isinstance(dataset, Dataset):
            self.dataset = dataset
        elif isinstance(dataset, dict):
            # Build the dataset from its config dict.
            self.dataset = datasets.build_dataset(dataset)
        else:
            raise TypeError(
                'dataset must be a Dataset object or a dict, not {}'.format(
                    type(dataset)))
        # shuffle=False keeps extraction order aligned with the dataset order.
        self.data_loader = datasets.build_dataloader(
            self.dataset,
            imgs_per_gpu,
            workers_per_gpu,
            dist=dist_mode,
            shuffle=False)
        self.dist_mode = dist_mode
        # NOTE(review): self.avg_pool is never used within this class --
        # confirm whether it can be removed or is relied on by subclasses.
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
    def _forward_func(self, runner, **x):
        # Run the backbone in extraction mode; it returns a list of stage
        # outputs, of which only the last is used here.
        backbone_feat = runner.model(mode='extract', **x)
        # Pass the last stage through the neck, then flatten to (N, D).
        last_layer_feat = runner.model.module.neck([backbone_feat[-1]])[0]
        last_layer_feat = last_layer_feat.view(last_layer_feat.size(0), -1)
        # Move to CPU so per-batch results can be collected across the dataset.
        return dict(feature=last_layer_feat.cpu())
    def __call__(self, runner):
        func = lambda **x: self._forward_func(runner, **x)
        if self.dist_mode:
            # Collect features across ranks; ret_rank=-1 presumably returns
            # the gathered result on every rank -- confirm in
            # dist_forward_collect.
            feats = dist_forward_collect(
                func,
                self.data_loader,
                runner.rank,
                len(self.dataset),
                ret_rank=-1)['feature']  # NxD
        else:
            feats = nondist_forward_collect(func, self.data_loader,
                                            len(self.dataset))['feature']
        return feats
| 2,196 | 34.435484 | 77 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/hooks/validate_hook.py | from mmcv.runner import Hook
import torch
from torch.utils.data import Dataset
from openselfsup.utils import nondist_forward_collect, dist_forward_collect
from .registry import HOOKS
@HOOKS.register_module
class ValidateHook(Hook):
    """Validation hook.
    Args:
        dataset (Dataset | dict): A PyTorch dataset or dict that indicates
            the dataset.
        dist_mode (bool): Use distributed evaluation or not. Default: True.
        initial (bool): Whether to evaluate before the training starts.
            Default: True.
        interval (int): Evaluation interval (by epochs). Default: 1.
        **eval_kwargs: Evaluation arguments fed into the evaluate function of
            the dataset.
    """
    def __init__(self,
                 dataset,
                 dist_mode=True,
                 initial=True,
                 interval=1,
                 **eval_kwargs):
        # Deferred import, presumably to avoid a circular dependency with
        # openselfsup.datasets -- confirm before moving to module level.
        from openselfsup import datasets
        if isinstance(dataset, Dataset):
            self.dataset = dataset
        elif isinstance(dataset, dict):
            # Build the dataset from its config dict.
            self.dataset = datasets.build_dataset(dataset)
        else:
            raise TypeError(
                'dataset must be a Dataset object or a dict, not {}'.format(
                    type(dataset)))
        # shuffle=False keeps result order aligned with the dataset order.
        self.data_loader = datasets.build_dataloader(
            self.dataset,
            eval_kwargs['imgs_per_gpu'],
            eval_kwargs['workers_per_gpu'],
            dist=dist_mode,
            shuffle=False,
            prefetch=eval_kwargs.get('prefetch', False),
            img_norm_cfg=eval_kwargs.get('img_norm_cfg', dict()),
        )
        self.dist_mode = dist_mode
        self.initial = initial
        self.interval = interval
        self.eval_kwargs = eval_kwargs
    def before_run(self, runner):
        # Optionally evaluate once before any training starts.
        if self.initial:
            self._run_validate(runner)
    def after_train_epoch(self, runner):
        # Evaluate every `interval` epochs.
        if not self.every_n_epochs(runner, self.interval):
            return
        self._run_validate(runner)
    def _run_validate(self, runner):
        # Run the model over the whole validation set, then score the results.
        runner.model.eval()
        func = lambda **x: runner.model(mode='test', **x)
        if self.dist_mode:
            results = dist_forward_collect(
                func, self.data_loader, runner.rank,
                len(self.dataset))  # dict{key: np.ndarray}
        else:
            results = nondist_forward_collect(func, self.data_loader,
                                              len(self.dataset))
        # Only rank 0 computes and logs the evaluation metrics.
        if runner.rank == 0:
            for name, val in results.items():
                self._evaluate(runner, torch.from_numpy(val), name)
        # Restore training mode for subsequent iterations.
        runner.model.train()
    def _evaluate(self, runner, results, keyword):
        # Delegate metric computation to the dataset's evaluate() and push
        # each metric into the runner's log buffer for the logger hooks.
        eval_res = self.dataset.evaluate(
            results,
            keyword=keyword,
            logger=runner.logger,
            **self.eval_kwargs['eval_param'])
        for name, val in eval_res.items():
            runner.log_buffer.output[name] = val
            runner.log_buffer.ready = True
| 3,003 | 33.528736 | 77 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/hooks/odc_hook.py | import numpy as np
from mmcv.runner import Hook
from openselfsup.utils import print_log
from .registry import HOOKS
@HOOKS.register_module
class ODCHook(Hook):
    """Hook for ODC.
    Args:
        centroids_update_interval (int): Frequency of iterations
            to update centroids.
        deal_with_small_clusters_interval (int): Frequency of iterations
            to deal with small clusters.
        evaluate_interval (int): Frequency of iterations to evaluate clusters.
        reweight (bool): Whether to perform loss re-weighting.
        reweight_pow (float): The power of re-weighting.
        dist_mode (bool): Use distributed training or not. Default: True.
    """
    def __init__(self,
                 centroids_update_interval,
                 deal_with_small_clusters_interval,
                 evaluate_interval,
                 reweight,
                 reweight_pow,
                 dist_mode=True):
        assert dist_mode, "non-dist mode is not implemented"
        self.centroids_update_interval = centroids_update_interval
        self.deal_with_small_clusters_interval = \
            deal_with_small_clusters_interval
        self.evaluate_interval = evaluate_interval
        # NOTE(review): `reweight` and `reweight_pow` are stored but never
        # read in this class -- presumably consumed by the model's
        # set_reweight(); confirm.
        self.reweight = reweight
        self.reweight_pow = reweight_pow
    def after_train_iter(self, runner):
        # centroids update
        if self.every_n_iters(runner, self.centroids_update_interval):
            runner.model.module.memory_bank.update_centroids_memory()
        # deal with small clusters
        if self.every_n_iters(runner, self.deal_with_small_clusters_interval):
            runner.model.module.memory_bank.deal_with_small_clusters()
        # reweight
        # NOTE(review): re-weighting runs every iteration regardless of the
        # `reweight` flag stored in __init__ -- confirm this is intended.
        runner.model.module.set_reweight()
        # evaluate
        if self.every_n_iters(runner, self.evaluate_interval):
            new_labels = runner.model.module.memory_bank.label_bank
            if new_labels.is_cuda:
                new_labels = new_labels.cpu()
            self.evaluate(runner, new_labels.numpy())
    def after_train_epoch(self, runner):
        # save cluster
        # Snapshot current cluster assignments every 10 epochs (rank 0 only)
        # so clustering progress can be inspected offline.
        if self.every_n_epochs(runner, 10) and runner.rank == 0:
            new_labels = runner.model.module.memory_bank.label_bank
            if new_labels.is_cuda:
                new_labels = new_labels.cpu()
            np.save(
                "{}/cluster_epoch_{}.npy".format(runner.work_dir,
                                                 runner.epoch),
                new_labels.numpy())
    def evaluate(self, runner, new_labels):
        # Histogram of cluster sizes over all classes in the memory bank.
        hist = np.bincount(
            new_labels, minlength=runner.model.module.memory_bank.num_classes)
        empty_cls = (hist == 0).sum()
        minimal_cls_size, maximal_cls_size = hist.min(), hist.max()
        if runner.rank == 0:
            # Log cluster-balance statistics: number of empty clusters and
            # the smallest/largest cluster sizes.
            print_log(
                "empty_num: {}\tmin_cluster: {}\tmax_cluster:{}".format(
                    empty_cls.item(), minimal_cls_size.item(),
                    maximal_cls_size.item()),
                logger='root')
| 3,020 | 36.7625 | 78 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.