Search is not available for this dataset
repo stringlengths 2 152 ⌀ | file stringlengths 15 239 | code stringlengths 0 58.4M | file_length int64 0 58.4M | avg_line_length float64 0 1.81M | max_line_length int64 0 12.7M | extension_type stringclasses 364 values |
|---|---|---|---|---|---|---|
null | OpenOOD-main/openood/networks/__init__.py | from .ash_net import ASHNet
from .densenet import DenseNet3
# from .mmcls_featext import ImageClassifierWithReturnFeature
from .resnet18_32x32 import ResNet18_32x32
from .resnet18_224x224 import ResNet18_224x224
from .resnet50 import ResNet50
from .utils import get_network
from .wrn import WideResNet
from .swin_t import Swin_T
from .vit_b_16 import ViT_B_16
| 360 | 31.818182 | 61 | py |
null | OpenOOD-main/openood/networks/arpl_net.py | ## reference code https://github.com/pytorch/examples/blob/master/dcgan/main.py
import operator
from collections import OrderedDict
from itertools import islice
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.functional as F
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules.utils import _ntuple
def weights_init(m):
    """DCGAN-style initializer, applied via ``module.apply(weights_init)``.

    Conv layers get weights ~ N(0, 0.02); BatchNorm layers get weights
    ~ N(1, 0.02) and zero bias. Every other module type is left untouched.
    """
    kind = type(m).__name__
    if 'Conv' in kind:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in kind:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
class _netD32(nn.Module):
    """DCGAN discriminator for 32x32 inputs (used by ARPL's GAN branch).

    Args:
        ngpu: number of GPUs; >1 triggers ``data_parallel`` in forward.
        nc: number of input image channels.
        ndf: base width; channel counts scale as ndf*2 .. ndf*16.
    """
    def __init__(self, ngpu, nc, ndf):
        super(_netD32, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input size. (nc) x 32 x 32
            nn.Conv2d(nc, ndf * 2, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 16 x 16
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 8 x 8
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 4 x 4
            nn.Conv2d(ndf * 8, ndf * 16, 4, 1, 0, bias=False),
            nn.Sigmoid())
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.classifier = nn.Sequential(nn.Linear(ndf * 16, 1), nn.Sigmoid())

    def forward(self, input):
        """Return a flat per-sample real/fake probability tensor of shape (B,)."""
        # data_parallel only when the input already lives on a CUDA device.
        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
            output = nn.parallel.data_parallel(self.main, input,
                                               range(self.ngpu))
        else:
            output = self.main(input)
        output = self.avgpool(output)
        output = torch.flatten(output, 1)
        output = self.classifier(output).flatten()
        return output
class _netG32(nn.Module):
    """DCGAN generator producing (nc) x 32 x 32 images from a latent z.

    Args:
        ngpu: number of GPUs; >1 triggers ``data_parallel`` in forward.
        nz: latent dimension (input is (nz) x 1 x 1).
        ngf: base width of the generator feature maps.
        nc: number of output image channels.

    Note: no output nonlinearity — the final Sigmoid is commented out.
    """
    def __init__(self, ngpu, nz, ngf, nc):
        super(_netG32, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2, nc, 4, 2, 1, bias=False),
            # nn.Sigmoid()
            # state size. (nc) x 32 x 32
        )

    def forward(self, input):
        """Map latent codes (B, nz, 1, 1) to images (B, nc, 32, 32)."""
        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
            output = nn.parallel.data_parallel(self.main, input,
                                               range(self.ngpu))
        else:
            output = self.main(input)
        return output
def Generator32(n_gpu, nz, ngf, nc):
    """Build a 32x32 DCGAN generator with the standard DCGAN weight init."""
    net = _netG32(n_gpu, nz, ngf, nc)
    net.apply(weights_init)
    return net
def Discriminator32(n_gpu, nc, ndf):
    """Build a 32x32 DCGAN discriminator with the standard DCGAN weight init."""
    net = _netD32(n_gpu, nc, ndf)
    net.apply(weights_init)
    return net
class _netD(nn.Module):
    """DCGAN discriminator (layer-for-layer identical to ``_netD32`` above).

    NOTE(review): this class duplicates ``_netD32`` exactly; the adaptive
    average pool lets it accept inputs larger than 32x32 as well.
    """
    def __init__(self, ngpu, nc, ndf):
        super(_netD, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input size. (nc) x 32 x 32
            nn.Conv2d(nc, ndf * 2, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 16 x 16
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 8 x 8
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 4 x 4
            nn.Conv2d(ndf * 8, ndf * 16, 4, 1, 0, bias=False),
            nn.Sigmoid())
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.classifier = nn.Sequential(nn.Linear(ndf * 16, 1), nn.Sigmoid())

    def forward(self, input):
        """Return a flat per-sample real/fake probability tensor of shape (B,)."""
        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
            output = nn.parallel.data_parallel(self.main, input,
                                               range(self.ngpu))
        else:
            output = self.main(input)
        output = self.avgpool(output)
        output = torch.flatten(output, 1)
        output = self.classifier(output).flatten()
        return output
class _netG(nn.Module):
    """DCGAN generator producing (nc) x 64 x 64 images from a latent z.

    Same as ``_netG32`` but with one extra upsampling stage (32 -> 64).
    No output nonlinearity — the final Sigmoid is commented out.
    """
    def __init__(self, ngpu, nz, ngf, nc):
        super(_netG, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            # state size. (nc) x 32 x 32
            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
            # nn.Sigmoid()
        )

    def forward(self, input):
        """Map latent codes (B, nz, 1, 1) to images (B, nc, 64, 64)."""
        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
            output = nn.parallel.data_parallel(self.main, input,
                                               range(self.ngpu))
        else:
            output = self.main(input)
        return output
def Generator(n_gpu, nz, ngf, nc):
    """Build a 64x64 DCGAN generator with the standard DCGAN weight init."""
    net = _netG(n_gpu, nz, ngf, nc)
    net.apply(weights_init)
    return net
def Discriminator(n_gpu, nc, ndf):
    """Build a DCGAN discriminator with the standard DCGAN weight init."""
    net = _netD(n_gpu, nc, ndf)
    net.apply(weights_init)
    return net
class _MultiBatchNorm(nn.Module):
    """Holds ``num_classes`` parallel BatchNorm2d layers, one per domain.

    ``forward`` dispatches the whole batch to a single BN selected by the
    first element of ``domain_label`` — the batch is assumed to be
    domain-homogeneous (all samples from the same domain).
    """
    _version = 2

    def __init__(self,
                 num_features,
                 num_classes,
                 eps=1e-5,
                 momentum=0.1,
                 affine=True,
                 track_running_stats=True):
        super(_MultiBatchNorm, self).__init__()
        # self.bns = nn.ModuleList([nn.modules.batchnorm._BatchNorm(
        #     num_features, eps, momentum, affine, track_running_stats)
        #     for _ in range(num_classes)])
        self.bns = nn.ModuleList([
            nn.BatchNorm2d(num_features, eps, momentum, affine,
                           track_running_stats) for _ in range(num_classes)
        ])

    def reset_running_stats(self):
        # Reset running mean/var of every per-domain BN.
        for bn in self.bns:
            bn.reset_running_stats()

    def reset_parameters(self):
        # Reset affine weights of every per-domain BN.
        for bn in self.bns:
            bn.reset_parameters()

    def _check_input_dim(self, input):
        # Subclasses pin the expected rank (see MultiBatchNorm below).
        raise NotImplementedError

    def forward(self, x, domain_label):
        """Normalize ``x`` with the BN indexed by ``domain_label[0]``.

        Returns ``(normalized_x, domain_label)`` so it composes with
        TwoInputSequential.
        """
        self._check_input_dim(x)
        bn = self.bns[domain_label[0]]
        return bn(x), domain_label
class MultiBatchNorm(_MultiBatchNorm):
    """Domain-conditional BatchNorm2d: accepts only NCHW (4-D) inputs."""

    def _check_input_dim(self, input):
        dims = input.dim()
        if dims != 4:
            raise ValueError('expected 4D input (got {}D input)'.format(dims))
# Broadcast scalar conv hyperparameters (kernel/stride/padding) to 2-tuples.
_pair = _ntuple(2)
# NOTE(review): resnet101ABN / resnet152ABN are exported here but their
# definitions are not visible in this file — confirm they exist.
__all__ = [
    'resnet18ABN', 'resnet34ABN', 'resnet50ABN', 'resnet101ABN', 'resnet152ABN'
]
# torchvision pretrained checkpoints for the plain ResNets; presumably used
# to initialize the ABN variants via _update_initial_weights_ABN.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """Return a 3x3 convolution with padding 1 and no bias."""
    return nn.Conv2d(in_planes, out_planes, 3,
                     stride=stride, padding=1, bias=False)
class Conv2d(_ConvNd):
    """Conv2d variant whose forward threads a ``domain_label`` through
    unchanged, so it can live inside a TwoInputSequential next to
    MultiBatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True):
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        # Positional _ConvNd args: transposed=False, output_padding=(0, 0).
        super(Conv2d, self).__init__(in_channels,
                                     out_channels,
                                     kernel_size,
                                     stride,
                                     padding,
                                     dilation,
                                     False,
                                     _pair(0),
                                     groups,
                                     bias,
                                     padding_mode='zeros')

    def forward(self, input, domain_label):
        """Apply the convolution; pass ``domain_label`` through untouched."""
        return F.conv2d(input, self.weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups), domain_label
class TwoInputSequential(nn.Module):
    r"""A sequential container forward with two inputs.

    Like ``nn.Sequential`` but every child receives and returns a pair
    ``(tensor, domain_label)``; supports the same indexing/slicing API.
    """
    def __init__(self, *args):
        super(TwoInputSequential, self).__init__()
        # Accept either a single OrderedDict of named modules or positional
        # modules (named by their index), mirroring nn.Sequential.
        if len(args) == 1 and isinstance(args[0], OrderedDict):
            for key, module in args[0].items():
                self.add_module(key, module)
        else:
            for idx, module in enumerate(args):
                self.add_module(str(idx), module)

    def _get_item_by_idx(self, iterator, idx):
        """Get the idx-th item of the iterator."""
        size = len(self)
        idx = operator.index(idx)
        if not -size <= idx < size:
            raise IndexError('index {} is out of range'.format(idx))
        idx %= size
        return next(islice(iterator, idx, None))

    def __getitem__(self, idx):
        # Slices return a new container; ints return the child module itself.
        if isinstance(idx, slice):
            return TwoInputSequential(
                OrderedDict(list(self._modules.items())[idx]))
        else:
            return self._get_item_by_idx(self._modules.values(), idx)

    def __setitem__(self, idx, module):
        key = self._get_item_by_idx(self._modules.keys(), idx)
        return setattr(self, key, module)

    def __delitem__(self, idx):
        if isinstance(idx, slice):
            for key in list(self._modules.keys())[idx]:
                delattr(self, key)
        else:
            key = self._get_item_by_idx(self._modules.keys(), idx)
            delattr(self, key)

    def __len__(self):
        return len(self._modules)

    def __dir__(self):
        # Hide numeric child names from dir(), as nn.Sequential does.
        keys = super(TwoInputSequential, self).__dir__()
        keys = [key for key in keys if not key.isdigit()]
        return keys

    def forward(self, input1, input2):
        """Chain children, threading the (tensor, domain_label) pair through."""
        for module in self._modules.values():
            input1, input2 = module(input1, input2)
        return input1, input2
def resnet18ABN(num_classes=10, num_bns=2):
    """ResNet-18 with per-domain (auxiliary) batch normalization."""
    blocks_per_stage = [2, 2, 2, 2]
    return ResNetABN(BasicBlock, blocks_per_stage,
                     num_classes=num_classes, num_bns=num_bns)
def resnet34ABN(num_classes=10, num_bns=2):
    """ResNet-34 with per-domain (auxiliary) batch normalization."""
    blocks_per_stage = [3, 4, 6, 3]
    return ResNetABN(BasicBlock, blocks_per_stage,
                     num_classes=num_classes, num_bns=num_bns)
def resnet50ABN(num_classes=10, num_bns=2):
    """ResNet-50 (Bottleneck blocks) with per-domain batch normalization."""
    blocks_per_stage = [3, 4, 6, 3]
    return ResNetABN(Bottleneck, blocks_per_stage,
                     num_classes=num_classes, num_bns=num_bns)
def _update_initial_weights_ABN(state_dict,
                                num_classes=1000,
                                num_bns=2,
                                ABN_type='all'):
    """Convert a plain-ResNet state dict into the ABN layout.

    Each matched BN parameter key (e.g. ``...bn1.weight``) is duplicated
    ``num_bns`` times under the MultiBatchNorm naming scheme
    (``...bn1.bns.{d}.weight``). Original keys are kept alongside the copies.
    ``fc`` weights are dropped when the class count differs from 1000.

    NOTE(review): the slicing (``key[0:-6]`` etc.) assumes exact suffix
    lengths of 'weight'/'bias'/'running_mean'/'running_var' — confirm the
    checkpoint key format before reuse.
    """
    new_state_dict = state_dict.copy()
    for key, val in state_dict.items():
        update_dict = False
        # 'all': duplicate every BN (incl. downsample BNs); 'partial-bn1':
        # duplicate only layers named bn1.
        if ((('bn' in key or 'downsample.1' in key) and ABN_type == 'all')
                or (('bn1' in key) and ABN_type == 'partial-bn1')):
            update_dict = True
        if (update_dict):
            if 'weight' in key:
                for d in range(num_bns):
                    new_state_dict[
                        key[0:-6] +
                        'bns.{}.weight'.format(d)] = val.data.clone()
            elif 'bias' in key:
                for d in range(num_bns):
                    new_state_dict[key[0:-4] +
                                   'bns.{}.bias'.format(d)] = val.data.clone()
            if 'running_mean' in key:
                for d in range(num_bns):
                    new_state_dict[
                        key[0:-12] +
                        'bns.{}.running_mean'.format(d)] = val.data.clone()
            if 'running_var' in key:
                for d in range(num_bns):
                    new_state_dict[
                        key[0:-11] +
                        'bns.{}.running_var'.format(d)] = val.data.clone()
            if 'num_batches_tracked' in key:
                for d in range(num_bns):
                    new_state_dict[key[0:-len('num_batches_tracked')] +
                                   'bns.{}.num_batches_tracked'.format(
                                       d)] = val.data.clone()
    # Drop the classifier head when it cannot be reused.
    if num_classes != 1000 or len(
        [key for key in new_state_dict.keys() if 'fc' in key]) > 1:
        key_list = list(new_state_dict.keys())
        for key in key_list:
            if 'fc' in key:
                print('pretrained {} are not used as initial params.'.format(
                    key))
                del new_state_dict[key]
    return new_state_dict
class ResNetABN(nn.Module):
    """CIFAR-style ResNet whose BN layers are MultiBatchNorm (one BN per
    domain); every stage threads a ``domain_label`` alongside the tensor.
    """
    def __init__(self, block, layers, num_classes=10, num_bns=2):
        self.inplanes = 64
        self.num_bns = num_bns
        self.num_classes = num_classes
        super(ResNetABN, self).__init__()
        # 3x3 stem (no 7x7/maxpool), suited to small input resolutions.
        self.conv1 = conv3x3(3, 64)
        self.bn1 = MultiBatchNorm(64, self.num_bns)
        self.layer1 = self._make_layer(block,
                                       64,
                                       layers[0],
                                       stride=1,
                                       num_bns=self.num_bns)
        self.layer2 = self._make_layer(block,
                                       128,
                                       layers[1],
                                       stride=2,
                                       num_bns=self.num_bns)
        self.layer3 = self._make_layer(block,
                                       256,
                                       layers[2],
                                       stride=2,
                                       num_bns=self.num_bns)
        self.layer4 = self._make_layer(block,
                                       512,
                                       layers[3],
                                       stride=2,
                                       num_bns=self.num_bns)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, blocks, stride=1, num_bns=2):
        """Build one residual stage; downsample path also uses MultiBatchNorm."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = TwoInputSequential(
                Conv2d(self.inplanes,
                       planes * block.expansion,
                       kernel_size=1,
                       stride=stride,
                       bias=False),
                MultiBatchNorm(planes * block.expansion, num_bns),
            )
        layers = []
        layers.append(
            block(self.inplanes, planes, stride, downsample, num_bns=num_bns))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, num_bns=num_bns))
        return TwoInputSequential(*layers)

    def forward(self, x, return_feature=False, domain_label=None):
        """Classify ``x``; optionally also return the pooled feature.

        NOTE(review): when ``domain_label`` is None a zero label is built
        with ``.cuda()`` — this hard-codes GPU execution.
        """
        if domain_label is None:
            domain_label = 0 * torch.ones(x.shape[0], dtype=torch.long).cuda()
        x = self.conv1(x)
        x, _ = self.bn1(x, domain_label)
        x = F.relu(x)
        x, _ = self.layer1(x, domain_label)
        x, _ = self.layer2(x, domain_label)
        x, _ = self.layer3(x, domain_label)
        x, _ = self.layer4(x, domain_label)
        x = self.avgpool(x)
        feat = x.view(x.size(0), -1)
        x = self.fc(feat)
        if return_feature:
            return x, feat
        else:
            return x
class BasicBlock(nn.Module):
    """Two-conv residual block with MultiBatchNorm; forward threads the
    ``domain_label`` so the block composes with TwoInputSequential.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, num_bns=2):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = MultiBatchNorm(planes, num_bns)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = MultiBatchNorm(planes, num_bns)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x, domain_label):
        residual = x
        out = self.conv1(x)
        out, _ = self.bn1(out, domain_label)
        out = F.relu(out)
        out = self.conv2(out)
        out, _ = self.bn2(out, domain_label)
        # Project the identity when shape changes (stride/width mismatch).
        if self.downsample is not None:
            residual, _ = self.downsample(x, domain_label)
        out += residual
        out = F.relu(out)
        return out, domain_label
class Bottleneck(nn.Module):
    """1x1-3x3-1x1 bottleneck residual block with MultiBatchNorm; threads
    the ``domain_label`` through like BasicBlock.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, num_bns=2):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = MultiBatchNorm(planes, num_bns)
        self.conv2 = nn.Conv2d(planes,
                               planes,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               bias=False)
        self.bn2 = MultiBatchNorm(planes, num_bns)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = MultiBatchNorm(planes * 4, num_bns)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x, domain_label):
        residual = x
        out = self.conv1(x)
        out, _ = self.bn1(out, domain_label)
        out = self.relu(out)
        out = self.conv2(out)
        out, _ = self.bn2(out, domain_label)
        out = self.relu(out)
        out = self.conv3(out)
        out, _ = self.bn3(out, domain_label)
        # Project the identity when shape changes (stride/width mismatch).
        if self.downsample is not None:
            residual, _ = self.downsample(x, domain_label)
        out += residual
        out = self.relu(out)
        return out, domain_label
class Dist(nn.Module):
    """Distance head used by ARPL: scores features against learned class
    centers (reciprocal points).

    Args:
        num_classes: number of classes.
        num_centers: centers per class; per-class scores are averaged.
        feat_dim: feature dimensionality.
        init: 'random' -> centers ~ 0.1*N(0,1); anything else -> zeros.
    """
    def __init__(self,
                 num_classes=10,
                 num_centers=1,
                 feat_dim=2,
                 init='random'):
        super(Dist, self).__init__()
        self.feat_dim = feat_dim
        self.num_classes = num_classes
        self.num_centers = num_centers
        if init == 'random':
            self.centers = nn.Parameter(
                0.1 * torch.randn(num_classes * num_centers, self.feat_dim))
        else:
            self.centers = nn.Parameter(
                torch.Tensor(num_classes * num_centers, self.feat_dim))
            self.centers.data.fill_(0)

    def forward(self, features, center=None, metric='l2'):
        """Return per-class scores of shape (B, num_classes).

        Args:
            features: (B, feat_dim) feature batch.
            center: optional (num_classes*num_centers, feat_dim) centers;
                defaults to the learned ``self.centers``.
            metric: 'l2' -> mean squared distance (normalized by feat_dim);
                anything else -> dot product.
        """
        # Unify the two center sources up front; the original code duplicated
        # the l2 computation (and had a dead `center = center` branch).
        if center is None:
            center = self.centers
        if metric == 'l2':
            # Expanded ||f - c||^2 = ||f||^2 - 2 f.c + ||c||^2, then divided
            # by feat_dim (mean squared distance per dimension).
            f_2 = torch.sum(torch.pow(features, 2), dim=1, keepdim=True)
            c_2 = torch.sum(torch.pow(center, 2), dim=1, keepdim=True)
            dist = f_2 - 2 * torch.matmul(
                features, torch.transpose(center, 1, 0)) + torch.transpose(
                    c_2, 1, 0)
            dist = dist / float(features.shape[1])
        else:
            dist = features.matmul(center.t())
        # Average over the per-class centers.
        dist = torch.reshape(dist, [-1, self.num_classes, self.num_centers])
        dist = torch.mean(dist, dim=2)
        return dist
class ARPLayer(nn.Module):
    """Adversarial Reciprocal Points Learning (ARPL) classification head.

    Logits are (l2 distance - dot product) between features and the
    reciprocal points held by ``self.Dist``; training adds a margin loss
    pulling known-class distances inside a learned radius.
    """
    def __init__(self, feat_dim=2, num_classes=10, weight_pl=0.1, temp=1.0):
        super(ARPLayer, self).__init__()
        self.weight_pl = weight_pl  # weight of the radius/margin term
        self.temp = temp            # softmax temperature for CE
        self.Dist = Dist(num_classes, feat_dim=feat_dim)
        self.points = self.Dist.centers
        self.radius = nn.Parameter(torch.Tensor(1))
        self.radius.data.fill_(0)
        self.margin_loss = nn.MarginRankingLoss(margin=1.0)

    def forward(self, x, labels=None):
        """Return logits, or (logits, loss) when ``labels`` is given.

        NOTE(review): the loss branch builds ``target`` with ``.cuda()``,
        hard-coding GPU execution.
        """
        dist_dot_p = self.Dist(x, center=self.points, metric='dot')
        dist_l2_p = self.Dist(x, center=self.points)
        logits = dist_l2_p - dist_dot_p
        if labels is None: return logits
        loss = F.cross_entropy(logits / self.temp, labels)
        # Mean squared distance of each sample to its own class point.
        center_batch = self.points[labels, :]
        _dis_known = (x - center_batch).pow(2).mean(1)
        target = torch.ones(_dis_known.size()).cuda()
        loss_r = self.margin_loss(self.radius, _dis_known, target)
        loss = loss + self.weight_pl * loss_r
        return logits, loss

    def fake_loss(self, x):
        """Entropy-style loss for generated (fake) samples: exp of the mean
        negative entropy of the l2-based softmax."""
        logits = self.Dist(x, center=self.points)
        prob = F.softmax(logits, dim=1)
        loss = (prob * torch.log(prob)).sum(1).mean().exp()
        return loss
| 22,297 | 33.410494 | 79 | py |
null | OpenOOD-main/openood/networks/ash_net.py | import numpy as np
import torch
import torch.nn as nn
class ASHNet(nn.Module):
    """Wrapper that applies ASH (activation shaping) to a backbone's
    penultimate features at inference time."""
    def __init__(self, backbone):
        super(ASHNet, self).__init__()
        self.backbone = backbone

    def forward(self, x, return_feature=False, return_feature_list=False):
        """Delegate to the backbone; fall back to the 2-arg signature for
        backbones that do not accept ``return_feature_list``."""
        try:
            return self.backbone(x, return_feature, return_feature_list)
        except TypeError:
            return self.backbone(x, return_feature)

    def forward_threshold(self, x, percentile):
        """Classify from ASH-B-shaped features.

        The flat feature is viewed as (B, C, 1, 1) because ash_b expects a
        4-D tensor, then flattened back before the classifier.
        """
        _, feature = self.backbone(x, return_feature=True)
        feature = ash_b(feature.view(feature.size(0), -1, 1, 1), percentile)
        feature = feature.view(feature.size(0), -1)
        # NOTE(review): relies on the backbone exposing get_fc_layer();
        # not all backbones in this package define it — confirm per backbone.
        logits_cls = self.backbone.get_fc_layer()(feature)
        return logits_cls

    def get_fc(self):
        """Return the backbone classifier's (weight, bias) as numpy arrays."""
        fc = self.backbone.fc
        return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy()
def ash_b(x, percentile=65):
    """ASH-B (binarize): zero all but the top-k activations per sample and
    set each survivor to s1/k, where s1 is the sample's pre-shaping sum.

    Operates in place on ``x`` (4-D, NCHW) and returns it.
    """
    assert x.dim() == 4
    assert 0 <= percentile <= 100
    batch = x.shape[0]
    # Per-sample activation sum before shaping.
    per_sample_sum = x.sum(dim=[1, 2, 3])
    n_feat = x.shape[1:].numel()
    keep = n_feat - int(np.round(n_feat * percentile / 100.0))
    flat = x.view(batch, -1)
    _, top_idx = torch.topk(flat, keep, dim=1)
    # Every surviving position receives the same share of the original mass.
    share = (per_sample_sum / keep).unsqueeze(dim=1).expand(batch, keep)
    flat.zero_().scatter_(dim=1, index=top_idx, src=share)
    return x
def ash_p(x, percentile=65):
    """ASH-P (prune): keep only the top-k activations per sample, zero the
    rest. Operates in place on ``x`` (4-D, NCHW) and returns it.
    """
    assert x.dim() == 4
    assert 0 <= percentile <= 100
    batch = x.shape[0]
    n_feat = x.shape[1:].numel()
    keep = n_feat - int(np.round(n_feat * percentile / 100.0))
    flat = x.view(batch, -1)
    top_val, top_idx = torch.topk(flat, keep, dim=1)
    flat.zero_().scatter_(dim=1, index=top_idx, src=top_val)
    return x
def ash_s(x, percentile=65):
    """ASH-S (scale): prune to the top-k activations per sample (in place),
    then rescale the survivors by exp(s1/s2), where s1/s2 are the per-sample
    sums before/after pruning. The final scaling is out of place, so the
    returned tensor is a new one.
    """
    assert x.dim() == 4
    assert 0 <= percentile <= 100
    batch = x.shape[0]
    # Per-sample activation sum before pruning.
    sum_before = x.sum(dim=[1, 2, 3])
    n_feat = x.shape[1:].numel()
    keep = n_feat - int(np.round(n_feat * percentile / 100.0))
    flat = x.view(batch, -1)
    top_val, top_idx = torch.topk(flat, keep, dim=1)
    flat.zero_().scatter_(dim=1, index=top_idx, src=top_val)
    # Per-sample activation sum after pruning; sharpen to restore energy.
    sum_after = x.sum(dim=[1, 2, 3])
    scale = sum_before / sum_after
    return x * torch.exp(scale[:, None, None, None])
def ash_rand(x, percentile=65, r1=0, r2=10):
    """ASH-RAND: keep the top-k positions per sample but overwrite their
    values with uniform noise from [r1, r2]. Operates in place on ``x``
    (4-D, NCHW) and returns it.
    """
    assert x.dim() == 4
    assert 0 <= percentile <= 100
    batch = x.shape[0]
    n_feat = x.shape[1:].numel()
    keep = n_feat - int(np.round(n_feat * percentile / 100.0))
    flat = x.view(batch, -1)
    top_val, top_idx = torch.topk(flat, keep, dim=1)
    top_val = top_val.uniform_(r1, r2)
    flat.zero_().scatter_(dim=1, index=top_idx, src=top_val)
    return x
| 2,614 | 25.958763 | 79 | py |
null | OpenOOD-main/openood/networks/bit.py | """Bottleneck ResNet v2 with GroupNorm and Weight Standardization."""
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
class Reshape(nn.Module):
    """Module wrapper around ``Tensor.view`` with a shape fixed at
    construction time, so a reshape can sit inside an nn.Sequential."""

    def __init__(self, *shape):
        super(Reshape, self).__init__()
        self.shape = shape

    def forward(self, x):
        return x.view(self.shape)
class StdConv2d(nn.Conv2d):
    """Conv2d with Weight Standardization: before each convolution the
    weights are normalized to zero mean and (biased) unit variance per
    output channel, with a 1e-10 epsilon for numerical stability."""

    def forward(self, x):
        weight = self.weight
        var, mean = torch.var_mean(weight,
                                   dim=[1, 2, 3],
                                   keepdim=True,
                                   unbiased=False)
        standardized = (weight - mean) / torch.sqrt(var + 1e-10)
        return F.conv2d(x, standardized, self.bias, self.stride, self.padding,
                        self.dilation, self.groups)
def conv3x3(cin, cout, stride=1, groups=1, bias=False):
    """3x3 weight-standardized convolution with padding 1."""
    return StdConv2d(cin, cout, kernel_size=3, stride=stride,
                     padding=1, bias=bias, groups=groups)
def conv1x1(cin, cout, stride=1, bias=False):
    """1x1 weight-standardized convolution (no padding)."""
    return StdConv2d(cin, cout, kernel_size=1, stride=stride,
                     padding=0, bias=bias)
def tf2th(conv_weights):
    """Convert TF checkpoint weights to a torch tensor.

    4-D conv kernels are transposed from TF's HWIO layout to PyTorch's
    OIHW; everything else is wrapped unchanged.
    """
    arr = conv_weights
    if arr.ndim == 4:
        # (H, W, In, Out) -> (Out, In, H, W)
        arr = arr.transpose([3, 2, 0, 1])
    return torch.from_numpy(arr)
class PreActBottleneck(nn.Module):
    """Pre-activation (v2) bottleneck block.
    Follows the implementation of
    "Identity Mappings in Deep Residual Networks":
    https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua
    Except it puts the stride on 3x3 conv when available.

    GroupNorm(32) replaces BatchNorm; convs are weight-standardized
    (StdConv2d), matching the BiT recipe.
    """
    def __init__(self, cin, cout=None, cmid=None, stride=1):
        super().__init__()
        cout = cout or cin
        cmid = cmid or cout // 4
        self.gn1 = nn.GroupNorm(32, cin)
        self.conv1 = conv1x1(cin, cmid)
        self.gn2 = nn.GroupNorm(32, cmid)
        self.conv2 = conv3x3(cmid, cmid,
                             stride)  # Original code has it on conv1!!
        self.gn3 = nn.GroupNorm(32, cmid)
        self.conv3 = conv1x1(cmid, cout)
        self.relu = nn.ReLU(inplace=True)
        if (stride != 1 or cin != cout):
            # Projection also with pre-activation according to paper.
            self.downsample = conv1x1(cin, cout, stride)

    def forward(self, x):
        # Pre-activation: norm+relu comes before the convs and is shared
        # with the projection shortcut.
        out = self.relu(self.gn1(x))
        # Residual branch
        residual = x
        if hasattr(self, 'downsample'):
            residual = self.downsample(out)
        # Unit's branch
        out = self.conv1(out)
        out = self.conv2(self.relu(self.gn2(out)))
        out = self.conv3(self.relu(self.gn3(out)))
        return out + residual

    def load_from(self, weights, prefix=''):
        """Copy this unit's parameters from a BiT TF checkpoint dict.

        Keys follow the official BiT naming; tf2th converts HWIO -> OIHW.
        """
        convname = 'standardized_conv2d'
        with torch.no_grad():
            self.conv1.weight.copy_(
                tf2th(weights[f'{prefix}a/{convname}/kernel']))
            self.conv2.weight.copy_(
                tf2th(weights[f'{prefix}b/{convname}/kernel']))
            self.conv3.weight.copy_(
                tf2th(weights[f'{prefix}c/{convname}/kernel']))
            self.gn1.weight.copy_(tf2th(
                weights[f'{prefix}a/group_norm/gamma']))
            self.gn2.weight.copy_(tf2th(
                weights[f'{prefix}b/group_norm/gamma']))
            self.gn3.weight.copy_(tf2th(
                weights[f'{prefix}c/group_norm/gamma']))
            self.gn1.bias.copy_(tf2th(weights[f'{prefix}a/group_norm/beta']))
            self.gn2.bias.copy_(tf2th(weights[f'{prefix}b/group_norm/beta']))
            self.gn3.bias.copy_(tf2th(weights[f'{prefix}c/group_norm/beta']))
            if hasattr(self, 'downsample'):
                w = weights[f'{prefix}a/proj/{convname}/kernel']
                self.downsample.weight.copy_(tf2th(w))
class ResNetV2(nn.Module):
    """Implementation of Pre-activation (v2) ResNet mode.

    BiT-style ResNet: StdConv + GroupNorm, four stages of PreActBottleneck
    units. ``num_block_open`` controls fine-tuning: it freezes (requires_grad
    = False, eval-mode BN-free stats) everything except the last N stages;
    -1 leaves the whole network trainable.
    """
    def __init__(self,
                 block_units,
                 width_factor,
                 head_size=1000,
                 zero_head=False,
                 num_block_open=-1):
        super().__init__()
        self.zero_head = zero_head
        wf = width_factor  # shortcut 'cause we'll use it a lot.
        # Translate num_block_open into the set of frozen parts; fix_gn1 is
        # the stage whose first unit's gn1 is additionally frozen (it
        # normalizes features produced by the preceding frozen stage).
        if num_block_open == -1:
            self.fix_parts = []
            self.fix_gn1 = None
        elif num_block_open == 0:
            self.fix_parts = [
                'root', 'block1', 'block2', 'block3', 'block4', 'before_head'
            ]
            self.fix_gn1 = None
        elif num_block_open == 1:
            self.fix_parts = ['root', 'block1', 'block2', 'block3']
            self.fix_gn1 = 'block4'
        elif num_block_open == 2:
            self.fix_parts = ['root', 'block1', 'block2']
            self.fix_gn1 = 'block3'
        elif num_block_open == 3:
            self.fix_parts = ['root', 'block1']
            self.fix_gn1 = 'block2'
        elif num_block_open == 4:
            self.fix_parts = ['root']
            self.fix_gn1 = 'block1'
        else:
            raise ValueError(
                'Unexpected block number {}'.format(num_block_open))
        self.root = nn.Sequential(
            OrderedDict([
                ('conv',
                 StdConv2d(3,
                           64 * wf,
                           kernel_size=7,
                           stride=2,
                           padding=3,
                           bias=False)),
                ('pad', nn.ConstantPad2d(1, 0)),
                ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0)),
                # The following is subtly not the same!
                # ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
            ]))
        self.body = nn.Sequential(
            OrderedDict([
                ('block1',
                 nn.Sequential(
                     OrderedDict(
                         [('unit01',
                           PreActBottleneck(
                               cin=64 * wf, cout=256 * wf, cmid=64 * wf))] +
                         [(f'unit{i:02d}',
                           PreActBottleneck(
                               cin=256 * wf, cout=256 * wf, cmid=64 * wf))
                          for i in range(2, block_units[0] + 1)], ))),
                ('block2',
                 nn.Sequential(
                     OrderedDict(
                         [('unit01',
                           PreActBottleneck(cin=256 * wf,
                                            cout=512 * wf,
                                            cmid=128 * wf,
                                            stride=2))] +
                         [(f'unit{i:02d}',
                           PreActBottleneck(
                               cin=512 * wf, cout=512 * wf, cmid=128 * wf))
                          for i in range(2, block_units[1] + 1)], ))),
                ('block3',
                 nn.Sequential(
                     OrderedDict(
                         [('unit01',
                           PreActBottleneck(cin=512 * wf,
                                            cout=1024 * wf,
                                            cmid=256 * wf,
                                            stride=2))] +
                         [(f'unit{i:02d}',
                           PreActBottleneck(
                               cin=1024 * wf, cout=1024 * wf, cmid=256 * wf))
                          for i in range(2, block_units[2] + 1)], ))),
                ('block4',
                 nn.Sequential(
                     OrderedDict(
                         [('unit01',
                           PreActBottleneck(cin=1024 * wf,
                                            cout=2048 * wf,
                                            cmid=512 * wf,
                                            stride=2))] +
                         [(f'unit{i:02d}',
                           PreActBottleneck(
                               cin=2048 * wf, cout=2048 * wf, cmid=512 * wf))
                          for i in range(2, block_units[3] + 1)], ))),
            ]))
        self.before_head = nn.Sequential(
            OrderedDict([
                ('gn', nn.GroupNorm(32, 2048 * wf)),
                ('relu', nn.ReLU(inplace=True)),
                ('avg', nn.AdaptiveAvgPool2d(output_size=1)),
            ]))
        # 1x1 conv classifier head; output is (B, head_size, 1, 1).
        self.head = nn.Sequential(
            OrderedDict([
                ('conv',
                 nn.Conv2d(2048 * wf, head_size, kernel_size=1, bias=True)),
            ]))
        # Freeze the requested parts.
        if 'root' in self.fix_parts:
            for param in self.root.parameters():
                param.requires_grad = False
        for bname, block in self.body.named_children():
            if bname in self.fix_parts:
                for param in block.parameters():
                    param.requires_grad = False
            elif bname == self.fix_gn1:
                for param in block.unit01.gn1.parameters():
                    param.requires_grad = False

    def intermediate_forward(self, x, layer_index=None):
        """Run the network up to a given depth.

        layer_index 'all' -> (logits, [stage outputs]); 1..4 -> output of
        that body stage; 5 -> pooled pre-head feature map (B, C, 1, 1).
        """
        if layer_index == 'all':
            out_list = []
            out = self.root(x)
            out_list.append(out)
            out = self.body.block1(out)
            out_list.append(out)
            out = self.body.block2(out)
            out_list.append(out)
            out = self.body.block3(out)
            out_list.append(out)
            out = self.body.block4(out)
            out_list.append(out)
            out = self.head(self.before_head(out))
            return out[..., 0, 0], out_list
        out = self.root(x)
        if layer_index == 1:
            out = self.body.block1(out)
        elif layer_index == 2:
            out = self.body.block1(out)
            out = self.body.block2(out)
        elif layer_index == 3:
            out = self.body.block1(out)
            out = self.body.block2(out)
            out = self.body.block3(out)
        elif layer_index == 4:
            out = self.body.block1(out)
            out = self.body.block2(out)
            out = self.body.block3(out)
            out = self.body.block4(out)
        elif layer_index == 5:
            out = self.body.block1(out)
            out = self.body.block2(out)
            out = self.body.block3(out)
            out = self.body.block4(out)
            out = self.before_head(out)
        return out

    def get_fc(self):
        """Return the head conv's (weight, bias) as squeezed numpy arrays."""
        w = self.head.conv.weight.cpu().detach().squeeze().numpy()
        b = self.head.conv.bias.cpu().detach().squeeze().numpy()
        return w, b

    def forward(self, x, layer_index=None, return_feature=False):
        """Classify ``x``; frozen parts run under no_grad.

        NOTE(review): with return_feature=True the first returned element is
        the raw input ``x`` rather than logits — callers likely expect
        (logits, feature); confirm intended behavior.
        """
        if return_feature:
            return x, self.intermediate_forward(x, 5)[..., 0, 0]
        if layer_index is not None:
            return self.intermediate_forward(x, layer_index)
        if 'root' in self.fix_parts:
            with torch.no_grad():
                x = self.root(x)
        else:
            x = self.root(x)
        for bname, block in self.body.named_children():
            if bname in self.fix_parts:
                with torch.no_grad():
                    x = block(x)
            else:
                x = block(x)
        if 'before_head' in self.fix_parts:
            with torch.no_grad():
                x = self.before_head(x)
        else:
            x = self.before_head(x)
        x = self.head(x)
        assert x.shape[-2:] == (1, 1)  # We should have no spatial shape left.
        return x[..., 0, 0]

    def load_state_dict_custom(self, state_dict):
        """Load a DataParallel-saved state dict by stripping 'module.'."""
        state_dict_new = {}
        for k, v in state_dict.items():
            state_dict_new[k[len('module.'):]] = v
        self.load_state_dict(state_dict_new, strict=True)

    def load_from(self, weights, prefix='resnet/'):
        """Copy parameters from an official BiT TF checkpoint dict."""
        with torch.no_grad():
            self.root.conv.weight.copy_(
                tf2th(
                    weights[f'{prefix}root_block/standardized_conv2d/kernel']))
            # pylint: disable=line-too-long
            self.before_head.gn.weight.copy_(
                tf2th(weights[f'{prefix}group_norm/gamma']))
            self.before_head.gn.bias.copy_(
                tf2th(weights[f'{prefix}group_norm/beta']))
            if self.zero_head:
                nn.init.zeros_(self.head.conv.weight)
                nn.init.zeros_(self.head.conv.bias)
            else:
                self.head.conv.weight.copy_(
                    tf2th(weights[f'{prefix}head/conv2d/kernel']))
                # pylint: disable=line-too-long
                self.head.conv.bias.copy_(
                    tf2th(weights[f'{prefix}head/conv2d/bias']))
            for bname, block in self.body.named_children():
                for uname, unit in block.named_children():
                    unit.load_from(weights, prefix=f'{prefix}{bname}/{uname}/')

    def train(self, mode=True):
        """Like nn.Module.train, but frozen parts are pinned to eval mode so
        their normalization statistics are not updated."""
        self.training = mode
        for module in self.children():
            module.train(mode)
        self.head.train(mode)
        if 'root' in self.fix_parts:
            self.root.eval()
        else:
            self.root.train(mode)
        for bname, block in self.body.named_children():
            if bname in self.fix_parts:
                block.eval()
            elif bname == self.fix_gn1:
                block.train(mode)
                block.unit01.gn1.eval()
            else:
                block.train(mode)
        if 'before_head' in self.fix_parts:
            self.before_head.eval()
        else:
            self.before_head.train(mode)
        return self
# Registry of BiT architectures: name -> ResNetV2 factory with depth/width
# preset. 'M' = ImageNet-21k-pretrained family, 'S' = ImageNet-1k family;
# R{depth}x{width} selects block counts and the width factor.
KNOWN_MODELS = OrderedDict([
    ('BiT-M-R50x1', lambda *a, **kw: ResNetV2([3, 4, 6, 3], 1, *a, **kw)),
    ('BiT-M-R50x3', lambda *a, **kw: ResNetV2([3, 4, 6, 3], 3, *a, **kw)),
    ('BiT-M-R101x1', lambda *a, **kw: ResNetV2([3, 4, 23, 3], 1, *a, **kw)),
    ('BiT-M-R101x3', lambda *a, **kw: ResNetV2([3, 4, 23, 3], 3, *a, **kw)),
    ('BiT-M-R152x2', lambda *a, **kw: ResNetV2([3, 8, 36, 3], 2, *a, **kw)),
    ('BiT-M-R152x4', lambda *a, **kw: ResNetV2([3, 8, 36, 3], 4, *a, **kw)),
    ('BiT-S-R50x1', lambda *a, **kw: ResNetV2([3, 4, 6, 3], 1, *a, **kw)),
    ('BiT-S-R50x3', lambda *a, **kw: ResNetV2([3, 4, 6, 3], 3, *a, **kw)),
    ('BiT-S-R101x1', lambda *a, **kw: ResNetV2([3, 4, 23, 3], 1, *a, **kw)),
    ('BiT-S-R101x3', lambda *a, **kw: ResNetV2([3, 4, 23, 3], 3, *a, **kw)),
    ('BiT-S-R152x2', lambda *a, **kw: ResNetV2([3, 8, 36, 3], 2, *a, **kw)),
    ('BiT-S-R152x4', lambda *a, **kw: ResNetV2([3, 8, 36, 3], 4, *a, **kw)),
])
| 14,517 | 36.611399 | 80 | py |
null | OpenOOD-main/openood/networks/cider_net.py | import torch.nn as nn
import torch.nn.functional as F
class CIDERNet(nn.Module):
    """Backbone plus projection head for CIDER, producing L2-normalized
    embeddings.

    Args:
        backbone: feature extractor exposing ``feature_size`` (directly or
            via ``.module`` when wrapped); any ``fc`` attribute is replaced
            with Identity.
        head: 'linear' or 'mlp' projection head.
        feat_dim: projection output dimensionality.
        num_classes: kept for interface compatibility (unused here).
    """
    def __init__(self, backbone, head, feat_dim, num_classes):
        super(CIDERNet, self).__init__()
        self.backbone = backbone
        if hasattr(self.backbone, 'fc'):
            # remove fc otherwise ddp will
            # report unused params
            self.backbone.fc = nn.Identity()
        try:
            feature_size = backbone.feature_size
        except AttributeError:
            feature_size = backbone.module.feature_size
        if head == 'linear':
            self.head = nn.Linear(feature_size, feat_dim)
        elif head == 'mlp':
            self.head = nn.Sequential(nn.Linear(feature_size, feature_size),
                                      nn.ReLU(inplace=True),
                                      nn.Linear(feature_size, feat_dim))

    def _backbone_features(self, x):
        # Collapse trailing spatial dims to (B, C) WITHOUT touching the
        # batch axis. The previous `.squeeze()` also removed the batch
        # dimension when batch size was 1, which broke F.normalize(dim=1).
        feat = self.backbone(x)
        if feat.dim() > 2:
            feat = feat.flatten(1)
        return feat

    def forward(self, x):
        """Return L2-normalized projected embeddings, shape (B, feat_dim)."""
        feat = self._backbone_features(x)
        unnorm_features = self.head(feat)
        features = F.normalize(unnorm_features, dim=1)
        return features

    def intermediate_forward(self, x):
        """Return L2-normalized backbone features (no projection head)."""
        feat = self._backbone_features(x)
        return F.normalize(feat, dim=1)
| 1,174 | 31.638889 | 76 | py |
null | OpenOOD-main/openood/networks/conf_branch_net.py | import torch.nn as nn
class ConfBranchNet(nn.Module):
    """Classifier with an auxiliary confidence branch.

    Wraps a backbone that supports ``forward(x, return_feature=True)`` and
    attaches two linear heads on the feature: class logits and a scalar
    confidence estimate.
    """

    def __init__(self, backbone, num_classes):
        super(ConfBranchNet, self).__init__()
        self.backbone = backbone
        if hasattr(self.backbone, 'fc'):
            # remove fc otherwise ddp will
            # report unused params
            self.backbone.fc = nn.Identity()
        try:
            feat_dim = backbone.feature_size
        except AttributeError:
            feat_dim = backbone.module.feature_size
        self.fc = nn.Linear(feat_dim, num_classes)
        self.confidence = nn.Linear(feat_dim, 1)

    def forward(self, x, return_confidence=False):
        """Return logits, or (logits, confidence) when requested."""
        _, feature = self.backbone(x, return_feature=True)
        logits = self.fc(feature)
        conf = self.confidence(feature)
        return (logits, conf) if return_confidence else logits
| 918 | 26.029412 | 58 | py |
null | OpenOOD-main/openood/networks/csi_net.py | import torch.nn as nn
def get_csi_linear_layers(feature_size,
                          num_classes,
                          simclr_dim,
                          shift_trans_type='rotation'):
    """Build the four CSI auxiliary heads as a dict.

    Returns: {'simclr_layer', 'shift_cls_layer',
              'joint_distribution_layer', 'linear'}.
    """
    projection = nn.Sequential(
        nn.Linear(feature_size, feature_size),
        nn.ReLU(),
        nn.Linear(feature_size, simclr_dim),
    )
    shift_head = nn.Linear(feature_size,
                           get_shift_module(shift_trans_type))
    joint_head = nn.Linear(feature_size, 4 * num_classes)
    cls_head = nn.Linear(feature_size, num_classes)
    return {
        'simclr_layer': projection,
        'shift_cls_layer': shift_head,
        'joint_distribution_layer': joint_head,
        'linear': cls_head,
    }
class CSINet(nn.Module):
    """Backbone with the CSI auxiliary heads (SimCLR projection, shift
    classifier, joint distribution head) attached.
    """
    def __init__(self,
                 backbone,
                 feature_size,
                 num_classes=10,
                 simclr_dim=128,
                 shift_trans_type='rotation'):
        super(CSINet, self).__init__()
        self.backbone = backbone
        if hasattr(self.backbone, 'fc'):
            # remove fc otherwise ddp will
            # report unused params
            self.backbone.fc = nn.Identity()
        self.linear = nn.Linear(feature_size, num_classes)
        self.simclr_layer = nn.Sequential(
            nn.Linear(feature_size, feature_size),
            nn.ReLU(),
            nn.Linear(feature_size, simclr_dim),
        )
        self.feature_size = feature_size
        self.joint_distribution_layer = nn.Linear(feature_size,
                                                  4 * num_classes)
        # Number of shift-transformation classes (4 for rotation/cutperm).
        self.K_shift = get_shift_module(shift_trans_type)
        self.shift_cls_layer = nn.Linear(feature_size, self.K_shift)

    def forward(self,
                inputs,
                penultimate=False,
                simclr=False,
                shift=False,
                joint=False):
        """Return class logits; if any auxiliary flag is set, return
        ``(logits, aux)`` where ``aux`` maps the requested head names to
        their outputs."""
        _aux = {}
        _return_aux = False
        _, features = self.backbone(inputs, return_feature=True)
        output = self.linear(features)
        if penultimate:
            _return_aux = True
            _aux['penultimate'] = features
        if simclr:
            _return_aux = True
            _aux['simclr'] = self.simclr_layer(features)
        if shift:
            _return_aux = True
            _aux['shift'] = self.shift_cls_layer(features)
        if joint:
            _return_aux = True
            _aux['joint'] = self.joint_distribution_layer(features)
        if _return_aux:
            return output, _aux
        return output
def get_shift_module(shift_trans_type):
    """Return the number of shifting transformations for the given type.

    'rotation' and 'cutperm' both produce 4 transformed views; any other
    type falls back to a single (identity) view.
    """
    return 4 if shift_trans_type in ('rotation', 'cutperm') else 1
| 2,816 | 27.744898 | 71 | py |
null | OpenOOD-main/openood/networks/de_resnet18_256x256.py | import torch
import torch.nn as nn
from torch import Tensor
class BasicBlock(nn.Module):
    """Decoder residual block.

    When ``stride == 2`` the first conv is a 2x2 transposed convolution
    that doubles the spatial resolution; otherwise it is a plain 3x3 conv.
    """
    # channel multiplier of the block's output (kept for ResNet-style APIs)
    expansion = 1
    def __init__(self, in_planes, planes, stride=1, upsample=None):
        super(BasicBlock, self).__init__()
        self.stride = stride
        if self.stride == 2:
            self.conv1 = nn.ConvTranspose2d(in_planes,
                                            planes,
                                            kernel_size=2,
                                            stride=stride,
                                            bias=False)
        else:
            self.conv1 = nn.Conv2d(in_planes,
                                   planes,
                                   kernel_size=3,
                                   stride=stride,
                                   padding=1,
                                   bias=False)
        # optional shortcut transform matching the main branch's shape
        self.upsample = upsample
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes,
                               planes,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.upsample is not None:
            identity = self.upsample(x)
        # residual connection
        out += identity
        out = self.relu(out)
        return out
class De_ResNet18_256x256(nn.Module):
    """Decoder counterpart of ResNet-18.

    Upsamples a 512-channel feature map through three stride-2 stages and
    returns the three intermediate feature maps, finest resolution first.
    """
    def __init__(self, block=BasicBlock, num_blocks=None, num_classes=10):
        super(De_ResNet18_256x256, self).__init__()
        self._norm_layer = nn.BatchNorm2d
        if num_blocks is None:
            num_blocks = [2, 2, 2, 2]
        self.inplanes = 512 * block.expansion
        self.layer1 = self._make_layer(block, 256, num_blocks[0], stride=2)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        # He init for convs, constant init for norm layers
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def _make_layer(self, block, planes, blocks, stride):
        # Build one upsampling stage; a transposed-conv shortcut matches
        # the main branch's shape when resolution/channels change.
        norm_layer = self._norm_layer
        upsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            upsample = nn.Sequential(
                nn.ConvTranspose2d(self.inplanes,
                                   planes * block.expansion,
                                   kernel_size=2,
                                   stride=stride,
                                   bias=False),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, upsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        feature_a = self.layer1(x)  # 512*8*8->256*16*16
        feature_b = self.layer2(feature_a)  # 256*16*16->128*32*32
        feature_c = self.layer3(feature_b)  # 128*32*32->64*64*64
        return [feature_c, feature_b, feature_a]
class AttnBasicBlock(nn.Module):
    """Standard residual basic block (two 3x3 convs, optional downsample)."""
    expansion: int = 1
    def __init__(self,
                 inplanes: int,
                 planes: int,
                 stride: int = 1,
                 downsample=None) -> None:
        super(AttnBasicBlock, self).__init__()
        norm_layer = nn.BatchNorm2d
        self.conv1 = nn.Conv2d(inplanes,
                               planes,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               bias=False)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes,
                               planes,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn2 = norm_layer(planes)
        self.stride = stride
        # optional shortcut transform when shapes differ
        self.downsample = downsample
    def forward(self, x: Tensor) -> Tensor:
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        # residual connection
        out += identity
        out = self.relu(out)
        return out
class BN_layer(nn.Module):
    """Bottleneck that fuses three multi-scale feature maps into one.

    The two finer maps are downsampled with stride-2 convs, concatenated
    with the coarsest map, and passed through a residual stage built from
    ``block``.
    """
    def __init__(
        self,
        block,
        layers: int,
        width_per_group: int = 64,
    ):
        super(BN_layer, self).__init__()
        self._norm_layer = nn.BatchNorm2d
        self.base_width = width_per_group
        self.inplanes = 256 * block.expansion
        self.bn_layer = self._make_layer(block, 512, layers, stride=2)
        # path for x[0] (finest map): two stride-2 convs
        self.conv1 = nn.Conv2d(64 * block.expansion,
                               128 * block.expansion,
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               bias=False)
        self.bn1 = self._norm_layer(128 * block.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(128 * block.expansion,
                               256 * block.expansion,
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               bias=False)
        self.bn2 = self._norm_layer(256 * block.expansion)
        # path for x[1] (middle map): one stride-2 conv
        self.conv3 = nn.Conv2d(128 * block.expansion,
                               256 * block.expansion,
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               bias=False)
        self.bn3 = self._norm_layer(256 * block.expansion)
        # NOTE(review): conv4/bn4 are registered but never used in forward
        # below — confirm whether they can be removed.
        self.conv4 = nn.Conv2d(1024 * block.expansion,
                               512 * block.expansion,
                               kernel_size=1,
                               stride=1,
                               bias=False)
        self.bn4 = self._norm_layer(512 * block.expansion)
        # He init for convs, constant init for norm layers
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def _make_layer(
        self,
        block,
        planes: int,
        blocks: int,
        stride: int = 1,
    ) -> nn.Sequential:
        # First block consumes the 3x-concatenated channels, hence
        # ``self.inplanes * 3`` below.
        norm_layer = self._norm_layer
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes * 3,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes * 3, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x: Tensor) -> Tensor:
        # Bring x[0] and x[1] down to x[2]'s resolution, then fuse.
        l1 = self.relu(
            self.bn2(self.conv2(self.relu(self.bn1(self.conv1(x[0]))))))
        l2 = self.relu(self.bn3(self.conv3(x[1])))
        feature = torch.cat([l1, l2, x[2]], 1)
        output = self.bn_layer(feature)
        return output.contiguous()
| 8,342 | 34.502128 | 75 | py |
null | OpenOOD-main/openood/networks/densenet.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """DenseNet basic layer: BN -> ReLU -> 3x3 conv, concatenated to input."""
    def __init__(self, in_planes, out_planes, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes,
                               out_planes,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.droprate = dropRate
    def forward(self, x):
        out = self.conv1(self.relu(self.bn1(x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        # dense connectivity: new features are appended to the input
        return torch.cat([x, out], 1)
class BottleneckBlock(nn.Module):
    """DenseNet bottleneck layer: 1x1 conv (4x widening) then 3x3 conv.

    As with ``BasicBlock``, the output is concatenated to the input.
    """
    def __init__(self, in_planes, out_planes, dropRate=0.0):
        super(BottleneckBlock, self).__init__()
        # 1x1 bottleneck expands to 4x the growth rate before the 3x3 conv
        inter_planes = out_planes * 4
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes,
                               inter_planes,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(inter_planes)
        self.conv2 = nn.Conv2d(inter_planes,
                               out_planes,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.droprate = dropRate
    def forward(self, x):
        out = self.conv1(self.relu(self.bn1(x)))
        if self.droprate > 0:
            out = F.dropout(out,
                            p=self.droprate,
                            inplace=False,
                            training=self.training)
        out = self.conv2(self.relu(self.bn2(out)))
        if self.droprate > 0:
            out = F.dropout(out,
                            p=self.droprate,
                            inplace=False,
                            training=self.training)
        # dense connectivity: new features are appended to the input
        return torch.cat([x, out], 1)
class TransitionBlock(nn.Module):
    """DenseNet transition: 1x1 conv channel compression + 2x2 avg pool."""
    def __init__(self, in_planes, out_planes, dropRate=0.0):
        super(TransitionBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes,
                               out_planes,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.droprate = dropRate
    def forward(self, x):
        out = self.conv1(self.relu(self.bn1(x)))
        if self.droprate > 0:
            out = F.dropout(out,
                            p=self.droprate,
                            inplace=False,
                            training=self.training)
        # halve the spatial resolution
        return F.avg_pool2d(out, 2)
class DenseBlock(nn.Module):
    """A stack of ``nb_layers`` densely connected layers.

    Layer ``i`` receives ``in_planes + i * growth_rate`` input channels,
    because each preceding layer concatenates ``growth_rate`` new feature
    maps onto its input.
    """
    def __init__(self, nb_layers, in_planes, growth_rate, block, dropRate=0.0):
        super(DenseBlock, self).__init__()
        self.layer = nn.Sequential(*(
            block(in_planes + idx * growth_rate, growth_rate, dropRate)
            for idx in range(nb_layers)))
    def forward(self, x):
        return self.layer(x)
class DenseNet3(nn.Module):
    """DensenNet-style classifier for 32x32 inputs.

    Three dense blocks separated by compressing transition layers, then
    global average pooling and a linear classifier.

    Args:
        depth: total layer count; each dense block gets ``(depth - 4) / 3``
            layers (halved again when using bottleneck blocks).
        growth_rate: feature maps added by every dense layer.
        reduction: channel compression factor of the transition layers.
        bottleneck: use ``BottleneckBlock`` instead of ``BasicBlock``.
        dropRate: dropout probability inside the blocks.
        num_classes: classifier output size.
    """
    def __init__(self,
                 depth=100,
                 growth_rate=12,
                 reduction=0.5,
                 bottleneck=True,
                 dropRate=0.0,
                 num_classes=10):
        super(DenseNet3, self).__init__()
        in_planes = 2 * growth_rate
        n = (depth - 4) / 3
        # fixed: was `if bottleneck == True:` (PEP 8 E712 — compare by truth)
        if bottleneck:
            n = n / 2
            block = BottleneckBlock
        else:
            block = BasicBlock
        n = int(n)
        # 1st conv before any dense block
        self.conv1 = nn.Conv2d(3,
                               in_planes,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        # 1st block
        self.block1 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
        in_planes = int(in_planes + n * growth_rate)
        self.trans1 = TransitionBlock(in_planes,
                                      int(math.floor(in_planes * reduction)),
                                      dropRate=dropRate)
        in_planes = int(math.floor(in_planes * reduction))
        # 2nd block
        self.block2 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
        in_planes = int(in_planes + n * growth_rate)
        self.trans2 = TransitionBlock(in_planes,
                                      int(math.floor(in_planes * reduction)),
                                      dropRate=dropRate)
        in_planes = int(math.floor(in_planes * reduction))
        # 3rd block
        self.block3 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
        in_planes = int(in_planes + n * growth_rate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(in_planes, num_classes)
        self.in_planes = in_planes
        # He-style init for convs, identity BN, zero biases
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def forward(self, x, return_feature=False):
        """Return class logits; with ``return_feature`` also a list of
        intermediate feature maps (pooled feature first)."""
        feature1 = self.conv1(x)
        feature2 = self.trans1(self.block1(feature1))
        feature3 = self.trans2(self.block2(feature2))
        feature4 = self.block3(feature3)
        feature5 = self.relu(self.bn1(feature4))
        # 8x8 global average pool -> flat penultimate feature
        out = F.avg_pool2d(feature5, 8)
        feature = out.view(-1, self.in_planes)
        logits_cls = self.fc(feature)
        feature_list = [
            feature, feature1, feature2, feature3, feature4, feature5
        ]
        if return_feature:
            return logits_cls, feature_list
        else:
            return logits_cls
    def get_fc(self):
        """Return the classifier's weight and bias as numpy arrays."""
        fc = self.fc
        return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy()
| 6,812 | 36.434066 | 79 | py |
null | OpenOOD-main/openood/networks/draem_net.py | import torch
import torch.nn as nn
class ReconstructiveSubNetwork(nn.Module):
    """DRAEM encoder-decoder that reconstructs the input image."""
    def __init__(self, in_channels=3, out_channels=3, base_width=128):
        super(ReconstructiveSubNetwork, self).__init__()
        self.encoder = EncoderReconstructive(in_channels, base_width)
        self.decoder = DecoderReconstructive(base_width,
                                             out_channels=out_channels)
    def forward(self, x):
        # encode to the bottleneck, then decode back to image space
        b5 = self.encoder(x)
        output = self.decoder(b5)
        return output
class DiscriminativeSubNetwork(nn.Module):
    """DRAEM U-Net-style segmentation network.

    The decoder consumes all six encoder stages as skip connections; with
    ``out_features`` it also returns the intermediate encoder features.
    """
    def __init__(self,
                 in_channels=3,
                 out_channels=3,
                 base_channels=64,
                 out_features=False):
        super(DiscriminativeSubNetwork, self).__init__()
        base_width = base_channels
        self.encoder_segment = EncoderDiscriminative(in_channels, base_width)
        self.decoder_segment = DecoderDiscriminative(base_width,
                                                     out_channels=out_channels)
        # self.segment_act = torch.nn.Sigmoid()
        self.out_features = out_features
    def forward(self, x):
        b1, b2, b3, b4, b5, b6 = self.encoder_segment(x)
        output_segment = self.decoder_segment(b1, b2, b3, b4, b5, b6)
        if self.out_features:
            return output_segment, b2, b3, b4, b5, b6
        else:
            return output_segment
class EncoderDiscriminative(nn.Module):
    """Six-stage encoder of the DRAEM discriminative sub-network.

    Each stage is two Conv-BN-ReLU layers; stages are separated by 2x2
    max pooling, so the spatial resolution halves five times.
    """
    def __init__(self, in_channels, base_width):
        super(EncoderDiscriminative, self).__init__()
        self.block1 = nn.Sequential(
            nn.Conv2d(in_channels, base_width, kernel_size=3, padding=1),
            nn.BatchNorm2d(base_width), nn.ReLU(inplace=True),
            nn.Conv2d(base_width, base_width, kernel_size=3, padding=1),
            nn.BatchNorm2d(base_width), nn.ReLU(inplace=True))
        self.mp1 = nn.Sequential(nn.MaxPool2d(2))
        self.block2 = nn.Sequential(
            nn.Conv2d(base_width, base_width * 2, kernel_size=3, padding=1),
            nn.BatchNorm2d(base_width * 2), nn.ReLU(inplace=True),
            nn.Conv2d(base_width * 2, base_width * 2, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 2),
            nn.ReLU(inplace=True))
        self.mp2 = nn.Sequential(nn.MaxPool2d(2))
        self.block3 = nn.Sequential(
            nn.Conv2d(base_width * 2, base_width * 4, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 4),
            nn.ReLU(inplace=True),
            nn.Conv2d(base_width * 4, base_width * 4, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 4),
            nn.ReLU(inplace=True))
        self.mp3 = nn.Sequential(nn.MaxPool2d(2))
        self.block4 = nn.Sequential(
            nn.Conv2d(base_width * 4, base_width * 8, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 8),
            nn.ReLU(inplace=True),
            nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 8),
            nn.ReLU(inplace=True))
        self.mp4 = nn.Sequential(nn.MaxPool2d(2))
        self.block5 = nn.Sequential(
            nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 8),
            nn.ReLU(inplace=True),
            nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 8),
            nn.ReLU(inplace=True))
        self.mp5 = nn.Sequential(nn.MaxPool2d(2))
        self.block6 = nn.Sequential(
            nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 8),
            nn.ReLU(inplace=True),
            nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 8),
            nn.ReLU(inplace=True))
    def forward(self, x):
        """Return all six stage outputs (used as U-Net skip connections)."""
        b1 = self.block1(x)
        mp1 = self.mp1(b1)
        b2 = self.block2(mp1)
        # fixed: originally applied self.mp3 here, which left self.mp2 a
        # registered-but-unused module (an issue for DDP unused-parameter
        # detection); the op itself is identical — every pool is
        # MaxPool2d(2) — so outputs are unchanged.
        mp2 = self.mp2(b2)
        b3 = self.block3(mp2)
        mp3 = self.mp3(b3)
        b4 = self.block4(mp3)
        mp4 = self.mp4(b4)
        b5 = self.block5(mp4)
        mp5 = self.mp5(b5)
        b6 = self.block6(mp5)
        return b1, b2, b3, b4, b5, b6
class DecoderDiscriminative(nn.Module):
    """Decoder of the DRAEM discriminative sub-network.

    Each stage bilinearly upsamples, concatenates the matching encoder
    feature map (U-Net skip connection), and applies two Conv-BN-ReLU
    layers; a final 3x3 conv produces the segmentation logits.
    """
    def __init__(self, base_width, out_channels=1):
        super(DecoderDiscriminative, self).__init__()
        self.up_b = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
            nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 8),
            nn.ReLU(inplace=True))
        # (8 + 8): upsampled b6 concatenated with skip b5
        self.db_b = nn.Sequential(
            nn.Conv2d(base_width * (8 + 8),
                      base_width * 8,
                      kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 8),
            nn.ReLU(inplace=True),
            nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 8),
            nn.ReLU(inplace=True))
        self.up1 = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
            nn.Conv2d(base_width * 8, base_width * 4, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 4),
            nn.ReLU(inplace=True))
        # (4 + 8): upsampled features concatenated with skip b4
        self.db1 = nn.Sequential(
            nn.Conv2d(base_width * (4 + 8),
                      base_width * 4,
                      kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 4),
            nn.ReLU(inplace=True),
            nn.Conv2d(base_width * 4, base_width * 4, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 4),
            nn.ReLU(inplace=True))
        self.up2 = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
            nn.Conv2d(base_width * 4, base_width * 2, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 2),
            nn.ReLU(inplace=True))
        # (2 + 4): upsampled features concatenated with skip b3
        self.db2 = nn.Sequential(
            nn.Conv2d(base_width * (2 + 4),
                      base_width * 2,
                      kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 2),
            nn.ReLU(inplace=True),
            nn.Conv2d(base_width * 2, base_width * 2, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 2),
            nn.ReLU(inplace=True))
        self.up3 = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
            nn.Conv2d(base_width * 2, base_width, kernel_size=3, padding=1),
            nn.BatchNorm2d(base_width), nn.ReLU(inplace=True))
        # (2 + 1): upsampled features concatenated with skip b2
        self.db3 = nn.Sequential(
            nn.Conv2d(base_width * (2 + 1),
                      base_width,
                      kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width),
            nn.ReLU(inplace=True),
            nn.Conv2d(base_width, base_width, kernel_size=3, padding=1),
            nn.BatchNorm2d(base_width), nn.ReLU(inplace=True))
        self.up4 = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
            nn.Conv2d(base_width, base_width, kernel_size=3, padding=1),
            nn.BatchNorm2d(base_width), nn.ReLU(inplace=True))
        self.db4 = nn.Sequential(
            nn.Conv2d(base_width * 2, base_width, kernel_size=3, padding=1),
            nn.BatchNorm2d(base_width), nn.ReLU(inplace=True),
            nn.Conv2d(base_width, base_width, kernel_size=3, padding=1),
            nn.BatchNorm2d(base_width), nn.ReLU(inplace=True))
        self.fin_out = nn.Sequential(
            nn.Conv2d(base_width, out_channels, kernel_size=3, padding=1))
    def forward(self, b1, b2, b3, b4, b5, b6):
        """Decode from b6 up to full resolution using b5..b1 as skips."""
        up_b = self.up_b(b6)
        cat_b = torch.cat((up_b, b5), dim=1)
        db_b = self.db_b(cat_b)
        up1 = self.up1(db_b)
        cat1 = torch.cat((up1, b4), dim=1)
        db1 = self.db1(cat1)
        up2 = self.up2(db1)
        cat2 = torch.cat((up2, b3), dim=1)
        db2 = self.db2(cat2)
        up3 = self.up3(db2)
        cat3 = torch.cat((up3, b2), dim=1)
        db3 = self.db3(cat3)
        up4 = self.up4(db3)
        cat4 = torch.cat((up4, b1), dim=1)
        db4 = self.db4(cat4)
        out = self.fin_out(db4)
        return out
class EncoderReconstructive(nn.Module):
    """Five-stage encoder of the DRAEM reconstructive sub-network.

    Each stage is two Conv-BN-ReLU layers; 2x2 max pooling between stages
    halves the spatial resolution four times. Only the bottleneck output
    is returned (no skip connections).
    """
    def __init__(self, in_channels, base_width):
        super(EncoderReconstructive, self).__init__()
        self.block1 = nn.Sequential(
            nn.Conv2d(in_channels, base_width, kernel_size=3, padding=1),
            nn.BatchNorm2d(base_width), nn.ReLU(inplace=True),
            nn.Conv2d(base_width, base_width, kernel_size=3, padding=1),
            nn.BatchNorm2d(base_width), nn.ReLU(inplace=True))
        self.mp1 = nn.Sequential(nn.MaxPool2d(2))
        self.block2 = nn.Sequential(
            nn.Conv2d(base_width, base_width * 2, kernel_size=3, padding=1),
            nn.BatchNorm2d(base_width * 2), nn.ReLU(inplace=True),
            nn.Conv2d(base_width * 2, base_width * 2, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 2),
            nn.ReLU(inplace=True))
        self.mp2 = nn.Sequential(nn.MaxPool2d(2))
        self.block3 = nn.Sequential(
            nn.Conv2d(base_width * 2, base_width * 4, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 4),
            nn.ReLU(inplace=True),
            nn.Conv2d(base_width * 4, base_width * 4, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 4),
            nn.ReLU(inplace=True))
        self.mp3 = nn.Sequential(nn.MaxPool2d(2))
        self.block4 = nn.Sequential(
            nn.Conv2d(base_width * 4, base_width * 8, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 8),
            nn.ReLU(inplace=True),
            nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 8),
            nn.ReLU(inplace=True))
        self.mp4 = nn.Sequential(nn.MaxPool2d(2))
        self.block5 = nn.Sequential(
            nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 8),
            nn.ReLU(inplace=True),
            nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 8),
            nn.ReLU(inplace=True))
    def forward(self, x):
        """Return the bottleneck feature map (input resolution / 16)."""
        b1 = self.block1(x)
        mp1 = self.mp1(b1)
        b2 = self.block2(mp1)
        # fixed: originally applied self.mp3 here, which left self.mp2 a
        # registered-but-unused module (an issue for DDP unused-parameter
        # detection); the op itself is identical — every pool is
        # MaxPool2d(2) — so outputs are unchanged.
        mp2 = self.mp2(b2)
        b3 = self.block3(mp2)
        mp3 = self.mp3(b3)
        b4 = self.block4(mp3)
        mp4 = self.mp4(b4)
        b5 = self.block5(mp4)
        return b5
class DecoderReconstructive(nn.Module):
    """Decoder of the DRAEM reconstructive sub-network.

    Four upsampling stages (bilinear upsample + Conv-BN-ReLU, each
    followed by two Conv-BN-ReLU layers) take the bottleneck back to the
    input resolution; no skip connections are used.
    """
    def __init__(self, base_width, out_channels=1):
        super(DecoderReconstructive, self).__init__()
        self.up1 = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
            nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 8),
            nn.ReLU(inplace=True))
        self.db1 = nn.Sequential(
            nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 8),
            nn.ReLU(inplace=True),
            nn.Conv2d(base_width * 8, base_width * 4, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 4),
            nn.ReLU(inplace=True))
        self.up2 = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
            nn.Conv2d(base_width * 4, base_width * 4, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 4),
            nn.ReLU(inplace=True))
        self.db2 = nn.Sequential(
            nn.Conv2d(base_width * 4, base_width * 4, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 4),
            nn.ReLU(inplace=True),
            nn.Conv2d(base_width * 4, base_width * 2, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 2),
            nn.ReLU(inplace=True))
        self.up3 = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
            nn.Conv2d(base_width * 2, base_width * 2, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 2),
            nn.ReLU(inplace=True))
        # cat with base*1
        self.db3 = nn.Sequential(
            nn.Conv2d(base_width * 2, base_width * 2, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 2),
            nn.ReLU(inplace=True),
            nn.Conv2d(base_width * 2, base_width * 1, kernel_size=3,
                      padding=1), nn.BatchNorm2d(base_width * 1),
            nn.ReLU(inplace=True))
        self.up4 = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
            nn.Conv2d(base_width, base_width, kernel_size=3, padding=1),
            nn.BatchNorm2d(base_width), nn.ReLU(inplace=True))
        self.db4 = nn.Sequential(
            nn.Conv2d(base_width * 1, base_width, kernel_size=3, padding=1),
            nn.BatchNorm2d(base_width), nn.ReLU(inplace=True),
            nn.Conv2d(base_width, base_width, kernel_size=3, padding=1),
            nn.BatchNorm2d(base_width), nn.ReLU(inplace=True))
        # final projection to the reconstructed image channels
        self.fin_out = nn.Sequential(
            nn.Conv2d(base_width, out_channels, kernel_size=3, padding=1))
        # self.fin_out = nn.Conv2d(
        #     base_width, out_channels, kernel_size=3, adding=1)
    def forward(self, b5):
        """Decode the bottleneck ``b5`` back to image resolution."""
        up1 = self.up1(b5)
        db1 = self.db1(up1)
        up2 = self.up2(db1)
        db2 = self.db2(up2)
        up3 = self.up3(db2)
        db3 = self.db3(up3)
        up4 = self.up4(db3)
        db4 = self.db4(up4)
        out = self.fin_out(db4)
        return out
| 14,225 | 41.849398 | 79 | py |
null | OpenOOD-main/openood/networks/dropout_net.py | import torch.nn as nn
import torch.nn.functional as F
class DropoutNet(nn.Module):
    """MC-dropout wrapper: applies dropout to backbone features on demand."""

    def __init__(self, backbone, dropout_p):
        super(DropoutNet, self).__init__()
        self.backbone = backbone
        self.dropout_p = dropout_p

    def forward(self, x, use_dropout=True):
        # Plain backbone pass when dropout sampling is disabled.
        if not use_dropout:
            return self.backbone(x)
        return self.forward_with_dropout(x)

    def forward_with_dropout(self, x):
        feature = self.backbone(x, return_feature=True)[1]
        # training=True keeps dropout stochastic even in eval mode,
        # which is what Monte-Carlo sampling requires.
        dropped = F.dropout2d(feature, self.dropout_p, training=True)
        return self.backbone.fc(dropped)
| 651 | 27.347826 | 69 | py |
null | OpenOOD-main/openood/networks/dsvdd_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class MNIST_LeNet(nn.Module):
    """LeNet-style encoder mapping 1-channel 28x28 images to a 32-d vector."""
    def __init__(self):
        super().__init__()
        # output representation dimension
        self.rep_dim = 32
        self.pool = nn.MaxPool2d(2, 2)
        self.conv1 = nn.Conv2d(1, 8, 5, bias=False, padding=2)
        self.bn1 = nn.BatchNorm2d(8, eps=1e-04, affine=False)
        self.conv2 = nn.Conv2d(8, 4, 5, bias=False, padding=2)
        self.bn2 = nn.BatchNorm2d(4, eps=1e-04, affine=False)
        # 4 channels at 7x7 after two 2x2 pools of a 28x28 input
        self.fc1 = nn.Linear(4 * 7 * 7, self.rep_dim, bias=False)
    def forward(self, x):
        x = self.conv1(x)
        x = self.pool(F.leaky_relu(self.bn1(x)))
        x = self.conv2(x)
        x = self.pool(F.leaky_relu(self.bn2(x)))
        x = x.view(x.size(0), -1)
        x = self.fc1(x)
        return x
class CIFAR10_LeNet(nn.Module):
    """LeNet-style encoder mapping 3-channel 32x32 images to a 128-d vector."""
    def __init__(self):
        super().__init__()
        # output representation dimension
        self.rep_dim = 128
        self.pool = nn.MaxPool2d(2, 2)
        self.conv1 = nn.Conv2d(3, 32, 5, bias=False, padding=2)
        self.bn2d1 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
        self.conv2 = nn.Conv2d(32, 64, 5, bias=False, padding=2)
        self.bn2d2 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
        self.conv3 = nn.Conv2d(64, 128, 5, bias=False, padding=2)
        self.bn2d3 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
        # 128 channels at 4x4 after three 2x2 pools of a 32x32 input
        self.fc1 = nn.Linear(128 * 4 * 4, self.rep_dim, bias=False)
    def forward(self, x):
        x = self.conv1(x)
        x = self.pool(F.leaky_relu(self.bn2d1(x)))
        x = self.conv2(x)
        x = self.pool(F.leaky_relu(self.bn2d2(x)))
        x = self.conv3(x)
        x = self.pool(F.leaky_relu(self.bn2d3(x)))
        x = x.view(x.size(0), -1)
        x = self.fc1(x)
        return x
class CIFAR10_LeNet_ELU(nn.Module):
    """Same architecture as ``CIFAR10_LeNet`` but with ELU activations."""
    def __init__(self):
        super().__init__()
        # output representation dimension
        self.rep_dim = 128
        self.pool = nn.MaxPool2d(2, 2)
        self.conv1 = nn.Conv2d(3, 32, 5, bias=False, padding=2)
        self.bn2d1 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
        self.conv2 = nn.Conv2d(32, 64, 5, bias=False, padding=2)
        self.bn2d2 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
        self.conv3 = nn.Conv2d(64, 128, 5, bias=False, padding=2)
        self.bn2d3 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
        self.fc1 = nn.Linear(128 * 4 * 4, self.rep_dim, bias=False)
    def forward(self, x):
        x = self.conv1(x)
        x = self.pool(F.elu(self.bn2d1(x)))
        x = self.conv2(x)
        x = self.pool(F.elu(self.bn2d2(x)))
        x = self.conv3(x)
        x = self.pool(F.elu(self.bn2d3(x)))
        x = x.view(x.size(0), -1)
        x = self.fc1(x)
        return x
def build_network(net_type):
    """Instantiate the Deep SVDD encoder matching ``net_type``.

    Returns ``None`` for unrecognized network types.
    """
    builders = {
        'mnist_LeNet': MNIST_LeNet,
        'cifar10_LeNet': CIFAR10_LeNet,
        'cifar10_LeNet_ELU': CIFAR10_LeNet_ELU,
    }
    builder = builders.get(net_type)
    return builder() if builder is not None else None
class MNIST_LeNet_Autoencoder(nn.Module):
    """Autoencoder whose encoder mirrors ``MNIST_LeNet``.

    The decoder inverts the encoder with transposed convolutions and
    interpolation; the output passes through a sigmoid.
    """
    def __init__(self):
        super().__init__()
        # bottleneck representation dimension
        self.rep_dim = 32
        self.pool = nn.MaxPool2d(2, 2)
        # Encoder (must match the Deep SVDD network above)
        self.conv1 = nn.Conv2d(1, 8, 5, bias=False, padding=2)
        self.bn1 = nn.BatchNorm2d(8, eps=1e-04, affine=False)
        self.conv2 = nn.Conv2d(8, 4, 5, bias=False, padding=2)
        self.bn2 = nn.BatchNorm2d(4, eps=1e-04, affine=False)
        self.fc1 = nn.Linear(4 * 7 * 7, self.rep_dim, bias=False)
        # Decoder
        # the 32-d code is reshaped to (rep_dim / 16) = 2 channels of 4x4
        self.deconv1 = nn.ConvTranspose2d(2, 4, 5, bias=False, padding=2)
        self.bn3 = nn.BatchNorm2d(4, eps=1e-04, affine=False)
        self.deconv2 = nn.ConvTranspose2d(4, 8, 5, bias=False, padding=3)
        self.bn4 = nn.BatchNorm2d(8, eps=1e-04, affine=False)
        self.deconv3 = nn.ConvTranspose2d(8, 1, 5, bias=False, padding=2)
    def forward(self, x):
        x = self.conv1(x)
        x = self.pool(F.leaky_relu(self.bn1(x)))
        x = self.conv2(x)
        x = self.pool(F.leaky_relu(self.bn2(x)))
        x = x.view(x.size(0), -1)
        x = self.fc1(x)
        # reshape bottleneck to a small feature map and upsample back
        x = x.view(x.size(0), int(self.rep_dim / 16), 4, 4)
        x = F.interpolate(F.leaky_relu(x), scale_factor=2)
        x = self.deconv1(x)
        x = F.interpolate(F.leaky_relu(self.bn3(x)), scale_factor=2)
        x = self.deconv2(x)
        x = F.interpolate(F.leaky_relu(self.bn4(x)), scale_factor=2)
        x = self.deconv3(x)
        x = torch.sigmoid(x)
        return x
class CIFAR10_LeNet_Autoencoder(nn.Module):
    """Autoencoder whose encoder mirrors ``CIFAR10_LeNet``.

    Uses Xavier-initialized (leaky-ReLU gain) convs; the decoder inverts
    the encoder with transposed convolutions and interpolation, ending in
    a sigmoid.
    """
    def __init__(self):
        super().__init__()
        # bottleneck representation dimension
        self.rep_dim = 128
        self.pool = nn.MaxPool2d(2, 2)
        # Encoder (must match the Deep SVDD network above)
        self.conv1 = nn.Conv2d(3, 32, 5, bias=False, padding=2)
        nn.init.xavier_uniform_(self.conv1.weight,
                                gain=nn.init.calculate_gain('leaky_relu'))
        self.bn2d1 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
        self.conv2 = nn.Conv2d(32, 64, 5, bias=False, padding=2)
        nn.init.xavier_uniform_(self.conv2.weight,
                                gain=nn.init.calculate_gain('leaky_relu'))
        self.bn2d2 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
        self.conv3 = nn.Conv2d(64, 128, 5, bias=False, padding=2)
        nn.init.xavier_uniform_(self.conv3.weight,
                                gain=nn.init.calculate_gain('leaky_relu'))
        self.bn2d3 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
        self.fc1 = nn.Linear(128 * 4 * 4, self.rep_dim, bias=False)
        self.bn1d = nn.BatchNorm1d(self.rep_dim, eps=1e-04, affine=False)
        # Decoder
        # the 128-d code is reshaped to rep_dim/16 = 8 channels of 4x4
        self.deconv1 = nn.ConvTranspose2d(int(self.rep_dim / (4 * 4)),
                                          128,
                                          5,
                                          bias=False,
                                          padding=2)
        nn.init.xavier_uniform_(self.deconv1.weight,
                                gain=nn.init.calculate_gain('leaky_relu'))
        self.bn2d4 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
        self.deconv2 = nn.ConvTranspose2d(128, 64, 5, bias=False, padding=2)
        nn.init.xavier_uniform_(self.deconv2.weight,
                                gain=nn.init.calculate_gain('leaky_relu'))
        self.bn2d5 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
        self.deconv3 = nn.ConvTranspose2d(64, 32, 5, bias=False, padding=2)
        nn.init.xavier_uniform_(self.deconv3.weight,
                                gain=nn.init.calculate_gain('leaky_relu'))
        self.bn2d6 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
        self.deconv4 = nn.ConvTranspose2d(32, 3, 5, bias=False, padding=2)
        nn.init.xavier_uniform_(self.deconv4.weight,
                                gain=nn.init.calculate_gain('leaky_relu'))
    def forward(self, x):
        x = self.conv1(x)
        x = self.pool(F.leaky_relu(self.bn2d1(x)))
        x = self.conv2(x)
        x = self.pool(F.leaky_relu(self.bn2d2(x)))
        x = self.conv3(x)
        x = self.pool(F.leaky_relu(self.bn2d3(x)))
        x = x.view(x.size(0), -1)
        x = self.bn1d(self.fc1(x))
        # reshape bottleneck to a small feature map and upsample back
        x = x.view(x.size(0), int(self.rep_dim / (4 * 4)), 4, 4)
        x = F.leaky_relu(x)
        x = self.deconv1(x)
        x = F.interpolate(F.leaky_relu(self.bn2d4(x)), scale_factor=2)
        x = self.deconv2(x)
        x = F.interpolate(F.leaky_relu(self.bn2d5(x)), scale_factor=2)
        x = self.deconv3(x)
        x = F.interpolate(F.leaky_relu(self.bn2d6(x)), scale_factor=2)
        x = self.deconv4(x)
        x = torch.sigmoid(x)
        return x
class CIFAR10_LeNet_ELU_Autoencoder(nn.Module):
    """ELU variant of ``CIFAR10_LeNet_Autoencoder`` (default Xavier gain)."""
    def __init__(self):
        super().__init__()
        # bottleneck representation dimension
        self.rep_dim = 128
        self.pool = nn.MaxPool2d(2, 2)
        # Encoder (must match the Deep SVDD network above)
        self.conv1 = nn.Conv2d(3, 32, 5, bias=False, padding=2)
        nn.init.xavier_uniform_(self.conv1.weight)
        self.bn2d1 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
        self.conv2 = nn.Conv2d(32, 64, 5, bias=False, padding=2)
        nn.init.xavier_uniform_(self.conv2.weight)
        self.bn2d2 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
        self.conv3 = nn.Conv2d(64, 128, 5, bias=False, padding=2)
        nn.init.xavier_uniform_(self.conv3.weight)
        self.bn2d3 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
        self.fc1 = nn.Linear(128 * 4 * 4, self.rep_dim, bias=False)
        self.bn1d = nn.BatchNorm1d(self.rep_dim, eps=1e-04, affine=False)
        # Decoder
        # the 128-d code is reshaped to rep_dim/16 = 8 channels of 4x4
        self.deconv1 = nn.ConvTranspose2d(int(self.rep_dim / (4 * 4)),
                                          128,
                                          5,
                                          bias=False,
                                          padding=2)
        nn.init.xavier_uniform_(self.deconv1.weight)
        self.bn2d4 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
        self.deconv2 = nn.ConvTranspose2d(128, 64, 5, bias=False, padding=2)
        nn.init.xavier_uniform_(self.deconv2.weight)
        self.bn2d5 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
        self.deconv3 = nn.ConvTranspose2d(64, 32, 5, bias=False, padding=2)
        nn.init.xavier_uniform_(self.deconv3.weight)
        self.bn2d6 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
        self.deconv4 = nn.ConvTranspose2d(32, 3, 5, bias=False, padding=2)
        nn.init.xavier_uniform_(self.deconv4.weight)
    def forward(self, x):
        x = self.conv1(x)
        x = self.pool(F.elu(self.bn2d1(x)))
        x = self.conv2(x)
        x = self.pool(F.elu(self.bn2d2(x)))
        x = self.conv3(x)
        x = self.pool(F.elu(self.bn2d3(x)))
        x = x.view(x.size(0), -1)
        x = self.bn1d(self.fc1(x))
        # reshape bottleneck to a small feature map and upsample back
        x = x.view(x.size(0), int(self.rep_dim / (4 * 4)), 4, 4)
        x = F.elu(x)
        x = self.deconv1(x)
        x = F.interpolate(F.elu(self.bn2d4(x)), scale_factor=2)
        x = self.deconv2(x)
        x = F.interpolate(F.elu(self.bn2d5(x)), scale_factor=2)
        x = self.deconv3(x)
        x = F.interpolate(F.elu(self.bn2d6(x)), scale_factor=2)
        x = self.deconv4(x)
        x = torch.sigmoid(x)
        return x
def get_Autoencoder(net_type):
    """Instantiate the autoencoder matching ``net_type``.

    Returns ``None`` for unrecognized network types.
    """
    builders = {
        'mnist_LeNet': MNIST_LeNet_Autoencoder,
        'cifar10_LeNet': CIFAR10_LeNet_Autoencoder,
        'cifar10_LeNet_ELU': CIFAR10_LeNet_ELU_Autoencoder,
    }
    builder = builders.get(net_type)
    return builder() if builder is not None else None
| 10,502 | 37.054348 | 76 | py |
null | OpenOOD-main/openood/networks/godin_net.py | import torch
import torch.nn as nn
def norm(x):
    """L2-normalize each row of ``x``; the epsilon avoids division by zero."""
    row_norms = torch.norm(x, p=2, dim=1, keepdim=True)
    return x / (row_norms + .0001)
class CosineDeconf(nn.Module):
    """GODIN ``h`` head: cosine similarity between features and class weights."""

    def __init__(self, in_features, num_classes):
        super(CosineDeconf, self).__init__()
        self.h = nn.Linear(in_features, num_classes, bias=False)
        self.init_weights()

    def init_weights(self):
        nn.init.kaiming_normal_(self.h.weight.data, nonlinearity='relu')

    def forward(self, x):
        # Normalize both the feature rows and the class weight rows, then
        # take inner products: plain cosine-similarity logits.
        return torch.matmul(norm(x), norm(self.h.weight).T)
class EuclideanDeconf(nn.Module):
    """GODIN ``h`` head: negative mean squared distance to class weights."""

    def __init__(self, in_features, num_classes):
        super(EuclideanDeconf, self).__init__()
        self.h = nn.Linear(in_features, num_classes, bias=False)
        self.init_weights()

    def init_weights(self):
        nn.init.kaiming_normal_(self.h.weight.data, nonlinearity='relu')

    def forward(self, x):
        # Broadcast (batch, latent, 1) against (1, latent, num_classes) so
        # every sample is compared with every class weight vector.
        diff = x.unsqueeze(2) - self.h.weight.T.unsqueeze(0)
        return -diff.pow(2).mean(1)
class InnerDeconf(nn.Module):
    """GODIN ``h`` head: plain affine (inner-product) logits."""

    def __init__(self, in_features, num_classes):
        super(InnerDeconf, self).__init__()
        self.h = nn.Linear(in_features, num_classes)
        self.init_weights()

    def init_weights(self):
        nn.init.kaiming_normal_(self.h.weight.data, nonlinearity='relu')
        # start from a zero bias; only the weights get Kaiming init
        self.h.bias.data = torch.zeros_like(self.h.bias.data)

    def forward(self, x):
        return self.h(x)
class GodinNet(nn.Module):
    """Decomposed-confidence classifier head (Generalized ODIN).

    Wraps a feature backbone and produces logits of the form h(x) / g(x),
    where ``h`` scores per-class affinity (cosine / inner / euclidean) and
    ``g`` is a scalar confidence branch squashed into (0, 1).
    """
    def __init__(self,
                 backbone,
                 feature_size,
                 num_classes,
                 similarity_measure='cosine'):
        super(GodinNet, self).__init__()

        deconf_heads = {
            'cosine': CosineDeconf,
            'inner': InnerDeconf,
            'euclid': EuclideanDeconf
        }

        self.num_classes = num_classes
        self.backbone = backbone
        if hasattr(self.backbone, 'fc'):
            # remove fc otherwise ddp will report unused params
            self.backbone.fc = nn.Identity()

        # numerator h(x): per-class similarity scores
        self.h = deconf_heads[similarity_measure](feature_size, num_classes)
        # denominator g(x): scalar confidence in (0, 1)
        self.g = nn.Sequential(nn.Linear(feature_size, 1), nn.BatchNorm1d(1),
                               nn.Sigmoid())
        # kept for interface compatibility; not used in forward
        self.softmax = nn.Softmax()

    def forward(self, x, inference=False, score_func='h'):
        _, feature = self.backbone(x, return_feature=True)

        numerators = self.h(feature)
        denominators = self.g(feature)
        # decomposed logits used for training
        quotients = numerators / denominators

        if not inference:
            return quotients
        if score_func == 'h':
            return numerators
        if score_func == 'g':
            return denominators
        # maybe generate an error instead
        print('Invalid score function, using h instead')
        return numerators
| 3,133 | 26.017241 | 77 | py |
null | OpenOOD-main/openood/networks/lenet.py | import logging
import torch.nn as nn
# module-level logger; not referenced in the visible code of this file
logger = logging.getLogger(__name__)
class LeNet(nn.Module):
    """LeNet-5 style CNN (expects 28x28 spatial inputs).

    Follows the OpenOOD backbone contract: ``forward`` can return plain
    logits, ``(logits, penultimate_feature)`` or
    ``(logits, per_stage_feature_list)``.
    """
    def __init__(self, num_classes, num_channel=3):
        super(LeNet, self).__init__()
        self.num_classes = num_classes
        # width of the penultimate representation
        self.feature_size = 84

        self.block1 = nn.Sequential(
            nn.Conv2d(in_channels=num_channel,
                      out_channels=6,
                      kernel_size=5,
                      stride=1,
                      padding=2), nn.ReLU(), nn.MaxPool2d(kernel_size=2))

        self.block2 = nn.Sequential(
            nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1),
            nn.ReLU(), nn.MaxPool2d(kernel_size=2))

        self.block3 = nn.Sequential(
            nn.Conv2d(in_channels=16,
                      out_channels=120,
                      kernel_size=5,
                      stride=1), nn.ReLU())

        self.classifier1 = nn.Linear(in_features=120, out_features=84)
        self.relu = nn.ReLU()
        self.fc = nn.Linear(in_features=84, out_features=num_classes)

    def get_fc(self):
        """Return (weight, bias) of the classification layer as numpy arrays."""
        fc = self.fc
        return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy()

    def _stages(self, x):
        """Run the conv stages; return every intermediate activation."""
        conv1 = self.block1(x)
        conv2 = self.block2(conv1)
        flat = self.block3(conv2).view(x.shape[0], -1)
        penultimate = self.relu(self.classifier1(flat))
        return conv1, conv2, flat, penultimate

    def forward(self, x, return_feature=False, return_feature_list=False):
        conv1, conv2, flat, penultimate = self._stages(x)
        logits_cls = self.fc(penultimate)
        if return_feature:
            return logits_cls, penultimate
        if return_feature_list:
            return logits_cls, [conv1, conv2, flat, penultimate]
        return logits_cls

    def forward_threshold(self, x, threshold):
        """ReAct-style forward: clip the penultimate feature at ``threshold``."""
        *_, penultimate = self._stages(x)
        rectified = penultimate.clip(max=threshold)
        return self.fc(rectified)
| 2,170 | 33.460317 | 79 | py |
null | OpenOOD-main/openood/networks/mcd_net.py | import torch.nn as nn
class MCDNet(nn.Module):
    """Two-head classifier for Maximum Classifier Discrepancy (MCD).

    One shared backbone feeds two independent linear heads; training plays
    them against each other, inference normally uses only the first.
    """
    def __init__(self, backbone, num_classes):
        super(MCDNet, self).__init__()
        self.backbone = backbone
        try:
            feature_size = backbone.feature_size
        except AttributeError:
            # DataParallel/DDP wrappers hide attributes behind .module
            feature_size = backbone.module.feature_size
        self.fc1 = nn.Linear(feature_size, num_classes)
        self.fc2 = nn.Linear(feature_size, num_classes)

    def forward(self, x, return_double=False):
        _, feature = self.backbone(x, return_feature=True)
        logits1, logits2 = self.fc1(feature), self.fc2(feature)
        return (logits1, logits2) if return_double else logits1
| 732 | 24.275862 | 58 | py |
null | OpenOOD-main/openood/networks/mmcls_featext.py | from mmcls.models import CLASSIFIERS, ImageClassifier
@CLASSIFIERS.register_module()
class ImageClassifierWithReturnFeature(ImageClassifier):
    """mmcls ImageClassifier that can also expose raw backbone features."""
    def forward(self, x, *args, **kwargs):
        # NOTE(review): only the *presence* of the 'return_feature' keyword is
        # checked -- passing return_feature=False still takes the feature
        # branch; confirm that callers never pass an explicit False.
        if 'return_feature' in kwargs:
            # presumably the backbone returns a tuple of stage outputs and
            # [0][-1] selects the last element of the first one -- verify
            # against the configured backbone.
            return self.backbone(x)[0][-1]
        else:
            return super().forward(x, *args, **kwargs)
| 338 | 29.818182 | 56 | py |
null | OpenOOD-main/openood/networks/net_utils_.py | from types import MethodType
import mmcv
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
from mmcls.apis import init_model
import openood.utils.comm as comm
from .bit import KNOWN_MODELS
from .conf_branch_net import ConfBranchNet
from .csi_net import CSINet
from .de_resnet18_256x256 import AttnBasicBlock, BN_layer, De_ResNet18_256x256
from .densenet import DenseNet3
from .draem_net import DiscriminativeSubNetwork, ReconstructiveSubNetwork
from .dropout_net import DropoutNet
from .dsvdd_net import build_network
from .godin_net import GodinNet
from .lenet import LeNet
from .mcd_net import MCDNet
from .openmax_net import OpenMax
from .patchcore_net import PatchcoreNet
from .projection_net import ProjectionNet
from .react_net import ReactNet
from .resnet18_32x32 import ResNet18_32x32
from .resnet18_64x64 import ResNet18_64x64
from .resnet18_224x224 import ResNet18_224x224
from .resnet18_256x256 import ResNet18_256x256
from .resnet50 import ResNet50
from .udg_net import UDGNet
from .wrn import WideResNet
def get_network(network_config):
    """Build the network(s) described by ``network_config``.

    Dispatches on ``network_config.name``, optionally loads pretrained
    weights, wraps the model(s) in DistributedDataParallel when more than
    one GPU is configured, and moves everything to CUDA when at least one
    GPU is available.

    Returns:
        Either a single module, or a dict of named sub-networks for
        multi-component methods (DRAEM, OpenGAN, ARPL, RD4AD, ...).

    Raises:
        Exception: if ``network_config.name`` is not recognised.
    """
    num_classes = network_config.num_classes

    # --- architecture dispatch -------------------------------------------
    if network_config.name == 'resnet18_32x32':
        net = ResNet18_32x32(num_classes=num_classes)
    # FIX: this branch was a second bare `if`, which restarted the chain and
    # sent a valid 'resnet18_32x32' request into the final `else`/raise.
    elif network_config.name == 'resnet18_32x32_changed':
        net = ResNet18_256x256(num_classes=num_classes)
    elif network_config.name == 'resnet18_64x64':
        net = ResNet18_64x64(num_classes=num_classes)
    elif network_config.name == 'resnet18_224x224':
        net = ResNet18_224x224(num_classes=num_classes)
    elif network_config.name == 'resnet50':
        net = ResNet50(num_classes=num_classes)
    elif network_config.name == 'lenet':
        net = LeNet(num_classes=num_classes, num_channel=3)
    elif network_config.name == 'wrn':
        net = WideResNet(depth=28,
                         widen_factor=10,
                         dropRate=0.0,
                         num_classes=num_classes)
    elif network_config.name == 'densenet':
        net = DenseNet3(depth=100,
                        growth_rate=12,
                        reduction=0.5,
                        bottleneck=True,
                        dropRate=0.0,
                        num_classes=num_classes)
    elif network_config.name == 'patchcore_net':
        backbone = get_network(network_config.backbone)
        net = PatchcoreNet(backbone)
    elif network_config.name == 'wide_resnet_50_2':
        module = torch.hub.load('pytorch/vision:v0.9.0',
                                'wide_resnet50_2',
                                pretrained=True)
        net = PatchcoreNet(module)
    elif network_config.name == 'godin_net':
        backbone = get_network(network_config.backbone)
        net = GodinNet(backbone=backbone,
                       feature_size=backbone.feature_size,
                       num_classes=num_classes,
                       similarity_measure=network_config.similarity_measure)
    elif network_config.name == 'react_net':
        backbone = get_network(network_config.backbone)
        net = ReactNet(backbone)
    elif network_config.name == 'csi_net':
        backbone = get_network(network_config.backbone)
        net = CSINet(backbone,
                     feature_size=backbone.feature_size,
                     num_classes=num_classes,
                     simclr_dim=network_config.simclr_dim,
                     shift_trans_type=network_config.shift_trans_type)
    elif network_config.name == 'draem':
        # DRAEM is a pair of reconstructive + discriminative sub-networks
        model = ReconstructiveSubNetwork(in_channels=3,
                                         out_channels=3,
                                         base_width=int(
                                             network_config.image_size / 2))
        model_seg = DiscriminativeSubNetwork(
            in_channels=6,
            out_channels=2,
            base_channels=int(network_config.image_size / 4))
        net = {'generative': model, 'discriminative': model_seg}
    elif network_config.name == 'openmax_network':
        backbone = get_network(network_config.backbone)
        net = OpenMax(backbone=backbone, num_classes=num_classes)
    elif network_config.name == 'mcd':
        backbone = get_network(network_config.backbone)
        net = MCDNet(backbone=backbone, num_classes=num_classes)
    elif network_config.name == 'udg':
        backbone = get_network(network_config.backbone)
        net = UDGNet(backbone=backbone,
                     num_classes=num_classes,
                     num_clusters=network_config.num_clusters)
    elif network_config.name == 'opengan':
        from .opengan import Discriminator, Generator
        backbone = get_network(network_config.backbone)
        netG = Generator(in_channels=network_config.nz,
                         feature_size=network_config.ngf,
                         out_channels=network_config.nc)
        netD = Discriminator(in_channels=network_config.nc,
                             feature_size=network_config.ndf)
        net = {'netG': netG, 'netD': netD, 'backbone': backbone}
    elif network_config.name == 'arpl_gan':
        from .arpl_net import (resnet34ABN, Generator, Discriminator,
                               Generator32, Discriminator32, ARPLayer)
        feature_net = resnet34ABN(num_classes=num_classes, num_bns=2)
        dim_centers = feature_net.fc.weight.shape[1]
        feature_net.fc = nn.Identity()

        criterion = ARPLayer(feat_dim=dim_centers,
                             num_classes=num_classes,
                             weight_pl=network_config.weight_pl,
                             temp=network_config.temp)

        assert network_config.image_size == 32 \
            or network_config.image_size == 64, \
            'ARPL-GAN only supports 32x32 or 64x64 images!'
        if network_config.image_size == 64:
            netG = Generator(1, network_config.nz, network_config.ngf,
                             network_config.nc)  # ngpu, nz, ngf, nc
            netD = Discriminator(1, network_config.nc,
                                 network_config.ndf)  # ngpu, nc, ndf
        else:
            netG = Generator32(1, network_config.nz, network_config.ngf,
                               network_config.nc)  # ngpu, nz, ngf, nc
            netD = Discriminator32(1, network_config.nc,
                                   network_config.ndf)  # ngpu, nc, ndf

        net = {
            'netF': feature_net,
            'criterion': criterion,
            'netG': netG,
            'netD': netD
        }
    elif network_config.name == 'arpl_net':
        from .arpl_net import ARPLayer
        feature_net = get_network(network_config.feat_extract_network)
        # the feature extractor may expose its head as .fc or .classifier
        try:
            dim_centers = feature_net.fc.weight.shape[1]
            feature_net.fc = nn.Identity()
        except Exception:
            dim_centers = feature_net.classifier[0].weight.shape[1]
            feature_net.classifier = nn.Identity()

        criterion = ARPLayer(feat_dim=dim_centers,
                             num_classes=num_classes,
                             weight_pl=network_config.weight_pl,
                             temp=network_config.temp)
        net = {'netF': feature_net, 'criterion': criterion}
    elif network_config.name == 'bit':
        net = KNOWN_MODELS[network_config.model](
            head_size=network_config.num_logits,
            zero_head=True,
            num_block_open=network_config.num_block_open)
    elif network_config.name == 'vit':
        cfg = mmcv.Config.fromfile(network_config.model)
        net = init_model(cfg, network_config.checkpoint, 0)
        # graft a get_fc accessor so downstream code can read the head
        net.get_fc = MethodType(
            lambda self: (self.head.layers.head.weight.cpu().numpy(),
                          self.head.layers.head.bias.cpu().numpy()), net)
    elif network_config.name == 'conf_branch_net':
        backbone = get_network(network_config.backbone)
        net = ConfBranchNet(backbone=backbone, num_classes=num_classes)
    elif network_config.name == 'dsvdd':
        net = build_network(network_config.type)
    elif network_config.name == 'projectionNet':
        backbone = get_network(network_config.backbone)
        net = ProjectionNet(backbone=backbone, num_classes=2)
    elif network_config.name == 'dropout_net':
        backbone = get_network(network_config.backbone)
        net = DropoutNet(backbone=backbone, dropout_p=network_config.dropout_p)
    elif network_config.name == 'rd4ad_net':
        encoder = get_network(network_config.backbone)
        bn = BN_layer(AttnBasicBlock, 2)
        decoder = De_ResNet18_256x256()
        net = {'encoder': encoder, 'bn': bn, 'decoder': decoder}
    else:
        raise Exception('Unexpected Network Architecture!')

    # --- optional checkpoint loading -------------------------------------
    if network_config.pretrained:
        if type(net) is dict:
            # multi-component nets: one checkpoint entry per sub-network
            for subnet, checkpoint in zip(net.values(),
                                          network_config.checkpoint):
                if checkpoint is not None:
                    if checkpoint != 'none':
                        subnet.load_state_dict(torch.load(checkpoint),
                                               strict=False)
        elif network_config.name == 'bit' and not network_config.normal_load:
            net.load_from(np.load(network_config.checkpoint))
        elif network_config.name == 'vit':
            pass
        else:
            try:
                net.load_state_dict(torch.load(network_config.checkpoint),
                                    strict=False)
            except RuntimeError:
                # sometimes fc should not be loaded
                loaded_pth = torch.load(network_config.checkpoint)
                loaded_pth.pop('fc.weight')
                loaded_pth.pop('fc.bias')
                net.load_state_dict(loaded_pth, strict=False)
        print('Model Loading {} Completed!'.format(network_config.name))

    # --- device placement / DDP wrapping ---------------------------------
    if network_config.num_gpus > 1:
        if type(net) is dict:
            for key, subnet in zip(net.keys(), net.values()):
                net[key] = torch.nn.parallel.DistributedDataParallel(
                    subnet,
                    device_ids=[comm.get_local_rank()],
                    broadcast_buffers=True)
        else:
            net = torch.nn.parallel.DistributedDataParallel(
                net.cuda(),
                device_ids=[comm.get_local_rank()],
                broadcast_buffers=True)

    if network_config.num_gpus > 0:
        if type(net) is dict:
            for subnet in net.values():
                subnet.cuda()
        else:
            net.cuda()

    torch.cuda.manual_seed(1)
    np.random.seed(1)
    cudnn.benchmark = True
    return net
| 10,932 | 38.756364 | 79 | py |
null | OpenOOD-main/openood/networks/npos_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class NPOSNet(nn.Module):
    """Projection network for NPOS: backbone + head mapping onto the unit sphere."""
    def __init__(self, backbone, head, feat_dim, num_classes):
        super(NPOSNet, self).__init__()
        self.backbone = backbone
        if hasattr(self.backbone, 'fc'):
            # remove fc otherwise ddp will report unused params
            self.backbone.fc = nn.Identity()
        try:
            feature_size = backbone.feature_size
        except AttributeError:
            # DataParallel/DDP wrappers hide attributes behind .module
            feature_size = backbone.module.feature_size

        # learnable class prototypes living in the projected space
        self.prototypes = nn.Parameter(torch.zeros(num_classes, feat_dim),
                                       requires_grad=True)
        self.mlp = nn.Sequential(nn.Linear(feature_size, feat_dim),
                                 nn.ReLU(inplace=True), nn.Linear(feat_dim, 1))
        if head == 'linear':
            self.head = nn.Linear(feature_size, feat_dim)
        elif head == 'mlp':
            self.head = nn.Sequential(nn.Linear(feature_size, feature_size),
                                      nn.ReLU(inplace=True),
                                      nn.Linear(feature_size, feat_dim))

    def forward(self, x):
        embedding = self.backbone(x).squeeze()
        projected = self.head(embedding)
        return F.normalize(projected, dim=1)

    def intermediate_forward(self, x):
        # unit-normalised backbone embedding, bypassing the head
        embedding = self.backbone(x).squeeze()
        return F.normalize(embedding, dim=1)
| 1,468 | 33.97619 | 79 | py |
null | OpenOOD-main/openood/networks/opengan.py | from torch import nn
class Generator(nn.Module):
    """OpenGAN generator: maps a latent map to fake features via 1x1 convs.

    Every convolution is 1x1 / stride 1 / no padding, so the spatial size
    of the input noise map is preserved end to end; only channel counts
    change (nz -> 8*ngf -> 4*ngf -> 2*ngf -> 4*ngf -> nc).
    """
    def __init__(self, in_channels=100, feature_size=64, out_channels=512):
        super(Generator, self).__init__()
        self.nz = in_channels
        self.ngf = feature_size
        self.nc = out_channels
        nz, ngf, nc = self.nz, self.ngf, self.nc
        self.main = nn.Sequential(
            nn.Conv2d(nz, ngf * 8, 1, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            nn.Conv2d(ngf * 8, ngf * 4, 1, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            nn.Conv2d(ngf * 4, ngf * 2, 1, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            nn.Conv2d(ngf * 2, ngf * 4, 1, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # final projection to nc channels keeps a bias, no activation
            nn.Conv2d(ngf * 4, nc, 1, 1, 0, bias=True),
        )

    def forward(self, input):
        return self.main(input)
class Discriminator(nn.Module):
    """OpenGAN discriminator: per-location real/fake score via 1x1 convs.

    The channel count shrinks nc -> 8*ndf -> 4*ndf -> 2*ndf -> ndf -> 1 and
    the final sigmoid squashes the score map into (0, 1).
    """
    def __init__(self, in_channels=512, feature_size=64):
        super(Discriminator, self).__init__()
        self.nc = in_channels
        self.ndf = feature_size
        nc, ndf = self.nc, self.ndf
        self.main = nn.Sequential(
            nn.Conv2d(nc, ndf * 8, 1, 1, 0, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(ndf * 8, ndf * 4, 1, 1, 0, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(ndf * 4, ndf * 2, 1, 1, 0, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(ndf * 2, ndf, 1, 1, 0, bias=False),
            nn.BatchNorm2d(ndf),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(ndf, 1, 1, 1, 0, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, input):
        return self.main(input)
| 2,526 | 37.876923 | 75 | py |
null | OpenOOD-main/openood/networks/openmax_net.py | import torch.nn as nn
import torch.nn.functional as F
class OpenMax(nn.Module):
    """Backbone + linear classifier used as the base network for OpenMax.

    The constructor probes the backbone for the width of its last layer,
    optionally inserts a Linear+PReLU embedding layer, and places a
    ``num_classes``-way linear classifier on top.
    """
    def __init__(self, backbone, num_classes=50, embed_dim=None):
        super(OpenMax, self).__init__()
        # NOTE(review): the same object is stored under both names; the string
        # comparison in get_backbone_last_layer_out_channel suggests backbone
        # may also arrive as a name string -- confirm against callers.
        self.backbone_name = backbone
        self.backbone = backbone
        self.dim = self.get_backbone_last_layer_out_channel()
        if embed_dim:
            # optional bottleneck between backbone features and classifier
            self.embeddingLayer = nn.Sequential(
                nn.Linear(self.dim, embed_dim),
                nn.PReLU(),
            )
            self.dim = embed_dim
        self.classifier = nn.Linear(self.dim, num_classes)

    def get_backbone_last_layer_out_channel(self):
        """Descend into the backbone's last child until a Conv2d / Linear /
        BatchNorm2d leaf is found, and return that leaf's output width."""
        if self.backbone_name == 'LeNetPlus':
            # hard-coded: LeNetPlus flattens to 128 x 3 x 3
            return 128 * 3 * 3
        last_layer = list(self.backbone.children())[-1]
        while (not isinstance(last_layer, nn.Conv2d)) and \
                (not isinstance(last_layer, nn.Linear)) and \
                (not isinstance(last_layer, nn.BatchNorm2d)):
            temp_layer = list(last_layer.children())[-1]
            # skip empty Sequential placeholders (e.g. identity shortcuts)
            if isinstance(temp_layer, nn.Sequential) and len(
                    list(temp_layer.children())) == 0:
                temp_layer = list(last_layer.children())[-2]
            last_layer = temp_layer
        if isinstance(last_layer, nn.BatchNorm2d):
            return last_layer.num_features
        elif isinstance(last_layer, nn.Linear):
            return last_layer.out_features
        else:
            return last_layer.out_channels

    def forward(self, x):
        feature = self.backbone(x)
        if feature.dim() == 4:
            # conv feature map -> global average pool before flattening
            feature = F.adaptive_avg_pool2d(feature, 1)
        feature = feature.view(x.size(0), -1)
        # if includes embedding layer.
        feature = self.embeddingLayer(feature) if hasattr(
            self, 'embeddingLayer') else feature
        logits = self.classifier(feature)
        return logits

    def get_fc(self):
        """Return (weight, bias) of the classifier as numpy arrays."""
        fc = self.classifier
        return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy()
| 2,001 | 35.4 | 79 | py |
null | OpenOOD-main/openood/networks/patchcore_net.py | import torch
import torch.nn as nn
class PatchcoreNet(nn.Module):
    """Frozen feature extractor for PatchCore.

    Exposes two mid-level backbone feature maps used to build the
    PatchCore memory bank; all parameters are frozen (inference only).
    """
    def __init__(self, backbone):
        super(PatchcoreNet, self).__init__()

        # def hook_t(module, input, output):
        #     self.features.append(output)

        # path = '/home/pengyunwang/.cache/torch/hub/vision-0.9.0'
        # module = torch.hub._load_local(path,
        #                                'wide_resnet50_2',
        #                                pretrained=True)
        # self.module = module
        # self.module.layer2[-1].register_forward_hook(hook_t)
        # self.module.layer3[-1].register_forward_hook(hook_t)

        self.backbone = backbone
        # freeze everything -- PatchCore only reads features
        for param in self.parameters():
            param.requires_grad = False
        # self.module.cuda()
        # NOTE(review): hard-coded GPU placement; this breaks on CPU-only
        # hosts and bypasses the usual device handling in get_network.
        backbone.cuda()
        # criterion is never used in the code visible here; kept as-is
        self.criterion = torch.nn.MSELoss(reduction='sum')

    def forward(self, x, return_feature):
        # `return_feature` is accepted for API symmetry but not consulted
        _, feature_list = self.backbone(x, return_feature_list=True)
        # the two intermediate stages consumed by the PatchCore memory bank
        return [feature_list[-3], feature_list[-2]]

    # def init_features(self):
    #     self.features = []

    # def forward(self, x_t, return_feature):
    #     x_t = x_t.cuda()
    #     self.init_features()
    #     _ = self.module(x_t)
    #     import pdb
    #     pdb.set_trace()
    #     return self.features
null | OpenOOD-main/openood/networks/projection_net.py | import torch.nn as nn
from torchvision.models import resnet18
class ProjectionNet(nn.Module):
def __init__(self,
backbone,
head_layers=[512, 512, 512, 512, 512, 512, 512, 512, 128],
num_classes=2):
super(ProjectionNet, self).__init__()
self.backbone = backbone
# use res18 pretrained model if none is given
# self.backbone=resnet18(pretrained=True)
# penultimate layer feature size
last_layer = backbone.feature_size
sequential_layers = []
for num_neurons in head_layers:
sequential_layers.append(nn.Linear(last_layer, num_neurons))
sequential_layers.append(nn.BatchNorm1d(num_neurons))
sequential_layers.append(nn.ReLU(inplace=True))
last_layer = num_neurons
# the last layer without activation
head = nn.Sequential(*sequential_layers)
self.head = head
self.out = nn.Linear(last_layer, num_classes)
def forward(self, x):
# penultimate layer feature
_, embeds = self.backbone(x, return_feature=True)
tmp = self.head(embeds)
logits = self.out(tmp)
return embeds, logits
def get_fc(self):
fc = self.out
return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy()
| 1,343 | 32.6 | 79 | py |
null | OpenOOD-main/openood/networks/react_net.py | import torch.nn as nn
class ReactNet(nn.Module):
    """Wrapper implementing ReAct: rectify penultimate activations at a threshold."""
    def __init__(self, backbone):
        super(ReactNet, self).__init__()
        self.backbone = backbone

    def forward(self, x, return_feature=False, return_feature_list=False):
        # some backbones do not accept return_feature_list; degrade gracefully
        try:
            return self.backbone(x, return_feature, return_feature_list)
        except TypeError:
            return self.backbone(x, return_feature)

    def forward_threshold(self, x, threshold):
        """Clip the penultimate feature at ``threshold`` before the fc layer."""
        _, feature = self.backbone(x, return_feature=True)
        clipped = feature.clip(max=threshold).view(feature.size(0), -1)
        return self.backbone.get_fc_layer()(clipped)

    def get_fc(self):
        """Return (weight, bias) of the backbone's fc layer as numpy arrays."""
        fc = self.backbone.fc
        return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy()
| 822 | 31.92 | 79 | py |
null | OpenOOD-main/openood/networks/resnet18_224x224.py | from torchvision.models.resnet import BasicBlock, ResNet
class ResNet18_224x224(ResNet):
    """torchvision ResNet-18 for 224x224 inputs with OpenOOD's multi-output API.

    Adds ``return_feature`` / ``return_feature_list`` forward modes, a
    ReAct-style ``forward_threshold`` and fc accessors on top of
    ``torchvision.models.resnet.ResNet``.
    """
    def __init__(self,
                 block=BasicBlock,
                 layers=(2, 2, 2, 2),
                 num_classes=1000):
        # NOTE: the per-stage block counts default to an immutable tuple;
        # the original used a mutable list default (a classic Python pitfall).
        super(ResNet18_224x224, self).__init__(block=block,
                                               layers=layers,
                                               num_classes=num_classes)
        # width of the penultimate (pooled) feature
        self.feature_size = 512

    def forward(self, x, return_feature=False, return_feature_list=False):
        """Return logits, optionally with the penultimate feature or all stages."""
        feature1 = self.relu(self.bn1(self.conv1(x)))
        feature1 = self.maxpool(feature1)
        feature2 = self.layer1(feature1)
        feature3 = self.layer2(feature2)
        feature4 = self.layer3(feature3)
        feature5 = self.layer4(feature4)
        feature5 = self.avgpool(feature5)
        feature = feature5.view(feature5.size(0), -1)
        logits_cls = self.fc(feature)
        feature_list = [feature1, feature2, feature3, feature4, feature5]
        if return_feature:
            return logits_cls, feature
        elif return_feature_list:
            return logits_cls, feature_list
        else:
            return logits_cls

    def forward_threshold(self, x, threshold):
        """ReAct-style forward: clip the pooled feature at ``threshold``."""
        feature1 = self.relu(self.bn1(self.conv1(x)))
        feature1 = self.maxpool(feature1)
        feature2 = self.layer1(feature1)
        feature3 = self.layer2(feature2)
        feature4 = self.layer3(feature3)
        feature5 = self.layer4(feature4)
        feature5 = self.avgpool(feature5)
        feature = feature5.clip(max=threshold)
        feature = feature.view(feature.size(0), -1)
        logits_cls = self.fc(feature)
        return logits_cls

    def intermediate_forward(self, x, layer_index):
        """Return the activation after residual stage ``layer_index`` (1-4)."""
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.maxpool(out)

        out = self.layer1(out)
        if layer_index == 1:
            return out
        out = self.layer2(out)
        if layer_index == 2:
            return out
        out = self.layer3(out)
        if layer_index == 3:
            return out
        out = self.layer4(out)
        if layer_index == 4:
            return out
        raise ValueError

    def get_fc(self):
        """Return (weight, bias) of the fc layer as numpy arrays."""
        fc = self.fc
        return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy()

    def get_fc_layer(self):
        return self.fc
| 2,368 | 30.586667 | 79 | py |
null | OpenOOD-main/openood/networks/resnet18_256x256.py | import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet-18/34 style)."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes,
                               planes,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes,
                               planes,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        # 1x1 projection shortcut whenever shape or channel count changes
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes,
                          self.expansion * planes,
                          kernel_size=1,
                          stride=stride,
                          bias=False), nn.BatchNorm2d(self.expansion * planes))

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return F.relu(out + residual)
class BasicBlock2(nn.Module):
    """torchvision-style basic block taking an explicit ``downsample`` module."""
    expansion = 1

    def __init__(
        self,
        in_planes: int,
        planes: int,
        stride: int = 1,
        downsample=None,
    ) -> None:
        super(BasicBlock2, self).__init__()
        self.conv1 = nn.Conv2d(in_planes,
                               planes,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes,
                               planes,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # identity skip, optionally downsampled to match the main path
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + shortcut)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 residual bottleneck (ResNet-50 style)."""
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes,
                               planes,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes,
                               self.expansion * planes,
                               kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)

        # projection shortcut when spatial size or channel count changes
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes,
                          self.expansion * planes,
                          kernel_size=1,
                          stride=stride,
                          bias=False), nn.BatchNorm2d(self.expansion * planes))

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return F.relu(out + residual)
class ResNet18_256x256(nn.Module):
    """ResNet-18 variant for 256x256 inputs.

    Uses the ImageNet-style stem (7x7 stride-2 conv followed by a 3x3
    stride-2 max-pool) and torchvision-style weight initialisation, but
    exposes the same multi-output forward API as the other OpenOOD
    ResNet variants.
    """
    def __init__(self, block=BasicBlock2, num_blocks=None, num_classes=10):
        super(ResNet18_256x256, self).__init__()
        if num_blocks is None:
            num_blocks = [2, 2, 2, 2]
        self.in_planes = 64
        self._norm_layer = nn.BatchNorm2d
        self.conv1 = nn.Conv2d(
            3,
            64,
            kernel_size=7,  # origin 3
            stride=2,  # origin 1
            padding=3,  # origin 1
            bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2,
                                    padding=1)  # origin no
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        self.feature_size = 512 * block.expansion

        # torchvision-style init (the 32x32 variant keeps PyTorch defaults)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack ``num_blocks`` blocks; only the first strides/downsamples."""
        norm_layer = self._norm_layer
        downsample = None
        if stride != 1 or self.in_planes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.in_planes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.in_planes, planes, stride, downsample))
        self.in_planes = planes * block.expansion
        for _ in range(1, num_blocks):
            layers.append(block(self.in_planes, planes))
        return nn.Sequential(*layers)

    def forward(self, x, return_feature=False, return_feature_list=False):
        """Return logits, optionally with the penultimate feature or all stages."""
        feature1 = self.maxpool(F.relu(self.bn1(
            self.conv1(x))))  # origin no maxpool
        feature2 = self.layer1(feature1)
        feature3 = self.layer2(feature2)
        feature4 = self.layer3(feature3)
        feature5 = self.layer4(feature4)
        feature5 = self.avgpool(feature5)
        feature = feature5.view(feature5.size(0), -1)
        logits_cls = self.fc(feature)
        feature_list = [feature1, feature2, feature3, feature4, feature5]
        if return_feature:
            return logits_cls, feature
        elif return_feature_list:
            return logits_cls, feature_list
        else:
            return logits_cls

    def forward_threshold(self, x, threshold):
        """ReAct-style forward: clip the pooled feature at ``threshold``.

        FIX: the stem max-pool is now applied exactly as in ``forward``;
        the original omitted it here, so thresholded inference ran the
        residual stages on feature maps of a different spatial size than
        the ones seen during training.
        """
        feature1 = self.maxpool(F.relu(self.bn1(self.conv1(x))))
        feature2 = self.layer1(feature1)
        feature3 = self.layer2(feature2)
        feature4 = self.layer3(feature3)
        feature5 = self.layer4(feature4)
        feature5 = self.avgpool(feature5)
        feature = feature5.clip(max=threshold)
        feature = feature.view(feature.size(0), -1)
        logits_cls = self.fc(feature)
        return logits_cls

    def get_fc(self):
        """Return (weight, bias) of the fc layer as numpy arrays."""
        fc = self.fc
        return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy()
| 8,058 | 34.817778 | 79 | py |
null | OpenOOD-main/openood/networks/resnet18_32x32.py | import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Two-conv residual block used by the CIFAR-style ResNet-18."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes,
                               planes,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes,
                               planes,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        # projection shortcut only when the output shape differs from x
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes,
                          self.expansion * planes,
                          kernel_size=1,
                          stride=stride,
                          bias=False), nn.BatchNorm2d(self.expansion * planes))

    def forward(self, x):
        skip = self.shortcut(x)
        main = self.bn2(self.conv2(F.relu(self.bn1(self.conv1(x)))))
        return F.relu(main + skip)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 residual bottleneck used by the CIFAR-style ResNets."""
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes,
                               planes,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes,
                               self.expansion * planes,
                               kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)

        # projection shortcut only when the output shape differs from x
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes,
                          self.expansion * planes,
                          kernel_size=1,
                          stride=stride,
                          bias=False), nn.BatchNorm2d(self.expansion * planes))

    def forward(self, x):
        skip = self.shortcut(x)
        main = F.relu(self.bn1(self.conv1(x)))
        main = F.relu(self.bn2(self.conv2(main)))
        main = self.bn3(self.conv3(main))
        return F.relu(main + skip)
class ResNet18_32x32(nn.Module):
    """CIFAR-style ResNet-18 (3x3 stride-1 stem, no stem max-pool)."""
    def __init__(self, block=BasicBlock, num_blocks=None, num_classes=10):
        super(ResNet18_32x32, self).__init__()
        if num_blocks is None:
            num_blocks = [2, 2, 2, 2]
        self.in_planes = 64

        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        self.feature_size = 512 * block.expansion

    def _make_layer(self, block, planes, num_blocks, stride):
        # only the first block of a stage may stride; the rest keep stride 1
        strides = [stride] + [1] * (num_blocks - 1)
        stage = []
        for s in strides:
            stage.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*stage)

    def _stages(self, x):
        """Return stem and per-stage activations."""
        stem = F.relu(self.bn1(self.conv1(x)))
        s1 = self.layer1(stem)
        s2 = self.layer2(s1)
        s3 = self.layer3(s2)
        s4 = self.layer4(s3)
        return stem, s1, s2, s3, s4

    def forward(self, x, return_feature=False, return_feature_list=False):
        """Return logits, optionally with the penultimate feature or all stages."""
        stem, s1, s2, s3, s4 = self._stages(x)
        pooled = self.avgpool(s4)
        feature = pooled.view(pooled.size(0), -1)
        logits_cls = self.fc(feature)
        if return_feature:
            return logits_cls, feature
        if return_feature_list:
            # note: the list holds the *pooled* last stage, matching the
            # original contract of this class
            return logits_cls, [stem, s1, s2, s3, pooled]
        return logits_cls

    def forward_threshold(self, x, threshold):
        """ReAct-style forward: clip the pooled feature at ``threshold``."""
        *_, s4 = self._stages(x)
        pooled = self.avgpool(s4)
        clipped = pooled.clip(max=threshold).view(pooled.size(0), -1)
        return self.fc(clipped)

    def intermediate_forward(self, x, layer_index):
        """Return the activation after residual stage ``layer_index`` (1-4)."""
        out = F.relu(self.bn1(self.conv1(x)))
        for index, stage in enumerate(
            (self.layer1, self.layer2, self.layer3, self.layer4), start=1):
            out = stage(out)
            if index == layer_index:
                return out
        raise ValueError

    def get_fc(self):
        """Return (weight, bias) of the fc layer as numpy arrays."""
        fc = self.fc
        return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy()

    def get_fc_layer(self):
        return self.fc
| 5,976 | 34.577381 | 79 | py |
null | OpenOOD-main/openood/networks/resnet18_64x64.py | import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Two-conv residual block (shared shape with the 32x32 variant)."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes,
                               planes,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes,
                               planes,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        # projection shortcut when the residual path changes the shape
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes,
                          self.expansion * planes,
                          kernel_size=1,
                          stride=stride,
                          bias=False), nn.BatchNorm2d(self.expansion * planes))

    def forward(self, x):
        bypass = self.shortcut(x)
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        return F.relu(y + bypass)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 residual bottleneck; output width is
    `planes * expansion`."""

    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)
        needs_projection = (stride != 1
                            or in_planes != self.expansion * planes)
        # Projection shortcut only when shape changes; identity otherwise.
        self.shortcut = (nn.Sequential(
            nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
                      stride=stride, bias=False),
            nn.BatchNorm2d(self.expansion * planes))
            if needs_projection else nn.Sequential())

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out)) + self.shortcut(x)
        return F.relu(out)
class ResNet18_64x64(nn.Module):
    """ResNet-18 for 64x64 inputs (stride-1 stem, AvgPool2d(8) head).

    Consistency fix: adds `intermediate_forward` and `get_fc_layer`,
    which the sibling ResNet variants in this package already expose,
    so OOD scorers can treat every backbone uniformly.
    """

    def __init__(self, block=BasicBlock, num_blocks=None, num_classes=10):
        super(ResNet18_64x64, self).__init__()
        if num_blocks is None:
            num_blocks = [2, 2, 2, 2]
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        # 64 -> 8 spatial after three stride-2 stages; pool to 1x1.
        self.avgpool = nn.AvgPool2d(8)
        self.fc = nn.Linear(512, num_classes)
        self.feature_size = 512

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack blocks; first one applies `stride`, rest stride 1."""
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x, return_feature=False, return_feature_list=False):
        """Classify; optionally return the flat feature or stage list."""
        feature1 = F.relu(self.bn1(self.conv1(x)))
        feature2 = self.layer1(feature1)
        feature3 = self.layer2(feature2)
        feature4 = self.layer3(feature3)
        feature5 = self.layer4(feature4)
        feature5 = self.avgpool(feature5)
        feature = feature5.view(feature5.size(0), -1)
        logits_cls = self.fc(feature)
        feature_list = [feature1, feature2, feature3, feature4, feature5]
        if return_feature:
            return logits_cls, feature
        elif return_feature_list:
            return logits_cls, feature_list
        else:
            return logits_cls

    def forward_threshold(self, x, threshold):
        """ReAct-style forward: clamp pooled features to `threshold`."""
        feature1 = F.relu(self.bn1(self.conv1(x)))
        feature2 = self.layer1(feature1)
        feature3 = self.layer2(feature2)
        feature4 = self.layer3(feature3)
        feature5 = self.layer4(feature4)
        feature5 = self.avgpool(feature5)
        feature = feature5.clip(max=threshold)
        feature = feature.view(feature.size(0), -1)
        logits_cls = self.fc(feature)
        return logits_cls

    def intermediate_forward(self, x, layer_index):
        """Return the activation after stage `layer_index` (1-4);
        raises ValueError otherwise.  Mirrors ResNet50."""
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        if layer_index == 1:
            return out
        out = self.layer2(out)
        if layer_index == 2:
            return out
        out = self.layer3(out)
        if layer_index == 3:
            return out
        out = self.layer4(out)
        if layer_index == 4:
            return out
        raise ValueError

    def get_fc(self):
        """Return classifier weight and bias as numpy arrays."""
        fc = self.fc
        return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy()

    def get_fc_layer(self):
        """Return the final linear layer module (mirrors ResNet50)."""
        return self.fc
| 5,378 | 36.615385 | 79 | py |
null | OpenOOD-main/openood/networks/resnet50.py | from torchvision.models.resnet import Bottleneck, ResNet
class ResNet50(ResNet):
    """torchvision ResNet-50 extended with a feature-returning forward,
    ReAct-style thresholding and per-stage intermediate taps."""

    def __init__(self,
                 block=Bottleneck,
                 layers=[3, 4, 6, 3],
                 num_classes=1000):
        super(ResNet50, self).__init__(block=block,
                                       layers=layers,
                                       num_classes=num_classes)
        # penultimate feature width of ResNet-50
        self.feature_size = 2048

    def forward(self, x, return_feature=False, return_feature_list=False):
        """Classify; optionally return the flat feature or stage list."""
        stem = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        s1 = self.layer1(stem)
        s2 = self.layer2(s1)
        s3 = self.layer3(s2)
        s4 = self.layer4(s3)
        pooled = self.avgpool(s4)
        feature = pooled.view(pooled.size(0), -1)
        logits_cls = self.fc(feature)
        if return_feature:
            return logits_cls, feature
        if return_feature_list:
            # last entry is the pooled stage-4 map (original reassigns
            # feature5 after avgpool)
            return logits_cls, [stem, s1, s2, s3, pooled]
        return logits_cls

    def forward_threshold(self, x, threshold):
        """ReAct-style forward: clamp pooled features to `threshold`."""
        out = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = self.avgpool(out)
        out = out.clip(max=threshold)
        return self.fc(out.view(out.size(0), -1))

    def intermediate_forward(self, x, layer_index):
        """Return the activation after stage `layer_index` (1-4);
        raises ValueError otherwise."""
        if layer_index not in (1, 2, 3, 4):
            raise ValueError
        out = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        out = self.layer1(out)
        if layer_index == 1:
            return out
        out = self.layer2(out)
        if layer_index == 2:
            return out
        out = self.layer3(out)
        if layer_index == 3:
            return out
        return self.layer4(out)

    def get_fc(self):
        """Return classifier weight and bias as numpy arrays."""
        head = self.fc
        return (head.weight.cpu().detach().numpy(),
                head.bias.cpu().detach().numpy())

    def get_fc_layer(self):
        """Return the final linear layer module itself."""
        return self.fc
| 2,337 | 30.173333 | 79 | py |
null | OpenOOD-main/openood/networks/rot_net.py | import torch.nn as nn
class RotNet(nn.Module):
    """Classifier plus an auxiliary 4-way rotation-prediction head
    (self-supervised auxiliary task)."""

    def __init__(self, backbone, num_classes):
        super(RotNet, self).__init__()
        self.backbone = backbone
        # Drop the backbone's own classifier so DDP does not report
        # parameters that never receive gradients.
        if hasattr(self.backbone, 'fc'):
            self.backbone.fc = nn.Identity()
        try:
            feat_dim = backbone.feature_size
        except AttributeError:
            # DDP-wrapped backbones expose attributes via `.module`
            feat_dim = backbone.module.feature_size
        self.fc = nn.Linear(feat_dim, num_classes)
        self.rot_fc = nn.Linear(feat_dim, 4)

    def forward(self, x, return_rot_logits=False):
        """Return class logits; optionally also rotation logits."""
        _, feature = self.backbone(x, return_feature=True)
        logits = self.fc(feature)
        if return_rot_logits:
            return logits, self.rot_fc(feature)
        return logits
| 885 | 26.6875 | 58 | py |
null | OpenOOD-main/openood/networks/rts_net.py | import torch
import torch.nn as nn
class RTSNet(nn.Module):
    """Wrap a backbone with an RTS head that predicts a per-sample
    log-variance with `dof` degrees of freedom."""

    def __init__(self, backbone, feature_size, num_classes, dof=16):
        """dof: degree of freedom of variance."""
        super(RTSNet, self).__init__()
        self.backbone = backbone
        self.feature_size = feature_size
        self.num_classes = num_classes
        self.dof = dof
        self.logvar_rts = nn.Sequential(
            nn.Linear(feature_size, self.dof),
            nn.BatchNorm1d(self.dof),
        )

    def forward(self, x, return_var=False):
        """Return class logits; optionally also the predicted variance
        (exp of the log-variance head)."""
        logits_cls, feature = self.backbone(x, return_feature=True)
        if not return_var:
            return logits_cls
        variance = self.logvar_rts(feature).exp()
        return logits_cls, variance
| 840 | 28 | 67 | py |
null | OpenOOD-main/openood/networks/simclr_net.py | import torch.nn as nn
import torch.nn.functional as F
class SimClrNet(nn.Module):
    """Backbone plus a 2-layer SimCLR projection head; forward returns
    the backbone output and the L2-normalised projection."""

    def __init__(self, backbone, out_dim=128) -> None:
        super(SimClrNet, self).__init__()
        self.backbone = backbone
        width = backbone.feature_size
        self.simclr_head = nn.Sequential(nn.Linear(width, width),
                                         nn.ReLU(inplace=True),
                                         nn.Linear(width, out_dim))

    def forward(self, x, return_feature=False, return_feature_list=False):
        # NOTE(review): the `return_feature*` flags are accepted but
        # ignored; the projection is always returned in a 1-element list.
        logits, feature = self.backbone.forward(x, return_feature=True)
        projected = F.normalize(self.simclr_head(feature), dim=-1)
        return logits, [projected]
null | OpenOOD-main/openood/networks/swin_t.py | from torchvision.models.swin_transformer import SwinTransformer
class Swin_T(SwinTransformer):
    """torchvision Swin-Tiny with a feature-returning forward and a
    ReAct-style `forward_threshold`."""

    def __init__(self,
                 patch_size=[4, 4],
                 embed_dim=96,
                 depths=[2, 2, 6, 2],
                 num_heads=[3, 6, 12, 24],
                 window_size=[7, 7],
                 stochastic_depth_prob=0.2,
                 num_classes=1000):
        super(Swin_T, self).__init__(
            patch_size=patch_size,
            embed_dim=embed_dim,
            depths=depths,
            num_heads=num_heads,
            window_size=window_size,
            stochastic_depth_prob=stochastic_depth_prob,
            num_classes=num_classes)
        # channel width of the last stage (doubles per stage)
        self.feature_size = embed_dim * 2**(len(depths) - 1)

    def _embed(self, x):
        # Shared trunk: stages -> norm -> NHWC->NCHW -> pool -> flatten.
        x = self.features(x)
        x = self.norm(x)
        x = self.permute(x)
        x = self.avgpool(x)
        return self.flatten(x)

    def forward(self, x, return_feature=False):
        """Classify; optionally also return the pooled feature."""
        feature = self._embed(x)
        logits = self.head(feature)
        return (logits, feature) if return_feature else logits

    def forward_threshold(self, x, threshold):
        """ReAct-style forward: clamp features to `threshold`."""
        feature = self._embed(x).clip(max=threshold)
        feature = feature.view(feature.size(0), -1)
        return self.head(feature)

    def get_fc(self):
        """Return classifier weight and bias as numpy arrays."""
        fc = self.head
        return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy()

    def get_fc_layer(self):
        """Return the classifier head module itself."""
        return self.head
| 1,667 | 30.471698 | 79 | py |
null | OpenOOD-main/openood/networks/temp.py | """ResNet in PyTorch.
ImageNet-Style ResNet
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
Adapted from: https://github.com/bearpaw/pytorch-classification
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Two-conv residual block; with `is_last` it also returns the
    pre-activation (pre-ReLU) sum, as used by SupCon-style training."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1, is_last=False):
        super(BasicBlock, self).__init__()
        self.is_last = is_last
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        needs_projection = (stride != 1
                            or in_planes != self.expansion * planes)
        # Projection shortcut only when shape changes; identity otherwise.
        self.shortcut = (nn.Sequential(
            nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
                      stride=stride, bias=False),
            nn.BatchNorm2d(self.expansion * planes))
            if needs_projection else nn.Sequential())

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        preact = self.bn2(self.conv2(out)) + self.shortcut(x)
        out = F.relu(preact)
        return (out, preact) if self.is_last else out
class Bottleneck(nn.Module):
    """1x1-3x3-1x1 bottleneck block; with `is_last` it also returns the
    pre-activation sum."""

    expansion = 4

    def __init__(self, in_planes, planes, stride=1, is_last=False):
        super(Bottleneck, self).__init__()
        self.is_last = is_last
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)
        needs_projection = (stride != 1
                            or in_planes != self.expansion * planes)
        # Projection shortcut only when shape changes; identity otherwise.
        self.shortcut = (nn.Sequential(
            nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
                      stride=stride, bias=False),
            nn.BatchNorm2d(self.expansion * planes))
            if needs_projection else nn.Sequential())

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        preact = self.bn3(self.conv3(out)) + self.shortcut(x)
        out = F.relu(preact)
        return (out, preact) if self.is_last else out
class ResNet(nn.Module):
    """ResNet trunk without a classifier head; forward returns the
    pooled, flattened feature vector."""

    def __init__(self, block, num_blocks, in_channel=3,
                 zero_init_residual=False):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(in_channel, 64, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out",
                                        nonlinearity="relu")
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-init the last BN of each residual branch so every block
        # starts as an identity mapping (arXiv:1706.02677, +0.2~0.3%).
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack blocks; only the first applies `stride`."""
        strides = [stride] + [1] * (num_blocks - 1)
        stages = []
        for s in strides:
            stages.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*stages)

    def forward(self, x, layer=100):
        # NOTE(review): `layer` is accepted but unused, as in the original.
        out = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = self.avgpool(out)
        return torch.flatten(out, 1)
def resnet18(**kwargs):
    """ResNet-18 trunk (BasicBlock x [2, 2, 2, 2])."""
    return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)


def resnet34(**kwargs):
    """ResNet-34 trunk (BasicBlock x [3, 4, 6, 3])."""
    return ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)


def resnet50(**kwargs):
    """ResNet-50 trunk (Bottleneck x [3, 4, 6, 3])."""
    return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)


def resnet101(**kwargs):
    """ResNet-101 trunk (Bottleneck x [3, 4, 23, 3])."""
    return ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)


# arch name -> (constructor, penultimate feature dimension)
model_dict = {
    "resnet18": [resnet18, 512],
    "resnet34": [resnet34, 512],
    "resnet50": [resnet50, 2048],
    "resnet101": [resnet101, 2048],
}
class SupResNet(nn.Module):
    """Supervised classifier: a trunk from `model_dict` plus a linear
    classification head."""

    def __init__(self, arch="resnet50", num_classes=10, **kwargs):
        super(SupResNet, self).__init__()
        build, feat_dim = model_dict[arch]
        self.encoder = build()
        self.head = nn.Linear(feat_dim, num_classes)

    def forward(self, x):
        features = self.encoder(x)
        return self.head(features)
class SSLResNet(nn.Module):
    """Self-supervised trunk plus a 2-layer projection head; forward
    returns the normalised projection and a 1-element list of it."""

    def __init__(self, arch="resnet50", out_dim=128, **kwargs):
        super(SSLResNet, self).__init__()
        build, feat_dim = model_dict[arch]
        self.encoder = build()
        self.head = nn.Sequential(nn.Linear(feat_dim, feat_dim),
                                  nn.ReLU(inplace=True),
                                  nn.Linear(feat_dim, out_dim))

    def forward(self, x, return_feature=False, return_feature_list=False):
        # NOTE(review): the flags are accepted but ignored, as upstream.
        projected = F.normalize(self.head(self.encoder(x)), dim=-1)
        return projected, [projected]
| 6,465 | 32.158974 | 88 | py |
null | OpenOOD-main/openood/networks/udg_net.py | import torch.nn as nn
class UDGNet(nn.Module):
    """Backbone with two heads: a `num_classes` classifier and a
    `num_clusters` auxiliary head (unsupervised dual grouping)."""

    def __init__(self, backbone, num_classes, num_clusters):
        super(UDGNet, self).__init__()
        self.backbone = backbone
        # Drop the backbone's own classifier so DDP does not report
        # unused parameters.
        if hasattr(self.backbone, 'fc'):
            self.backbone.fc = nn.Identity()
        self.fc = nn.Linear(backbone.feature_size, num_classes)
        self.fc_aux = nn.Linear(backbone.feature_size, num_clusters)

    def forward(self, x, return_feature=False, return_aux=False):
        """Return class logits, optionally with aux logits and/or the
        backbone feature."""
        _, feature = self.backbone(x, return_feature=True)
        logits_cls = self.fc(feature)
        if return_aux:
            logits_aux = self.fc_aux(feature)
            if return_feature:
                return logits_cls, logits_aux, feature
            return logits_cls, logits_aux
        if return_feature:
            return logits_cls, feature
        return logits_cls
| 999 | 32.333333 | 68 | py |
null | OpenOOD-main/openood/networks/utils.py | # import mmcv
from copy import deepcopy
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
# from mmcls.apis import init_model
import openood.utils.comm as comm
from .bit import KNOWN_MODELS
from .conf_branch_net import ConfBranchNet
from .csi_net import get_csi_linear_layers, CSINet
from .cider_net import CIDERNet
from .de_resnet18_256x256 import AttnBasicBlock, BN_layer, De_ResNet18_256x256
from .densenet import DenseNet3
from .draem_net import DiscriminativeSubNetwork, ReconstructiveSubNetwork
from .dropout_net import DropoutNet
from .dsvdd_net import build_network
from .godin_net import GodinNet
from .lenet import LeNet
from .mcd_net import MCDNet
from .npos_net import NPOSNet
from .openmax_net import OpenMax
from .patchcore_net import PatchcoreNet
from .projection_net import ProjectionNet
from .react_net import ReactNet
from .resnet18_32x32 import ResNet18_32x32
from .resnet18_64x64 import ResNet18_64x64
from .resnet18_224x224 import ResNet18_224x224
from .resnet18_256x256 import ResNet18_256x256
from .resnet50 import ResNet50
from .rot_net import RotNet
from .udg_net import UDGNet
from .vit_b_16 import ViT_B_16
from .wrn import WideResNet
from .rts_net import RTSNet
def get_network(network_config):
    """Build the network described by `network_config`.

    Returns either a single nn.Module or a dict of named sub-networks
    (for GAN-style / multi-part methods).  Honors `pretrained` /
    `checkpoint` for weight loading, `num_gpus` for DDP wrapping and
    CUDA placement.

    Bug fix: the single-checkpoint dict-network loading branch used
    `k.startwith(...)` (AttributeError at runtime); now `startswith`.
    """
    num_classes = network_config.num_classes

    if network_config.name == 'resnet18_32x32':
        net = ResNet18_32x32(num_classes=num_classes)

    elif network_config.name == 'resnet18_256x256':
        net = ResNet18_256x256(num_classes=num_classes)

    elif network_config.name == 'resnet18_64x64':
        net = ResNet18_64x64(num_classes=num_classes)

    elif network_config.name == 'resnet18_224x224':
        net = ResNet18_224x224(num_classes=num_classes)

    elif network_config.name == 'resnet50':
        net = ResNet50(num_classes=num_classes)

    elif network_config.name == 'lenet':
        net = LeNet(num_classes=num_classes, num_channel=3)

    elif network_config.name == 'wrn':
        net = WideResNet(depth=28,
                         widen_factor=10,
                         dropRate=0.0,
                         num_classes=num_classes)

    elif network_config.name == 'densenet':
        net = DenseNet3(depth=100,
                        growth_rate=12,
                        reduction=0.5,
                        bottleneck=True,
                        dropRate=0.0,
                        num_classes=num_classes)

    elif network_config.name == 'patchcore_net':
        backbone = get_network(network_config.backbone)
        net = PatchcoreNet(backbone)

    elif network_config.name == 'wide_resnet_50_2':
        module = torch.hub.load('pytorch/vision:v0.9.0',
                                'wide_resnet50_2',
                                pretrained=True)
        net = PatchcoreNet(module)

    elif network_config.name == 'godin_net':
        # build the backbone un-wrapped (single GPU) because we mutate it
        network_config.backbone.num_gpus = 1
        backbone = get_network(network_config.backbone)
        feature_size = backbone.feature_size
        # remove fc otherwise ddp will report unused params
        backbone.fc = nn.Identity()
        net = GodinNet(backbone=backbone,
                       feature_size=feature_size,
                       num_classes=num_classes,
                       similarity_measure=network_config.similarity_measure)

    elif network_config.name == 'cider_net':
        network_config.backbone.num_gpus = 1
        backbone = get_network(network_config.backbone)
        # remove fc otherwise ddp will report unused params
        backbone.fc = nn.Identity()
        net = CIDERNet(backbone=backbone,
                       head=network_config.head,
                       feat_dim=network_config.feat_dim,
                       num_classes=num_classes)

    elif network_config.name == 'npos_net':
        network_config.backbone.num_gpus = 1
        backbone = get_network(network_config.backbone)
        # remove fc otherwise ddp will report unused params
        backbone.fc = nn.Identity()
        net = NPOSNet(backbone=backbone,
                      head=network_config.head,
                      feat_dim=network_config.feat_dim,
                      num_classes=num_classes)

    elif network_config.name == 'rts_net':
        backbone = get_network(network_config.backbone)
        try:
            feature_size = backbone.feature_size
        except AttributeError:
            # DDP-wrapped backbones expose attributes via `.module`
            feature_size = backbone.module.feature_size
        net = RTSNet(backbone=backbone,
                     feature_size=feature_size,
                     num_classes=num_classes,
                     dof=network_config.dof)

    elif network_config.name == 'react_net':
        backbone = get_network(network_config.backbone)
        net = ReactNet(backbone)

    elif network_config.name == 'csi_net':
        network_config.backbone.num_gpus = 1
        backbone = get_network(network_config.backbone)
        feature_size = backbone.feature_size
        # remove fc otherwise ddp will report unused params
        backbone.fc = nn.Identity()
        net = get_csi_linear_layers(feature_size, num_classes,
                                    network_config.simclr_dim,
                                    network_config.shift_trans_type)
        net['backbone'] = backbone
        dummy_net = CSINet(deepcopy(backbone),
                           feature_size=feature_size,
                           num_classes=num_classes,
                           simclr_dim=network_config.simclr_dim,
                           shift_trans_type=network_config.shift_trans_type)
        net['dummy_net'] = dummy_net

    elif network_config.name == 'draem':
        model = ReconstructiveSubNetwork(in_channels=3,
                                         out_channels=3,
                                         base_width=int(
                                             network_config.image_size / 2))
        model_seg = DiscriminativeSubNetwork(
            in_channels=6,
            out_channels=2,
            base_channels=int(network_config.image_size / 4))
        net = {'generative': model, 'discriminative': model_seg}

    elif network_config.name == 'openmax_network':
        backbone = get_network(network_config.backbone)
        net = OpenMax(backbone=backbone, num_classes=num_classes)

    elif network_config.name == 'mcd':
        network_config.backbone.num_gpus = 1
        backbone = get_network(network_config.backbone)
        feature_size = backbone.feature_size
        # remove fc otherwise ddp will report unused params
        backbone.fc = nn.Identity()
        net = MCDNet(backbone=backbone, num_classes=num_classes)

    elif network_config.name == 'udg':
        network_config.backbone.num_gpus = 1
        backbone = get_network(network_config.backbone)
        feature_size = backbone.feature_size
        # remove fc otherwise ddp will report unused params
        backbone.fc = nn.Identity()
        net = UDGNet(backbone=backbone,
                     num_classes=num_classes,
                     num_clusters=network_config.num_clusters)

    elif network_config.name == 'opengan':
        from .opengan import Discriminator, Generator
        backbone = get_network(network_config.backbone)
        netG = Generator(in_channels=network_config.nz,
                         feature_size=network_config.ngf,
                         out_channels=network_config.nc)
        netD = Discriminator(in_channels=network_config.nc,
                             feature_size=network_config.ndf)
        net = {'netG': netG, 'netD': netD, 'backbone': backbone}

    elif network_config.name == 'arpl_gan':
        from .arpl_net import (resnet34ABN, Generator, Discriminator,
                               Generator32, Discriminator32, ARPLayer)
        feature_net = resnet34ABN(num_classes=num_classes, num_bns=2)
        dim_centers = feature_net.fc.weight.shape[1]
        feature_net.fc = nn.Identity()
        criterion = ARPLayer(feat_dim=dim_centers,
                             num_classes=num_classes,
                             weight_pl=network_config.weight_pl,
                             temp=network_config.temp)
        assert network_config.image_size == 32 \
            or network_config.image_size == 64, \
            'ARPL-GAN only supports 32x32 or 64x64 images!'
        if network_config.image_size == 64:
            netG = Generator(1, network_config.nz, network_config.ngf,
                             network_config.nc)  # ngpu, nz, ngf, nc
            netD = Discriminator(1, network_config.nc,
                                 network_config.ndf)  # ngpu, nc, ndf
        else:
            netG = Generator32(1, network_config.nz, network_config.ngf,
                               network_config.nc)  # ngpu, nz, ngf, nc
            netD = Discriminator32(1, network_config.nc,
                                   network_config.ndf)  # ngpu, nc, ndf
        net = {
            'netF': feature_net,
            'criterion': criterion,
            'netG': netG,
            'netD': netD
        }

    elif network_config.name == 'arpl_net':
        from .arpl_net import ARPLayer
        # build feature net un-wrapped (single GPU) because we mutate it
        network_config.feat_extract_network.num_gpus = 1
        feature_net = get_network(network_config.feat_extract_network)
        try:
            if isinstance(feature_net, nn.parallel.DistributedDataParallel):
                dim_centers = feature_net.module.fc.weight.shape[1]
                feature_net.module.fc = nn.Identity()
            else:
                dim_centers = feature_net.fc.weight.shape[1]
                feature_net.fc = nn.Identity()
        except Exception:
            # fall back for backbones whose head is `classifier`, not `fc`
            if isinstance(feature_net, nn.parallel.DistributedDataParallel):
                dim_centers = feature_net.module.classifier[0].weight.shape[1]
                feature_net.module.classifier = nn.Identity()
            else:
                dim_centers = feature_net.classifier[0].weight.shape[1]
                feature_net.classifier = nn.Identity()
        criterion = ARPLayer(feat_dim=dim_centers,
                             num_classes=num_classes,
                             weight_pl=network_config.weight_pl,
                             temp=network_config.temp)
        net = {'netF': feature_net, 'criterion': criterion}

    elif network_config.name == 'bit':
        net = KNOWN_MODELS[network_config.model](
            head_size=network_config.num_logits,
            zero_head=True,
            num_block_open=network_config.num_block_open)

    elif network_config.name == 'vit-b-16':
        net = ViT_B_16(num_classes=num_classes)

    elif network_config.name == 'conf_branch_net':
        network_config.backbone.num_gpus = 1
        backbone = get_network(network_config.backbone)
        feature_size = backbone.feature_size
        # remove fc otherwise ddp will report unused params
        backbone.fc = nn.Identity()
        net = ConfBranchNet(backbone=backbone, num_classes=num_classes)

    elif network_config.name == 'rot_net':
        network_config.backbone.num_gpus = 1
        backbone = get_network(network_config.backbone)
        feature_size = backbone.feature_size
        # remove fc otherwise ddp will report unused params
        backbone.fc = nn.Identity()
        net = RotNet(backbone=backbone, num_classes=num_classes)

    elif network_config.name == 'dsvdd':
        net = build_network(network_config.type)

    elif network_config.name == 'projectionNet':
        backbone = get_network(network_config.backbone)
        net = ProjectionNet(backbone=backbone, num_classes=2)

    elif network_config.name == 'dropout_net':
        backbone = get_network(network_config.backbone)
        net = DropoutNet(backbone=backbone, dropout_p=network_config.dropout_p)

    elif network_config.name == 'simclr_net':
        from .temp import SSLResNet
        net = SSLResNet()
        net.encoder = nn.DataParallel(net.encoder).cuda()

    elif network_config.name == 'rd4ad_net':
        encoder = get_network(network_config.backbone)
        bn = BN_layer(AttnBasicBlock, 2)
        decoder = De_ResNet18_256x256()
        net = {'encoder': encoder, 'bn': bn, 'decoder': decoder}

    else:
        raise Exception('Unexpected Network Architecture!')

    if network_config.pretrained:
        if isinstance(net, dict):
            if isinstance(network_config.checkpoint, list):
                # one checkpoint per sub-network; 'none'/None entries skip
                for subnet, checkpoint in zip(net.values(),
                                              network_config.checkpoint):
                    if checkpoint is not None and checkpoint != 'none':
                        subnet.load_state_dict(torch.load(checkpoint),
                                               strict=False)
            elif isinstance(network_config.checkpoint, str):
                # one combined checkpoint; split keys by sub-network prefix
                ckpt = torch.load(network_config.checkpoint)
                subnet_ckpts = {k: {} for k in net.keys()}
                for k, v in ckpt.items():
                    for subnet_name in net.keys():
                        # BUG FIX: was `k.startwith(...)` (AttributeError).
                        if k.startswith(subnet_name):
                            subnet_ckpts[subnet_name][k.replace(
                                subnet_name + '.', '')] = v
                            break
                for subnet_name, subnet in net.items():
                    subnet.load_state_dict(subnet_ckpts[subnet_name])
        elif network_config.name == 'bit' and not network_config.normal_load:
            net.load_from(np.load(network_config.checkpoint))
        elif network_config.name == 'vit':
            pass
        else:
            try:
                net.load_state_dict(torch.load(network_config.checkpoint),
                                    strict=False)
            except RuntimeError:
                # sometimes fc should not be loaded
                loaded_pth = torch.load(network_config.checkpoint)
                loaded_pth.pop('fc.weight')
                loaded_pth.pop('fc.bias')
                net.load_state_dict(loaded_pth, strict=False)
        print('Model Loading {} Completed!'.format(network_config.name))

    if network_config.num_gpus > 1:
        if isinstance(net, dict):
            for key, subnet in net.items():
                net[key] = torch.nn.parallel.DistributedDataParallel(
                    subnet.cuda(),
                    device_ids=[comm.get_local_rank()],
                    broadcast_buffers=True)
        else:
            net = torch.nn.parallel.DistributedDataParallel(
                net.cuda(),
                device_ids=[comm.get_local_rank()],
                broadcast_buffers=True)

    if network_config.num_gpus > 0:
        if isinstance(net, dict):
            for subnet in net.values():
                subnet.cuda()
        else:
            net.cuda()

    cudnn.benchmark = True
    return net
| 15,911 | 38.483871 | 79 | py |
null | OpenOOD-main/openood/networks/vit.py | # model settings
# mmcls-style config dict for a ViT-Base/16 classifier wrapped in
# `ImageClassifierWithReturnFeature` (a wrapper type named here; defined
# elsewhere in this package).
model = dict(type='ImageClassifierWithReturnFeature',
             backbone=dict(type='VisionTransformer',
                           arch='b',  # ViT-Base
                           img_size=384,
                           patch_size=16,
                           drop_rate=0.1,
                           # Kaiming init for the patch-embedding conv
                           init_cfg=[
                               dict(type='Kaiming',
                                    layer='Conv2d',
                                    mode='fan_in',
                                    nonlinearity='linear')
                           ]),
             neck=None,
             head=dict(
                 type='VisionTransformerClsHead',
                 num_classes=1000,
                 in_channels=768,  # ViT-Base hidden dim
                 loss=dict(type='LabelSmoothLoss',
                           label_smooth_val=0.1,
                           mode='classy_vision'),
             ))
| 902 | 38.26087 | 58 | py |
null | OpenOOD-main/openood/networks/vit_b_16.py | import torch
from torchvision.models.vision_transformer import VisionTransformer
class ViT_B_16(VisionTransformer):
    """torchvision ViT-B/16 with a feature-returning forward and a
    ReAct-style `forward_threshold`."""

    def __init__(self,
                 image_size=224,
                 patch_size=16,
                 num_layers=12,
                 num_heads=12,
                 hidden_dim=768,
                 mlp_dim=3072,
                 num_classes=1000):
        super(ViT_B_16, self).__init__(image_size=image_size,
                                       patch_size=patch_size,
                                       num_layers=num_layers,
                                       num_heads=num_heads,
                                       hidden_dim=hidden_dim,
                                       mlp_dim=mlp_dim,
                                       num_classes=num_classes)
        self.feature_size = hidden_dim

    def _encode(self, x):
        # Patchify, prepend the class token, run the encoder, and return
        # the class-token embedding (standard ViT classification feature).
        tokens = self._process_input(x)
        batch = tokens.shape[0]
        cls = self.class_token.expand(batch, -1, -1)
        tokens = torch.cat([cls, tokens], dim=1)
        encoded = self.encoder(tokens)
        return encoded[:, 0]

    def forward(self, x, return_feature=False):
        """Classify; optionally also return the class-token feature."""
        feature = self._encode(x)
        logits = self.heads(feature)
        return (logits, feature) if return_feature else logits

    def forward_threshold(self, x, threshold):
        """ReAct-style forward: clamp the feature to `threshold`."""
        feature = self._encode(x).clip(max=threshold)
        return self.heads(feature)

    def get_fc(self):
        """Return classifier weight and bias as numpy arrays."""
        fc = self.heads[0]
        return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy()

    def get_fc_layer(self):
        """Return the final linear layer module itself."""
        return self.heads[0]
| 2,131 | 30.820896 | 79 | py |
null | OpenOOD-main/openood/networks/wrn.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Pre-activation wide-ResNet block (BN-ReLU-conv twice).

    When input and output shapes differ, both the residual branch and
    the 1x1 shortcut read the *pre-activated* tensor; otherwise the
    skip is the raw input.

    Fix: the fragile `cond and module or None` idiom for `convShortcut`
    is replaced with an explicit conditional expression (same behavior;
    the old form only worked because nn.Module instances are truthy).
    """

    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes,
                               out_planes,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes,
                               out_planes,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # 1x1 projection only when shape changes; None for identity skip.
        self.convShortcut = (nn.Conv2d(in_planes,
                                       out_planes,
                                       kernel_size=1,
                                       stride=stride,
                                       padding=0,
                                       bias=False)
                             if not self.equalInOut else None)

    def forward(self, x):
        if not self.equalInOut:
            # pre-activate once and feed both branches from it
            x = self.relu1(self.bn1(x))
            out = self.relu2(self.bn2(self.conv1(x)))
        else:
            out = self.relu1(self.bn1(x))
            out = self.relu2(self.bn2(self.conv1(out)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        if not self.equalInOut:
            return torch.add(self.convShortcut(x), out)
        return torch.add(x, out)
class NetworkBlock(nn.Module):
    """One WRN stage: `nb_layers` blocks, the first of which widens the
    channels and applies `stride`; the rest keep shape.

    Fix: the bug-prone `cond and a or b` idiom in `_make_layer` is
    replaced with explicit conditional expressions (same behavior; the
    old form silently breaks whenever `a` is falsy).
    """

    def __init__(self,
                 nb_layers,
                 in_planes,
                 out_planes,
                 block,
                 stride,
                 dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers,
                                      stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride,
                    dropRate):
        layers = []
        for i in range(nb_layers):
            layers.append(
                block(in_planes if i == 0 else out_planes,
                      out_planes,
                      stride if i == 0 else 1,
                      dropRate))
        return nn.Sequential(*layers)

    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """Wide ResNet (WRN-depth-widen_factor) for 32x32 inputs."""

    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        widths = [
            16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor
        ]
        # depth must be 6n+4 (n blocks per stage)
        assert ((depth - 4) % 6 == 0)
        per_stage = (depth - 4) // 6
        block = BasicBlock
        # stem conv before any network block
        self.conv1 = nn.Conv2d(3,
                               widths[0],
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        # three stages; the last two halve the spatial size
        self.block1 = NetworkBlock(per_stage, widths[0], widths[1], block, 1,
                                   dropRate)
        self.block2 = NetworkBlock(per_stage, widths[1], widths[2], block, 2,
                                   dropRate)
        self.block3 = NetworkBlock(per_stage, widths[2], widths[3], block, 2,
                                   dropRate)
        # final pre-activation and classifier
        self.bn1 = nn.BatchNorm2d(widths[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(widths[3], num_classes)
        self.nChannels = widths[3]

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x, return_feature=False):
        """Classify; optionally return [flat, stem, s1, s2, s3, act].

        Note the flat feature comes *first* in the list, unlike the
        ResNet variants in this package.
        """
        feature1 = self.conv1(x)
        feature2 = self.block1(feature1)
        feature3 = self.block2(feature2)
        feature4 = self.block3(feature3)
        feature5 = self.relu(self.bn1(feature4))
        pooled = F.avg_pool2d(feature5, 8)
        feature = pooled.view(-1, self.nChannels)
        logits_cls = self.fc(feature)
        if return_feature:
            return logits_cls, [
                feature, feature1, feature2, feature3, feature4, feature5
            ]
        return logits_cls

    def intermediate_forward(self, x, layer_index):
        # NOTE(review): `layer_index` is accepted but ignored — the final
        # post-BN activation is always returned; confirm with callers.
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        return self.relu(self.bn1(out))

    def feature_list(self, x):
        """Return (logits, [final post-BN activation])."""
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        taps = [out]
        pooled = F.avg_pool2d(out, 8)
        flat = pooled.view(-1, self.nChannels)
        return self.fc(flat), taps
| 5,513 | 34.121019 | 78 | py |
null | OpenOOD-main/openood/pipelines/__init__.py | from .utils import get_pipeline
| 32 | 15.5 | 31 | py |
null | OpenOOD-main/openood/pipelines/feat_extract_opengan_pipeline.py | from openood.datasets import get_dataloader, get_ood_dataloader
from openood.evaluators import get_evaluator
from openood.networks import get_network
from openood.utils import setup_logger
class FeatExtractOpenGANPipeline:
    """Pipeline that dumps the features OpenGAN trains on.

    After a sanity-check accuracy pass on the ID validation split, features
    are extracted for ID train, ID val and OOD val.
    """

    def __init__(self, config) -> None:
        self.config = config

    def run(self):
        # create the output directory and persist the resolved config
        setup_logger(self.config)

        # dataloaders; the three splits used below must all be present
        id_loaders = get_dataloader(self.config)
        ood_loaders = get_ood_dataloader(self.config)
        for split, loaders in (('train', id_loaders), ('val', id_loaders),
                               ('val', ood_loaders)):
            assert split in loaders

        model = get_network(self.config.network)
        evaluator = get_evaluator(self.config)

        # sanity check on id val accuracy
        print('\nStart evaluation on ID val data...', flush=True)
        metrics = evaluator.eval_acc(model, id_loaders['val'])
        print('\nComplete Evaluation, accuracy {:.2f}%'.format(
            100 * metrics['acc']),
              flush=True)

        # start extracting features
        print('\nStart Feature Extraction...', flush=True)
        print('\t ID training data...')
        evaluator.extract(model, id_loaders['train'], 'id_train')
        print('\t ID val data...')
        evaluator.extract(model, id_loaders['val'], 'id_val')
        print('\t OOD val data...')
        evaluator.extract(model, ood_loaders['val'], 'ood_val')
        print('\nComplete Feature Extraction!')
| 1,579 | 33.347826 | 69 | py |
null | OpenOOD-main/openood/pipelines/feat_extract_pipeline.py | from openood.datasets import get_dataloader
from openood.evaluators import get_evaluator
from openood.networks import get_network
from openood.utils import setup_logger
class FeatExtractPipeline:
    """Pipeline that reports accuracy on one split and then extracts its
    features; the split is chosen by ``config.pipeline.extract_target``."""

    def __init__(self, config) -> None:
        self.config = config

    def run(self):
        # create the output directory and persist the resolved config
        setup_logger(self.config)

        # pick the split to extract from
        loaders = get_dataloader(self.config)
        target_loader = loaders[self.config.pipeline.extract_target]

        model = get_network(self.config.network)
        evaluator = get_evaluator(self.config)

        # start calculating accuracy
        print('\nStart evaluation...', flush=True)
        metrics = evaluator.eval_acc(model, target_loader)
        print('\nComplete Evaluation, accuracy {:.2f}%'.format(
            100 * metrics['acc']),
              flush=True)

        # start extracting features
        print('\nStart Feature Extraction...', flush=True)
        evaluator.extract(model, target_loader)
        print('\nComplete Feature Extraction!')
| 1,145 | 30.833333 | 70 | py |
null | OpenOOD-main/openood/pipelines/finetune_pipeline.py | from openood.datasets import get_dataloader
from openood.evaluators import get_evaluator
from openood.networks import get_network
from openood.recorders import get_recorder
from openood.trainers import get_trainer
from openood.utils import setup_logger
class FinetunePipeline:
    """Pipeline that fine-tunes a network, validating each epoch, then
    reports final test accuracy."""

    def __init__(self, config) -> None:
        self.config = config

    def run(self):
        # create the output directory and persist the resolved config
        setup_logger(self.config)

        # dataloaders for all three splits
        loaders = get_dataloader(self.config)
        train_loader = loaders['train']
        val_loader = loaders['val']
        test_loader = loaders['test']

        # network, trainer, evaluator and recorder
        model = get_network(self.config.network)
        trainer = get_trainer(model, train_loader, self.config)
        evaluator = get_evaluator(self.config)
        recorder = get_recorder(self.config)

        # trainer setup
        trainer.setup()
        print('\n' + u'\u2500' * 70, flush=True)

        print('Start training...', flush=True)
        for epoch_idx in range(1, self.config.optimizer.num_epochs + 1):
            # one training pass, then validate
            model, train_metrics = trainer.train_epoch(epoch_idx)
            val_metrics = evaluator.eval_acc(model, val_loader, None,
                                             epoch_idx)
            # checkpoint and log this epoch
            recorder.save_model(model, val_metrics)
            recorder.report(train_metrics, val_metrics)
        recorder.summary()
        print(u'\u2500' * 70, flush=True)

        # evaluate on test set
        print('Start testing...', flush=True)
        test_metrics = evaluator.eval_acc(model, test_loader)
        print('\nComplete Evaluation, accuracy {:.2f}'.format(
            100.0 * test_metrics['acc']),
              flush=True)
        print('Completed!', flush=True)
| 1,882 | 33.87037 | 78 | py |
null | OpenOOD-main/openood/pipelines/test_acc_pipeline.py | from openood.datasets import get_dataloader
from openood.evaluators import get_evaluator
from openood.networks import get_network
from openood.utils import setup_logger
class TestAccPipeline:
    """Pipeline that evaluates classification accuracy on the test split."""

    def __init__(self, config) -> None:
        self.config = config

    def run(self):
        # create the output directory and persist the resolved config
        setup_logger(self.config)

        # only the test split is needed here
        test_loader = get_dataloader(self.config)['test']

        model = get_network(self.config.network)
        evaluator = get_evaluator(self.config)

        # start calculating accuracy
        print('\nStart evaluation...', flush=True)
        metrics = evaluator.eval_acc(model, test_loader)
        print('\nComplete Evaluation, accuracy {:.2f}%'.format(
            100 * metrics['acc']),
              flush=True)
| 924 | 28.83871 | 65 | py |
null | OpenOOD-main/openood/pipelines/test_ad_pipeline.py | from openood.datasets import get_dataloader, get_ood_dataloader
from openood.evaluators.utils import get_evaluator
from openood.networks.utils import get_network
from openood.postprocessors import get_postprocessor
from openood.utils import setup_logger
class TestAdPipeline:
    """Pipeline that evaluates an anomaly-detection method on ID + OOD data."""

    def __init__(self, config) -> None:
        self.config = config

    def run(self):
        # create the output directory and persist the resolved config
        setup_logger(self.config)

        # in-distribution and out-of-distribution dataloaders
        id_loaders = get_dataloader(self.config)
        ood_loaders = get_ood_dataloader(self.config)

        model = get_network(self.config.network)
        evaluator = get_evaluator(self.config)
        postprocessor = get_postprocessor(self.config)
        # setup for distance-based methods
        postprocessor.setup(model, id_loaders, ood_loaders)

        print('Start testing...', flush=True)
        metrics = evaluator.eval_ood(model, id_loaders, ood_loaders,
                                     postprocessor)
        evaluator.report(metrics)
null | OpenOOD-main/openood/pipelines/test_ood_pipeline.py | import time
from openood.datasets import get_dataloader, get_ood_dataloader
from openood.evaluators import get_evaluator
from openood.networks import get_network
from openood.postprocessors import get_postprocessor
from openood.utils import setup_logger
class TestOODPipeline:
    """Pipeline that runs OOD detection with a configured postprocessor,
    supporting both the standard and full-spectrum ('fsood') schemes."""

    def __init__(self, config) -> None:
        self.config = config

    def run(self):
        # create the output directory and persist the resolved config
        setup_logger(self.config)

        # in-distribution and out-of-distribution dataloaders
        id_loaders = get_dataloader(self.config)
        ood_loaders = get_ood_dataloader(self.config)

        model = get_network(self.config.network)
        evaluator = get_evaluator(self.config)
        postprocessor = get_postprocessor(self.config)
        # setup for distance-based methods
        postprocessor.setup(model, id_loaders, ood_loaders)

        print('\n', flush=True)
        print(u'\u2500' * 70, flush=True)

        # start calculating accuracy
        print('\nStart evaluation...', flush=True)
        fsood = self.config.evaluator.ood_scheme == 'fsood'
        if fsood:
            # full-spectrum OOD also folds the covariate-shifted ID
            # loaders into the accuracy computation
            acc_metrics = evaluator.eval_acc(
                model,
                id_loaders['test'],
                postprocessor,
                fsood=True,
                csid_data_loaders=ood_loaders['csid'])
        else:
            acc_metrics = evaluator.eval_acc(model, id_loaders['test'],
                                             postprocessor)
        print('\nAccuracy {:.2f}%'.format(100 * acc_metrics['acc']),
              flush=True)
        print(u'\u2500' * 70, flush=True)

        # start evaluating ood detection methods
        start = time.time()
        if fsood:
            evaluator.eval_ood(model,
                               id_loaders,
                               ood_loaders,
                               postprocessor,
                               fsood=True)
        else:
            evaluator.eval_ood(model, id_loaders, ood_loaders, postprocessor)
        print('Time used for eval_ood: {:.0f}s'.format(time.time() - start))
        print('Completed!', flush=True)
| 2,281 | 34.65625 | 76 | py |
null | OpenOOD-main/openood/pipelines/test_ood_pipeline_aps.py | from openood.datasets import get_dataloader, get_ood_dataloader
from openood.evaluators import get_evaluator
from openood.networks import get_network
from openood.postprocessors import get_postprocessor
from openood.utils import setup_logger
class TestOODPipelineAPS:
    """Pipeline that runs OOD detection where the postprocessor performs
    automatic (hyper)parameter search (APS) during its setup."""

    def __init__(self, config) -> None:
        self.config = config

    def run(self):
        # create the output directory and persist the resolved config
        setup_logger(self.config)

        # in-distribution and out-of-distribution dataloaders
        id_loaders = get_dataloader(self.config)
        ood_loaders = get_ood_dataloader(self.config)

        model = get_network(self.config.network)
        evaluator = get_evaluator(self.config)
        postprocessor = get_postprocessor(self.config)
        # setup for distance-based methods
        postprocessor.setup(model, id_loaders, ood_loaders)

        print('\n', flush=True)
        print(u'\u2500' * 70, flush=True)

        # start calculating accuracy
        print('\nStart evaluation...', flush=True)
        acc_metrics = evaluator.eval_acc(model, id_loaders['test'],
                                         postprocessor)
        print('\nAccuracy {:.2f}%'.format(100 * acc_metrics['acc']),
              flush=True)
        print(u'\u2500' * 70, flush=True)

        # start evaluating ood detection methods
        evaluator.eval_ood(model, id_loaders, ood_loaders, postprocessor)
        print('Completed!', flush=True)
| 1,538 | 33.977273 | 79 | py |
null | OpenOOD-main/openood/pipelines/train_ad_pipeline.py | from openood.datasets import get_dataloader, get_ood_dataloader
from openood.evaluators import get_evaluator
from openood.networks import get_network
from openood.postprocessors import get_postprocessor
from openood.preprocessors.utils import get_preprocessor
from openood.recorders import get_recorder
from openood.trainers import get_trainer
from openood.utils import setup_logger
class TrainAdPipeline:
    """Pipeline that trains an anomaly-detection model, running the full
    OOD evaluation after every epoch and once more at the end."""

    def __init__(self, config) -> None:
        self.config = config

    def run(self):
        # create the output directory and persist the resolved config
        setup_logger(self.config)

        # in-distribution and out-of-distribution dataloaders
        id_loaders = get_dataloader(self.config)
        ood_loaders = get_ood_dataloader(self.config)
        train_loader = id_loaders['train']

        model = get_network(self.config.network)
        trainer = get_trainer(model, train_loader, self.config)
        evaluator = get_evaluator(self.config)
        postprocessor = get_postprocessor(self.config)
        # setup for distance-based methods
        postprocessor.setup(model, id_loaders, ood_loaders)
        recorder = get_recorder(self.config)

        print('Start training...', flush=True)
        for epoch_idx in range(1, self.config.optimizer.num_epochs + 1):
            # one training pass, then OOD evaluation
            model, train_metrics = trainer.train_epoch(epoch_idx)
            eval_metrics = evaluator.eval_ood(model,
                                              id_loaders,
                                              ood_loaders,
                                              postprocessor=postprocessor,
                                              epoch_idx=epoch_idx)
            # checkpoint and log this epoch
            recorder.save_model(model, eval_metrics)
            recorder.report(train_metrics, eval_metrics)
        recorder.summary()

        # evaluate on test set
        print('Start testing...', flush=True)
        test_metrics = evaluator.eval_ood(model,
                                          id_loaders,
                                          ood_loaders,
                                          postprocessor=postprocessor)
        evaluator.report(test_metrics)
| 2,301 | 38.016949 | 74 | py |
null | OpenOOD-main/openood/pipelines/train_aux_pipeline.py | from openood.datasets import get_dataloader
from openood.evaluators import get_evaluator
from openood.networks import get_network
from openood.recorders import get_recorder
from openood.trainers import get_trainer
from openood.utils import setup_logger
class TrainARPLGANPipeline:
    """Pipeline that trains ARPL with its auxiliary GAN trainer.

    Each epoch runs the main (GAN) trainer followed by the auxiliary plain
    'arpl' trainer on the same network and dataloader, then validates
    classification accuracy; the auxiliary loss is what gets reported.
    """

    def __init__(self, config) -> None:
        self.config = config

    def run(self):
        # generate output directory and save the full config file
        setup_logger(self.config)

        # get dataloader
        loader_dict = get_dataloader(self.config)
        train_loader, val_loader = loader_dict['train'], loader_dict['val']
        test_loader = loader_dict['test']

        # init network
        net = get_network(self.config.network)

        # init trainer and evaluator; the auxiliary trainer reuses the same
        # network and dataloader but runs the plain 'arpl' training step
        trainer = get_trainer(net, train_loader, self.config)
        self.config.trainer.name = 'arpl'
        trainer_aux = get_trainer(net, train_loader, self.config)
        evaluator = get_evaluator(self.config)

        # init recorder
        recorder = get_recorder(self.config)

        print('Start training...', flush=True)
        for epoch_idx in range(1, self.config.optimizer.num_epochs + 1):
            # train and eval the model
            net, train_metrics = trainer.train_epoch(epoch_idx)
            net, train_aux_metrics = trainer_aux.train_epoch(epoch_idx)
            # report the auxiliary (ARPL) loss as the epoch loss
            train_metrics['loss'] = train_aux_metrics['loss']
            val_metrics = evaluator.eval_acc(net, val_loader, None, epoch_idx)
            trainer.scheduler.step()
            # save model and report the result
            recorder.save_model(net, val_metrics)
            recorder.report(train_metrics, val_metrics)
        recorder.summary()
        print(u'\u2500' * 70, flush=True)

        # evaluate on test set
        print('Start testing...', flush=True)
        # BUGFIX: eval_acc expects the dataloader as its second argument;
        # the previous call passed `trainer.criterion` there, which is
        # inconsistent with the validation call above and with every other
        # pipeline in this package.
        test_metrics = evaluator.eval_acc(net, test_loader)
        print('\nComplete Evaluation, Last accuracy {:.2f}'.format(
            100.0 * test_metrics['acc']),
              flush=True)
        print('Completed!', flush=True)
| 2,091 | 37.036364 | 78 | py |
null | OpenOOD-main/openood/pipelines/train_ddt_pipeline.py | import openood.utils.comm as comm
from openood.datasets import get_dataloader
from openood.evaluators import get_evaluator
from openood.networks import get_network
from openood.recorders import get_recorder
from openood.trainers import get_trainer
from openood.utils import setup_logger
class TrainPipeline:
    """Distributed-aware training pipeline: trains, validates every epoch,
    and finally evaluates on the test split; checkpointing and logging
    happen only on the main process."""

    def __init__(self, config) -> None:
        self.config = config

    def run(self):
        # create the output directory and persist the resolved config
        setup_logger(self.config)

        # dataloaders for all three splits
        loaders = get_dataloader(self.config)
        train_loader = loaders['train']
        val_loader = loaders['val']
        test_loader = loaders['test']

        model = get_network(self.config.network)
        trainer = get_trainer(model, train_loader, self.config)
        evaluator = get_evaluator(self.config)

        # only the main process writes checkpoints / logs
        if comm.is_main_process():
            recorder = get_recorder(self.config)
            print('Start training...', flush=True)

        for epoch_idx in range(1, self.config.optimizer.num_epochs + 1):
            # train and eval the model
            model, train_metrics = trainer.train_epoch(epoch_idx)
            val_metrics = evaluator.eval_acc(model, val_loader, None,
                                             epoch_idx)
            comm.synchronize()
            if comm.is_main_process():
                # save model and report the result
                recorder.save_model(model, val_metrics)
                recorder.report(train_metrics, val_metrics)

        if comm.is_main_process():
            recorder.summary()
            print(u'\u2500' * 70, flush=True)

        # evaluate on test set
        print('Start testing...', flush=True)
        test_metrics = evaluator.eval_acc(model, test_loader)
        if comm.is_main_process():
            print('\nComplete Evaluation, Last accuracy {:.2f}'.format(
                100.0 * test_metrics['acc']),
                  flush=True)
            print('Completed!', flush=True)
| 2,054 | 33.830508 | 78 | py |
null | OpenOOD-main/openood/pipelines/train_oe_pipeline.py | import numpy as np
import torch
import openood.utils.comm as comm
from openood.datasets import get_dataloader
from openood.evaluators import get_evaluator
from openood.networks import get_network
from openood.recorders import get_recorder
from openood.trainers import get_trainer
from openood.utils import setup_logger
class TrainOEPipeline:
    """Training pipeline for Outlier Exposure: the trainer consumes both the
    ID training loader and an auxiliary outlier ('oe') loader."""

    def __init__(self, config) -> None:
        self.config = config

    def run(self):
        # create the output directory and persist the resolved config
        setup_logger(self.config)

        # seed both torch and numpy for reproducibility
        torch.manual_seed(self.config.seed)
        np.random.seed(self.config.seed)

        # dataloaders; 'oe' holds the auxiliary outlier data
        loaders = get_dataloader(self.config)
        train_loader = loaders['train']
        val_loader = loaders['val']
        train_oe_loader = loaders['oe']
        test_loader = loaders['test']

        model = get_network(self.config.network)
        trainer = get_trainer(model, [train_loader, train_oe_loader], None,
                              self.config)
        evaluator = get_evaluator(self.config)

        # only the main process writes checkpoints / logs
        if comm.is_main_process():
            recorder = get_recorder(self.config)
            print('Start training...', flush=True)

        for epoch_idx in range(1, self.config.optimizer.num_epochs + 1):
            # train and eval the model
            model, train_metrics = trainer.train_epoch(epoch_idx)
            val_metrics = evaluator.eval_acc(model, val_loader, None,
                                             epoch_idx)
            comm.synchronize()
            if comm.is_main_process():
                # save model and report the result
                recorder.save_model(model, val_metrics)
                recorder.report(train_metrics, val_metrics)

        if comm.is_main_process():
            recorder.summary()
            print(u'\u2500' * 70, flush=True)

        # evaluate on test set
        print('Start testing...', flush=True)
        test_metrics = evaluator.eval_acc(model, test_loader)
        if comm.is_main_process():
            print('\nComplete Evaluation, Last accuracy {:.2f}'.format(
                100.0 * test_metrics['acc']),
                  flush=True)
            print('Completed!', flush=True)
| 2,300 | 32.838235 | 78 | py |
null | OpenOOD-main/openood/pipelines/train_only_pipeline.py | from openood.datasets import get_feature_dataloader
from openood.networks import get_network
from openood.recorders import get_recorder
from openood.trainers import get_trainer
from openood.utils import setup_logger
class TrainOpenGanPipeline:
    """Pipeline that trains OpenGAN on pre-extracted features only,
    checkpointing after every epoch."""

    def __init__(self, config) -> None:
        self.config = config

    def run(self):
        # create the output directory and persist the resolved config
        setup_logger(self.config)

        # features were extracted beforehand; load them as a dataloader
        feat_loader = get_feature_dataloader(self.config.dataset)

        model = get_network(self.config.network)
        trainer = get_trainer(model, feat_loader, self.config)
        recorder = get_recorder(self.config)

        print('Start training...', flush=True)
        for epoch_idx in range(1, self.config.optimizer.num_epochs + 1):
            # one pass over the feature loader, then checkpoint and log
            model, train_metrics = trainer.train_epoch(epoch_idx)
            recorder.save_model(model, train_metrics)
            recorder.report(train_metrics)
        recorder.summary()
        print('Completed!', flush=True)
| 1,131 | 29.594595 | 72 | py |
null | OpenOOD-main/openood/pipelines/train_opengan_pipeline.py | import numpy as np
import torch
from openood.datasets import get_feature_opengan_dataloader
from openood.evaluators import get_evaluator
from openood.networks import get_network
from openood.postprocessors import get_postprocessor
from openood.recorders import get_recorder
from openood.trainers import get_trainer
from openood.utils import setup_logger
class TrainOpenGanPipeline:
    """Pipeline that trains OpenGAN on extracted features, selecting the
    best epoch by OOD-detection performance on the validation splits."""

    def __init__(self, config) -> None:
        self.config = config

    def run(self):
        # create the output directory and persist the resolved config
        setup_logger(self.config)

        # seed both torch and numpy for reproducibility
        torch.manual_seed(self.config.seed)
        np.random.seed(self.config.seed)

        # features were extracted beforehand; load them as dataloaders
        feat_loaders = get_feature_opengan_dataloader(self.config.dataset)
        id_loaders = {
            'train': feat_loaders['id_train'],
            'val': feat_loaders['id_val']
        }  # just for consistency with evaluator
        ood_loaders = {'val': feat_loaders['ood_val']}

        model = get_network(self.config.network)
        trainer = get_trainer(model, feat_loaders['id_train'],
                              feat_loaders['id_val'], self.config)
        evaluator = get_evaluator(self.config)
        recorder = get_recorder(self.config)
        postprocessor = get_postprocessor(self.config)

        print('Start training...', flush=True)
        for epoch_idx in range(1, self.config.optimizer.num_epochs + 1):
            # one training pass, then validate OOD detection performance
            model, train_metrics = trainer.train_epoch(epoch_idx)
            val_metrics = evaluator.eval_ood_val(model, id_loaders,
                                                 ood_loaders, postprocessor)
            val_metrics['epoch_idx'] = train_metrics['epoch_idx']
            recorder.save_model(model, val_metrics)
            recorder.report(train_metrics, val_metrics)
        recorder.summary()
        print('Completed!', flush=True)
| 2,024 | 33.322034 | 78 | py |
null | OpenOOD-main/openood/pipelines/train_pipeline.py | import numpy as np
import torch
import openood.utils.comm as comm
from openood.datasets import get_dataloader
from openood.evaluators import get_evaluator
from openood.networks import get_network
from openood.recorders import get_recorder
from openood.trainers import get_trainer
from openood.utils import setup_logger
class TrainPipeline:
    """Standard supervised training pipeline.

    Trains for ``config.optimizer.num_epochs`` epochs with per-epoch
    validation, special-casing trainers whose epoch outputs differ
    (MOS, CIDER, NPOS).  Only the main process records checkpoints/logs.
    """
    def __init__(self, config) -> None:
        self.config = config

    def run(self):
        """Execute the full train / validate / test loop."""
        # generate output directory and save the full config file
        setup_logger(self.config)

        # set random seed
        torch.manual_seed(self.config.seed)
        np.random.seed(self.config.seed)

        # get dataloader
        loader_dict = get_dataloader(self.config)
        train_loader, val_loader = loader_dict['train'], loader_dict['val']
        test_loader = loader_dict['test']

        # init network
        net = get_network(self.config.network)

        # init trainer and evaluator
        trainer = get_trainer(net, train_loader, val_loader, self.config)
        evaluator = get_evaluator(self.config)

        if comm.is_main_process():
            # init recorder
            recorder = get_recorder(self.config)

            print('Start training...', flush=True)

        for epoch_idx in range(1, self.config.optimizer.num_epochs + 1):
            # train and eval the model
            if self.config.trainer.name == 'mos':
                # MOS additionally yields its group structure, which the
                # evaluator needs for group-wise accuracy
                net, train_metrics, num_groups, group_slices = \
                    trainer.train_epoch(epoch_idx)
                val_metrics = evaluator.eval_acc(net,
                                                 val_loader,
                                                 train_loader,
                                                 epoch_idx,
                                                 num_groups=num_groups,
                                                 group_slices=group_slices)
            elif self.config.trainer.name in ['cider', 'npos']:
                net, train_metrics = trainer.train_epoch(epoch_idx)
                # cider and npos only trains the backbone
                # cannot evaluate ID acc without training the fc layer
                val_metrics = train_metrics
            else:
                net, train_metrics = trainer.train_epoch(epoch_idx)
                val_metrics = evaluator.eval_acc(net, val_loader, None,
                                                 epoch_idx)
            comm.synchronize()
            if comm.is_main_process():
                # save model and report the result
                recorder.save_model(net, val_metrics)
                recorder.report(train_metrics, val_metrics)

        if comm.is_main_process():
            recorder.summary()
            print(u'\u2500' * 70, flush=True)

        # evaluate on test set
        print('Start testing...', flush=True)
        test_metrics = evaluator.eval_acc(net, test_loader)
        if comm.is_main_process():
            print('\nComplete Evaluation, Last accuracy {:.2f}'.format(
                100.0 * test_metrics['acc']),
                  flush=True)
            print('Completed!', flush=True)
| 3,143 | 37.341463 | 75 | py |
null | OpenOOD-main/openood/pipelines/utils.py | from openood.utils import Config
from .feat_extract_pipeline import FeatExtractPipeline
from .feat_extract_opengan_pipeline import FeatExtractOpenGANPipeline
from .finetune_pipeline import FinetunePipeline
from .test_acc_pipeline import TestAccPipeline
from .test_ad_pipeline import TestAdPipeline
from .test_ood_pipeline import TestOODPipeline
from .train_ad_pipeline import TrainAdPipeline
from .train_aux_pipeline import TrainARPLGANPipeline
from .train_oe_pipeline import TrainOEPipeline
# from .train_only_pipeline import TrainOpenGanPipeline
from .train_opengan_pipeline import TrainOpenGanPipeline
from .train_pipeline import TrainPipeline
from .test_ood_pipeline_aps import TestOODPipelineAPS
def get_pipeline(config: Config):
    """Look up and instantiate the pipeline named by ``config.pipeline.name``.

    Raises:
        KeyError: if the configured name is not a registered pipeline.
    """
    registry = {
        'train': TrainPipeline,
        'finetune': FinetunePipeline,
        'test_acc': TestAccPipeline,
        'feat_extract': FeatExtractPipeline,
        'feat_extract_opengan': FeatExtractOpenGANPipeline,
        'test_ood': TestOODPipeline,
        'test_ad': TestAdPipeline,
        'train_ad': TrainAdPipeline,
        'train_oe': TrainOEPipeline,
        'train_opengan': TrainOpenGanPipeline,
        'train_arplgan': TrainARPLGANPipeline,
        'test_ood_aps': TestOODPipelineAPS
    }
    pipeline_cls = registry[config.pipeline.name]
    return pipeline_cls(config)
| 1,309 | 36.428571 | 69 | py |
null | OpenOOD-main/openood/postprocessors/__init__.py | from .ash_postprocessor import ASHPostprocessor
from .base_postprocessor import BasePostprocessor
from .cider_postprocessor import CIDERPostprocessor
from .conf_branch_postprocessor import ConfBranchPostprocessor
from .cutpaste_postprocessor import CutPastePostprocessor
from .dice_postprocessor import DICEPostprocessor
from .draem_postprocessor import DRAEMPostprocessor
from .dropout_postprocessor import DropoutPostProcessor
from .dsvdd_postprocessor import DSVDDPostprocessor
from .ebo_postprocessor import EBOPostprocessor
from .ensemble_postprocessor import EnsemblePostprocessor
from .gmm_postprocessor import GMMPostprocessor
from .godin_postprocessor import GodinPostprocessor
from .gradnorm_postprocessor import GradNormPostprocessor
from .gram_postprocessor import GRAMPostprocessor
from .kl_matching_postprocessor import KLMatchingPostprocessor
from .knn_postprocessor import KNNPostprocessor
from .maxlogit_postprocessor import MaxLogitPostprocessor
from .mcd_postprocessor import MCDPostprocessor
from .mds_postprocessor import MDSPostprocessor
from .mds_ensemble_postprocessor import MDSEnsemblePostprocessor
from .mos_postprocessor import MOSPostprocessor
from .npos_postprocessor import NPOSPostprocessor
from .odin_postprocessor import ODINPostprocessor
from .opengan_postprocessor import OpenGanPostprocessor
from .openmax_postprocessor import OpenMax
from .patchcore_postprocessor import PatchcorePostprocessor
from .rd4ad_postprocessor import Rd4adPostprocessor
from .react_postprocessor import ReactPostprocessor
from .rmds_postprocessor import RMDSPostprocessor
from .residual_postprocessor import ResidualPostprocessor
from .ssd_postprocessor import SSDPostprocessor
from .she_postprocessor import SHEPostprocessor
from .temp_scaling_postprocessor import TemperatureScalingPostprocessor
from .utils import get_postprocessor
from .vim_postprocessor import VIMPostprocessor
from .rotpred_postprocessor import RotPredPostprocessor
from .rankfeat_postprocessor import RankFeatPostprocessor
| 2,011 | 50.589744 | 71 | py |
null | OpenOOD-main/openood/postprocessors/ash_postprocessor.py | from typing import Any
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .base_postprocessor import BasePostprocessor
class ASHPostprocessor(BasePostprocessor):
    """ASH (Activation SHaping): the network prunes/reshapes activations in
    its forward pass at a given percentile, and the score is the energy
    (logsumexp) of the resulting logits."""

    def __init__(self, config):
        super(ASHPostprocessor, self).__init__(config)
        self.args = self.config.postprocessor.postprocessor_args
        # percentile of activations to shape away; searchable hyperparameter
        self.percentile = self.args.percentile
        self.args_dict = self.config.postprocessor.postprocessor_sweep

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        logits = net.forward_threshold(data, self.percentile)
        _, pred = torch.max(logits, dim=1)
        # energy score over the shaped logits
        conf = torch.logsumexp(logits.data.cpu(), dim=1)
        return pred, conf

    def set_hyperparam(self, hyperparam: list):
        self.percentile = hyperparam[0]

    def get_hyperparam(self):
        return self.percentile
| 903 | 29.133333 | 70 | py |
null | OpenOOD-main/openood/postprocessors/base_postprocessor.py | from typing import Any
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import openood.utils.comm as comm
class BasePostprocessor:
    """Default OOD postprocessor: maximum softmax probability (MSP).

    Subclasses override ``setup`` (fitting) and/or ``postprocess``
    (scoring); ``inference`` runs scoring over a whole dataloader.
    """
    def __init__(self, config):
        self.config = config

    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
        # MSP needs no fitting; subclasses override this hook
        pass

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        """Return (predicted class, max softmax probability) per sample."""
        logits = net(data)
        probs = torch.softmax(logits, dim=1)
        conf, pred = torch.max(probs, dim=1)
        return pred, conf

    def inference(self,
                  net: nn.Module,
                  data_loader: DataLoader,
                  progress: bool = True):
        """Score every batch in ``data_loader``.

        Returns numpy arrays of predictions (int), confidences and labels
        (int); the progress bar shows only on the main process.
        """
        pred_list, conf_list, label_list = [], [], []
        show_bar = progress and comm.is_main_process()
        for batch in tqdm(data_loader, disable=not show_bar):
            data = batch['data'].cuda()
            label = batch['label'].cuda()
            pred, conf = self.postprocess(net, data)
            pred_list.append(pred.cpu())
            conf_list.append(conf.cpu())
            label_list.append(label.cpu())

        # convert values into numpy array
        preds = torch.cat(pred_list).numpy().astype(int)
        confs = torch.cat(conf_list).numpy()
        labels = torch.cat(label_list).numpy().astype(int)

        return preds, confs, labels
| 1,389 | 29.217391 | 78 | py |
null | OpenOOD-main/openood/postprocessors/cider_postprocessor.py | from typing import Any
import faiss
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
from .base_postprocessor import BasePostprocessor
class CIDERPostprocessor(BasePostprocessor):
    """KNN scoring on the hypersphere embeddings learned by CIDER.

    ``setup`` indexes the training embeddings with faiss; at test time the
    score is the negative distance to the K-th nearest training embedding.
    """
    def __init__(self, config):
        super(CIDERPostprocessor, self).__init__(config)
        self.args = self.config.postprocessor.postprocessor_args
        # number of neighbours for the KNN distance; searchable hyperparameter
        self.K = self.args.K
        self.activation_log = None
        self.args_dict = self.config.postprocessor.postprocessor_sweep
        self.setup_flag = False

    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
        """Collect all training embeddings once and build a faiss L2 index."""
        if not self.setup_flag:
            activation_log = []
            net.eval()
            with torch.no_grad():
                for batch in tqdm(id_loader_dict['train'],
                                  desc='Setup: ',
                                  position=0,
                                  leave=True):
                    data = batch['data'].cuda()
                    feature = net.intermediate_forward(data)
                    activation_log.append(feature.data.cpu().numpy())

            self.activation_log = np.concatenate(activation_log, axis=0)
            # exact (flat) L2 index sized by the embedding dimension
            self.index = faiss.IndexFlatL2(feature.shape[1])
            self.index.add(self.activation_log)
            self.setup_flag = True
        else:
            pass

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        """Score a batch as the negative distance to the K-th neighbour.

        NOTE(review): IndexFlatL2 reportedly returns *squared* L2
        distances -- confirm against the faiss docs if exact values matter;
        the sign flip makes larger scores mean more in-distribution.
        """
        feature = net.intermediate_forward(data)
        D, _ = self.index.search(
            feature.cpu().numpy(),  # feature is already normalized within net
            self.K,
        )
        kth_dist = -D[:, -1]
        # put dummy prediction here
        # as cider only trains the feature extractor
        pred = torch.zeros(len(kth_dist))
        return pred, torch.from_numpy(kth_dist)

    def set_hyperparam(self, hyperparam: list):
        self.K = hyperparam[0]

    def get_hyperparam(self):
        return self.K
| 1,964 | 31.75 | 78 | py |
null | OpenOOD-main/openood/postprocessors/conf_branch_postprocessor.py | from typing import Any
import torch
import torch.nn as nn
from .base_postprocessor import BasePostprocessor
class ConfBranchPostprocessor(BasePostprocessor):
    """Postprocessor for confidence-branch networks: the network emits a
    dedicated confidence output alongside its logits."""

    def __init__(self, config):
        super(ConfBranchPostprocessor, self).__init__(config)
        self.config = config

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        logits, conf = net(data, return_confidence=True)
        # squash the raw confidence-branch output into (0, 1)
        conf = torch.sigmoid(conf)
        _, pred = torch.max(logits, dim=1)
        return pred, conf
| 522 | 25.15 | 61 | py |
null | OpenOOD-main/openood/postprocessors/cutpaste_postprocessor.py | from __future__ import division, print_function
from typing import Any
import numpy as np
import torch
import torch.nn as nn
from sklearn.covariance import LedoitWolf as LW
from torch.utils.data import DataLoader
from tqdm import tqdm
class CutPastePostprocessor:
    """Postprocessor for CutPaste: scores images by the Mahalanobis distance
    of their embeddings under a Gaussian fitted to training embeddings."""
    def __init__(self, config):
        self.config = config

    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
        """Embed the training set and cache the L2-normalized embeddings."""
        # get train embeds
        train_loader = id_loader_dict['train']
        train_embed = []
        train_dataiter = iter(train_loader)
        with torch.no_grad():
            for train_step in tqdm(range(1,
                                         len(train_dataiter) + 1),
                                   desc='Train embeds'):
                batch = next(train_dataiter)
                # each batch carries a list of views; stack them into one batch
                data = torch.cat(batch['data'], 0)
                # NOTE(review): when 4 views are stacked only the first half
                # is kept (presumably the non-augmented views) -- confirm
                # against the CutPaste dataloader
                if (np.array(data).shape[0] == 4):
                    data = data.numpy().tolist()
                    data = data[0:len(data) // 2]
                    data = torch.Tensor(data)
                data = data.cuda()
                embed, logit = net(data)
                train_embed.append(embed.cuda())
        train_embeds = torch.cat(train_embed)
        # cache unit-norm embeddings for the Gaussian fit at test time
        self.train_embeds = torch.nn.functional.normalize(train_embeds,
                                                          p=2,
                                                          dim=1)

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        """Score a batch as (200 - Mahalanobis distance); higher = more ID.

        NOTE(review): the Gaussian is re-fitted on every call, and the 200
        offset appears to just flip the sign while keeping scores positive
        -- both match the original implementation; confirm before changing.
        """
        # get embeds
        embeds = []
        embed, output = net(data)
        embeds.append(embed.cuda())
        embeds = torch.cat(embeds)
        embeds = torch.nn.functional.normalize(embeds, p=2, dim=1)
        score = torch.softmax(output, dim=1)
        conf, pred = torch.max(score, dim=1)
        # compute distances
        density = GaussianDensityTorch()
        density.fit(self.train_embeds)
        distances = density.predict(embeds)
        distances = 200 - distances
        return pred, distances

    def inference(self, net: nn.Module, data_loader: DataLoader):
        """Collect predictions, scores and labels over the whole loader.

        NOTE(review): labels are synthesized -- 0 for the first half of each
        stacked batch and -1 for the second half (presumably clean vs.
        cut-pasted views) -- rather than read from the dataset.
        """
        pred_list, conf_list, label_list = [], [], []
        for batch in data_loader:
            data = torch.cat(batch['data'], 0)
            data = data.cuda()
            # label = torch.arange(2)
            label = torch.tensor([0, -1])
            label = label.repeat_interleave(len(batch['data'][0])).cuda()

            pred, conf = self.postprocess(net, data)
            for idx in range(len(data)):
                pred_list.append(pred[idx].cpu().tolist())
                conf_list.append(conf[idx].cpu().tolist())
                label_list.append(label[idx].cpu().tolist())

        # convert values into numpy array
        pred_list = np.array(pred_list, dtype=int)
        conf_list = np.array(conf_list)
        label_list = np.array(label_list, dtype=int)

        return pred_list, conf_list, label_list
class Density(object):
    """Abstract interface for density estimators over embedding vectors."""

    def fit(self, embeddings):
        """Estimate the density model from a set of embeddings."""
        raise NotImplementedError

    def predict(self, embeddings):
        """Score embeddings under the fitted density model."""
        raise NotImplementedError
class GaussianDensityTorch(Density):
    """Multivariate Gaussian density with a Ledoit-Wolf covariance
    estimate; ``predict`` returns Mahalanobis distances (higher means less
    likely under the fitted Gaussian)."""
    def fit(self, embeddings):
        # mean stays on the embeddings' current device
        self.mean = torch.mean(embeddings, axis=0)
        # Ledoit-Wolf shrinkage estimate of the precision (inverse
        # covariance) matrix, computed on CPU via sklearn
        self.inv_cov = torch.Tensor(LW().fit(embeddings.cpu()).precision_,
                                    device='cpu')

    def predict(self, embeddings):
        distances = self.mahalanobis_distance(embeddings, self.mean,
                                              self.inv_cov)
        return distances

    @staticmethod
    def mahalanobis_distance(values: torch.Tensor, mean: torch.Tensor,
                             inv_covariance: torch.Tensor) -> torch.Tensor:
        """Compute sqrt((x - mu)^T Sigma^-1 (x - mu)) for each row of
        ``values``. NOTE(review): the precision matrix is moved to CUDA
        below, so this helper requires a GPU."""
        assert values.dim() == 2
        assert 1 <= mean.dim() <= 2
        assert len(inv_covariance.shape) == 2
        assert values.shape[1] == mean.shape[-1]
        assert mean.shape[-1] == inv_covariance.shape[0]
        assert inv_covariance.shape[0] == inv_covariance.shape[1]

        if mean.dim() == 1:  # Distribution mean.
            mean = mean.unsqueeze(0)
        x_mu = values - mean  # batch x features
        # Same as dist = x_mu.t() * inv_covariance * x_mu batch wise
        inv_covariance = inv_covariance.cuda()
        dist = torch.einsum('im,mn,in->i', x_mu, inv_covariance, x_mu)
        return dist.sqrt()
| 4,300 | 35.760684 | 75 | py |
null | OpenOOD-main/openood/postprocessors/dice_postprocessor.py | from typing import Any
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
from .base_postprocessor import BasePostprocessor
normalizer = lambda x: x / np.linalg.norm(x, axis=-1, keepdims=True) + 1e-10
class DICEPostprocessor(BasePostprocessor):
    """DICE: sparsify the classifier's final linear layer by keeping only
    the most contributing weights, then score with the energy (logsumexp)
    of the masked logits."""
    def __init__(self, config):
        super(DICEPostprocessor, self).__init__(config)
        self.args = self.config.postprocessor.postprocessor_args
        # sparsity percentile: weights whose mean contribution falls below
        # the p-th percentile are zeroed out
        self.p = self.args.p
        self.mean_act = None
        self.masked_w = None
        self.args_dict = self.config.postprocessor.postprocessor_sweep
        self.setup_flag = False

    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
        # one-time pass over the training set to record the mean
        # penultimate-layer activation
        if not self.setup_flag:
            activation_log = []
            net.eval()
            with torch.no_grad():
                for batch in tqdm(id_loader_dict['train'],
                                  desc='Setup: ',
                                  position=0,
                                  leave=True):
                    data = batch['data'].cuda()
                    data = data.float()

                    _, feature = net(data, return_feature=True)
                    activation_log.append(feature.data.cpu().numpy())

            activation_log = np.concatenate(activation_log, axis=0)
            self.mean_act = activation_log.mean(0)
            self.setup_flag = True
        else:
            pass

    def calculate_mask(self, w):
        # contribution of each weight = mean input activation * weight;
        # keep only entries above the p-th percentile
        contrib = self.mean_act[None, :] * w.data.squeeze().cpu().numpy()
        self.thresh = np.percentile(contrib, self.p)
        mask = torch.Tensor((contrib > self.thresh)).cuda()
        self.masked_w = w * mask

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        fc_weight, fc_bias = net.get_fc()
        # lazily build the sparsified weight matrix on first use
        if self.masked_w is None:
            self.calculate_mask(torch.from_numpy(fc_weight).cuda())

        _, feature = net(data, return_feature=True)
        # recompute the logits with the masked weights
        vote = feature[:, None, :] * self.masked_w
        output = vote.sum(2) + torch.from_numpy(fc_bias).cuda()

        _, pred = torch.max(torch.softmax(output, dim=1), dim=1)
        # energy score on the masked logits
        energyconf = torch.logsumexp(output.data.cpu(), dim=1)
        return pred, energyconf

    def set_hyperparam(self, hyperparam: list):
        self.p = hyperparam[0]

    def get_hyperparam(self):
        return self.p
| 2,355 | 34.164179 | 76 | py |
null | OpenOOD-main/openood/postprocessors/draem_postprocessor.py | from typing import Any
import numpy as np
import torch
import torch.nn as nn
from .base_postprocessor import BasePostprocessor
class DRAEMPostprocessor(BasePostprocessor):
    """Postprocessor for DRAEM anomaly detection.

    The 'generative' sub-network reconstructs the input; the
    'discriminative' sub-network segments anomalous regions from the
    (reconstruction, input) pair, and the image-level score is the maximum
    of the smoothed anomaly map.
    """
    def __init__(self, config):
        super(DRAEMPostprocessor, self).__init__(config)

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        # forward: reconstruct, then classify pixels on the joined pair
        gray_rec = net['generative'](data)
        joined_in = torch.cat((gray_rec.detach(), data), dim=1)

        out_mask = net['discriminative'](joined_in)
        out_mask_sm = torch.softmax(out_mask, dim=1)

        # image-level score: max of the 21x21-averaged anomaly channel(s)
        out_mask_averaged = torch.nn.functional.avg_pool2d(
            out_mask_sm[:, 1:, :, :], 21, stride=1,
            padding=21 // 2).cpu().detach().numpy()
        image_score = np.max(out_mask_averaged, axis=(1, 2, 3))

        # There is no class prediction, so a dummy label of -1 is returned.
        # The score is negated so that higher confidence means more normal.
        # (The original wrapped the ndarray in a Python list before
        # converting -- `torch.tensor([-image_score]).reshape(...)` -- which
        # is slow and triggers a conversion warning; from_numpy is direct.)
        return -1 * torch.ones(data.shape[0]), torch.from_numpy(-image_score)
| 956 | 28.90625 | 63 | py |
null | OpenOOD-main/openood/postprocessors/dropout_postprocessor.py | from typing import Any
import torch
from torch import nn
from .base_postprocessor import BasePostprocessor
class DropoutPostProcessor(BasePostprocessor):
    """MC-dropout postprocessor: average the logits over several
    stochastic forward passes and score with the max softmax
    probability."""
    def __init__(self, config):
        self.config = config
        self.args = config.postprocessor.postprocessor_args
        self.dropout_times = self.args.dropout_times

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        # run the (dropout-enabled) network several times on the same batch
        outputs = [net.forward(data) for _ in range(self.dropout_times)]
        logits_mean = torch.zeros_like(outputs[0], dtype=torch.float32)
        for out in outputs:
            logits_mean += out
        logits_mean /= self.dropout_times
        # maximum softmax probability of the averaged logits
        probs = torch.softmax(logits_mean, dim=1)
        conf, pred = torch.max(probs, dim=1)
        return pred, conf
| 810 | 31.44 | 76 | py |
null | OpenOOD-main/openood/postprocessors/dsvdd_postprocessor.py | from typing import Any
import torch
import torch.nn as nn
from openood.trainers.dsvdd_trainer import init_center_c
from .base_postprocessor import BasePostprocessor
class DSVDDPostprocessor(BasePostprocessor):
    """Deep SVDD postprocessor.

    Scores a sample by its squared distance to the hypersphere center
    ``c`` in output space; when the network is the 'dcae' autoencoder (the
    pre-training stage of the original paper), the reconstruction error is
    used instead.
    """
    def __init__(self, config):
        super(DSVDDPostprocessor, self).__init__(config)
        self.hyperpara = {}

    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
        # Estimate the center from the training data unless one is given in
        # the config (the string 'None' means "not provided"). The 'dcae'
        # pre-training stage does not use a center.
        if self.config.c == 'None' and self.config.network.name != 'dcae':
            self.c = init_center_c(id_loader_dict['train'], net)
        else:
            self.c = self.config.c

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        outputs = net(data)
        if self.config.network.name != 'dcae':
            # squared distance to the center, summed over non-batch dims
            conf = torch.sum((outputs - self.c)**2,
                             dim=tuple(range(1, outputs.dim())))
        # this is for pre-training the dcae network from the original paper
        elif self.config.network.name == 'dcae':
            # reconstruction error of the autoencoder
            conf = torch.sum((outputs - data)**2,
                             dim=tuple(range(1, outputs.dim())))
        else:
            # unreachable: the two branches above are exhaustive
            raise NotImplementedError
        # no class prediction exists, so a dummy label of -1 is returned;
        # NOTE(review): conf is a distance (larger = more anomalous) --
        # verify the sign convention expected by the evaluator
        return -1 * torch.ones(data.shape[0]), conf
| 1,206 | 31.621622 | 75 | py |
null | OpenOOD-main/openood/postprocessors/ebo_postprocessor.py | from typing import Any
import torch
import torch.nn as nn
from .base_postprocessor import BasePostprocessor
class EBOPostprocessor(BasePostprocessor):
    """Energy-based OOD scoring: the confidence is the temperature-scaled
    log-sum-exp of the logits (the negative free energy)."""
    def __init__(self, config):
        super().__init__(config)
        self.args = self.config.postprocessor.postprocessor_args
        self.temperature = self.args.temperature
        self.args_dict = self.config.postprocessor.postprocessor_sweep

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        logits = net(data)
        probs = torch.softmax(logits, dim=1)
        _, pred = torch.max(probs, dim=1)
        # energy score: T * logsumexp(logits / T)
        energy = self.temperature * torch.logsumexp(
            logits / self.temperature, dim=1)
        return pred, energy

    def set_hyperparam(self, hyperparam: list):
        self.temperature = hyperparam[0]

    def get_hyperparam(self):
        return self.temperature
| 919 | 29.666667 | 76 | py |
null | OpenOOD-main/openood/postprocessors/ensemble_postprocessor.py | import os.path as osp
from copy import deepcopy
from typing import Any
import torch
from torch import nn
from .base_postprocessor import BasePostprocessor
class EnsemblePostprocessor(BasePostprocessor):
    """Deep-ensemble postprocessor: averages the logits of several
    independently trained copies of the same architecture and scores with
    the max softmax probability."""
    def __init__(self, config):
        super(EnsemblePostprocessor, self).__init__(config)
        self.config = config
        self.postprocess_config = config.postprocessor
        self.postprocessor_args = self.postprocess_config.postprocessor_args
        assert self.postprocessor_args.network_name == \
            self.config.network.name,\
            'checkpoint network type and model type do not align!'
        # root folder holding one sub-folder per ensemble member
        self.checkpoint_root = self.postprocessor_args.checkpoint_root
        # list of trained network checkpoints
        self.checkpoints = self.postprocessor_args.checkpoints
        # number of networks to ensemble
        self.num_networks = self.postprocessor_args.num_networks
        # full path of each member's best checkpoint
        self.checkpoint_dirs = [
            osp.join(self.checkpoint_root, path, 'best.ckpt')
            for path in self.checkpoints
        ]

    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
        # clone the reference network once per member and load its weights
        self.networks = [deepcopy(net) for _ in range(self.num_networks)]
        for idx in range(self.num_networks):
            self.networks[idx].load_state_dict(torch.load(
                self.checkpoint_dirs[idx]),
                                               strict=False)
            self.networks[idx].eval()

    def postprocess(self, net: nn.Module, data: Any):
        # average the members' logits, then take the max softmax prob
        all_logits = [member(data) for member in self.networks]
        logits_mean = torch.zeros_like(all_logits[0], dtype=torch.float32)
        for logits in all_logits:
            logits_mean += logits
        logits_mean /= self.num_networks
        score = torch.softmax(logits_mean, dim=1)
        conf, pred = torch.max(score, dim=1)
        return pred, conf
| 1,970 | 36.188679 | 76 | py |
null | OpenOOD-main/openood/postprocessors/gmm_postprocessor.py | from __future__ import print_function
from typing import Any
import numpy as np
import torch
import torch.nn as nn
from sklearn.mixture import GaussianMixture
from tqdm import tqdm
from .base_postprocessor import BasePostprocessor
from .mds_ensemble_postprocessor import (process_feature_type,
reduce_feature_dim, tensor2list)
class GMMPostprocessor(BasePostprocessor):
    """Score samples with per-layer Gaussian-mixture densities fitted on
    training features; the layer log-likelihoods are combined linearly
    with the weights in ``alpha_list``."""
    def __init__(self, config):
        self.config = config
        self.postprocessor_args = config.postprocessor.postprocessor_args
        self.feature_type_list = self.postprocessor_args.feature_type_list
        self.reduce_dim_list = self.postprocessor_args.reduce_dim_list
        self.num_clusters_list = self.postprocessor_args.num_clusters_list
        self.alpha_list = self.postprocessor_args.alpha_list
        self.num_layer = len(self.feature_type_list)
        self.feature_mean, self.feature_prec = None, None
        self.component_weight_list, self.transform_matrix_list = None, None

    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
        # fit one GMM per selected layer on the training features
        self.feature_mean, self.feature_prec, self.component_weight_list, \
            self.transform_matrix_list = get_GMM_stat(net,
                                                      id_loader_dict['train'],
                                                      self.num_clusters_list,
                                                      self.feature_type_list,
                                                      self.reduce_dim_list)

    def postprocess(self, net: nn.Module, data: Any):
        # collect one likelihood column per layer
        for layer_index in range(self.num_layer):
            pred, score = compute_GMM_score(net,
                                            data,
                                            self.feature_mean,
                                            self.feature_prec,
                                            self.component_weight_list,
                                            self.transform_matrix_list,
                                            layer_index,
                                            self.feature_type_list,
                                            return_pred=True)
            if layer_index == 0:
                score_list = score.view([-1, 1])
            else:
                score_list = torch.cat((score_list, score.view([-1, 1])), 1)
        alpha = torch.cuda.FloatTensor(self.alpha_list)
        # import pdb; pdb.set_trace();
        # conf = torch.matmul(score_list, alpha)
        # weighted sum of per-layer log-likelihoods (1e-45 guards log(0))
        conf = torch.matmul(torch.log(score_list + 1e-45), alpha)
        return pred, conf
@torch.no_grad()
def get_GMM_stat(model, train_loader, num_clusters_list, feature_type_list,
                 reduce_dim_list):
    """ Compute GMM.
    Args:
        model (nn.Module): pretrained model to extract features
        train_loader (DataLoader): use all training data to perform GMM
        num_clusters_list (list): number of clusters for each layer
        feature_type_list (list): feature type for each layer
        reduce_dim_list (list): dim-reduce method for each layer

    return: feature_mean: list of class mean
            feature_prec: list of precisions
            component_weight_list: list of component
            transform_matrix_list: list of transform_matrix
    """
    feature_mean_list, feature_prec_list = [], []
    component_weight_list, transform_matrix_list = [], []
    num_layer = len(num_clusters_list)
    feature_all = [None for x in range(num_layer)]
    label_list = []
    # collect features
    for batch in tqdm(train_loader, desc='Compute GMM Stats [Collecting]'):
        # NOTE(review): uses the auxiliary view 'data_aux' of each batch --
        # presumably the unaugmented input; confirm against the dataloader
        data = batch['data_aux'].cuda()
        label = batch['label']
        _, feature_list = model(data, return_feature_list=True)
        label_list.extend(tensor2list(label))
        for layer_idx in range(num_layer):
            feature_type = feature_type_list[layer_idx]
            feature_processed = process_feature_type(feature_list[layer_idx],
                                                     feature_type)
            if isinstance(feature_all[layer_idx], type(None)):
                feature_all[layer_idx] = tensor2list(feature_processed)
            else:
                feature_all[layer_idx].extend(tensor2list(feature_processed))
    label_list = np.array(label_list)
    # reduce feature dim and perform gmm estimation
    for layer_idx in tqdm(range(num_layer),
                          desc='Compute GMM Stats [Estimating]'):
        feature_sub = np.array(feature_all[layer_idx])
        # project features into a lower-dimensional space first
        transform_matrix = reduce_feature_dim(feature_sub, label_list,
                                              reduce_dim_list[layer_idx])
        feature_sub = np.dot(feature_sub, transform_matrix)
        # GMM estimation
        gm = GaussianMixture(
            n_components=num_clusters_list[layer_idx],
            random_state=0,
            covariance_type='tied',
        ).fit(feature_sub)
        feature_mean = gm.means_
        feature_prec = gm.precisions_
        component_weight = gm.weights_

        feature_mean_list.append(torch.Tensor(feature_mean).cuda())
        feature_prec_list.append(torch.Tensor(feature_prec).cuda())
        component_weight_list.append(torch.Tensor(component_weight).cuda())
        transform_matrix_list.append(torch.Tensor(transform_matrix).cuda())

    return feature_mean_list, feature_prec_list, \
        component_weight_list, transform_matrix_list
def compute_GMM_score(model,
                      data,
                      feature_mean,
                      feature_prec,
                      component_weight,
                      transform_matrix,
                      layer_idx,
                      feature_type_list,
                      return_pred=False):
    """ Compute GMM.
    Args:
        model (nn.Module): pretrained model to extract features
        data (DataLoader): input one training batch
        feature_mean (list): a list of torch.cuda.Tensor()
        feature_prec (list): a list of torch.cuda.Tensor()
        component_weight (list): a list of torch.cuda.Tensor()
        transform_matrix (list): a list of torch.cuda.Tensor()
        layer_idx (int): index of layer in interest
        feature_type_list (list): a list of strings to indicate feature type
        return_pred (bool): return prediction and confidence, or only conf.

    return:
        pred (torch.cuda.Tensor):
        prob (torch.cuda.Tensor):
    """
    # extract features
    pred_list, feature_list = model(data, return_feature_list=True)
    pred = torch.argmax(pred_list, dim=1)
    feature_list = process_feature_type(feature_list[layer_idx],
                                        feature_type_list[layer_idx])
    feature_list = torch.mm(feature_list, transform_matrix[layer_idx])
    # compute prob
    for cluster_idx in range(len(feature_mean[layer_idx])):
        # unnormalized Gaussian likelihood for this mixture component
        zero_f = feature_list - feature_mean[layer_idx][cluster_idx]
        term_gau = -0.5 * torch.mm(torch.mm(zero_f, feature_prec[layer_idx]),
                                   zero_f.t()).diag()
        prob_gau = torch.exp(term_gau)
        if cluster_idx == 0:
            prob_matrix = prob_gau.view([-1, 1])
        else:
            prob_matrix = torch.cat((prob_matrix, prob_gau.view(-1, 1)), 1)

    # mixture likelihood: weighted sum over components
    prob = torch.mm(prob_matrix, component_weight[layer_idx].view(-1, 1))

    if return_pred:
        return pred, prob
    else:
        return prob
def compute_single_GMM_score(model,
                             data,
                             feature_mean,
                             feature_prec,
                             component_weight,
                             transform_matrix,
                             layer_idx,
                             feature_type_list,
                             return_pred=False):
    """Single-layer variant of ``compute_GMM_score``.

    Here the GMM statistics already belong to one layer (only
    ``layer_idx`` still indexes the network's feature list) and
    ``feature_type_list`` is that layer's feature-type string.
    """
    # extract the layer's features and project into the reduced space
    logits, feature_list = model(data, return_feature_list=True)
    pred = torch.argmax(logits, dim=1)
    feats = process_feature_type(feature_list[layer_idx], feature_type_list)
    feats = torch.mm(feats, transform_matrix)

    # per-cluster unnormalized Gaussian likelihood columns
    columns = []
    for cluster_idx in range(len(feature_mean)):
        centered = feats - feature_mean[cluster_idx]
        log_term = -0.5 * torch.mm(torch.mm(centered, feature_prec),
                                   centered.t()).diag()
        columns.append(torch.exp(log_term).view(-1, 1))
    prob_matrix = torch.cat(columns, 1)

    # mixture likelihood: weighted sum over clusters
    prob = torch.mm(prob_matrix, component_weight.view(-1, 1))

    if return_pred:
        return pred, prob
    return prob
| 8,744 | 42.507463 | 78 | py |
null | OpenOOD-main/openood/postprocessors/godin_postprocessor.py | from typing import Any
import torch
import torch.nn as nn
from .base_postprocessor import BasePostprocessor
from openood.preprocessors.transform import normalization_dict
class GodinPostprocessor(BasePostprocessor):
    """Generalized ODIN postprocessor: perturb the input along the
    gradient that increases the network's max score, then rescore the
    perturbed input via the network's ``score_func`` head."""
    def __init__(self, config):
        super(GodinPostprocessor, self).__init__(config)
        self.args = self.config.postprocessor.postprocessor_args
        # which scoring head the network should use at inference time
        self.score_func = self.args.score_func
        self.noise_magnitude = self.args.noise_magnitude
        # per-channel std of the dataset normalization, used to rescale the
        # perturbation into normalized-image space; fall back to 0.5 for
        # unknown datasets
        try:
            self.input_std = normalization_dict[self.config.dataset.name][1]
        except KeyError:
            self.input_std = [0.5, 0.5, 0.5]

    def postprocess(self, net: nn.Module, data: Any):
        data.requires_grad = True
        output = net(data, inference=True)

        # Perturbation direction: gradient of the maximum score w.r.t. the
        # input (note: the backward pass is on the max score itself, not a
        # cross-entropy loss)
        max_scores, _ = torch.max(output, dim=1)
        max_scores.backward(torch.ones(len(max_scores)).cuda())

        # Normalizing the gradient sign to {-1, +1}
        gradient = torch.ge(data.grad.detach(), 0)
        gradient = (gradient.float() - 0.5) * 2

        # Scaling values taken from original code
        gradient[:, 0] = (gradient[:, 0]) / self.input_std[0]
        gradient[:, 1] = (gradient[:, 1]) / self.input_std[1]
        gradient[:, 2] = (gradient[:, 2]) / self.input_std[2]

        # Adding small perturbations to images
        tempInputs = torch.add(data.detach(),
                               gradient,
                               alpha=self.noise_magnitude)

        # calculate score on the perturbed input
        output = net(tempInputs, inference=True, score_func=self.score_func)

        # Calculating the confidence after adding perturbations
        nnOutput = output.detach()
        # numerically stable softmax (subtract the row max first)
        nnOutput = nnOutput - nnOutput.max(dim=1, keepdims=True).values
        nnOutput = nnOutput.exp() / nnOutput.exp().sum(dim=1, keepdims=True)

        conf, pred = nnOutput.max(dim=1)
        return pred, conf
| 2,023 | 35.142857 | 76 | py |
null | OpenOOD-main/openood/postprocessors/gradnorm_postprocessor.py | from typing import Any
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .base_postprocessor import BasePostprocessor
from .info import num_classes_dict
class GradNormPostprocessor(BasePostprocessor):
    """GradNorm postprocessor: the score is the L1 norm of the gradient of
    the cross-entropy between an all-ones target and the softmax output,
    taken w.r.t. the final linear layer's weights."""
    def __init__(self, config):
        super().__init__(config)
        self.args = self.config.postprocessor.postprocessor_args
        self.num_classes = num_classes_dict[self.config.dataset.name]

    def gradnorm(self, x, w, b):
        # rebuild the classifier head from the numpy weights so gradients
        # can be taken w.r.t. its parameters in isolation
        fc = torch.nn.Linear(*w.shape[::-1])
        fc.weight.data[...] = torch.from_numpy(w)
        fc.bias.data[...] = torch.from_numpy(b)
        fc.cuda()

        # all-ones "uniform" target over classes
        targets = torch.ones((1, self.num_classes)).cuda()

        confs = []
        # one backward pass per sample (features processed one at a time)
        for i in x:
            fc.zero_grad()
            loss = torch.mean(
                torch.sum(-targets * F.log_softmax(fc(i[None]), dim=-1),
                          dim=-1))
            loss.backward()
            layer_grad_norm = torch.sum(torch.abs(
                fc.weight.grad.data)).cpu().numpy()
            confs.append(layer_grad_norm)
        return np.array(confs)

    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
        pass

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        w, b = net.get_fc()
        logits, features = net.forward(data, return_feature=True)
        with torch.enable_grad():
            # gradients are required despite the surrounding no_grad
            scores = self.gradnorm(features, w, b)
        _, preds = torch.max(logits, dim=1)
        return preds, torch.from_numpy(scores)
| 1,542 | 29.86 | 72 | py |
null | OpenOOD-main/openood/postprocessors/gram_postprocessor.py | from __future__ import division, print_function
from typing import Any
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
from .base_postprocessor import BasePostprocessor
from .info import num_classes_dict
class GRAMPostprocessor(BasePostprocessor):
    """Gram-matrix OOD detection: per-class min/max envelopes of powered
    Gram features are estimated on the training set, and test samples are
    scored by how far they deviate from those envelopes."""
    def __init__(self, config):
        self.config = config
        self.postprocessor_args = config.postprocessor.postprocessor_args
        self.num_classes = num_classes_dict[self.config.dataset.name]
        self.powers = self.postprocessor_args.powers

        self.feature_min, self.feature_max = None, None
        self.args_dict = self.config.postprocessor.postprocessor_sweep
        self.setup_flag = False

    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
        # estimate the per-class/per-layer envelopes only once
        if self.setup_flag:
            return
        self.feature_min, self.feature_max = sample_estimator(
            net, id_loader_dict['train'], self.num_classes, self.powers)
        self.setup_flag = True

    def postprocess(self, net: nn.Module, data: Any):
        return get_deviations(net, data, self.feature_min, self.feature_max,
                              self.num_classes, self.powers)

    def set_hyperparam(self, hyperparam: list):
        self.powers = hyperparam[0]

    def get_hyperparam(self):
        return self.powers
def tensor2list(x):
    """Convert a tensor to a plain (nested) Python list.

    The conversion happens via CPU: the original implementation moved the
    tensor to the GPU first (``.cuda().tolist()``), which forced a needless
    host->device copy (``tolist`` copies back to host memory anyway) and
    made this helper fail on CPU-only machines. The sibling MDS module's
    ``tensor2list`` already uses ``.cpu()``; this makes them consistent.
    """
    return x.data.cpu().tolist()
@torch.no_grad()
def sample_estimator(model, train_loader, num_classes, powers):
    """Estimate per-class min/max envelopes of Gram-matrix features.

    For every training sample, each of the 5 intermediate feature maps is
    raised to power p, turned into a Gram matrix, row-summed and rooted
    back; the element-wise min and max over the training set are recorded
    per (class, layer, power).

    Args:
        model: network supporting ``return_feature_list=True``.
        train_loader: loader over the in-distribution training set.
        num_classes: number of ID classes.
        powers: list of exponents p.

    Returns:
        (mins, maxs): nested lists indexed [class][layer][power] holding
        per-feature extrema tensors.
    """
    model.eval()

    num_layer = 5  # 4 for lenet
    num_poles_list = powers
    num_poles = len(num_poles_list)
    feature_class = [[[None for x in range(num_poles)]
                      for y in range(num_layer)] for z in range(num_classes)]
    label_list = []
    mins = [[[None for x in range(num_poles)] for y in range(num_layer)]
            for z in range(num_classes)]
    maxs = [[[None for x in range(num_poles)] for y in range(num_layer)]
            for z in range(num_classes)]

    # collect features and compute Gram matrices
    for batch in tqdm(train_loader, desc='Compute min/max'):
        data = batch['data'].cuda()
        label = batch['label']
        _, feature_list = model(data, return_feature_list=True)
        label_list = tensor2list(label)
        for layer_idx in range(num_layer):

            for pole_idx, p in enumerate(num_poles_list):
                temp = feature_list[layer_idx].detach()

                temp = temp**p
                temp = temp.reshape(temp.shape[0], temp.shape[1], -1)
                temp = ((torch.matmul(temp,
                                      temp.transpose(dim0=2,
                                                     dim1=1)))).sum(dim=2)
                temp = (temp.sign() * torch.abs(temp)**(1 / p)).reshape(
                    temp.shape[0], -1)
                temp = tensor2list(temp)
                for feature, label in zip(temp, label_list):
                    if isinstance(feature_class[label][layer_idx][pole_idx],
                                  type(None)):
                        feature_class[label][layer_idx][pole_idx] = feature
                    else:
                        feature_class[label][layer_idx][pole_idx].extend(
                            feature)
    # compute mins/maxs
    for label in range(num_classes):
        for layer_idx in range(num_layer):
            for poles_idx in range(num_poles):
                feature = torch.tensor(
                    np.array(feature_class[label][layer_idx][poles_idx]))
                current_min = feature.min(dim=0, keepdim=True)[0]
                current_max = feature.max(dim=0, keepdim=True)[0]

                if mins[label][layer_idx][poles_idx] is None:
                    mins[label][layer_idx][poles_idx] = current_min
                    maxs[label][layer_idx][poles_idx] = current_max
                else:
                    mins[label][layer_idx][poles_idx] = torch.min(
                        current_min, mins[label][layer_idx][poles_idx])
                    # BUG FIX: the running max must be folded in with
                    # current_max -- the original passed current_min here,
                    # which would corrupt the upper envelope whenever this
                    # branch executes.
                    maxs[label][layer_idx][poles_idx] = torch.max(
                        current_max, maxs[label][layer_idx][poles_idx])

    return mins, maxs
def get_deviations(model, data, mins, maxs, num_classes, powers):
    """Compute Gram-deviation confidences for one batch.

    Each sample's Gram features are compared against the min/max envelopes
    of its *predicted* class; relative violations below the min and above
    the max are accumulated over all layers and powers.

    Returns:
        (preds, conf): predicted labels and the accumulated deviation
        divided by 50 (larger = more deviating).
    """
    model.eval()

    num_layer = 5  # 4 for lenet
    num_poles_list = powers
    dev = [0 for x in range(data.shape[0])]

    # get predictions
    # (the original also built a `pred_list` of unique predictions here,
    # but it was never read afterwards -- removed as dead code)
    logits, feature_list = model(data, return_feature_list=True)
    confs = F.softmax(logits, dim=1).cpu().detach().numpy()
    preds = np.argmax(confs, axis=1)
    preds = torch.tensor(preds)

    # compute sample level deviation
    for layer_idx in range(num_layer):
        for pole_idx, p in enumerate(num_poles_list):
            # get gram matrix
            temp = feature_list[layer_idx].detach()
            temp = temp**p
            temp = temp.reshape(temp.shape[0], temp.shape[1], -1)
            temp = ((torch.matmul(temp, temp.transpose(dim0=2,
                                                       dim1=1)))).sum(dim=2)
            temp = (temp.sign() * torch.abs(temp)**(1 / p)).reshape(
                temp.shape[0], -1)
            temp = tensor2list(temp)

            # compute the deviations with train data
            for idx in range(len(temp)):
                dev[idx] += (F.relu(mins[preds[idx]][layer_idx][pole_idx] -
                                    sum(temp[idx])) /
                             torch.abs(mins[preds[idx]][layer_idx][pole_idx] +
                                       10**-6)).sum()
                dev[idx] += (F.relu(
                    sum(temp[idx]) - maxs[preds[idx]][layer_idx][pole_idx]) /
                             torch.abs(maxs[preds[idx]][layer_idx][pole_idx] +
                                       10**-6)).sum()
    conf = [i / 50 for i in dev]

    return preds, torch.tensor(conf)
| 6,348 | 36.791667 | 78 | py |
null | OpenOOD-main/openood/postprocessors/info.py | num_classes_dict = {
'cifar10': 10,
'cifar100': 100,
'imagenet200': 200,
'imagenet': 1000
}
| 108 | 14.571429 | 23 | py |
null | OpenOOD-main/openood/postprocessors/kl_matching_postprocessor.py | from typing import Any
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import pairwise_distances_argmin_min
import scipy
from tqdm import tqdm
from .base_postprocessor import BasePostprocessor
from .info import num_classes_dict
class KLMatchingPostprocessor(BasePostprocessor):
    """KL Matching: store the mean softmax distribution of validation
    samples per predicted class, then score a test sample by the negative
    minimum KL divergence between its softmax and those templates."""
    def __init__(self, config):
        super().__init__(config)
        self.num_classes = num_classes_dict[self.config.dataset.name]
        self.setup_flag = False

    def kl(self, p, q):
        # scipy.stats.entropy(p, q) = sum(p * log(p / q))
        return scipy.stats.entropy(p, q)

    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
        if not self.setup_flag:
            net.eval()

            print('Extracting id validation softmax posterior distributions')
            all_softmax = []
            preds = []
            with torch.no_grad():
                for batch in tqdm(id_loader_dict['val'],
                                  desc='Setup: ',
                                  position=0,
                                  leave=True):
                    data = batch['data'].cuda()
                    logits = net(data)
                    all_softmax.append(F.softmax(logits, 1).cpu())
                    preds.append(logits.argmax(1).cpu())

            all_softmax = torch.cat(all_softmax)
            preds = torch.cat(preds)

            # one template distribution per (predicted) class
            self.mean_softmax_val = []
            for i in tqdm(range(self.num_classes)):
                # if there are no validation samples
                # for this category, fall back to a one-hot template
                if torch.sum(preds.eq(i).float()) == 0:
                    temp = np.zeros((self.num_classes, ))
                    temp[i] = 1
                    self.mean_softmax_val.append(temp)
                else:
                    self.mean_softmax_val.append(
                        all_softmax[preds.eq(i)].mean(0).numpy())

            self.setup_flag = True
        else:
            pass

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        logits = net(data)
        preds = logits.argmax(1)
        softmax = F.softmax(logits, 1).cpu().numpy()
        # KL distance to the closest class template, negated so that higher
        # scores mean more in-distribution
        scores = -pairwise_distances_argmin_min(
            softmax, np.array(self.mean_softmax_val), metric=self.kl)[1]
        return preds, torch.from_numpy(scores)
| 2,308 | 32.955882 | 77 | py |
null | OpenOOD-main/openood/postprocessors/knn_postprocessor.py | from typing import Any
import faiss
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
from .base_postprocessor import BasePostprocessor
normalizer = lambda x: x / np.linalg.norm(x, axis=-1, keepdims=True) + 1e-10
class KNNPostprocessor(BasePostprocessor):
    """Deep k-NN OOD detection: the score is the negative distance to the
    K-th nearest training feature in a faiss index (features are
    L2-normalized before indexing)."""
    def __init__(self, config):
        super(KNNPostprocessor, self).__init__(config)
        self.args = self.config.postprocessor.postprocessor_args
        self.K = self.args.K
        self.activation_log = None
        self.args_dict = self.config.postprocessor.postprocessor_sweep
        self.setup_flag = False

    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
        # one-time pass to build the index of normalized training features
        if not self.setup_flag:
            activation_log = []
            net.eval()
            with torch.no_grad():
                for batch in tqdm(id_loader_dict['train'],
                                  desc='Setup: ',
                                  position=0,
                                  leave=True):
                    data = batch['data'].cuda()
                    data = data.float()

                    _, feature = net(data, return_feature=True)
                    activation_log.append(
                        normalizer(feature.data.cpu().numpy()))

            self.activation_log = np.concatenate(activation_log, axis=0)
            # NOTE(review): `feature` leaks from the loop above; this relies
            # on the loader yielding at least one batch
            self.index = faiss.IndexFlatL2(feature.shape[1])
            self.index.add(self.activation_log)
            self.setup_flag = True
        else:
            pass

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        output, feature = net(data, return_feature=True)
        feature_normed = normalizer(feature.data.cpu().numpy())
        # D holds distances to the K nearest neighbors, ascending per row
        D, _ = self.index.search(
            feature_normed,
            self.K,
        )
        # negative distance to the K-th neighbor: larger = more ID
        kth_dist = -D[:, -1]
        _, pred = torch.max(torch.softmax(output, dim=1), dim=1)
        return pred, torch.from_numpy(kth_dist)

    def set_hyperparam(self, hyperparam: list):
        self.K = hyperparam[0]

    def get_hyperparam(self):
        return self.K
| 2,073 | 31.920635 | 76 | py |
null | OpenOOD-main/openood/postprocessors/maxlogit_postprocessor.py | from typing import Any
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
from .base_postprocessor import BasePostprocessor
class MaxLogitPostprocessor(BasePostprocessor):
    """Score OOD-ness with the maximum (unnormalized) logit; the same
    argmax also yields the class prediction."""
    def __init__(self, config):
        super().__init__(config)
        self.args = self.config.postprocessor.postprocessor_args

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        logits = net(data)
        # the max logit is the confidence, its index the prediction
        conf, pred = torch.max(logits, dim=1)
        return pred, conf
| 506 | 23.142857 | 64 | py |
null | OpenOOD-main/openood/postprocessors/mcd_postprocessor.py | from typing import Any
import torch
import torch.nn as nn
from .base_postprocessor import BasePostprocessor
class MCDPostprocessor(BasePostprocessor):
    """Maximum Classifier Discrepancy postprocessor: the negative L1
    discrepancy between the two classifier heads' softmax outputs serves
    as the confidence (agreeing heads -> score near 0 -> ID)."""
    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        head1_logits, head2_logits = net(data, return_double=True)
        probs1 = torch.softmax(head1_logits, dim=1)
        probs2 = torch.softmax(head2_logits, dim=1)
        # per-sample L1 distance between the two heads, negated
        discrepancy = torch.sum(torch.abs(probs1 - probs2), dim=1)
        _, pred = torch.max(probs1, dim=1)
        return pred, -discrepancy
| 511 | 27.444444 | 60 | py |
null | OpenOOD-main/openood/postprocessors/mds_ensemble_postprocessor.py | from typing import Any
import numpy as np
import torch
import torch.nn as nn
from scipy import linalg
from sklearn.covariance import (empirical_covariance, ledoit_wolf,
shrunk_covariance)
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.linear_model import LogisticRegressionCV
from sklearn.preprocessing import StandardScaler
from torch.autograd import Variable
from tqdm import tqdm
from .base_postprocessor import BasePostprocessor
from .info import num_classes_dict
class MDSEnsemblePostprocessor(BasePostprocessor):
    """Mahalanobis-distance detector ensembled over feature layers:
    class-conditional Gaussian scores with input perturbation of magnitude
    ``noise``, combined with per-layer weights ``alpha_list`` (loaded from
    config or fitted by logistic regression)."""
    def __init__(self, config):
        self.config = config
        self.postprocessor_args = config.postprocessor.postprocessor_args
        # input-perturbation magnitude (a la ODIN)
        self.magnitude = self.postprocessor_args.noise
        self.feature_type_list = self.postprocessor_args.feature_type_list
        self.reduce_dim_list = self.postprocessor_args.reduce_dim_list

        self.num_classes = num_classes_dict[self.config.dataset.name]
        self.num_layer = len(self.feature_type_list)

        self.feature_mean, self.feature_prec = None, None
        self.alpha_list = None

        self.args_dict = self.config.postprocessor.postprocessor_sweep
        self.setup_flag = False

    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
        if not self.setup_flag:
            # step 1: estimate initial mean and variance from training set
            self.feature_mean, self.feature_prec, self.transform_matrix = \
                get_MDS_stat(net, id_loader_dict['train'], self.num_classes,
                             self.feature_type_list, self.reduce_dim_list)

            # step 2: input process and hyperparam searching for alpha
            if self.postprocessor_args.alpha_list:
                print('\n Load predefined alpha list...')
                self.alpha_list = self.postprocessor_args.alpha_list
            else:
                print('\n Searching for optimal alpha list...')
                # get in-distribution scores (one column block per layer)
                for layer_index in range(self.num_layer):
                    M_in = get_Mahalanobis_scores(
                        net, id_loader_dict['val'], self.num_classes,
                        self.feature_mean, self.feature_prec,
                        self.transform_matrix, layer_index,
                        self.feature_type_list, self.magnitude)
                    M_in = np.asarray(M_in, dtype=np.float32)
                    if layer_index == 0:
                        Mahalanobis_in = M_in.reshape((M_in.shape[0], -1))
                    else:
                        Mahalanobis_in = np.concatenate(
                            (Mahalanobis_in, M_in.reshape(
                                (M_in.shape[0], -1))),
                            axis=1)
                # get out-of-distribution scores
                for layer_index in range(self.num_layer):
                    M_out = get_Mahalanobis_scores(
                        net, ood_loader_dict['val'], self.num_classes,
                        self.feature_mean, self.feature_prec,
                        self.transform_matrix, layer_index,
                        self.feature_type_list, self.magnitude)
                    M_out = np.asarray(M_out, dtype=np.float32)
                    if layer_index == 0:
                        Mahalanobis_out = M_out.reshape((M_out.shape[0], -1))
                    else:
                        Mahalanobis_out = np.concatenate(
                            (Mahalanobis_out,
                             M_out.reshape((M_out.shape[0], -1))),
                            axis=1)
                Mahalanobis_in = np.asarray(Mahalanobis_in, dtype=np.float32)
                Mahalanobis_out = np.asarray(Mahalanobis_out, dtype=np.float32)
                # logistic regression for optimal alpha
                self.alpha_list = alpha_selector(Mahalanobis_in,
                                                 Mahalanobis_out)
            self.setup_flag = True
        else:
            pass

    def postprocess(self, net: nn.Module, data: Any):
        # per-layer Mahalanobis scores (the input needs gradients for the
        # perturbation inside compute_Mahalanobis_score)
        for layer_index in range(self.num_layer):
            pred, score = compute_Mahalanobis_score(net,
                                                    Variable(
                                                        data,
                                                        requires_grad=True),
                                                    self.num_classes,
                                                    self.feature_mean,
                                                    self.feature_prec,
                                                    self.transform_matrix,
                                                    layer_index,
                                                    self.feature_type_list,
                                                    self.magnitude,
                                                    return_pred=True)
            if layer_index == 0:
                score_list = score.view([-1, 1])
            else:
                score_list = torch.cat((score_list, score.view([-1, 1])), 1)
        alpha = torch.cuda.FloatTensor(self.alpha_list)
        # weighted combination of the per-layer scores
        conf = torch.matmul(score_list, alpha)
        return pred, conf

    def set_hyperparam(self, hyperparam: list):
        self.magnitude = hyperparam[0]

    def get_hyperparam(self):
        return self.magnitude
def tensor2list(x):
    """Detach *x* from autograd, move it to host memory and return its
    values as a (nested) Python list."""
    host_tensor = x.data.cpu()
    return host_tensor.tolist()
def get_torch_feature_stat(feature, only_mean=False):
    """Summarize a feature map into per-channel statistics.

    Args:
        feature: tensor of shape [N, C, ...]; any trailing spatial dims
            are flattened before the statistics are taken.
        only_mean: if True, return only the per-channel mean.

    Returns:
        [N, C] per-channel means, or [N, 2C] means concatenated with
        per-channel variances when there is more than one spatial element.
    """
    feature = feature.view([feature.size(0), feature.size(1), -1])
    feature_mean = torch.mean(feature, dim=-1)
    # A single spatial element ([N, C, 1, 1] input) has no meaningful
    # variance: torch.var is unbiased by default, so the variance over one
    # sample is NaN. Fall back to the mean alone in that case.
    # The original guard multiplied in the channel dim as well
    # (size(-2) * size(-1) == 1), which wrongly skipped this branch for
    # any input with C > 1 and produced NaN statistics.
    if feature.size(-1) == 1 or only_mean:
        feature_stat = feature_mean
    else:
        feature_var = torch.var(feature, dim=-1)
        feature_stat = torch.cat((feature_mean, feature_var), 1)
    return feature_stat
def process_feature_type(feature_temp, feature_type):
    """Post-process a raw feature map according to ``feature_type``.

    'flat' flattens to [N, -1]; 'stat' keeps per-channel mean+variance;
    'mean' keeps the per-channel mean only.

    Raises:
        ValueError: for any other ``feature_type``.
    """
    if feature_type == 'flat':
        return feature_temp.view([feature_temp.size(0), -1])
    if feature_type == 'stat':
        return get_torch_feature_stat(feature_temp)
    if feature_type == 'mean':
        return get_torch_feature_stat(feature_temp, only_mean=True)
    raise ValueError('Unknown feature type')
def reduce_feature_dim(feature_list_full, label_list_full, feature_process):
    """Build a projection matrix for feature-dimension reduction.

    ``feature_process`` is either 'none' (identity) or '<method>_<dim>',
    where method is one of 'capca', 'pca', 'lda' and dim is the number of
    components kept.
    """
    if feature_process == 'none':
        # Identity projection: keep every feature dimension.
        return np.eye(feature_list_full.shape[1])
    method, kept_dim = feature_process.split('_')
    kept_dim = int(kept_dim)
    if method == 'capca':
        # class-agnostic PCA via the inverted-LDA eigen solver
        reducer = InverseLDA(solver='eigen')
        reducer.fit(feature_list_full, label_list_full)
        return reducer.scalings_[:, :kept_dim]
    if method == 'pca':
        reducer = PCA(n_components=kept_dim)
        reducer.fit(feature_list_full)
        return reducer.components_.T
    if method == 'lda':
        reducer = LinearDiscriminantAnalysis(solver='eigen')
        reducer.fit(feature_list_full, label_list_full)
        return reducer.scalings_[:, :kept_dim]
    raise Exception('Unknown Process Type')
@torch.no_grad()
def get_MDS_stat(model, train_loader, num_classes, feature_type_list,
                 reduce_dim_list):
    """ Compute sample mean and precision (inverse of covariance)
    for every feature layer of the model on the training set.

    return: sample_class_mean: list (per layer) of per-class mean vectors
            precision: list (per layer) of precision matrices
            transform_matrix_list: list (per layer) of dim-reduction
                                   projection matrices
    """
    import sklearn.covariance
    group_lasso = sklearn.covariance.EmpiricalCovariance(assume_centered=False)
    model.eval()
    num_layer = len(feature_type_list)
    # feature_class[layer][class] will hold that class's feature rows
    feature_class = [[None for x in range(num_classes)]
                     for y in range(num_layer)]
    feature_all = [None for x in range(num_layer)]
    label_list = []
    # collect features
    for batch in tqdm(train_loader, desc='Compute mean/std'):
        data = batch['data_aux'].cuda()
        label = batch['label']
        _, feature_list = model(data, return_feature_list=True)
        label_list.extend(tensor2list(label))
        for layer_idx in range(num_layer):
            feature_type = feature_type_list[layer_idx]
            feature_processed = process_feature_type(feature_list[layer_idx],
                                                     feature_type)
            if isinstance(feature_all[layer_idx], type(None)):
                feature_all[layer_idx] = tensor2list(feature_processed)
            else:
                feature_all[layer_idx].extend(tensor2list(feature_processed))
    label_list = np.array(label_list)
    # reduce feature dim and split by classes
    transform_matrix_list = []
    for layer_idx in range(num_layer):
        feature_sub = np.array(feature_all[layer_idx])
        transform_matrix = reduce_feature_dim(feature_sub, label_list,
                                              reduce_dim_list[layer_idx])
        transform_matrix_list.append(torch.Tensor(transform_matrix).cuda())
        # project all features of this layer, then group them by class
        feature_sub = np.dot(feature_sub, transform_matrix)
        for feature, label in zip(feature_sub, label_list):
            feature = feature.reshape([-1, len(feature)])
            if isinstance(feature_class[layer_idx][label], type(None)):
                feature_class[layer_idx][label] = feature
            else:
                feature_class[layer_idx][label] = np.concatenate(
                    (feature_class[layer_idx][label], feature), axis=0)
    # calculate feature mean
    feature_mean_list = [[
        np.mean(feature_by_class, axis=0)
        for feature_by_class in feature_by_layer
    ] for feature_by_layer in feature_class]
    # calculate precision
    precision_list = []
    for layer in range(num_layer):
        X = []
        for k in range(num_classes):
            # center every class at its own mean (tied covariance model)
            X.append(feature_class[layer][k] - feature_mean_list[layer][k])
        X = np.concatenate(X, axis=0)
        # find inverse
        group_lasso.fit(X)
        precision = group_lasso.precision_
        precision_list.append(precision)
    # put mean and precision to cuda
    feature_mean_list = [torch.Tensor(i).cuda() for i in feature_mean_list]
    precision_list = [torch.Tensor(p).cuda() for p in precision_list]
    return feature_mean_list, precision_list, transform_matrix_list
def get_Mahalanobis_scores(model, test_loader, num_classes, sample_mean,
                           precision, transform_matrix, layer_index,
                           feature_type_list, magnitude):
    '''
    Compute the proposed Mahalanobis confidence score on input dataset,
    using the features of the layer selected by ``layer_index``.

    return: list of per-sample Mahalanobis scores from layer_index
    '''
    model.eval()
    Mahalanobis = []
    for batch in tqdm(test_loader,
                      desc=f'{test_loader.dataset.name}_layer{layer_index}'):
        data = batch['data'].cuda()
        # requires_grad=True: the score computation perturbs the input
        # along the loss gradient before re-scoring it.
        data = Variable(data, requires_grad=True)
        noise_gaussian_score = compute_Mahalanobis_score(
            model, data, num_classes, sample_mean, precision, transform_matrix,
            layer_index, feature_type_list, magnitude)
        Mahalanobis.extend(noise_gaussian_score.cpu().numpy())
    return Mahalanobis
def compute_Mahalanobis_score(model,
                              data,
                              num_classes,
                              sample_mean,
                              precision,
                              transform_matrix,
                              layer_index,
                              feature_type_list,
                              magnitude,
                              return_pred=False):
    """Mahalanobis confidence with ODIN-style input preprocessing.

    Scores ``data`` against the class-conditional Gaussians of one feature
    layer, perturbs the input by ``magnitude`` along the gradient that
    increases the best class-conditional likelihood, then re-scores the
    perturbed input. ``data`` must have ``requires_grad=True``.

    Returns:
        noise_gaussian_score (and, if ``return_pred``, the pre-perturbation
        class predictions as well).
    """
    # extract features
    _, out_features = model(data, return_feature_list=True)
    out_features = process_feature_type(out_features[layer_index],
                                        feature_type_list[layer_index])
    out_features = torch.mm(out_features, transform_matrix[layer_index])

    # compute Mahalanobis score: one column of log-likelihood terms per class
    gaussian_score = 0
    for i in range(num_classes):
        batch_sample_mean = sample_mean[layer_index][i]
        zero_f = out_features.data - batch_sample_mean
        term_gau = -0.5 * torch.mm(torch.mm(zero_f, precision[layer_index]),
                                   zero_f.t()).diag()
        if i == 0:
            gaussian_score = term_gau.view(-1, 1)
        else:
            gaussian_score = torch.cat((gaussian_score, term_gau.view(-1, 1)),
                                       1)

    # Input_processing: back-propagate the negative likelihood of the
    # predicted class to get the perturbation direction.
    sample_pred = gaussian_score.max(1)[1]
    batch_sample_mean = sample_mean[layer_index].index_select(0, sample_pred)
    zero_f = out_features - Variable(batch_sample_mean)
    pure_gau = -0.5 * torch.mm(
        torch.mm(zero_f, Variable(precision[layer_index])), zero_f.t()).diag()
    loss = torch.mean(-pure_gau)
    loss.backward()

    # sign of the gradient, mapped from {0, 1} to {-1, +1}
    gradient = torch.ge(data.grad.data, 0)
    gradient = (gradient.float() - 0.5) * 2
    # here we use the default value of 0.5
    # (per-channel rescaling of the gradient, as in the original ODIN code)
    gradient.index_copy_(
        1,
        torch.LongTensor([0]).cuda(),
        gradient.index_select(1,
                              torch.LongTensor([0]).cuda()) / 0.5)
    gradient.index_copy_(
        1,
        torch.LongTensor([1]).cuda(),
        gradient.index_select(1,
                              torch.LongTensor([1]).cuda()) / 0.5)
    gradient.index_copy_(
        1,
        torch.LongTensor([2]).cuda(),
        gradient.index_select(1,
                              torch.LongTensor([2]).cuda()) / 0.5)
    tempInputs = torch.add(
        data.data, gradient,
        alpha=-magnitude)  # updated input data with perturbation

    # re-extract features for the perturbed input (no grad needed now)
    with torch.no_grad():
        _, noise_out_features = model(Variable(tempInputs),
                                      return_feature_list=True)
        noise_out_features = process_feature_type(
            noise_out_features[layer_index], feature_type_list[layer_index])
        noise_out_features = torch.mm(noise_out_features,
                                      transform_matrix[layer_index])

    # re-score the perturbed input and keep the best class term
    noise_gaussian_score = 0
    for i in range(num_classes):
        batch_sample_mean = sample_mean[layer_index][i]
        zero_f = noise_out_features.data - batch_sample_mean
        term_gau = -0.5 * torch.mm(torch.mm(zero_f, precision[layer_index]),
                                   zero_f.t()).diag()
        if i == 0:
            noise_gaussian_score = term_gau.view(-1, 1)
        else:
            noise_gaussian_score = torch.cat(
                (noise_gaussian_score, term_gau.view(-1, 1)), 1)

    noise_gaussian_score, _ = torch.max(noise_gaussian_score, dim=1)
    if return_pred:
        return sample_pred, noise_gaussian_score
    else:
        return noise_gaussian_score
def alpha_selector(data_in, data_out):
    """Learn per-layer ensemble weights by fitting a logistic-regression
    separator between ID layer scores (label 1) and OOD scores (label 0);
    the classifier's weight vector is returned as the alpha list."""
    features = np.concatenate([data_in, data_out])
    labels = np.concatenate(
        [np.ones(len(data_in)), np.zeros(len(data_out))])
    # skip the last-layer flattened feature (duplicated with the last feature)
    classifier = LogisticRegressionCV(n_jobs=-1).fit(features, labels)
    alpha_list = classifier.coef_.reshape(-1)
    print(f'Optimal Alpha List: {alpha_list}')
    return alpha_list
def _cov(X, shrinkage=None, covariance_estimator=None):
"""Estimate covariance matrix (using optional covariance_estimator).
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
shrinkage : {'empirical', 'auto'} or float, default=None
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Shrinkage parameter is ignored if `covariance_estimator`
is not None.
covariance_estimator : estimator, default=None
If not None, `covariance_estimator` is used to estimate
the covariance matrices instead of relying on the empirical
covariance estimator (with potential shrinkage).
The object should have a fit method and a ``covariance_`` attribute
like the estimators in :mod:`sklearn.covariance``.
if None the shrinkage parameter drives the estimate.
.. versionadded:: 0.24
Returns
-------
s : ndarray of shape (n_features, n_features)
Estimated covariance matrix.
"""
if covariance_estimator is None:
shrinkage = 'empirical' if shrinkage is None else shrinkage
if isinstance(shrinkage, str):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
# rescale
s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be a float or a string')
else:
if shrinkage is not None and shrinkage != 0:
raise ValueError('covariance_estimator and shrinkage parameters '
'are not None. Only one of the two can be set.')
covariance_estimator.fit(X)
if not hasattr(covariance_estimator, 'covariance_'):
raise ValueError('%s does not have a covariance_ attribute' %
covariance_estimator.__class__.__name__)
s = covariance_estimator.covariance_
return s
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like of shape (n_classes, n_features)
Class means.
"""
classes, y = np.unique(y, return_inverse=True)
cnt = np.bincount(y)
means = np.zeros(shape=(len(classes), X.shape[1]))
np.add.at(means, y, X)
means /= cnt[:, None]
return means
def _class_cov(X, y, priors, shrinkage=None, covariance_estimator=None):
    """Compute the within-class covariance matrix, with each class's
    covariance weighted by its prior.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Input data.
    y : array-like of shape (n_samples,) or (n_samples, n_targets)
        Target values.
    priors : array-like of shape (n_classes,)
        Class priors, aligned with ``np.unique(y)`` order.
    shrinkage : 'auto' or float, default=None
        Forwarded to ``_cov``; ignored when `covariance_estimator` is set.
    covariance_estimator : estimator, default=None
        Forwarded to ``_cov``.

    Returns
    -------
    cov : ndarray of shape (n_features, n_features)
        Weighted within-class covariance matrix.
    """
    n_features = X.shape[1]
    weighted = np.zeros(shape=(n_features, n_features))
    for idx, label in enumerate(np.unique(y)):
        members = X[y == label, :]
        # each class contributes its covariance scaled by its prior
        weighted += priors[idx] * np.atleast_2d(
            _cov(members, shrinkage, covariance_estimator))
    return weighted
class InverseLDA(LinearDiscriminantAnalysis):
    """LDA variant whose eigen solver decomposes only the within-class
    scatter, yielding a class-agnostic projection (used by the 'capca'
    option of ``reduce_feature_dim``)."""
    def _solve_eigen(self, X, y, shrinkage):
        """Eigenvalue solver.
        The eigenvalue solver computes the optimal solution of the Rayleigh
        coefficient (basically the ratio of between class scatter to within
        class scatter). This solver supports both classification and
        dimensionality reduction (with optional shrinkage).
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.
        shrinkage : string or float, optional
            Shrinkage parameter, possible values:
            - None: no shrinkage (default).
            - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
            - float between 0 and 1: fixed shrinkage constant.
        Notes
        -----
        This solver is based on [1]_, section 3.8.3, pp. 121-124.
        References
        ----------
        """
        self.means_ = _class_means(X, y)
        self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)

        Sw = self.covariance_  # within scatter
        # St = _cov(X, shrinkage)  # total scatter
        # Sb = St - Sw  # between scatter
        # Standard LDA: evals, evecs = linalg.eigh(Sb, Sw)
        # Here we hope to find a mapping
        # to maximize Sw with minimum Sb for class agnostic.
        evals, evecs = linalg.eigh(Sw)

        self.explained_variance_ratio_ = np.sort(
            evals / np.sum(evals))[::-1][:self._max_components]
        evecs = evecs[:, np.argsort(evals)[::-1]]  # sort eigenvectors

        self.scalings_ = evecs
        self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
        self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
                           np.log(self.priors_))
| 22,148 | 41.431034 | 79 | py |
null | OpenOOD-main/openood/postprocessors/mds_postprocessor.py | from typing import Any
from copy import deepcopy
import numpy as np
import torch
import torch.nn as nn
import sklearn.covariance
from tqdm import tqdm
from .base_postprocessor import BasePostprocessor
from .info import num_classes_dict
class MDSPostprocessor(BasePostprocessor):
    """Mahalanobis distance score (MDS) postprocessor using only the
    penultimate-layer features: fits class-conditional Gaussians with a
    tied covariance on the training set, then scores test samples by
    their maximum negative Mahalanobis distance to any class mean."""
    def __init__(self, config):
        self.config = config
        self.num_classes = num_classes_dict[self.config.dataset.name]
        self.setup_flag = False

    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
        if not self.setup_flag:
            # estimate mean and variance from training set
            print('\n Estimating mean and variance from training set...')
            all_feats = []
            all_labels = []
            all_preds = []
            with torch.no_grad():
                for batch in tqdm(id_loader_dict['train'],
                                  desc='Setup: ',
                                  position=0,
                                  leave=True):
                    data, labels = batch['data'].cuda(), batch['label']
                    logits, features = net(data, return_feature=True)
                    all_feats.append(features.cpu())
                    all_labels.append(deepcopy(labels))
                    all_preds.append(logits.argmax(1).cpu())

            all_feats = torch.cat(all_feats)
            all_labels = torch.cat(all_labels)
            all_preds = torch.cat(all_preds)
            # sanity check on train acc
            train_acc = all_preds.eq(all_labels).float().mean()
            print(f' Train acc: {train_acc:.2%}')

            # compute class-conditional statistics
            self.class_mean = []
            centered_data = []
            for c in range(self.num_classes):
                class_samples = all_feats[all_labels.eq(c)].data
                self.class_mean.append(class_samples.mean(0))
                # center each class at its own mean (tied covariance model)
                centered_data.append(class_samples -
                                     self.class_mean[c].view(1, -1))

            self.class_mean = torch.stack(
                self.class_mean)  # shape [#classes, feature dim]

            group_lasso = sklearn.covariance.EmpiricalCovariance(
                assume_centered=False)
            group_lasso.fit(
                torch.cat(centered_data).cpu().numpy().astype(np.float32))
            # inverse of covariance
            self.precision = torch.from_numpy(group_lasso.precision_).float()
            self.setup_flag = True
        else:
            pass

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        logits, features = net(data, return_feature=True)
        pred = logits.argmax(1)

        # per-class negative squared Mahalanobis distances
        class_scores = torch.zeros((logits.shape[0], self.num_classes))
        for c in range(self.num_classes):
            tensor = features.cpu() - self.class_mean[c].view(1, -1)
            class_scores[:, c] = -torch.matmul(
                torch.matmul(tensor, self.precision), tensor.t()).diag()

        # confidence: distance to the closest class mean
        conf = torch.max(class_scores, dim=1)[0]
        return pred, conf
| 3,062 | 37.2875 | 77 | py |
null | OpenOOD-main/openood/postprocessors/mos_postprocessor.py | from __future__ import absolute_import, division, print_function
import numpy as np
import torch
from torch import nn
from tqdm import tqdm
from .base_postprocessor import BasePostprocessor
def get_group_slices(classes_per_group):
group_slices = []
start = 0
for num_cls in classes_per_group:
end = start + num_cls + 1
group_slices.append([start, end])
start = end
return torch.LongTensor(group_slices)
def cal_ood_score(logits, group_slices):
    """MOS OOD score: for every group, take the softmax probability of the
    group's 'others' logit (index 0 inside the group slice) and negate it;
    the final per-sample score is the maximum over groups.

    Returns:
        numpy array of per-sample scores (higher is more in-distribution).
    """
    smax = torch.nn.Softmax(dim=-1).cuda()
    per_group_scores = []
    for start, end in group_slices:
        group_prob = smax(logits[:, start:end])
        # negate so a confident 'others' prediction lowers the score
        per_group_scores.append(-group_prob[:, 0])
    stacked = torch.stack(per_group_scores, dim=1)
    best_score, _ = torch.max(stacked, dim=1)
    return best_score.data.cpu().numpy()
class MOSPostprocessor(BasePostprocessor):
    """Minimum-Others-Score (MOS) postprocessor: groups classes, adds an
    'others' logit per group and scores OOD-ness from the per-group
    'others' probabilities (see ``cal_ood_score``)."""
    def __init__(self, config):
        super(MOSPostprocessor, self).__init__(config)
        self.config = config
        self.setup_flag = False

    def cal_group_slices(self, train_loader):
        """Determine the per-group class counts — from a .npy/.txt group
        config file when provided, otherwise by scanning the training
        loader's (group_label, class_label) pairs — and cache the
        resulting logit slices on CUDA."""
        config = self.config
        # if specified group_config
        if (config.trainer.group_config.endswith('npy')):
            classes_per_group = np.load(config.trainer.group_config)
        elif (config.trainer.group_config.endswith('txt')):
            classes_per_group = np.loadtxt(config.trainer.group_config,
                                           dtype=int)
        else:
            # cal group config
            config = self.config
            # group: str(group id) -> list of class ids seen in that group
            group = {}
            train_dataiter = iter(train_loader)
            for train_step in tqdm(range(1,
                                         len(train_dataiter) + 1),
                                   desc='cal group_config',
                                   position=0,
                                   leave=True):
                batch = next(train_dataiter)
                group_label = batch['group_label'].cuda()
                class_label = batch['class_label'].cuda()

                for i in range(len(class_label)):
                    # create the group's list on first sight (EAFP)
                    try:
                        group[str(
                            group_label[i].cpu().detach().numpy().tolist())]
                    except:
                        group[str(group_label[i].cpu().detach().numpy().tolist(
                        ))] = []

                    # record each class id only once per group
                    if class_label[i].cpu().detach().numpy().tolist() \
                            not in group[str(group_label[i].cpu().detach().numpy().tolist())]:
                        group[str(group_label[i].cpu().detach().numpy().tolist(
                        ))].append(
                            class_label[i].cpu().detach().numpy().tolist())

            # group size = max class id in the group + 1
            classes_per_group = []
            for i in range(len(group)):
                classes_per_group.append(max(group[str(i)]) + 1)

        self.num_groups = len(classes_per_group)
        self.group_slices = get_group_slices(classes_per_group)
        self.group_slices = self.group_slices.cuda()

    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
        # this postprocessor does not really do anything
        # the inference is done in the mos_evaluator
        pass

    def postprocess(self, net: nn.Module, data):
        """Return (pred, conf): argmax class predictions over the raw
        logits and MOS confidences from ``cal_ood_score``."""
        net.eval()

        confs_mos = []
        with torch.no_grad():
            logits = net(data)
            conf_mos = cal_ood_score(logits, self.group_slices)
            confs_mos.extend(conf_mos)
        # conf = np.array(confs_mos)
        conf = torch.tensor(confs_mos)

        pred = logits.data.max(1)[1]
        return pred, conf
| 3,841 | 34.574074 | 94 | py |
null | OpenOOD-main/openood/postprocessors/npos_postprocessor.py | from typing import Any
import faiss
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
from .base_postprocessor import BasePostprocessor
class NPOSPostprocessor(BasePostprocessor):
    """NPOS postprocessor: builds a faiss L2 index over training-set
    intermediate features and scores a test sample by the (negated)
    distance to its K-th nearest training neighbor."""
    def __init__(self, config):
        super(NPOSPostprocessor, self).__init__(config)
        self.args = self.config.postprocessor.postprocessor_args
        # K: which nearest-neighbor distance to use as the score
        self.K = self.args.K
        self.activation_log = None
        self.args_dict = self.config.postprocessor.postprocessor_sweep
        self.setup_flag = False

    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
        if not self.setup_flag:
            activation_log = []
            net.eval()
            with torch.no_grad():
                for batch in tqdm(id_loader_dict['train'],
                                  desc='Setup: ',
                                  position=0,
                                  leave=True):
                    data = batch['data'].cuda()
                    feature = net.intermediate_forward(data)
                    activation_log.append(feature.data.cpu().numpy())

            self.activation_log = np.concatenate(activation_log, axis=0)
            # exact (flat) L2 index over all training features
            self.index = faiss.IndexFlatL2(feature.shape[1])
            self.index.add(self.activation_log)
            self.setup_flag = True
        else:
            pass

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        feature = net.intermediate_forward(data)
        D, _ = self.index.search(
            feature.cpu().numpy(),  # feature is already normalized within net
            self.K,
        )
        # score = negated distance to the K-th neighbor (closer => more ID)
        kth_dist = -D[:, -1]
        # put dummy prediction here
        # as cider only trains the feature extractor
        pred = torch.zeros(len(kth_dist))
        return pred, torch.from_numpy(kth_dist)

    def set_hyperparam(self, hyperparam: list):
        # hyperparam[0]: the K of the K-th nearest neighbor (APS sweep).
        self.K = hyperparam[0]

    def get_hyperparam(self):
        return self.K
| 1,962 | 31.716667 | 78 | py |
null | OpenOOD-main/openood/postprocessors/odin_postprocessor.py | """Adapted from: https://github.com/facebookresearch/odin."""
from typing import Any
import torch
import torch.nn as nn
from .base_postprocessor import BasePostprocessor
from openood.preprocessors.transform import normalization_dict
class ODINPostprocessor(BasePostprocessor):
    """ODIN postprocessor: temperature scaling plus a small adversarial-
    style input perturbation before taking the max softmax probability."""
    def __init__(self, config):
        super().__init__(config)

        self.args = self.config.postprocessor.postprocessor_args
        self.temperature = self.args.temperature
        self.noise = self.args.noise
        # per-channel normalization std of the dataset; used to rescale
        # the gradient sign so the perturbation matches input units
        try:
            self.input_std = normalization_dict[self.config.dataset.name][1]
        except KeyError:
            self.input_std = [0.5, 0.5, 0.5]
        self.args_dict = self.config.postprocessor.postprocessor_sweep

    def postprocess(self, net: nn.Module, data: Any):
        """Return (pred, conf): predictions and temperature-scaled max
        softmax confidence computed on the perturbed input."""
        data.requires_grad = True
        output = net(data)

        # Calculating the perturbation we need to add, that is,
        # the sign of gradient of cross entropy loss w.r.t. input
        criterion = nn.CrossEntropyLoss()

        labels = output.detach().argmax(axis=1)

        # Using temperature scaling
        output = output / self.temperature

        loss = criterion(output, labels)
        loss.backward()

        # Normalizing the gradient to binary in {0, 1}
        gradient = torch.ge(data.grad.detach(), 0)
        gradient = (gradient.float() - 0.5) * 2

        # Scaling values taken from original code
        gradient[:, 0] = (gradient[:, 0]) / self.input_std[0]
        gradient[:, 1] = (gradient[:, 1]) / self.input_std[1]
        gradient[:, 2] = (gradient[:, 2]) / self.input_std[2]

        # Adding small perturbations to images
        tempInputs = torch.add(data.detach(), gradient, alpha=-self.noise)
        output = net(tempInputs)
        output = output / self.temperature

        # Calculating the confidence after adding perturbations
        # (softmax computed manually with a max-shift for stability)
        nnOutput = output.detach()
        nnOutput = nnOutput - nnOutput.max(dim=1, keepdims=True).values
        nnOutput = nnOutput.exp() / nnOutput.exp().sum(dim=1, keepdims=True)

        conf, pred = nnOutput.max(dim=1)

        return pred, conf

    def set_hyperparam(self, hyperparam: list):
        # hyperparam = [temperature, noise magnitude] (APS sweep interface)
        self.temperature = hyperparam[0]
        self.noise = hyperparam[1]

    def get_hyperparam(self):
        return [self.temperature, self.noise]
| 2,324 | 32.695652 | 76 | py |
null | OpenOOD-main/openood/postprocessors/opengan_postprocessor.py | from typing import Any
import torch
from .base_postprocessor import BasePostprocessor
class OpenGanPostprocessor(BasePostprocessor):
    """OpenGAN postprocessor: scores samples with the trained GAN
    discriminator ``net['netD']``, either on raw images (features are
    first extracted by ``net['backbone']``) or directly on precomputed
    [N, C, 1, 1] feature tensors."""
    def __init__(self, config):
        super(OpenGanPostprocessor, self).__init__(config)

    @torch.no_grad()
    def postprocess(self, net, data: Any):
        """Return (pred, conf): class predictions and discriminator
        outputs used as confidences.

        Raises:
            ValueError: if ``data`` is neither an image batch
                [N, 3, H, W] nor a feature batch [N, C, 1, 1].
        """
        # images input: [N, 3, H, W] with spatial extent
        if data.shape[-1] > 1 and data.shape[1] == 3:
            output = net['backbone'](data)
            score = torch.softmax(output, dim=1)
            _, pred = torch.max(score, dim=1)
            # re-run the backbone for penultimate features and reshape
            # them to [N, C, 1, 1] for the conv discriminator
            _, feats = net['backbone'](data, return_feature=True)
            feats = feats.unsqueeze_(-1).unsqueeze_(-1)
            predConf = net['netD'](feats)
            predConf = predConf.view(-1, 1)
            conf = predConf.reshape(-1).detach().cpu()
        # feature input: already shaped [N, C, 1, 1].
        # NOTE: the original condition tested shape[-1] twice instead of
        # checking shape[-2], and unmatched shapes fell through to the
        # return with `pred`/`conf` unbound (NameError).
        elif data.shape[-1] == 1 and data.shape[-2] == 1:
            predConf = net['netD'](data)
            predConf = predConf.view(-1, 1)
            conf = predConf.reshape(-1).detach().cpu()
            pred = torch.ones_like(conf)  # dummy predictions
        else:
            raise ValueError(
                f'unsupported input shape {tuple(data.shape)}: expected '
                'images [N, 3, H, W] or features [N, C, 1, 1]')
        return pred, conf
| 1,083 | 31.848485 | 65 | py |
null | OpenOOD-main/openood/postprocessors/openmax_postprocessor.py | import libmr
import numpy as np
import scipy.spatial.distance as spd
import torch
import torch.nn as nn
from tqdm import tqdm
from .base_postprocessor import BasePostprocessor
from .info import num_classes_dict
class OpenMax(BasePostprocessor):
    """OpenMax postprocessor: fits per-class Weibull tail models over
    distances to mean activation vectors on the training set, then
    recalibrates test logits into an extra 'unknown' class probability."""
    def __init__(self, config):
        super(OpenMax, self).__init__(config)
        self.nc = num_classes_dict[config.dataset.name]
        # EVT hyperparameters: top-alpha classes to recalibrate, the
        # acceptance threshold, and the Weibull tail size
        self.weibull_alpha = 3
        self.weibull_threshold = 0.9
        self.weibull_tail = 20
        self.setup_flag = False

    def setup(self, net: nn.Module, id_loader_dict, ood_loder_dict):
        if not self.setup_flag:
            # Fit the weibull distribution from training data.
            print('Fittting Weibull distribution...')
            _, mavs, dists = compute_train_score_and_mavs_and_dists(
                self.nc, id_loader_dict['train'], device='cuda', net=net)
            categories = list(range(0, self.nc))
            self.weibull_model = fit_weibull(mavs, dists, categories,
                                             self.weibull_tail, 'euclidean')
            self.setup_flag = True
        else:
            pass

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data):
        """Return (pred, conf): OpenMax predictions (last class index used
        as 'unknown' when the best probability falls below the threshold)
        and the negated unknown-class probability as confidence."""
        net.eval()
        scores = net(data).cpu().numpy()
        # add a singleton 'channel' axis: (N, 1, C)
        scores = np.array(scores)[:, np.newaxis, :]
        categories = list(range(0, self.nc))

        pred_openmax = []
        score_openmax = []
        for score in scores:
            so, _ = openmax(self.weibull_model, categories, score, 0.5,
                            self.weibull_alpha,
                            'euclidean')  # openmax_prob, softmax_prob
            pred_openmax.append(
                np.argmax(so) if np.max(so) >= self.weibull_threshold else (
                    self.nc - 1))
            score_openmax.append(so)

        pred = torch.tensor(pred_openmax)
        # so[-1] is the unknown-class probability; negate so higher = more ID
        conf = -1 * torch.from_numpy(np.array(score_openmax))[:, -1]
        return pred, conf
def compute_channel_distances(mavs, features, eu_weight=0.5):
    """Distances from each channel's mean activation vector (MAV) to
    every activation in ``features``.

    Input:
        mavs (channel, C)
        features: (N, channel, C)
        eu_weight: weight of the euclidean term in the 'eucos' mixture.
    Output:
        dict with 'eucos', 'cosine' and 'euclidean' arrays, each of shape
        (channel, N).
    """
    eu_rows, cos_rows, eucos_rows = [], [], []
    for channel, mcv in enumerate(mavs):  # Compute channel specific distances
        eu = [spd.euclidean(mcv, feat[channel]) for feat in features]
        cos = [spd.cosine(mcv, feat[channel]) for feat in features]
        eu_rows.append(eu)
        cos_rows.append(cos)
        # eucos = weighted euclidean plus cosine distance
        eucos_rows.append([e * eu_weight + c for e, c in zip(eu, cos)])
    return {
        'eucos': np.array(eucos_rows),
        'cosine': np.array(cos_rows),
        'euclidean': np.array(eu_rows)
    }
def compute_train_score_and_mavs_and_dists(train_class_num, trainloader,
                                           device, net):
    """Collect, per class, the logits of correctly classified training
    samples, their mean activation vectors (MAVs) and the distance
    distributions to those MAVs.

    Returns:
        scores: list of (N_c, 1, C) arrays, one per class.
        mavs: (C, 1, C) array of per-class mean activation vectors.
        dists: per-class distance dicts from compute_channel_distances.
    """
    scores = [[] for _ in range(train_class_num)]
    train_dataiter = iter(trainloader)

    with torch.no_grad():
        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Progress: ',
                               position=0,
                               leave=True):
            batch = next(train_dataiter)
            data = batch['data'].cuda()
            target = batch['label'].cuda()
            # this must cause error for cifar
            outputs = net(data)
            # keep only correctly classified samples for the MAV estimate
            for score, t in zip(outputs, target):
                if torch.argmax(score) == t:
                    scores[t].append(score.unsqueeze(dim=0).unsqueeze(dim=0))

    scores = [torch.cat(x).cpu().numpy() for x in scores]  # (N_c, 1, C) * C
    mavs = np.array([np.mean(x, axis=0) for x in scores])  # (C, 1, C)
    dists = [
        compute_channel_distances(mcv, score)
        for mcv, score in zip(mavs, scores)
    ]
    return scores, mavs, dists
def fit_weibull(means, dists, categories, tailsize=20, distance_type='eucos'):
    """Fit one Weibull tail model per (category, channel) for EVT-based
    score recalibration.

    Input:
        means (C, channel, C)
        dists (N_c, channel, C) * C
        categories: category names keying the returned dict.
        tailsize: number of largest distances used to fit each tail.
        distance_type: which distance array from ``dists`` to fit on.
    Output:
        weibull_model: dict mapping category -> {'mean_vec',
        'distances_<type>', 'weibull_model' (one fitted libmr.MR per
        channel)}.
    """
    weibull_model = {}
    dist_key = 'distances_{}'.format(distance_type)
    for mean, dist, category_name in zip(means, dists, categories):
        per_channel_models = []
        for channel in range(mean.shape[0]):
            mr = libmr.MR()
            # fit only the `tailsize` largest distances (the upper tail)
            tailtofit = np.sort(dist[distance_type][channel, :])[-tailsize:]
            mr.fit_high(tailtofit, len(tailtofit))
            per_channel_models.append(mr)
        weibull_model[category_name] = {
            dist_key: dist[distance_type],
            'mean_vec': mean,
            'weibull_model': per_channel_models,
        }
    return weibull_model
def compute_openmax_prob(scores, scores_u):
    """Convert per-channel recalibrated scores into OpenMax probabilities.

    Args:
        scores: per-channel known-class scores.
        scores_u: per-channel score mass moved to the unknown class.

    Returns:
        list of length n_classes + 1: channel-averaged known-class
        probabilities followed by the unknown-class probability.
    """
    known_probs, unknown_probs = [], []
    for channel_scores, channel_unknowns in zip(scores, scores_u):
        exp_known = np.exp(channel_scores)
        exp_unknown = np.exp(np.sum(channel_unknowns))
        denom = np.sum(exp_known) + exp_unknown
        known_probs.append(exp_known / denom)
        unknown_probs.append(exp_unknown / denom)

    # Take channel mean, appending the unknown-class probability last
    mean_known = np.mean(known_probs, axis=0)
    mean_unknown = np.mean(unknown_probs, axis=0)
    return mean_known.tolist() + [mean_unknown]
def query_weibull(category_name, weibull_model, distance_type='eucos'):
    """Look up one category's fitted Weibull data.

    Returns:
        [mean_vec, distances for ``distance_type``, per-channel MR models].
    """
    entry = weibull_model[category_name]
    return [
        entry['mean_vec'],
        entry['distances_{}'.format(distance_type)],
        entry['weibull_model'],
    ]
def calc_distance(query_score, mcv, eu_weight, distance_type='eucos'):
    """Distance between a query activation and a mean activation vector.

    Args:
        query_score: query activation vector.
        mcv: mean activation vector.
        eu_weight: weight of the euclidean term for 'eucos'.
        distance_type: 'eucos', 'euclidean' or 'cosine'.

    Raises:
        ValueError: for an unknown ``distance_type``. (The original code
            only printed a warning and then crashed with
            UnboundLocalError on the return statement.)
    """
    if distance_type == 'eucos':
        return spd.euclidean(mcv, query_score) * eu_weight + \
            spd.cosine(mcv, query_score)
    if distance_type == 'euclidean':
        return spd.euclidean(mcv, query_score)
    if distance_type == 'cosine':
        return spd.cosine(mcv, query_score)
    raise ValueError('distance type not known: enter either of eucos, '
                     'euclidean or cosine')
def softmax(x):
    """Numerically stable softmax over the whole array (max-shifted)."""
    shifted = np.exp(x - np.max(x))
    return shifted / shifted.sum()
def openmax(weibull_model,
            categories,
            input_score,
            eu_weight,
            alpha=10,
            distance_type='eucos'):
    """Re-calibrate scores via OpenMax layer

    For the top-``alpha`` classes, each class score is shrunk by its
    Weibull CDF value at the distance to that class's MAV; the removed
    mass is accumulated into an extra 'unknown' class.

    Output:
        openmax probability and softmax probability
    """
    nb_classes = len(categories)

    # rank classes by score; only the top-alpha get non-zero omega weights
    ranked_list = input_score.argsort().ravel()[::-1][:alpha]
    alpha_weights = [((alpha + 1) - i) / float(alpha)
                     for i in range(1, alpha + 1)]
    omega = np.zeros(nb_classes)
    omega[ranked_list] = alpha_weights

    scores, scores_u = [], []
    for channel, input_score_channel in enumerate(input_score):
        score_channel, score_channel_u = [], []
        for c, category_name in enumerate(categories):
            mav, dist, model = query_weibull(category_name, weibull_model,
                                             distance_type)
            channel_dist = calc_distance(input_score_channel, mav[channel],
                                         eu_weight, distance_type)
            # w_score: Weibull CDF value — probability of being an outlier
            wscore = model[channel].w_score(channel_dist)
            modified_score = input_score_channel[c] * (1 - wscore * omega[c])
            score_channel.append(modified_score)
            # the shaved-off mass goes to the unknown class
            score_channel_u.append(input_score_channel[c] - modified_score)

        scores.append(score_channel)
        scores_u.append(score_channel_u)

    scores = np.asarray(scores)
    scores_u = np.asarray(scores_u)

    openmax_prob = np.array(compute_openmax_prob(scores, scores_u))
    softmax_prob = softmax(np.array(input_score.ravel()))
    return openmax_prob, softmax_prob
| 8,078 | 34.747788 | 79 | py |
null | OpenOOD-main/openood/postprocessors/patchcore_postprocessor.py | from __future__ import absolute_import, division, print_function
import abc
import os
import faiss
import numpy as np
import torch
from sklearn.metrics import pairwise_distances
from sklearn.random_projection import SparseRandomProjection
from torch import nn
from torch.nn import functional as F
from tqdm import tqdm
from .base_postprocessor import BasePostprocessor
def embedding_concat(x, y):
B, C1, H1, W1 = x.size()
_, C2, H2, W2 = y.size()
s = int(H1 / H2)
x = F.unfold(x, kernel_size=s, dilation=1, stride=s)
x = x.view(B, C1, -1, H2, W2)
z = torch.zeros(B, C1 + C2, x.size(2), H2, W2)
for i in range(x.size(2)):
z[:, :, i, :, :] = torch.cat((x[:, :, i, :, :], y), 1)
z = z.view(B, -1, H2 * W2)
z = F.fold(z, kernel_size=s, output_size=(H1, W1), stride=s)
return z
def reshape_embedding(embedding):
    """Flatten an [N, C, H, W] embedding into a list of per-position
    C-dimensional vectors, ordered by (sample, row, column)."""
    return [
        embedding[k, :, i, j]
        for k in range(embedding.shape[0])
        for i in range(embedding.shape[2])
        for j in range(embedding.shape[3])
    ]
class PatchcorePostprocessor(BasePostprocessor):
    def __init__(self, config):
        """Initialize PatchCore state from the experiment config.

        ``n_neighbors`` controls the k-NN search against the coreset
        memory bank built in ``setup``; the remaining attributes are
        accumulators filled during setup/evaluation.
        """
        super(PatchcorePostprocessor, self).__init__(config)
        self.config = config
        self.postprocessor_args = config.postprocessor.postprocessor_args
        self.n_neighbors = config.postprocessor.postprocessor_args.n_neighbors
        self.feature_mean, self.feature_prec = None, None
        self.alpha_list = None
        # pixel-level and image-level ground truth / prediction buffers
        self.gt_list_px_lvl = []
        self.pred_list_px_lvl = []
        self.gt_list_img_lvl = []
        self.pred_list_img_lvl = []
        self.img_path_list = []
        # patch embeddings collected during setup
        self.features = []
def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
# step 1:
self.model = net
# on train start
self.model.eval() # to stop running_var move (maybe not critical)
self.embedding_list = []
if (self.config.network.load_cached_faiss):
path = self.config.output_dir
# load index
if os.path.isfile(os.path.join(path, 'index.faiss')):
self.index = faiss.read_index(os.path.join(
path, 'index.faiss'))
if torch.cuda.is_available():
res = faiss.StandardGpuResources()
self.index = faiss.index_cpu_to_gpu(res, 0, self.index)
self.init_results_list()
return
# training step
train_dataiter = iter(id_loader_dict['train'])
for train_step in tqdm(range(1,
len(train_dataiter) + 1),
position=0,
leave=True):
batch = next(train_dataiter)
x = batch['data'].cuda()
features = self.model.forward(x, return_feature=True)
embeddings = []
for feature in features:
m = torch.nn.AvgPool2d(9, 1, 1)
embeddings.append(m(feature))
embedding = embedding_concat(embeddings[0], embeddings[1])
self.embedding_list.extend(reshape_embedding(np.array(embedding)))
# training end
total_embeddings = np.array(self.embedding_list)
# Random projection
print('Random projection')
self.randomprojector = SparseRandomProjection(
n_components='auto',
eps=0.9) # 'auto' => Johnson-Lindenstrauss lemma
self.randomprojector.fit(total_embeddings)
# Coreset Subsampling
print('Coreset Subsampling')
selector = kCenterGreedy(total_embeddings, 0, 0)
selected_idx = selector.select_batch(
model=self.randomprojector,
already_selected=[],
N=int(total_embeddings.shape[0] *
self.postprocessor_args.coreset_sampling_ratio))
self.embedding_coreset = total_embeddings[selected_idx]
print('initial embedding size : ', total_embeddings.shape)
print('final embedding size : ', self.embedding_coreset.shape)
# faiss
print('faiss indexing')
self.index = faiss.IndexFlatL2(self.embedding_coreset.shape[1])
self.index.add(self.embedding_coreset)
if not os.path.isdir(os.path.join('./results/patch/')):
os.mkdir('./results/patch/')
faiss.write_index(self.index,
os.path.join('./results/patch/', 'index.faiss'))
def init_results_list(self):
self.gt_list_px_lvl = []
self.pred_list_px_lvl = []
self.gt_list_img_lvl = []
self.pred_list_img_lvl = []
def postprocess(self, net: nn.Module, data):
self.init_results_list()
score_patch = []
# extract embedding
for x in data.split(1, dim=0):
features = self.model.forward(x, return_feature=True)
embeddings = []
for feature in features:
m = torch.nn.AvgPool2d(3, 1, 1)
embeddings.append(m(feature))
embedding_ = embedding_concat(embeddings[0], embeddings[1])
embedding_test = np.array(reshape_embedding(np.array(embedding_)))
score_patches, _ = self.index.search(embedding_test,
k=self.n_neighbors)
score_patch.append(score_patches)
N_b = score_patches[np.argmax(score_patches[:, 0])]
w = (1 - (np.max(np.exp(N_b)) / np.sum(np.exp(N_b))))
score = w * max(score_patches[:, 0]) # Image-level score
self.pred_list_img_lvl.append(score)
pred = []
for i in self.pred_list_img_lvl:
# 6.3 is the trial value.
if (i > 6.3):
pred.append(torch.tensor(1))
else:
pred.append(torch.tensor(-1))
conf = []
for i in score_patch:
conf.append(i)
conf = torch.tensor(conf, dtype=torch.float32)
conf = conf.cuda()
pred_list_img_lvl = []
for patchscore in np.concatenate([conf.cpu().tolist()]):
N_b = patchscore[np.argmax(patchscore[:, 0])]
w = (1 - (np.max(np.exp(N_b)) / np.sum(np.exp(N_b))))
score = w * max(patchscore[:, 0]) # Image-level score
pred_list_img_lvl.append(score)
if self.config.evaluator.name == 'patch':
return pred, conf
else:
return pred, -1 * torch.tensor(pred_list_img_lvl).cuda()
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract class for sampling methods.
Provides interface to sampling methods that allow same signature for
select_batch. Each subclass implements select_batch_ with the desired
signature for readability.
"""
class SamplingMethod(abc.ABC):
    """Abstract base class for batch-sampling strategies.

    Subclasses implement ``select_batch_`` with whatever signature is
    convenient; callers go through ``select_batch``, which forwards all
    keyword arguments unchanged.

    Note: the original code set ``__metaclass__ = abc.ABCMeta``, which is
    a Python-2 idiom and has no effect in Python 3 -- the class was
    silently instantiable.  Inheriting from ``abc.ABC`` restores the
    intended enforcement of the abstract methods.
    """

    @abc.abstractmethod
    def __init__(self, X, y, seed, **kwargs):
        self.X = X
        self.y = y
        self.seed = seed

    def flatten_X(self):
        """Return ``self.X`` reshaped to 2-D (n_samples, n_features)."""
        shape = self.X.shape
        flat_X = self.X
        if len(shape) > 2:
            # np.prod replaces np.product, which was removed in NumPy 2.0.
            flat_X = np.reshape(self.X, (shape[0], np.prod(shape[1:])))
        return flat_X

    @abc.abstractmethod
    def select_batch_(self):
        return

    def select_batch(self, **kwargs):
        return self.select_batch_(**kwargs)

    def to_dict(self):
        return None
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Returns points that minimizes the maximum distance of any point to a center.
Implements the k-Center-Greedy method in
Ozan Sener and Silvio Savarese. A Geometric Approach to Active Learning for
Convolutional Neural Networks. https://arxiv.org/abs/1708.00489 2017
Distance metric defaults to l2 distance. Features used to calculate distance
are either raw features or if a model has transform method then uses the output
of model.transform(X).
Can be extended to a robust k centers algorithm that ignores a certain number
of outlier datapoints.
Resulting centers are solution to multiple integer program.
"""
class kCenterGreedy(SamplingMethod):
    """Greedy k-center coreset selection (Sener & Savarese, 2017).

    Maintains, for every observation, its minimum distance to any chosen
    center and repeatedly picks the farthest point as the next center.
    """
    def __init__(self, X, y, seed, metric='euclidean'):
        # NOTE(review): does not call super().__init__ -- attributes are
        # set directly, matching the upstream Google reference code.
        self.X = X
        self.y = y
        self.flat_X = self.flatten_X()
        self.name = 'kcenter'
        self.features = self.flat_X
        self.metric = metric
        self.min_distances = None  # (n_obs, 1) min distance to any center
        self.n_obs = self.X.shape[0]
        self.already_selected = []

    def update_distances(self,
                         cluster_centers,
                         only_new=True,
                         reset_dist=False):
        """Update min distances given cluster centers.

        Args:
          cluster_centers: indices of cluster centers
          only_new: only calculate distance for newly selected points and
            update min_distances.
          reset_dist: whether to reset min_distances.
        """
        if reset_dist:
            self.min_distances = None
        if only_new:
            cluster_centers = [
                d for d in cluster_centers if d not in self.already_selected
            ]
        if cluster_centers:
            # Update min_distances for all examples given new cluster center.
            x = self.features[cluster_centers]
            dist = pairwise_distances(self.features, x, metric=self.metric)

            if self.min_distances is None:
                self.min_distances = np.min(dist, axis=1).reshape(-1, 1)
            else:
                self.min_distances = np.minimum(self.min_distances, dist)

    def select_batch_(self, model, already_selected, N, **kwargs):
        """Diversity promoting active learning method that greedily forms a
        batch to minimize the maximum distance to a cluster center among all
        unlabeled datapoints.

        Args:
          model: model with scikit-like API with decision_function implemented
          already_selected: index of datapoints already selected
          N: batch size

        Returns:
          indices of points selected to minimize distance to cluster centers
        """
        try:
            # Assumes that the transform function takes in original data and
            # not flattened data.
            print('Getting transformed features...')
            self.features = model.transform(self.X)
            print('Calculating distances...')
            self.update_distances(already_selected,
                                  only_new=False,
                                  reset_dist=True)
        except:
            # NOTE(review): bare except -- any failure in transform falls
            # back to the raw flattened features (upstream behavior).
            print('Using flat_X as features.')
            self.update_distances(already_selected,
                                  only_new=True,
                                  reset_dist=False)

        new_batch = []

        for _ in tqdm(range(N)):
            # NOTE(review): already_selected is initialized to [] in
            # __init__ and only ever reassigned to a list, so this `is
            # None` branch appears unreachable -- kept as upstream.
            if self.already_selected is None:
                # Initialize centers with a randomly selected datapoint
                ind = np.random.choice(np.arange(self.n_obs))
            else:
                ind = np.argmax(self.min_distances)
            # New examples should not be in already selected since those points
            # should have min_distance of zero to a cluster center.
            assert ind not in already_selected

            self.update_distances([ind], only_new=True, reset_dist=False)
            new_batch.append(ind)
        print('Maximum distance from cluster centers is %0.2f' %
              max(self.min_distances))

        self.already_selected = already_selected

        return new_batch
| 12,580 | 35.256484 | 79 | py |
null | OpenOOD-main/openood/postprocessors/rankfeat_postprocessor.py | from typing import Any
import torch
import torch.nn as nn
from .base_postprocessor import BasePostprocessor
class RankFeatPostprocessor(BasePostprocessor):
    """RankFeat: remove the dominant rank-1 component from intermediate
    feature maps (blocks 3 and 4), then fuse the resulting logits and
    score with a temperature-scaled energy."""
    def __init__(self, config):
        super(RankFeatPostprocessor, self).__init__(config)
        self.config = config
        self.args = self.config.postprocessor.postprocessor_args

    def _remove_rank1(self, feat):
        """Subtract the rank-1 (top singular) component of each sample's
        flattened feature map, via SVD or accelerated power iteration."""
        B, C, H, W = feat.size()
        flat = feat.view(B, C, H * W)
        if self.args.accelerate:
            flat = flat - power_iteration(flat, iter=20)
        else:
            u, s, v = torch.linalg.svd(flat, full_matrices=False)
            flat = flat - s[:, 0:1].unsqueeze(2) * u[:, :, 0:1].bmm(
                v[:, 0:1, :])
        return flat.view(B, C, H, W)

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        inputs = data.cuda()

        # Logit of Block 4 feature
        feat1 = self._remove_rank1(
            net.intermediate_forward(inputs, layer_index=4))
        logits1 = net.fc(torch.flatten(net.avgpool(feat1), 1))

        # Logit of Block 3 feature (finish the forward pass through layer4)
        feat2 = self._remove_rank1(
            net.intermediate_forward(inputs, layer_index=3))
        feat2 = net.layer4(feat2)
        logits2 = net.fc(torch.flatten(net.avgpool(feat2), 1))

        # Fusion at the logit space
        logits = (logits1 + logits2) / 2
        conf = self.args.temperature * torch.logsumexp(
            logits / self.args.temperature, dim=1)
        _, pred = torch.max(logits, dim=1)
        return pred, conf
def _l2normalize(v, eps=1e-10):
return v / (torch.norm(v, dim=2, keepdim=True) + eps)
# Power Iteration as SVD substitute for acceleration
def power_iteration(A, iter=20):
    """Approximate the dominant rank-1 component of each matrix in ``A``.

    ``A`` has shape (B, M, N).  Randomly initialized vectors are refined by
    ``iter`` rounds of power iteration; the return value is
    sigma * u v^T, the rank-1 term that the SVD path would subtract.
    Note: the starting vectors are random (no fixed seed), so results vary
    slightly between calls.
    """
    batch, rows, cols = A.size(0), A.size(1), A.size(2)
    # One shared random initialization, repeated across the batch and
    # moved to A's device/dtype via .to(A).
    u = torch.FloatTensor(1, rows).normal_(0, 1).view(1, 1, rows).repeat(
        batch, 1, 1).to(A)
    v = torch.FloatTensor(cols, 1).normal_(0, 1).view(1, cols, 1).repeat(
        batch, 1, 1).to(A)
    for _ in range(iter):
        v = _l2normalize(u.bmm(A)).transpose(1, 2)
        u = _l2normalize(A.bmm(v).transpose(1, 2))
    sigma = u.bmm(A).bmm(v)
    return sigma * u.transpose(1, 2).bmm(v.transpose(1, 2))
| 2,605 | 34.69863 | 71 | py |
null | OpenOOD-main/openood/postprocessors/rd4ad_postprocessor.py | from typing import Any
import numpy as np
import torch
import torch.nn as nn
from scipy.ndimage import gaussian_filter
from torch.nn import functional as F
from .base_postprocessor import BasePostprocessor
class Rd4adPostprocessor(BasePostprocessor):
    """Reverse Distillation (RD4AD) anomaly scoring.

    ``net`` is indexable and holds an 'encoder', a bottleneck 'bn' and a
    'decoder'; per-image scores come from cosine dissimilarity between
    encoder features and their decoder reconstructions.
    """
    def __init__(self, config):
        super(Rd4adPostprocessor, self).__init__(config)

    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
        # No statistics to fit; scoring only needs the trained networks.
        pass

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        """Return a constant -1 prediction and a per-image confidence.

        Confidence is the negated maximum of the Gaussian-smoothed anomaly
        map, so larger values mean "more in-distribution".
        """
        encoder = net['encoder']
        bn = net['bn']
        decoder = net['decoder']
        # [1] selects the feature-list output; layers 1..3 form the
        # feature pyramid compared against the decoder reconstruction.
        feature_list = encoder.forward(data, return_feature_list=True)[1]
        input = feature_list[1:4]
        # NOTE(review): tensors are round-tripped through Python lists
        # (cpu().numpy().tolist()) and rebuilt inside cal_anomaly_map --
        # slow, but kept as-is; confirm before optimizing.
        en_feature1 = input[0].cpu().numpy().tolist()
        en_feature2 = input[1].cpu().numpy().tolist()
        en_feature3 = input[2].cpu().numpy().tolist()
        output = decoder(bn(input))
        de_feature1 = output[0].cpu().numpy().tolist()
        de_feature2 = output[1].cpu().numpy().tolist()
        de_feature3 = output[2].cpu().numpy().tolist()
        conf_list = []
        for i in range(data.shape[0]):
            # Collect the three encoder/decoder feature scales for image i.
            feature_list_en = []
            feature_list_de = []
            feature_list_en.append(en_feature1[i])
            feature_list_en.append(en_feature2[i])
            feature_list_en.append(en_feature3[i])
            feature_list_de.append(de_feature1[i])
            feature_list_de.append(de_feature2[i])
            feature_list_de.append(de_feature3[i])
            # 'a' mode sums the per-scale maps at input resolution.
            anomaly_map, _ = cal_anomaly_map(feature_list_en,
                                             feature_list_de,
                                             data.shape[-1],
                                             amap_mode='a')
            # Smooth the map before taking the per-image maximum.
            anomaly_map = gaussian_filter(anomaly_map, sigma=4)
            conf = np.max(anomaly_map)
            conf_list.append(-conf)
        return -1 * torch.ones(data.shape[0]), torch.tensor(
            [conf_list]).reshape((data.shape[0]))
# def inference(self, net: nn.Module, data_loader: DataLoader):
# pred_list, conf_list, label_list = [], [], []
# for batch in data_loader:
# data = batch['data'].cuda()
# label = batch['label'].cuda()
# import pdb
# pdb.set_trace()
# conf = self.postprocess(net, data)
# for idx in range(len(data)):
# conf_list.append(conf[idx].tolist())
# label_list.append(label[idx].cpu().tolist())
# # convert values into numpy array
# conf_list = np.array(conf_list)
# label_list = np.array(label_list, dtype=int)
# return pred_list, conf_list, label_list
def cal_anomaly_map(fs_list, ft_list, out_size=224, amap_mode='mul'):
    """Build a pixel-level anomaly map from paired feature pyramids.

    For each scale i, the response is 1 - cosine similarity between
    ``fs_list[i]`` and ``ft_list[i]`` (each a C x H x W feature),
    bilinearly upsampled to ``out_size``.  Scales are combined by product
    when ``amap_mode == 'mul'``, otherwise by sum.

    Returns:
        (combined_map, per_scale_maps) as numpy arrays.
    """
    multiply = amap_mode == 'mul'
    if multiply:
        combined = np.ones([out_size, out_size])
    else:
        combined = np.zeros([out_size, out_size])

    per_scale = []
    for idx in range(len(ft_list)):
        fs = torch.Tensor([fs_list[idx]])
        ft = torch.Tensor([ft_list[idx]])
        response = 1 - F.cosine_similarity(fs, ft)
        response = torch.unsqueeze(response, dim=1)
        response = F.interpolate(response,
                                 size=out_size,
                                 mode='bilinear',
                                 align_corners=True)
        response = response[0, 0, :, :].to('cpu').detach().numpy()
        per_scale.append(response)
        if multiply:
            combined *= response
        else:
            combined += response
    return combined, per_scale
| 3,562 | 35.731959 | 73 | py |
null | OpenOOD-main/openood/postprocessors/react_postprocessor.py | from typing import Any
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
from .base_postprocessor import BasePostprocessor
class ReactPostprocessor(BasePostprocessor):
    """ReAct: clip penultimate activations at a percentile threshold.

    ``setup`` caches ID validation features; ``set_hyperparam`` converts a
    percentile into the activation threshold that
    ``net.forward_threshold`` applies before the energy score is computed.
    """
    def __init__(self, config):
        super(ReactPostprocessor, self).__init__(config)
        self.args = self.config.postprocessor.postprocessor_args
        self.percentile = self.args.percentile
        self.args_dict = self.config.postprocessor.postprocessor_sweep
        self.setup_flag = False

    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
        if self.setup_flag:
            return
        net.eval()
        feature_batches = []
        with torch.no_grad():
            for batch in tqdm(id_loader_dict['val'],
                              desc='Setup: ',
                              position=0,
                              leave=True):
                inputs = batch['data'].cuda().float()
                _, feature = net(inputs, return_feature=True)
                feature_batches.append(feature.data.cpu().numpy())
        self.activation_log = np.concatenate(feature_batches, axis=0)
        self.setup_flag = True

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        logits = net.forward_threshold(data, self.threshold)
        probs = torch.softmax(logits, dim=1)
        _, pred = torch.max(probs, dim=1)
        # Energy score computed on the rectified logits.
        energyconf = torch.logsumexp(logits.data.cpu(), dim=1)
        return pred, energyconf

    def set_hyperparam(self, hyperparam: list):
        self.percentile = hyperparam[0]
        self.threshold = np.percentile(self.activation_log.flatten(),
                                       self.percentile)
        print('Threshold at percentile {:2d} over id data is: {}'.format(
            self.percentile, self.threshold))

    def get_hyperparam(self):
        return self.percentile
| 1,973 | 34.25 | 73 | py |
null | OpenOOD-main/openood/postprocessors/residual_postprocessor.py | from typing import Any
import numpy as np
import torch
import torch.nn as nn
from numpy.linalg import norm, pinv
from sklearn.covariance import EmpiricalCovariance
from tqdm import tqdm
from .base_postprocessor import BasePostprocessor
class ResidualPostprocessor(BasePostprocessor):
    """Residual score: norm of a feature's projection onto the residual
    subspace (eigenvectors beyond the top ``dim``) of the centered ID
    feature covariance.  Larger (less negative) scores mean more ID."""
    def __init__(self, config):
        super().__init__(config)
        self.args = self.config.postprocessor.postprocessor_args
        self.dim = self.args.dim

    def _collect_features(self, net, loader, message):
        """Run ``loader`` through ``net`` and stack penultimate features."""
        print(message)
        chunks = []
        for batch in tqdm(loader, desc='Eval: ', position=0, leave=True):
            inputs = batch['data'].cuda().float()
            _, feature = net(inputs, return_feature=True)
            chunks.append(feature.cpu().numpy())
        return np.concatenate(chunks, axis=0)

    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
        net.eval()
        with torch.no_grad():
            self.w, self.b = net.get_fc()
            feature_id_train = self._collect_features(
                net, id_loader_dict['val'], 'Extracting id training feature')
            feature_id_val = self._collect_features(
                net, id_loader_dict['test'], 'Extracting id testing feature')

            # Center features at the point mapping to zero logits.
            self.u = -np.matmul(pinv(self.w), self.b)
            ec = EmpiricalCovariance(assume_centered=True)
            ec.fit(feature_id_train - self.u)
            eig_vals, eigen_vectors = np.linalg.eig(ec.covariance_)
            # Residual subspace: eigenvectors beyond the top self.dim
            # (sorted by decreasing eigenvalue).
            self.NS = np.ascontiguousarray(
                (eigen_vectors.T[np.argsort(eig_vals * -1)[self.dim:]]).T)
            self.score_id = -norm(np.matmul(feature_id_val - self.u, self.NS),
                                  axis=-1)

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        _, feature_ood = net(data, return_feature=True)
        logit_ood = feature_ood.cpu() @ self.w.T + self.b
        _, pred = torch.max(logit_ood, dim=1)
        score_ood = -norm(np.matmul(feature_ood.cpu() - self.u, self.NS),
                          axis=-1)
        return pred, torch.from_numpy(score_ood)
| 2,527 | 37.30303 | 74 | py |
null | OpenOOD-main/openood/postprocessors/rmds_postprocessor.py | from copy import deepcopy
from typing import Any
import numpy as np
import torch
import torch.nn as nn
import sklearn.covariance
from tqdm import tqdm
from .base_postprocessor import BasePostprocessor
from .info import num_classes_dict
class RMDSPostprocessor(BasePostprocessor):
    """Relative Mahalanobis distance (RMDS).

    Scores are class-conditional Mahalanobis distances minus a
    "background" Mahalanobis distance computed from whole-dataset
    statistics; the maximum over classes is the confidence.
    """
    def __init__(self, config):
        # NOTE(review): does not call super().__init__(config), unlike
        # sibling postprocessors -- confirm BasePostprocessor state is
        # not needed here.
        self.config = config
        self.num_classes = num_classes_dict[self.config.dataset.name]
        self.setup_flag = False

    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
        """Fit per-class and whole-dataset mean/precision on train features."""
        if not self.setup_flag:
            # estimate mean and variance from training set
            print('\n Estimating mean and variance from training set...')
            all_feats = []
            all_labels = []
            all_preds = []
            with torch.no_grad():
                for batch in tqdm(id_loader_dict['train'],
                                  desc='Setup: ',
                                  position=0,
                                  leave=True):
                    data, labels = batch['data'].cuda(), batch['label']
                    logits, features = net(data, return_feature=True)
                    all_feats.append(features.cpu())
                    all_labels.append(deepcopy(labels))
                    all_preds.append(logits.argmax(1).cpu())

            all_feats = torch.cat(all_feats)
            all_labels = torch.cat(all_labels)
            all_preds = torch.cat(all_preds)
            # sanity check on train acc
            train_acc = all_preds.eq(all_labels).float().mean()
            print(f' Train acc: {train_acc:.2%}')

            # compute class-conditional statistics: per-class means plus a
            # single covariance shared across classes (tied covariance).
            self.class_mean = []
            centered_data = []
            for c in range(self.num_classes):
                class_samples = all_feats[all_labels.eq(c)].data
                self.class_mean.append(class_samples.mean(0))
                centered_data.append(class_samples -
                                     self.class_mean[c].view(1, -1))

            self.class_mean = torch.stack(
                self.class_mean)  # shape [#classes, feature dim]

            group_lasso = sklearn.covariance.EmpiricalCovariance(
                assume_centered=False)
            group_lasso.fit(
                torch.cat(centered_data).cpu().numpy().astype(np.float32))
            # inverse of covariance
            self.precision = torch.from_numpy(group_lasso.precision_).float()

            # Whole-dataset ("background") mean and precision for the
            # relative term.
            self.whole_mean = all_feats.mean(0)
            centered_data = all_feats - self.whole_mean.view(1, -1)
            group_lasso = sklearn.covariance.EmpiricalCovariance(
                assume_centered=False)
            group_lasso.fit(centered_data.cpu().numpy().astype(np.float32))
            self.whole_precision = torch.from_numpy(
                group_lasso.precision_).float()

            self.setup_flag = True
        else:
            pass

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        """Return argmax prediction and max relative-Mahalanobis confidence."""
        logits, features = net(data, return_feature=True)
        pred = logits.argmax(1)

        # Negated background Mahalanobis distance for each sample.
        tensor1 = features.cpu() - self.whole_mean.view(1, -1)
        background_scores = -torch.matmul(
            torch.matmul(tensor1, self.whole_precision), tensor1.t()).diag()

        class_scores = torch.zeros((logits.shape[0], self.num_classes))
        for c in range(self.num_classes):
            tensor = features.cpu() - self.class_mean[c].view(1, -1)
            class_scores[:, c] = -torch.matmul(
                torch.matmul(tensor, self.precision), tensor.t()).diag()
            # Relative term: subtract the background score.
            class_scores[:, c] = class_scores[:, c] - background_scores

        conf = torch.max(class_scores, dim=1)[0]
        return pred, conf
| 3,718 | 38.989247 | 77 | py |
null | OpenOOD-main/openood/postprocessors/rotpred_postprocessor.py | from typing import Any
import torch
import torch.nn as nn
import torch.nn.functional as F
from .base_postprocessor import BasePostprocessor
def kl_div(d1, d2):
    """Compute the base-2 KL-divergence between rows of d1 and d2.

    Entries where d1 is zero contribute nothing, following the convention
    0 * log(0/x) = 0.
    """
    contrib = d1 * torch.log2(d1 / d2)
    safe = torch.where(d1 != 0, contrib, torch.zeros_like(d1))
    return torch.sum(safe, dim=1)
class RotPredPostprocessor(BasePostprocessor):
    """Rotation-prediction OOD scoring.

    Each batch is augmented with its 90/180/270-degree rotations; the
    score is (KL of the class head from uniform) minus (average KL of the
    rotation head from its one-hot targets), so ID samples score higher.
    """
    def __init__(self, config):
        super(RotPredPostprocessor, self).__init__(config)
        self.config = config

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        batch_size = len(data)

        # Stack the batch with its three rotations (k=0 is the identity).
        x_rot = torch.cat([torch.rot90(data, k, [2, 3]) for k in range(4)])
        y_rot = torch.arange(4).repeat_interleave(batch_size).long().cuda()

        logits, logits_rot = net(x_rot, return_rot_logits=True)
        logits = logits[:batch_size]
        preds = logits.argmax(1)

        # https://github.com/hendrycks/ss-ood/blob/8051356592a152614ab7251fd15084dd86eb9104/multiclass_ood/test_auxiliary_ood.py#L177-L208
        num_classes = logits.shape[1]
        uniform_dist = torch.ones_like(logits) / num_classes
        cls_loss = kl_div(uniform_dist, F.softmax(logits, dim=1))

        rot_one_hot = torch.zeros_like(logits_rot).scatter_(
            1,
            y_rot.unsqueeze(1).cuda(), 1)
        rot_loss = kl_div(rot_one_hot, F.softmax(logits_rot, dim=1))
        quarters = torch.chunk(rot_loss, 4, dim=0)
        total_rot_loss = sum(quarters) / 4.0

        # here ID samples will yield larger scores
        scores = cls_loss - total_rot_loss
        return preds, scores
| 2,008 | 33.050847 | 138 | py |
null | OpenOOD-main/openood/postprocessors/rts_postprocessor.py | from typing import Any
import torch
import torch.nn as nn
from .base_postprocessor import BasePostprocessor
class RTSPostprocessor(BasePostprocessor):
    """RTS scoring from a network with a variance head.

    ``ood_score == 'var'`` uses the mean predicted variance as confidence;
    ``'msp'`` uses the maximum softmax probability; anything else falls
    back to 'var' with a warning.
    """
    def __init__(self, config):
        super(RTSPostprocessor, self).__init__(config)
        self.args = self.config.postprocessor.postprocessor_args
        self.ood_score = self.args.ood_score

    def postprocess(self, net: nn.Module, data: Any):
        output, variance = net(data, return_var=True)
        if self.ood_score == 'msp':
            probs = torch.softmax(output, dim=1)
            conf, pred = torch.max(probs, dim=1)
        else:
            if self.ood_score != 'var':
                print('Invalid ood score type, using var instead')
            _, pred = torch.max(torch.softmax(output, dim=1), dim=1)
            conf = torch.mean(variance, dim=1)
        return pred, conf
| 968 | 33.607143 | 68 | py |
null | OpenOOD-main/openood/postprocessors/she_postprocessor.py | from typing import Any
from copy import deepcopy
import torch
import torch.nn as nn
from tqdm import tqdm
from .base_postprocessor import BasePostprocessor
from .info import num_classes_dict
def distance(penultimate, target, metric='inner_product'):
    """Per-row similarity between ``penultimate`` and ``target``.

    Supported metrics: 'inner_product', 'euclidean' (negated, so larger
    is closer), and 'cosine'.  Raises ValueError for anything else.
    """
    if metric == 'inner_product':
        return (penultimate * target).sum(dim=1)
    if metric == 'euclidean':
        return -torch.sqrt(((penultimate - target)**2).sum(dim=1))
    if metric == 'cosine':
        return torch.cosine_similarity(penultimate, target, dim=1)
    raise ValueError('Unknown metric: {}'.format(metric))
class SHEPostprocessor(BasePostprocessor):
    """Simplified Hopfield Energy (SHE).

    Stores, per class, the mean feature of correctly-classified training
    samples; a test sample is scored by its similarity (``args.metric``)
    to the stored pattern of its predicted class.
    """
    def __init__(self, config):
        super(SHEPostprocessor, self).__init__(config)
        self.args = self.config.postprocessor.postprocessor_args
        self.num_classes = num_classes_dict[self.config.dataset.name]
        self.activation_log = None
        self.setup_flag = False

    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
        if self.setup_flag:
            return
        net.eval()
        feats, labels, preds = [], [], []
        with torch.no_grad():
            for batch in tqdm(id_loader_dict['train'],
                              desc='Eval: ',
                              position=0,
                              leave=True):
                data = batch['data'].cuda()
                labels.append(deepcopy(batch['label']))
                logits, features = net(data, return_feature=True)
                feats.append(features.cpu())
                preds.append(logits.argmax(1).cpu())

        feats = torch.cat(feats)
        labels = torch.cat(labels)
        preds = torch.cat(preds)

        # One stored pattern per class: the mean feature over samples that
        # are both labelled and predicted as that class.
        patterns = []
        for cls in range(self.num_classes):
            correct = torch.logical_and(labels == cls, preds == cls)
            patterns.append(feats[correct].mean(0, keepdim=True))
        self.activation_log = torch.cat(patterns).cuda()
        self.setup_flag = True

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        output, feature = net(data, return_feature=True)
        pred = output.argmax(1)
        conf = distance(feature, self.activation_log[pred], self.args.metric)
        return pred, conf
| 2,657 | 35.410959 | 77 | py |
null | OpenOOD-main/openood/postprocessors/ssd_postprocessor.py | import torch
import torch.nn as nn
from .base_postprocessor import BasePostprocessor
from .mds_ensemble_postprocessor import get_MDS_stat
class SSDPostprocessor(BasePostprocessor):
    """SSD: Mahalanobis-style scoring with a single cluster.

    Reuses the MDS statistics machinery with num_classes forced to 1,
    i.e. all training data is treated as one class.
    """
    def __init__(self, config):
        self.config = config
        args = config.postprocessor.postprocessor_args
        self.postprocessor_args = args
        self.feature_type_list = args.feature_type_list
        self.reduce_dim_list = args.reduce_dim_list
        # SSD uses one cluster regardless of the dataset's class count.
        self.num_classes = 1
        self.num_layer = len(self.feature_type_list)
        self.feature_mean, self.feature_prec = None, None

    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
        stats = get_MDS_stat(net, id_loader_dict['train'], self.num_classes,
                             self.feature_type_list, self.reduce_dim_list)
        self.feature_mean, self.feature_prec, self.transform_matrix = stats
| 955 | 37.24 | 74 | py |
null | OpenOOD-main/openood/postprocessors/temp_scaling_postprocessor.py | from typing import Any
import torch
from torch import nn, optim
from tqdm import tqdm
from .base_postprocessor import BasePostprocessor
class TemperatureScalingPostprocessor(BasePostprocessor):
    """A decorator which wraps a model with temperature scaling, internalize
    'temperature' parameter as part of a net model."""
    def __init__(self, config):
        super(TemperatureScalingPostprocessor, self).__init__(config)
        self.config = config
        # learnable scalar temperature T, initialized to 1.5
        self.temperature = nn.Parameter(torch.ones(1, device='cuda') *
                                        1.5)  # initialize T
        self.setup_flag = False

    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
        """Fit the temperature on the ID validation split by minimizing NLL.

        Only ``self.temperature`` is optimized; the network stays frozen
        (its logits are collected under ``torch.no_grad``).
        """
        if not self.setup_flag:
            # make sure that validation set exists
            assert 'val' in id_loader_dict.keys(
            ), 'No validation dataset found!'
            val_dl = id_loader_dict['val']
            nll_criterion = nn.CrossEntropyLoss().cuda()

            logits_list = []  # fit in whole dataset at one time to back prop
            labels_list = []
            with torch.no_grad(
            ):  # fix other params of the net, only learn temperature
                for batch in tqdm(val_dl):
                    data = batch['data'].cuda()
                    labels = batch['label']
                    logits = net(data)
                    logits_list.append(logits)
                    labels_list.append(labels)
            # convert a list of many tensors (each of a batch) to one tensor
            logits = torch.cat(logits_list).cuda()
            labels = torch.cat(labels_list).cuda()

            # calculate NLL before temperature scaling
            # (.item() added for consistency with the "after" report below)
            before_temperature_nll = nll_criterion(logits, labels).item()
            print('Before temperature - NLL: %.3f' % (before_temperature_nll))

            optimizer = optim.LBFGS([self.temperature], lr=0.01, max_iter=50)

            # LBFGS closure re-evaluating the NLL w.r.t. the temperature.
            # Named _nll_closure (not `eval`) to avoid shadowing the builtin.
            def _nll_closure():
                optimizer.zero_grad()
                loss = nll_criterion(self._temperature_scale(logits), labels)
                loss.backward()
                return loss

            optimizer.step(_nll_closure)

            # print learned parameter temperature,
            # calculate NLL after temperature scaling
            after_temperature_nll = nll_criterion(
                self._temperature_scale(logits), labels).item()
            print('Optimal temperature: %.3f' % self.temperature.item())
            print('After temperature - NLL: %.3f' % (after_temperature_nll))
            self.setup_flag = True
        else:
            pass

    def _temperature_scale(self, logits):
        """Divide logits by the learned temperature."""
        return logits / self.temperature

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        """Return argmax prediction and max temperature-scaled softmax prob."""
        logits = net(data)
        logits_ts = self._temperature_scale(logits)
        score = torch.softmax(logits_ts, dim=1)
        conf, pred = torch.max(score, dim=1)
        return pred, conf
| 3,081 | 38.012658 | 80 | py |
null | OpenOOD-main/openood/postprocessors/utils.py | from openood.utils import Config
from .ash_postprocessor import ASHPostprocessor
from .base_postprocessor import BasePostprocessor
from .cider_postprocessor import CIDERPostprocessor
from .conf_branch_postprocessor import ConfBranchPostprocessor
from .cutpaste_postprocessor import CutPastePostprocessor
from .dice_postprocessor import DICEPostprocessor
from .draem_postprocessor import DRAEMPostprocessor
from .dropout_postprocessor import DropoutPostProcessor
from .dsvdd_postprocessor import DSVDDPostprocessor
from .ebo_postprocessor import EBOPostprocessor
from .ensemble_postprocessor import EnsemblePostprocessor
from .gmm_postprocessor import GMMPostprocessor
from .godin_postprocessor import GodinPostprocessor
from .gradnorm_postprocessor import GradNormPostprocessor
from .gram_postprocessor import GRAMPostprocessor
from .kl_matching_postprocessor import KLMatchingPostprocessor
from .knn_postprocessor import KNNPostprocessor
from .maxlogit_postprocessor import MaxLogitPostprocessor
from .mcd_postprocessor import MCDPostprocessor
from .mds_postprocessor import MDSPostprocessor
from .mds_ensemble_postprocessor import MDSEnsemblePostprocessor
from .mos_postprocessor import MOSPostprocessor
from .npos_postprocessor import NPOSPostprocessor
from .odin_postprocessor import ODINPostprocessor
from .opengan_postprocessor import OpenGanPostprocessor
from .openmax_postprocessor import OpenMax
from .patchcore_postprocessor import PatchcorePostprocessor
from .rd4ad_postprocessor import Rd4adPostprocessor
from .react_postprocessor import ReactPostprocessor
from .rmds_postprocessor import RMDSPostprocessor
from .residual_postprocessor import ResidualPostprocessor
from .rotpred_postprocessor import RotPredPostprocessor
from .rankfeat_postprocessor import RankFeatPostprocessor
from .ssd_postprocessor import SSDPostprocessor
from .she_postprocessor import SHEPostprocessor
from .temp_scaling_postprocessor import TemperatureScalingPostprocessor
from .vim_postprocessor import VIMPostprocessor
from .rts_postprocessor import RTSPostprocessor
def get_postprocessor(config: Config):
    """Instantiate the postprocessor named by ``config.postprocessor.name``.

    Raises KeyError if the name is not registered.
    """
    registry = {
        'ash': ASHPostprocessor,
        'cider': CIDERPostprocessor,
        'conf_branch': ConfBranchPostprocessor,
        'msp': BasePostprocessor,
        'ebo': EBOPostprocessor,
        'odin': ODINPostprocessor,
        'mds': MDSPostprocessor,
        'mds_ensemble': MDSEnsemblePostprocessor,
        'rmds': RMDSPostprocessor,
        'gmm': GMMPostprocessor,
        'patchcore': PatchcorePostprocessor,
        'openmax': OpenMax,
        'react': ReactPostprocessor,
        'vim': VIMPostprocessor,
        'gradnorm': GradNormPostprocessor,
        'godin': GodinPostprocessor,
        'gram': GRAMPostprocessor,
        'cutpaste': CutPastePostprocessor,
        'mls': MaxLogitPostprocessor,
        'npos': NPOSPostprocessor,
        'residual': ResidualPostprocessor,
        'klm': KLMatchingPostprocessor,
        'temperature_scaling': TemperatureScalingPostprocessor,
        'ensemble': EnsemblePostprocessor,
        'dropout': DropoutPostProcessor,
        'draem': DRAEMPostprocessor,
        'dsvdd': DSVDDPostprocessor,
        'mos': MOSPostprocessor,
        'mcd': MCDPostprocessor,
        'opengan': OpenGanPostprocessor,
        'knn': KNNPostprocessor,
        'dice': DICEPostprocessor,
        'ssd': SSDPostprocessor,
        'she': SHEPostprocessor,
        'rd4ad': Rd4adPostprocessor,
        'rts': RTSPostprocessor,
        'rotpred': RotPredPostprocessor,
        'rankfeat': RankFeatPostprocessor
    }
    postprocessor_cls = registry[config.postprocessor.name]
    return postprocessor_cls(config)
| 3,632 | 41.244186 | 71 | py |
null | OpenOOD-main/openood/postprocessors/vim_postprocessor.py | from typing import Any
import numpy as np
import torch
import torch.nn as nn
from numpy.linalg import norm, pinv
from scipy.special import logsumexp
from sklearn.covariance import EmpiricalCovariance
from tqdm import tqdm
from .base_postprocessor import BasePostprocessor
class VIMPostprocessor(BasePostprocessor):
    """Virtual-logit Matching (ViM).

    The "virtual logit" is the norm of a feature's projection onto the
    residual subspace of centered ID train features, scaled by ``alpha``
    so that it matches the magnitude of real max logits; the final score
    is energy minus the virtual logit.
    """
    def __init__(self, config):
        super().__init__(config)
        self.args = self.config.postprocessor.postprocessor_args
        self.args_dict = self.config.postprocessor.postprocessor_sweep
        self.dim = self.args.dim  # size of the principal subspace to discard
        self.setup_flag = False

    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
        """Fit the residual subspace and the alpha scaling on ID train data."""
        if not self.setup_flag:
            net.eval()

            with torch.no_grad():
                self.w, self.b = net.get_fc()
                print('Extracting id training feature')
                feature_id_train = []
                for batch in tqdm(id_loader_dict['train'],
                                  desc='Setup: ',
                                  position=0,
                                  leave=True):
                    data = batch['data'].cuda()
                    data = data.float()
                    _, feature = net(data, return_feature=True)
                    feature_id_train.append(feature.cpu().numpy())
                feature_id_train = np.concatenate(feature_id_train, axis=0)
                logit_id_train = feature_id_train @ self.w.T + self.b

            # Center features at the point mapping to zero logits.
            self.u = -np.matmul(pinv(self.w), self.b)
            ec = EmpiricalCovariance(assume_centered=True)
            ec.fit(feature_id_train - self.u)
            eig_vals, eigen_vectors = np.linalg.eig(ec.covariance_)
            # Residual subspace: eigenvectors beyond the top self.dim
            # (sorted by decreasing eigenvalue).
            self.NS = np.ascontiguousarray(
                (eigen_vectors.T[np.argsort(eig_vals * -1)[self.dim:]]).T)

            # alpha matches the virtual logit's scale to the real logits.
            vlogit_id_train = norm(np.matmul(feature_id_train - self.u,
                                             self.NS),
                                   axis=-1)
            self.alpha = logit_id_train.max(
                axis=-1).mean() / vlogit_id_train.mean()
            print(f'{self.alpha=:.4f}')

            self.setup_flag = True
        else:
            pass

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: Any):
        """Score = energy(logits) - alpha * residual norm; larger is more ID."""
        _, feature_ood = net.forward(data, return_feature=True)
        feature_ood = feature_ood.cpu()
        logit_ood = feature_ood @ self.w.T + self.b
        _, pred = torch.max(logit_ood, dim=1)
        energy_ood = logsumexp(logit_ood.numpy(), axis=-1)
        vlogit_ood = norm(np.matmul(feature_ood.numpy() - self.u, self.NS),
                          axis=-1) * self.alpha
        score_ood = -vlogit_ood + energy_ood
        return pred, torch.from_numpy(score_ood)

    def set_hyperparam(self, hyperparam: list):
        self.dim = hyperparam[0]

    def get_hyperparam(self):
        return self.dim
| 2,873 | 36.815789 | 75 | py |
null | OpenOOD-main/openood/preprocessors/__init__.py | from .base_preprocessor import BasePreprocessor
from .cutpaste_preprocessor import CutPastePreprocessor
from .draem_preprocessor import DRAEMPreprocessor
from .pixmix_preprocessor import PixMixPreprocessor
from .test_preprocessor import TestStandardPreProcessor
from .utils import get_preprocessor
| 298 | 41.714286 | 55 | py |
null | OpenOOD-main/openood/preprocessors/augmix_preprocessor.py | import torchvision.transforms as tvs_trans
from openood.utils.config import Config
from .transform import Convert, interpolation_modes, normalization_dict
class AugMixPreprocessor():
    """Training-time preprocessor that applies AugMix augmentation.

    A dataset-specific geometric pipeline (``self.transform``) runs first,
    then torchvision's ``AugMix`` mixes randomly augmented chains of the
    image, and the result is converted to a normalized tensor.  When the
    trainer enables the JSD consistency loss, ``__call__`` instead returns
    the clean view together with two independent AugMix views.
    """
    def __init__(self, config: Config):
        ds_cfg = config.dataset
        self.pre_size = ds_cfg.pre_size
        self.image_size = ds_cfg.image_size
        self.interpolation = interpolation_modes[ds_cfg.interpolation]

        # Per-dataset normalization stats; fall back to 0.5 everywhere
        # when the dataset has no entry in the table.
        stats = normalization_dict.get(ds_cfg.normalization_type)
        if stats is None:
            self.mean = [0.5, 0.5, 0.5]
            self.std = [0.5, 0.5, 0.5]
        else:
            self.mean, self.std = stats[0], stats[1]

        # AugMix hyperparameters come straight from the preprocessor config.
        aug_cfg = config.preprocessor
        self.severity = aug_cfg.severity
        self.mixture_width = aug_cfg.mixture_width
        self.alpha = aug_cfg.alpha
        self.chain_depth = aug_cfg.chain_depth
        self.all_ops = aug_cfg.all_ops
        # Whether the trainer wants (clean, aug1, aug2) triplets for the
        # Jensen-Shannon consistency loss.
        self.jsd = config.trainer.trainer_args.jsd

        self.augmix = tvs_trans.AugMix(severity=self.severity,
                                       mixture_width=self.mixture_width,
                                       chain_depth=self.chain_depth,
                                       alpha=self.alpha,
                                       all_ops=self.all_ops,
                                       interpolation=self.interpolation)
        self.normalize = tvs_trans.Compose([
            tvs_trans.ToTensor(),
            tvs_trans.Normalize(mean=self.mean, std=self.std),
        ])

        # Geometric pipeline applied before AugMix; varies by dataset.
        if 'imagenet' in ds_cfg.name:
            ops = [
                tvs_trans.RandomResizedCrop(self.image_size,
                                            interpolation=self.interpolation),
                tvs_trans.RandomHorizontalFlip(0.5),
            ]
        elif 'aircraft' in ds_cfg.name or 'cub' in ds_cfg.name:
            ops = [
                tvs_trans.Resize(self.pre_size,
                                 interpolation=self.interpolation),
                tvs_trans.RandomCrop(self.image_size),
                tvs_trans.RandomHorizontalFlip(),
            ]
        else:
            ops = [
                Convert('RGB'),
                tvs_trans.Resize(self.pre_size,
                                 interpolation=self.interpolation),
                tvs_trans.CenterCrop(self.image_size),
                tvs_trans.RandomHorizontalFlip(),
                tvs_trans.RandomCrop(self.image_size, padding=4),
            ]
        self.transform = tvs_trans.Compose(ops)

    def setup(self, **kwargs):
        pass

    def __call__(self, image):
        if not self.jsd:
            return self.normalize(self.augmix(self.transform(image)))
        # JSD mode: one clean crop, augmented twice independently.
        clean = self.transform(image)
        view1 = self.normalize(self.augmix(clean))
        view2 = self.normalize(self.augmix(clean))
        return self.normalize(clean), view1, view2
| 3,075 | 41.136986 | 79 | py |
null | OpenOOD-main/openood/preprocessors/base_preprocessor.py | import torchvision.transforms as tvs_trans
from openood.utils.config import Config
from .transform import Convert, interpolation_modes, normalization_dict
class BasePreprocessor():
    """Standard training-time transform: crop/flip augmentation followed
    by tensor conversion and per-dataset normalization."""
    def __init__(self, config: Config):
        ds_cfg = config.dataset
        self.pre_size = ds_cfg.pre_size
        self.image_size = ds_cfg.image_size
        self.interpolation = interpolation_modes[ds_cfg.interpolation]

        # Per-dataset normalization stats with a 0.5 fallback.
        stats = normalization_dict.get(ds_cfg.normalization_type)
        if stats is None:
            self.mean = [0.5, 0.5, 0.5]
            self.std = [0.5, 0.5, 0.5]
        else:
            self.mean, self.std = stats[0], stats[1]

        # Dataset-specific augmentation head.
        if 'imagenet' in ds_cfg.name:
            ops = [
                tvs_trans.RandomResizedCrop(self.image_size,
                                            interpolation=self.interpolation),
                tvs_trans.RandomHorizontalFlip(0.5),
            ]
        elif 'aircraft' in ds_cfg.name or 'cub' in ds_cfg.name:
            ops = [
                tvs_trans.Resize(self.pre_size,
                                 interpolation=self.interpolation),
                tvs_trans.RandomCrop(self.image_size),
                tvs_trans.RandomHorizontalFlip(),
                tvs_trans.ColorJitter(brightness=32. / 255., saturation=0.5),
            ]
        else:
            ops = [
                Convert('RGB'),
                tvs_trans.Resize(self.pre_size,
                                 interpolation=self.interpolation),
                tvs_trans.CenterCrop(self.image_size),
                tvs_trans.RandomHorizontalFlip(),
                tvs_trans.RandomCrop(self.image_size, padding=4),
            ]
        # Shared tail: PIL image -> normalized tensor.
        ops += [
            tvs_trans.ToTensor(),
            tvs_trans.Normalize(mean=self.mean, std=self.std),
        ]
        self.transform = tvs_trans.Compose(ops)

    def setup(self, **kwargs):
        pass

    def __call__(self, image):
        return self.transform(image)
| 2,377 | 40.719298 | 79 | py |
null | OpenOOD-main/openood/preprocessors/cider_preprocessor.py | import torchvision.transforms as tvs_trans
from openood.utils.config import Config
from .transform import Convert, interpolation_modes, normalization_dict
class CiderPreprocessor():
    """SimCLR-style two-view augmentation used for CIDER training.

    Builds a strong color/crop augmentation pipeline and wraps it in
    ``TwoCropTransform`` so every sample yields two independent views.
    """
    def __init__(self, config: Config):
        ds_cfg = config.dataset
        self.pre_size = ds_cfg.pre_size
        self.image_size = ds_cfg.image_size
        self.interpolation = interpolation_modes[ds_cfg.interpolation]

        # Per-dataset normalization stats with a 0.5 fallback.
        stats = normalization_dict.get(ds_cfg.normalization_type)
        if stats is None:
            self.mean = [0.5, 0.5, 0.5]
            self.std = [0.5, 0.5, 0.5]
        else:
            self.mean, self.std = stats[0], stats[1]

        # Only the crop head differs between ImageNet and the rest:
        # crop scale range and the RGB-conversion step.
        if 'imagenet' in ds_cfg.name:
            head = [
                tvs_trans.RandomResizedCrop(size=self.image_size,
                                            scale=(0.4, 1.),
                                            interpolation=self.interpolation),
            ]
        else:
            head = [
                Convert('RGB'),
                tvs_trans.RandomResizedCrop(size=self.image_size,
                                            scale=(0.2, 1.),
                                            interpolation=self.interpolation),
            ]
        # Shared color-augmentation tail.
        tail = [
            tvs_trans.RandomHorizontalFlip(),
            tvs_trans.RandomApply(
                [tvs_trans.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
            tvs_trans.RandomGrayscale(p=0.2),
            tvs_trans.ToTensor(),
            tvs_trans.Normalize(mean=self.mean, std=self.std),
        ]
        self.transform = TwoCropTransform(tvs_trans.Compose(head + tail))

    def setup(self, **kwargs):
        pass

    def __call__(self, image):
        return self.transform(image)
class TwoCropTransform:
    """Wrap a transform so each call yields two independent views.

    Because the wrapped transform is applied twice, any randomness inside
    it produces two different augmentations of the same input.
    """
    def __init__(self, transform):
        self.transform = transform

    def __call__(self, x):
        first = self.transform(x)
        second = self.transform(x)
        return [first, second]
| 2,416 | 37.365079 | 78 | py |
null | OpenOOD-main/openood/preprocessors/csi_preprocessor.py | import torchvision.transforms as tvs_trans
from openood.utils.config import Config
from .transform import Convert, interpolation_modes, normalization_dict
class CSIPreprocessor():
    """Deterministic-leaning train transform used by CSI.

    Resizes/crops and normalizes only; the flip/jitter augmentations used
    by the base preprocessor are intentionally absent here (they were
    disabled upstream) — presumably CSI supplies its own shifting
    transformations elsewhere; confirm against the CSI trainer.
    """
    def __init__(self, config: Config):
        ds_cfg = config.dataset
        self.pre_size = ds_cfg.pre_size
        self.image_size = ds_cfg.image_size
        self.interpolation = interpolation_modes[ds_cfg.interpolation]

        # Per-dataset normalization stats with a 0.5 fallback.
        stats = normalization_dict.get(ds_cfg.normalization_type)
        if stats is None:
            self.mean = [0.5, 0.5, 0.5]
            self.std = [0.5, 0.5, 0.5]
        else:
            self.mean, self.std = stats[0], stats[1]

        # Dataset-specific resize/crop head (no random flips here).
        if 'imagenet' in ds_cfg.name:
            ops = [
                tvs_trans.RandomResizedCrop(self.image_size,
                                            interpolation=self.interpolation),
            ]
        elif 'aircraft' in ds_cfg.name or 'cub' in ds_cfg.name:
            ops = [
                tvs_trans.Resize(self.pre_size,
                                 interpolation=self.interpolation),
                tvs_trans.RandomCrop(self.image_size),
            ]
        else:
            ops = [
                Convert('RGB'),
                tvs_trans.Resize(self.pre_size,
                                 interpolation=self.interpolation),
                tvs_trans.CenterCrop(self.image_size),
            ]
        # Shared tail: PIL image -> normalized tensor.
        ops += [
            tvs_trans.ToTensor(),
            tvs_trans.Normalize(mean=self.mean, std=self.std),
        ]
        self.transform = tvs_trans.Compose(ops)

    def setup(self, **kwargs):
        pass

    def __call__(self, image):
        return self.transform(image)
| 2,331 | 40.642857 | 79 | py |