Search is not available for this dataset
repo stringlengths 2 152 ⌀ | file stringlengths 15 239 | code stringlengths 0 58.4M | file_length int64 0 58.4M | avg_line_length float64 0 1.81M | max_line_length int64 0 12.7M | extension_type stringclasses 364 values |
|---|---|---|---|---|---|---|
null | OpenOOD-main/openood/preprocessors/cutout_preprocessor.py | import numpy as np
import torch
import torchvision.transforms as tvs_trans
from openood.utils.config import Config
from .transform import Convert, interpolation_modes, normalization_dict
class CutoutPreprocessor():
    """Training-time preprocessor: standard per-dataset augmentation followed
    by Cutout occlusion (DeVries & Taylor, 2017)."""
    def __init__(self, config: Config):
        dataset_name = config.dataset.name
        self.pre_size = config.dataset.pre_size
        self.image_size = config.dataset.image_size
        self.interpolation = interpolation_modes[config.dataset.interpolation]
        norm_type = config.dataset.normalization_type
        # Fall back to generic statistics for unknown normalization types.
        self.mean, self.std = normalization_dict.get(
            norm_type, ([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]))
        self.n_holes = config.preprocessor.n_holes
        self.length = config.preprocessor.length
        # Dataset-specific geometric/photometric ops come first; the shared
        # tail (tensorize, normalize, cutout) is appended afterwards.
        if 'imagenet' in dataset_name:
            ops = [
                tvs_trans.RandomResizedCrop(self.image_size,
                                            interpolation=self.interpolation),
                tvs_trans.RandomHorizontalFlip(0.5),
            ]
        elif 'aircraft' in dataset_name or 'cub' in dataset_name:
            ops = [
                tvs_trans.Resize(self.pre_size,
                                 interpolation=self.interpolation),
                tvs_trans.RandomCrop(self.image_size),
                tvs_trans.RandomHorizontalFlip(),
                tvs_trans.ColorJitter(brightness=32. / 255., saturation=0.5),
            ]
        else:
            ops = [
                Convert('RGB'),
                tvs_trans.Resize(self.pre_size,
                                 interpolation=self.interpolation),
                tvs_trans.CenterCrop(self.image_size),
                tvs_trans.RandomHorizontalFlip(),
                tvs_trans.RandomCrop(self.image_size, padding=4),
            ]
        ops += [
            tvs_trans.ToTensor(),
            tvs_trans.Normalize(mean=self.mean, std=self.std),
            Cutout(n_holes=self.n_holes, length=self.length),
        ]
        self.transform = tvs_trans.Compose(ops)

    def setup(self, **kwargs):
        """No per-sample setup is needed for this preprocessor."""
        pass

    def __call__(self, image):
        """Apply the composed transform to a PIL image."""
        return self.transform(image)
class Cutout(object):
    """Randomly zero out one or more square patches of a tensor image.

    Args:
        n_holes (int): number of square patches to mask out per image.
        length (int): side length (in pixels) of each square patch.
    """
    def __init__(self, n_holes, length):
        self.n_holes = n_holes
        self.length = length

    def __call__(self, img):
        """Mask out ``n_holes`` squares of side ``length`` from ``img``.

        Args:
            img (Tensor): image tensor of shape (C, H, W).
        Returns:
            Tensor: the image with the masked regions set to zero.
        """
        height, width = img.size(1), img.size(2)
        mask = np.ones((height, width), np.float32)
        half = self.length // 2
        for _ in range(self.n_holes):
            # Patch centers may fall near a border; clipping then shrinks
            # the visible part of the patch.
            center_y = np.random.randint(height)
            center_x = np.random.randint(width)
            top = np.clip(center_y - half, 0, height)
            bottom = np.clip(center_y + half, 0, height)
            left = np.clip(center_x - half, 0, width)
            right = np.clip(center_x + half, 0, width)
            mask[top:bottom, left:right] = 0.
        return img * torch.from_numpy(mask).expand_as(img)
| 3,803 | 35.228571 | 79 | py |
null | OpenOOD-main/openood/preprocessors/cutpaste_preprocessor.py | import math
import random
import torch
import torchvision.transforms as tvs_trans
from .base_preprocessor import BasePreprocessor
from .transform import Convert, normalization_dict
class CutPastePreprocessor(BasePreprocessor):
    """CutPaste preprocessor: crops a random rectangular patch from the image
    and pastes it at a different random location, returning the (clean,
    augmented) pair used for self-supervised anomaly-detection training."""
    def __init__(
            self, config,
            split):  # modify, preprocessors unify to only passing in "config"
        self.args = config.preprocessor.preprocessor_args
        self.area_ratio = self.args.area_ratio
        self.aspect_ratio = self.args.aspect_ratio
        dataset_name = config.dataset.name.split('_')[0]
        image_size = config.dataset.image_size
        pre_size = config.dataset.pre_size
        # Fall back to ImageNet statistics for unknown datasets.
        if dataset_name in normalization_dict.keys():
            mean = normalization_dict[dataset_name][0]
            std = normalization_dict[dataset_name][1]
        else:
            mean = [0.485, 0.456, 0.406]
            std = [0.229, 0.224, 0.225]
        # PIL-level augmentation applied before the cut/paste operation.
        self.before_preprocessor_transform = tvs_trans.Compose([
            Convert('RGB'),
            tvs_trans.Resize(
                pre_size, interpolation=tvs_trans.InterpolationMode.BILINEAR),
            tvs_trans.CenterCrop(image_size),
            tvs_trans.RandomHorizontalFlip(),
            tvs_trans.RandomCrop(image_size, padding=4),
        ])
        # Tensor conversion + normalization applied to both returned images.
        self.after_preprocessor_transform = tvs_trans.Compose([
            tvs_trans.ToTensor(),
            tvs_trans.Normalize(mean=mean, std=std),
        ])

    def __call__(self, img):
        """Return ``(image, augmented)`` normalized tensors for a PIL image."""
        img = self.before_preprocessor_transform(img)
        # Fix: PIL's Image.size is (width, height); the previous code read
        # them in the opposite order (harmless only because the preceding
        # CenterCrop/RandomCrop make the image square).
        w, h = img.size
        # Patch area: 2%-15% of the image area.
        # NOTE(review): self.area_ratio is configured but unused here;
        # presumably it should drive these bounds -- confirm against config.
        ratio_area = random.uniform(0.02, 0.15) * w * h
        # Sample the patch aspect ratio in log space so that r and 1/r are
        # equally likely.
        log_ratio = torch.log(
            torch.tensor((self.aspect_ratio, 1 / self.aspect_ratio)))
        aspect = torch.exp(
            torch.empty(1).uniform_(log_ratio[0], log_ratio[1])).item()
        cut_w = int(round(math.sqrt(ratio_area * aspect)))
        cut_h = int(round(math.sqrt(ratio_area / aspect)))
        # Source location of the patch.
        from_location_h = int(random.uniform(0, h - cut_h))
        from_location_w = int(random.uniform(0, w - cut_w))
        box = [
            from_location_w, from_location_h, from_location_w + cut_w,
            from_location_h + cut_h
        ]
        patch = img.crop(box)
        # if self.colorJitter:
        #     patch = self.colorJitter(patch)
        # Destination location for the pasted patch.
        to_location_h = int(random.uniform(0, h - cut_h))
        to_location_w = int(random.uniform(0, w - cut_w))
        insert_box = [
            to_location_w, to_location_h, to_location_w + cut_w,
            to_location_h + cut_h
        ]
        augmented = img.copy()
        augmented.paste(patch, insert_box)
        img = self.after_preprocessor_transform(img)
        augmented = self.after_preprocessor_transform(augmented)
        return img, augmented
| 2,875 | 32.44186 | 78 | py |
null | OpenOOD-main/openood/preprocessors/draem_preprocessor.py | import glob
import math
import os
import cv2
import imgaug.augmenters as iaa
import numpy as np
import torch
from .base_preprocessor import BasePreprocessor
class DRAEMPreprocessor(BasePreprocessor):
    """DRAEM preprocessor: synthesizes anomalous training images by blending
    augmented patches from an external anomaly-source image set into the
    input under a thresholded Perlin-noise mask.

    Train items yield the clean image, the augmented (pseudo-anomalous)
    image, the anomaly mask, and a has-anomaly flag; test items yield the
    image (plus the ground-truth mask when ``config.use_gt`` is set).
    """
    def __init__(self, config):
        self.config = config
        self.args = self.config.preprocessor.preprocessor_args
        # Square working resolution (H == W == image_size).
        self.resize_shape = [self.args.image_size, self.args.image_size]
        self.anomaly_source_paths = sorted(
            glob.glob(self.args.anomaly_source + '/*/*.jpg'))
        # Pool of photometric/geometric augmenters; 3 distinct ones are
        # sampled per call in randAugmenter().
        self.augmenters = [
            iaa.GammaContrast((0.5, 2.0), per_channel=True),
            iaa.MultiplyAndAddToBrightness(mul=(0.8, 1.2), add=(-30, 30)),
            iaa.pillike.EnhanceSharpness(),
            iaa.AddToHueAndSaturation((-50, 50), per_channel=True),
            iaa.Solarize(0.5, threshold=(32, 128)),
            iaa.Posterize(),
            iaa.Invert(),
            iaa.pillike.Autocontrast(),
            iaa.pillike.Equalize(),
            iaa.Affine(rotate=(-45, 45))
        ]
        # Random rotation used on both images and noise masks.
        self.rot = iaa.Sequential([iaa.Affine(rotate=(-90, 90))])
        # if config.evaluator.name == 'ood':
        #     assert config.use_gt == False
        # if config.evaluator.name == 'draem':
        #     assert config.use_gt == True

    def transform_test_image(self, image_path, mask_path):
        """Load a test image (and, if ``use_gt``, its ground-truth mask) as
        float32 CHW arrays scaled to [0, 1]."""
        image = cv2.imread(image_path, cv2.IMREAD_COLOR)
        if self.resize_shape is not None:
            # cv2.resize expects dsize as (width, height).
            image = cv2.resize(image,
                               dsize=(self.resize_shape[1],
                                      self.resize_shape[0]))
        image = image / 255.0
        image = np.array(image).reshape(
            (image.shape[0], image.shape[1], 3)).astype(np.float32)
        image = np.transpose(image, (2, 0, 1))
        mask = image
        if self.config.use_gt:
            if mask_path is not None:
                mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
            else:
                # Normal ('good') samples get an all-zero mask.
                mask = np.zeros((image.shape[0], image.shape[1]))
            if self.resize_shape is not None:
                mask = cv2.resize(mask,
                                  dsize=(self.resize_shape[1],
                                         self.resize_shape[0]))
            mask = mask / 255.0
            mask = np.array(mask).reshape(
                (mask.shape[0], mask.shape[1], 1)).astype(np.float32)
            mask = np.transpose(mask, (2, 0, 1))
        return image, mask

    def get_test_item(self, path):
        """Build a test sample from an image path; an MVTec-style layout is
        assumed ('good' folder = normal, other folders have GT masks)."""
        sample = {}
        dir_path, file_name = os.path.split(path)
        base_dir = os.path.basename(dir_path)
        if base_dir == 'good':
            image, mask = self.transform_test_image(path, None)
        else:
            # GT masks live under ../../ground_truth/<defect_type>/.
            mask_path = os.path.join(dir_path, '../../ground_truth/')
            mask_path = os.path.join(mask_path, base_dir)
            mask_file_name = file_name.split('.')[0] + '_mask.png'
            mask_path = os.path.join(mask_path, mask_file_name)
            image, mask = self.transform_test_image(path, mask_path)
        if self.config.use_gt:
            sample['image'] = image
            sample['mask'] = mask
            return sample
        else:
            return image

    def randAugmenter(self):
        """Compose 3 distinct augmenters sampled from the pool."""
        aug_ind = np.random.choice(np.arange(len(self.augmenters)),
                                   3,
                                   replace=False)
        aug = iaa.Sequential([
            self.augmenters[aug_ind[0]], self.augmenters[aug_ind[1]],
            self.augmenters[aug_ind[2]]
        ])
        return aug

    def augment_image(self, image, anomaly_source_path):
        """Blend an augmented anomaly-source image into ``image`` under a
        thresholded Perlin-noise mask.

        Returns ``(augmented_image, mask, has_anomaly)``; with probability
        ~0.5 the image is returned unchanged with an empty mask.
        """
        aug = self.randAugmenter()
        perlin_scale = 6
        min_perlin_scale = 0
        anomaly_source_img = cv2.imread(anomaly_source_path)
        anomaly_source_img = cv2.resize(anomaly_source_img,
                                        dsize=(self.resize_shape[1],
                                               self.resize_shape[0]))
        anomaly_img_augmented = aug(image=anomaly_source_img)
        # Random power-of-two Perlin resolution per axis.
        perlin_scalex = 2**(torch.randint(min_perlin_scale, perlin_scale,
                                          (1, )).numpy()[0])
        perlin_scaley = 2**(torch.randint(min_perlin_scale, perlin_scale,
                                          (1, )).numpy()[0])
        perlin_noise = rand_perlin_2d_np(
            (self.resize_shape[0], self.resize_shape[1]),
            (perlin_scalex, perlin_scaley))
        perlin_noise = self.rot(image=perlin_noise)
        threshold = 0.5
        # Binary anomaly mask from the thresholded noise.
        perlin_thr = np.where(perlin_noise > threshold,
                              np.ones_like(perlin_noise),
                              np.zeros_like(perlin_noise))
        perlin_thr = np.expand_dims(perlin_thr, axis=2)
        img_thr = anomaly_img_augmented.astype(np.float32) * perlin_thr / 255.0
        # Opacity of the pasted anomaly texture.
        beta = torch.rand(1).numpy()[0] * 0.8
        augmented_image = image * (1 - perlin_thr) + (
            1 - beta) * img_thr + beta * image * (perlin_thr)
        no_anomaly = torch.rand(1).numpy()[0]
        if no_anomaly > 0.5:
            # Keep the sample normal: empty mask, label 0.
            image = image.astype(np.float32)
            return image, np.zeros_like(
                perlin_thr, dtype=np.float32), np.array([0.0],
                                                        dtype=np.float32)
        else:
            augmented_image = augmented_image.astype(np.float32)
            msk = (perlin_thr).astype(np.float32)
            augmented_image = msk * augmented_image + (1 - msk) * image
            has_anomaly = 1.0
            # An all-empty mask means the sample is effectively normal.
            if np.sum(msk) == 0:
                has_anomaly = 0.0
            return augmented_image, msk, np.array([has_anomaly],
                                                  dtype=np.float32)

    def transform_train_image(self, image_path, anomaly_source_path):
        """Load a training image, optionally rotate it, and produce the
        synthetic-anomaly variant plus its mask/label (all CHW float32)."""
        image = cv2.imread(image_path)
        image = cv2.resize(image,
                           dsize=(self.resize_shape[1], self.resize_shape[0]))
        # Randomly rotate ~30% of the training images.
        do_aug_orig = torch.rand(1).numpy()[0] > 0.7
        if do_aug_orig:
            image = self.rot(image=image)
        image = np.array(image).reshape(
            (image.shape[0], image.shape[1], image.shape[2])).astype(
                np.float32) / 255.0
        augmented_image, anomaly_mask, has_anomaly = self.augment_image(
            image, anomaly_source_path)
        # HWC -> CHW for all image-like outputs.
        augmented_image = np.transpose(augmented_image, (2, 0, 1))
        image = np.transpose(image, (2, 0, 1))
        anomaly_mask = np.transpose(anomaly_mask, (2, 0, 1))
        return image, augmented_image, anomaly_mask, has_anomaly

    def get_train_item(self, path):
        """Build a training sample dict using a random anomaly source."""
        sample = {}
        anomaly_source_idx = torch.randint(0, len(self.anomaly_source_paths),
                                           (1, )).item()
        image, augmented_image, anomaly_mask, has_anomaly = \
            self.transform_train_image(
                path, self.anomaly_source_paths[anomaly_source_idx])
        sample['image'] = image
        sample['anomaly_mask'] = anomaly_mask
        sample['augmented_image'] = augmented_image
        sample['has_anomaly'] = has_anomaly
        return sample

    def __call__(self, img):
        """Dispatch on the split name set via :meth:`setup`; note the ``img``
        argument is ignored -- data is reloaded from ``self.path``."""
        if self.name.endswith('_train'):
            sample = self.get_train_item(self.path)
        else:
            sample = self.get_test_item(self.path)
        return sample

    # some setup so that the preprocessor can get the gt map
    def setup(self, **kwargs):
        self.path = kwargs['path']
        self.name = kwargs['name']

    # append transforms that will apply after the preprocessor
    def concat_transform(self, post_preprocessor_transform=None):
        self.post_preprocessor_transform = post_preprocessor_transform
        return self
def lerp_np(x, y, w):
    """Elementwise linear interpolation between ``x`` and ``y`` by ``w``."""
    return x + w * (y - x)
def generate_fractal_noise_2d(shape, res, octaves=1, persistence=0.5):
    """Sum ``octaves`` octaves of Perlin noise, doubling the frequency and
    scaling the amplitude by ``persistence`` at each octave."""
    total = np.zeros(shape)
    frequency, amplitude = 1, 1
    for _ in range(octaves):
        octave = generate_perlin_noise_2d(
            shape, (frequency * res[0], frequency * res[1]))
        total += amplitude * octave
        frequency *= 2
        amplitude *= persistence
    return total
def generate_perlin_noise_2d(shape, res):
    """Generate one octave of 2D Perlin noise as a NumPy array.

    ``shape`` is the output (H, W) and ``res`` the number of noise periods
    per axis; each shape dim should be divisible by the matching res entry.
    Uses the global NumPy RNG for the lattice gradients.
    """
    def f(t):
        # Perlin's quintic smoothstep: 6t^5 - 15t^4 + 10t^3.
        return 6 * t**5 - 15 * t**4 + 10 * t**3

    delta = (res[0] / shape[0], res[1] / shape[1])
    d = (shape[0] // res[0], shape[1] // res[1])
    # Per-pixel coordinates within the containing lattice cell, in [0, 1).
    grid = np.mgrid[0:res[0]:delta[0], 0:res[1]:delta[1]].transpose(1, 2,
                                                                    0) % 1
    # Gradients: one random unit vector per lattice corner, broadcast over
    # the pixels of each cell.
    angles = 2 * np.pi * np.random.rand(res[0] + 1, res[1] + 1)
    gradients = np.dstack((np.cos(angles), np.sin(angles)))
    g00 = gradients[0:-1, 0:-1].repeat(d[0], 0).repeat(d[1], 1)
    g10 = gradients[1:, 0:-1].repeat(d[0], 0).repeat(d[1], 1)
    g01 = gradients[0:-1, 1:].repeat(d[0], 0).repeat(d[1], 1)
    g11 = gradients[1:, 1:].repeat(d[0], 0).repeat(d[1], 1)
    # Ramps: dot products of corner gradients with offsets to each corner.
    n00 = np.sum(grid * g00, 2)
    n10 = np.sum(np.dstack((grid[:, :, 0] - 1, grid[:, :, 1])) * g10, 2)
    n01 = np.sum(np.dstack((grid[:, :, 0], grid[:, :, 1] - 1)) * g01, 2)
    n11 = np.sum(np.dstack((grid[:, :, 0] - 1, grid[:, :, 1] - 1)) * g11, 2)
    # Interpolation: bilinear blend of the four ramps with faded weights.
    t = f(grid)
    n0 = n00 * (1 - t[:, :, 0]) + t[:, :, 0] * n10
    n1 = n01 * (1 - t[:, :, 0]) + t[:, :, 0] * n11
    return np.sqrt(2) * ((1 - t[:, :, 1]) * n0 + t[:, :, 1] * n1)
def rand_perlin_2d_np(shape,
                      res,
                      fade=lambda t: 6 * t**5 - 15 * t**4 + 10 * t**3):
    """Generate one octave of 2D Perlin noise as a NumPy array.

    Args:
        shape: output (H, W); each dim should be divisible by the matching
            entry of ``res``.
        res: number of noise periods along each axis.
        fade: smoothstep-style interpolation curve applied to cell offsets.
    """
    delta = (res[0] / shape[0], res[1] / shape[1])
    d = (shape[0] // res[0], shape[1] // res[1])
    # Per-pixel coordinates within the containing lattice cell, in [0, 1).
    grid = np.mgrid[0:res[0]:delta[0], 0:res[1]:delta[1]].transpose(1, 2,
                                                                    0) % 1
    # Random unit gradient vector at each lattice corner.
    angles = 2 * math.pi * np.random.rand(res[0] + 1, res[1] + 1)
    gradients = np.stack((np.cos(angles), np.sin(angles)), axis=-1)
    # Broadcast corner gradients over the pixels of each cell.
    tile_grads = lambda slice1, slice2: np.repeat(np.repeat(
        gradients[slice1[0]:slice1[1], slice2[0]:slice2[1]], d[0], axis=0),
                                                  d[1],
                                                  axis=1)
    # Dot product between a corner gradient field and the offset vector to
    # that corner.
    dot = lambda grad, shift: (np.stack(
        (grid[:shape[0], :shape[1], 0] + shift[0], grid[:shape[0], :shape[1], 1
                                                        ] + shift[1]),
        axis=-1) * grad[:shape[0], :shape[1]]).sum(axis=-1)
    n00 = dot(tile_grads([0, -1], [0, -1]), [0, 0])
    n10 = dot(tile_grads([1, None], [0, -1]), [-1, 0])
    n01 = dot(tile_grads([0, -1], [1, None]), [0, -1])
    n11 = dot(tile_grads([1, None], [1, None]), [-1, -1])
    t = fade(grid[:shape[0], :shape[1]])
    # Bilinear interpolation of the four corner contributions.
    return math.sqrt(2) * lerp_np(lerp_np(n00, n10, t[..., 0]),
                                  lerp_np(n01, n11, t[..., 0]), t[..., 1])
def rand_perlin_2d(shape,
                   res,
                   fade=lambda t: 6 * t**5 - 15 * t**4 + 10 * t**3):
    """Generate one octave of 2D Perlin noise as a torch tensor.

    Args:
        shape: output (H, W); each dim should be divisible by the matching
            entry of ``res``.
        res: number of noise periods along each axis.
        fade: smoothstep-style interpolation curve applied to cell offsets.
    """
    delta = (res[0] / shape[0], res[1] / shape[1])
    d = (shape[0] // res[0], shape[1] // res[1])
    # Per-pixel coordinates within the containing lattice cell, in [0, 1).
    grid = torch.stack(torch.meshgrid(torch.arange(0, res[0], delta[0]),
                                      torch.arange(0, res[1], delta[1])),
                       dim=-1) % 1
    # Random unit gradient vector at each lattice corner.
    angles = 2 * math.pi * torch.rand(res[0] + 1, res[1] + 1)
    gradients = torch.stack((torch.cos(angles), torch.sin(angles)), dim=-1)

    def corner_grads(rows, cols):
        # Broadcast each lattice-corner gradient over its cell's pixels.
        sub = gradients[rows[0]:rows[1], cols[0]:cols[1]]
        return sub.repeat_interleave(d[0], 0).repeat_interleave(d[1], 1)

    def corner_dot(grad, shift):
        # Dot product of the gradient field with offsets to that corner.
        offsets = torch.stack(
            (grid[:shape[0], :shape[1], 0] + shift[0],
             grid[:shape[0], :shape[1], 1] + shift[1]),
            dim=-1)
        return (offsets * grad[:shape[0], :shape[1]]).sum(dim=-1)

    n00 = corner_dot(corner_grads([0, -1], [0, -1]), [0, 0])
    n10 = corner_dot(corner_grads([1, None], [0, -1]), [-1, 0])
    n01 = corner_dot(corner_grads([0, -1], [1, None]), [0, -1])
    n11 = corner_dot(corner_grads([1, None], [1, None]), [-1, -1])
    t = fade(grid[:shape[0], :shape[1]])
    # Bilinear interpolation of the four corner contributions.
    interp_x0 = torch.lerp(n00, n10, t[..., 0])
    interp_x1 = torch.lerp(n01, n11, t[..., 0])
    return math.sqrt(2) * torch.lerp(interp_x0, interp_x1, t[..., 1])
def rand_perlin_2d_octaves(shape, res, octaves=1, persistence=0.5):
    """Sum ``octaves`` octaves of torch Perlin noise, doubling the frequency
    and scaling the amplitude by ``persistence`` at each octave."""
    total = torch.zeros(shape)
    frequency, amplitude = 1, 1
    for _ in range(octaves):
        octave = rand_perlin_2d(shape,
                                (frequency * res[0], frequency * res[1]))
        total += amplitude * octave
        frequency *= 2
        amplitude *= persistence
    return total
| 12,479 | 38.745223 | 79 | py |
null | OpenOOD-main/openood/preprocessors/pixmix_preprocessor.py | import os
import numpy as np
import torch
import torchvision.transforms as tvs_trans
from PIL import Image as Image
from PIL import ImageEnhance, ImageOps
from .base_preprocessor import BasePreprocessor
from .transform import Convert, interpolation_modes, normalization_dict
# Shorter-side resize applied to the mixing-set picture (before a random
# crop to the final image size), keyed by dataset.
resize_list = {
    'osr': 32,
    'mnist': 32,
    'cifar10': 36,
    'cifar100': 36,
    'tin': 72,
    'imagenet': 256,
    'imagenet200': 256,
    'aircraft': 512,
    'cub': 512,
}  # mnist entry added for this repo; the original PixMix code uses 224 for imagenet, 256 is used here
class PixMixPreprocessor(BasePreprocessor):
    """PixMix preprocessor: applies basic augmentation, then repeatedly
    blends the image with random 'mixing set' pictures via additive or
    multiplicative mixing, and finally normalizes the result."""
    def __init__(self, config):
        self.pre_size = config.dataset.pre_size
        self.dataset_name = config.dataset.name.split('_')[0]
        self.image_size = config.dataset.image_size
        self.interpolation = interpolation_modes[config.dataset.interpolation]
        normalization_type = config.dataset.normalization_type
        # Fall back to generic statistics for unknown normalization types.
        if normalization_type in normalization_dict.keys():
            self.mean = normalization_dict[normalization_type][0]
            self.std = normalization_dict[normalization_type][1]
        else:
            self.mean = [0.5, 0.5, 0.5]
            self.std = [0.5, 0.5, 0.5]
        self.normalize = tvs_trans.Normalize(self.mean, self.std)
        self.tensorize = tvs_trans.ToTensor()
        self.args = config.preprocessor.preprocessor_args
        # Dataset-dependent basic augmentation applied before mixing.
        if 'imagenet' in config.dataset.name:
            self.transform = tvs_trans.Compose([
                tvs_trans.RandomResizedCrop(self.image_size,
                                            interpolation=self.interpolation),
                tvs_trans.RandomHorizontalFlip(0.5),
            ])
        elif 'aircraft' in config.dataset.name or 'cub' in config.dataset.name:
            self.transform = tvs_trans.Compose([
                tvs_trans.Resize(self.pre_size,
                                 interpolation=self.interpolation),
                tvs_trans.RandomCrop(self.image_size),
                tvs_trans.RandomHorizontalFlip(),
            ])
        else:
            self.transform = tvs_trans.Compose([
                Convert('RGB'),
                tvs_trans.Resize(self.pre_size,
                                 interpolation=self.interpolation),
                tvs_trans.CenterCrop(self.image_size),
                tvs_trans.RandomHorizontalFlip(),
                tvs_trans.RandomCrop(self.image_size, padding=4),
            ])
        # Resize/crop for the mixing picture so its shape matches the input.
        self.mixing_set_transform = tvs_trans.Compose([
            tvs_trans.Resize(resize_list[self.dataset_name]),
            tvs_trans.RandomCrop(self.image_size)
        ])
        # Text file listing one mixing-set image path per line.
        with open(self.args.mixing_set_dir, 'r') as f:
            self.mixing_list = f.readlines()

    def __call__(self, image):
        """Pick a random mixing picture and return the PixMix-ed tensor."""
        # ? need to add random seed ?
        rnd_idx = np.random.choice(len(self.mixing_list))
        mixing_pic_dir = self.mixing_list[rnd_idx].strip('\n')
        mixing_pic = Image.open(
            os.path.join('./data/images_classic/',
                         mixing_pic_dir)).convert('RGB')
        return self.pixmix(image, mixing_pic)

    def augment_input(self, image):
        """Apply one randomly chosen augmentation op at the configured
        severity; works on a copy of ``image``."""
        aug_list = augmentations_all if self.args.all_ops else augmentations
        op = np.random.choice(aug_list)
        return op(image.copy(), self.args.aug_severity, self.image_size)

    def pixmix(self, orig, mixing_pic):
        """Run the PixMix loop: up to ``k`` rounds of add/multiply mixing
        between the (possibly augmented) image and the mixing picture."""
        mixings = [add, multiply]
        orig = self.transform(orig)
        # do basic augmentation first
        mixing_pic = self.mixing_set_transform(mixing_pic)
        # Start from either an augmented copy or the plain image.
        if np.random.random() < 0.5:
            mixed = self.tensorize(self.augment_input(orig))
        else:
            mixed = self.tensorize(orig)
        for _ in range(np.random.randint(self.args.k + 1)):
            # Mix with either a fresh augmentation of the original or the
            # mixing picture.
            if np.random.random() < 0.5:
                aug_image_copy = self.tensorize(self.augment_input(orig))
            else:
                aug_image_copy = self.tensorize(mixing_pic)
            mixed_op = np.random.choice(mixings)
            mixed = mixed_op(mixed, aug_image_copy, self.args.beta)
            # Keep intermediate results in the valid [0, 1] range.
            mixed = torch.clip(mixed, 0, 1)
        return self.normalize(mixed)
"""Base augmentations operators."""
#########################################################
#################### AUGMENTATIONS ######################
#########################################################
def int_parameter(level, maxval):
    """Scale ``maxval`` by ``level``/10 and truncate to an int.

    Args:
        level: operation level, expected in [0, PARAMETER_MAX (=10)].
        maxval: maximum value the operation can take.
    Returns:
        int: ``maxval`` scaled by ``level / 10``, truncated toward zero.
    """
    return int(maxval * level / 10)
def float_parameter(level, maxval):
    """Scale ``maxval`` by ``level``/10 as a float.

    Args:
        level: operation level, expected in [0, PARAMETER_MAX (=10)].
        maxval: maximum value the operation can take.
    Returns:
        float: ``maxval`` scaled by ``level / 10``.
    """
    return maxval * float(level) / 10.
def sample_level(n):
    """Draw a random severity uniformly from [0.1, n)."""
    return np.random.uniform(0.1, n)
def autocontrast(pil_img, _, IMAGE_SIZE):
    # Maximize image contrast; the severity level is ignored.
    return ImageOps.autocontrast(pil_img)


def equalize(pil_img, _, IMAGE_SIZE):
    # Equalize the image histogram; the severity level is ignored.
    return ImageOps.equalize(pil_img)


def posterize(pil_img, level, IMAGE_SIZE):
    # Reduce bits per channel; a higher level keeps fewer bits.
    level = int_parameter(sample_level(level), 4)
    return ImageOps.posterize(pil_img, 4 - level)


def rotate(pil_img, level, IMAGE_SIZE):
    # Rotate by up to +/-30 degrees, direction chosen at random.
    degrees = int_parameter(sample_level(level), 30)
    if np.random.uniform() > 0.5:
        degrees = -degrees
    return pil_img.rotate(degrees, resample=Image.BILINEAR)


def solarize(pil_img, level, IMAGE_SIZE):
    # Invert pixels above a threshold; a higher level lowers the threshold.
    level = int_parameter(sample_level(level), 256)
    return ImageOps.solarize(pil_img, 256 - level)


def shear_x(pil_img, level, IMAGE_SIZE):
    # Horizontal shear of up to +/-0.3, direction chosen at random.
    level = float_parameter(sample_level(level), 0.3)
    if np.random.uniform() > 0.5:
        level = -level
    return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE),
                             Image.AFFINE, (1, level, 0, 0, 1, 0),
                             resample=Image.BILINEAR)


def shear_y(pil_img, level, IMAGE_SIZE):
    # Vertical shear of up to +/-0.3, direction chosen at random.
    level = float_parameter(sample_level(level), 0.3)
    if np.random.uniform() > 0.5:
        level = -level
    return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE),
                             Image.AFFINE, (1, 0, 0, level, 1, 0),
                             resample=Image.BILINEAR)


def translate_x(pil_img, level, IMAGE_SIZE):
    # Horizontal translation of up to a third of the image width.
    level = int_parameter(sample_level(level), IMAGE_SIZE / 3)
    if np.random.random() > 0.5:
        level = -level
    return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE),
                             Image.AFFINE, (1, 0, level, 0, 1, 0),
                             resample=Image.BILINEAR)


def translate_y(pil_img, level, IMAGE_SIZE):
    # Vertical translation of up to a third of the image height.
    level = int_parameter(sample_level(level), IMAGE_SIZE / 3)
    if np.random.random() > 0.5:
        level = -level
    return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE),
                             Image.AFFINE, (1, 0, 0, 0, 1, level),
                             resample=Image.BILINEAR)


# operation that overlaps with ImageNet-C's test set
def color(pil_img, level, IMAGE_SIZE):
    level = float_parameter(sample_level(level), 1.8) + 0.1
    return ImageEnhance.Color(pil_img).enhance(level)


# operation that overlaps with ImageNet-C's test set
def contrast(pil_img, level, IMAGE_SIZE):
    level = float_parameter(sample_level(level), 1.8) + 0.1
    return ImageEnhance.Contrast(pil_img).enhance(level)


# operation that overlaps with ImageNet-C's test set
def brightness(pil_img, level, IMAGE_SIZE):
    level = float_parameter(sample_level(level), 1.8) + 0.1
    return ImageEnhance.Brightness(pil_img).enhance(level)


# operation that overlaps with ImageNet-C's test set
def sharpness(pil_img, level, IMAGE_SIZE):
    level = float_parameter(sample_level(level), 1.8) + 0.1
    return ImageEnhance.Sharpness(pil_img).enhance(level)


# Augmentation ops that do not overlap with ImageNet-C corruptions.
augmentations = [
    autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y,
    translate_x, translate_y
]

# Full op set, including the four ops that overlap with ImageNet-C.
augmentations_all = [
    autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y,
    translate_x, translate_y, color, contrast, brightness, sharpness
]
#########################################################
######################## MIXINGS ########################
#########################################################
def get_ab(beta):
    """Sample PixMix mixing coefficients ``(a, b)`` from Beta distributions.

    With probability 0.5 both coefficients lie in (0, 1); otherwise ``a``
    exceeds 1 and ``b`` is negative, so the pair straddles the identity mix.
    """
    if np.random.random() < 0.5:
        return (np.float32(np.random.beta(beta, 1)),
                np.float32(np.random.beta(1, beta)))
    a = 1 + np.float32(np.random.beta(1, beta))
    b = -np.float32(np.random.beta(1, beta))
    return a, b
def add(img1, img2, beta):
    """Additively mix two [0, 1] images in the symmetric [-1, 1] domain."""
    a, b = get_ab(beta)
    scaled1 = img1 * 2 - 1
    scaled2 = img2 * 2 - 1
    mixed = a * scaled1 + b * scaled2
    return (mixed + 1) / 2
def multiply(img1, img2, beta):
    """Geometrically mix two [0, 1] images using exponents ``a`` and ``b``."""
    a, b = get_ab(beta)
    base1 = img1 * 2
    base2 = img2 * 2
    # Clip the second base away from zero so the (possibly negative) power
    # stays finite.
    mixed = (base1**a) * (base2.clip(1e-37)**b)
    return mixed / 2
########################################
##### EXTRA MIXIMGS (EXPREIMENTAL) #####
########################################
def invert(img):
    """Return the photographic negative of a [0, 1] image."""
    return 1.0 - img
def screen(img1, img2, beta):
    """Screen blend: multiply the inverted images, then invert the result."""
    inv_mixed = multiply(invert(img1), invert(img2), beta)
    return invert(inv_mixed)
def overlay(img1, img2, beta):
    """Overlay blend: multiply in one region, screen in the other; which
    side of the 0.5 threshold gets which blend is itself randomized."""
    multiplied = multiply(img1, img2, beta)
    screened = screen(img1, img2, beta)
    cond = img1 < 0.5 if np.random.random() < 0.5 else img1 > 0.5
    return torch.where(cond, multiplied, screened)
def darken_or_lighten(img1, img2, beta):
    """Pick, per pixel, either the darker or the lighter of the two images
    (direction chosen at random); ``beta`` is accepted for API symmetry."""
    if np.random.random() < 0.5:
        keep_first = img1 < img2
    else:
        keep_first = img1 > img2
    return torch.where(keep_first, img1, img2)
def swap_channel(img1, img2, beta):
    """Copy one randomly chosen channel of ``img2`` into ``img1`` in place;
    ``beta`` is accepted for API symmetry with the other mixings."""
    chan = np.random.randint(3)
    img1[chan] = img2[chan]
    return img1
| 10,054 | 31.022293 | 79 | py |
null | OpenOOD-main/openood/preprocessors/randaugment_preprocessor.py | import torchvision.transforms as tvs_trans
from openood.utils.config import Config
from .transform import Convert, interpolation_modes, normalization_dict
class RandAugmentPreprocessor():
    """Training preprocessor applying torchvision's RandAugment (``n`` ops
    at magnitude ``m``) on top of dataset-specific basic augmentation."""
    def __init__(self, config: Config):
        self.pre_size = config.dataset.pre_size
        self.image_size = config.dataset.image_size
        self.interpolation = interpolation_modes[config.dataset.interpolation]
        normalization_type = config.dataset.normalization_type
        # Fall back to generic statistics for unknown normalization types.
        if normalization_type in normalization_dict.keys():
            self.mean = normalization_dict[normalization_type][0]
            self.std = normalization_dict[normalization_type][1]
        else:
            self.mean = [0.5, 0.5, 0.5]
            self.std = [0.5, 0.5, 0.5]
        # RandAugment hyperparameters: number of ops and magnitude.
        self.n = config.preprocessor.n
        self.m = config.preprocessor.m
        if 'imagenet' in config.dataset.name:
            self.transform = tvs_trans.Compose([
                tvs_trans.RandomResizedCrop(self.image_size,
                                            interpolation=self.interpolation),
                tvs_trans.RandomHorizontalFlip(0.5),
                tvs_trans.RandAugment(num_ops=self.n,
                                      magnitude=self.m,
                                      interpolation=self.interpolation),
                tvs_trans.ToTensor(),
                tvs_trans.Normalize(mean=self.mean, std=self.std),
            ])
        elif 'aircraft' in config.dataset.name or 'cub' in config.dataset.name:
            self.transform = tvs_trans.Compose([
                tvs_trans.Resize(self.pre_size,
                                 interpolation=self.interpolation),
                tvs_trans.RandomCrop(self.image_size),
                tvs_trans.RandomHorizontalFlip(),
                tvs_trans.RandAugment(num_ops=self.n,
                                      magnitude=self.m,
                                      interpolation=self.interpolation),
                tvs_trans.ToTensor(),
                tvs_trans.Normalize(mean=self.mean, std=self.std),
            ])
        else:
            # Note: RandAugment runs before resize/crop in this branch.
            self.transform = tvs_trans.Compose([
                Convert('RGB'),
                tvs_trans.RandAugment(num_ops=self.n,
                                      magnitude=self.m,
                                      interpolation=self.interpolation),
                tvs_trans.Resize(self.pre_size,
                                 interpolation=self.interpolation),
                tvs_trans.CenterCrop(self.image_size),
                tvs_trans.RandomHorizontalFlip(),
                tvs_trans.RandomCrop(self.image_size, padding=4),
                tvs_trans.ToTensor(),
                tvs_trans.Normalize(mean=self.mean, std=self.std),
            ])

    def setup(self, **kwargs):
        # No per-sample setup is needed for this preprocessor.
        pass

    def __call__(self, image):
        # Apply the composed transform to a PIL image.
        return self.transform(image)
| 2,881 | 42.014925 | 79 | py |
null | OpenOOD-main/openood/preprocessors/test_preprocessor.py | import torchvision.transforms as tvs_trans
from openood.utils.config import Config
from .base_preprocessor import BasePreprocessor
from .transform import Convert
class TestStandardPreProcessor(BasePreprocessor):
    """Deterministic resize / center-crop / normalize pipeline used for the
    test and validation splits."""
    def __init__(self, config: Config):
        super().__init__(config)
        steps = [
            Convert('RGB'),
            tvs_trans.Resize(self.pre_size, interpolation=self.interpolation),
            tvs_trans.CenterCrop(self.image_size),
            tvs_trans.ToTensor(),
            tvs_trans.Normalize(mean=self.mean, std=self.std),
        ]
        self.transform = tvs_trans.Compose(steps)
| 703 | 34.2 | 78 | py |
null | OpenOOD-main/openood/preprocessors/transform.py | import torchvision.transforms as tvs_trans
normalization_dict = {
'cifar10': [[0.4914, 0.4822, 0.4465], [0.2470, 0.2435, 0.2616]],
'cifar100': [[0.5071, 0.4867, 0.4408], [0.2675, 0.2565, 0.2761]],
'imagenet': [[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]],
'imagenet200': [[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]],
'covid': [[0.4907, 0.4907, 0.4907], [0.2697, 0.2697, 0.2697]],
'aircraft': [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]],
'cub': [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]],
'cars': [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]],
}
interpolation_modes = {
'nearest': tvs_trans.InterpolationMode.NEAREST,
'bilinear': tvs_trans.InterpolationMode.BILINEAR,
}
class Convert:
    """Callable transform converting a PIL image to the given color mode."""
    def __init__(self, mode='RGB'):
        self.mode = mode

    def __call__(self, image):
        # Delegate to the image's own convert() (PIL Image.convert).
        return image.convert(self.mode)
# More transform classes shall be written here
| 885 | 29.551724 | 69 | py |
null | OpenOOD-main/openood/preprocessors/utils.py | from openood.utils import Config
from .base_preprocessor import BasePreprocessor
from .cider_preprocessor import CiderPreprocessor
from .csi_preprocessor import CSIPreprocessor
from .cutpaste_preprocessor import CutPastePreprocessor
from .draem_preprocessor import DRAEMPreprocessor
from .augmix_preprocessor import AugMixPreprocessor
from .pixmix_preprocessor import PixMixPreprocessor
from .randaugment_preprocessor import RandAugmentPreprocessor
from .cutout_preprocessor import CutoutPreprocessor
from .test_preprocessor import TestStandardPreProcessor
def get_preprocessor(config: Config, split):
    """Factory returning the preprocessor instance for a dataset split.

    Train splits use the method-specific preprocessor named in the config
    (unknown names raise KeyError); other splits fall back to the standard
    test preprocessor when the method has no dedicated one.
    """
    train_preprocessors = {
        'base': BasePreprocessor,
        'draem': DRAEMPreprocessor,
        'cutpaste': CutPastePreprocessor,
        'augmix': AugMixPreprocessor,
        'pixmix': PixMixPreprocessor,
        'randaugment': RandAugmentPreprocessor,
        'cutout': CutoutPreprocessor,
        'csi': CSIPreprocessor,
        'cider': CiderPreprocessor,
    }
    test_preprocessors = {
        'base': TestStandardPreProcessor,
        'draem': DRAEMPreprocessor,
        'cutpaste': CutPastePreprocessor,
    }
    name = config.preprocessor.name
    if split == 'train':
        chosen = train_preprocessors[name]
    else:
        chosen = test_preprocessors.get(name, test_preprocessors['base'])
    return chosen(config)
| 1,403 | 34.1 | 71 | py |
null | OpenOOD-main/openood/recorders/__init__.py | from .utils import get_recorder
| 32 | 15.5 | 31 | py |
null | OpenOOD-main/openood/recorders/ad_recorder.py | import os
import time
from pathlib import Path
import torch
from .base_recorder import BaseRecorder
class ADRecorder(BaseRecorder):
    """Recorder for anomaly-detection training: reports per-epoch image-level
    AUROC and keeps the checkpoint with the best AUROC seen so far."""
    def __init__(self, config) -> None:
        super(ADRecorder, self).__init__(config)
        # Track the best image-level AUROC and the epoch that produced it.
        self.best_epoch_idx = 0
        self.best_result = 0
        self.begin_time = time.time()

    def report(self, train_metrics, test_metrics):
        """Print one epoch's elapsed time, train loss, and AUROC (%)."""
        print('Epoch {:03d} | Time {:5d}s | Train Loss {:.4f} | '
              'Auroc {:.4f}\n'.format(train_metrics['epoch_idx'],
                                      int(time.time() - self.begin_time),
                                      train_metrics['loss'],
                                      100.0 * test_metrics['image_auroc']),
              flush=True)

    def save_model(self, net, test_metrics):
        """Save checkpoints; refresh the best checkpoint when AUROC improves."""
        if self.config.recorder.save_all_models:
            torch.save(
                net.state_dict(),
                os.path.join(
                    self.output_dir,
                    'model_epoch{}.ckpt'.format(test_metrics['epoch_idx'])))
        # enter only if the AUROC matches or beats the best seen so far
        if test_metrics['image_auroc'] >= self.best_result:
            # delete the superseded best-model checkpoint
            old_fname = 'best_epoch{}_auroc{:.4f}.ckpt'.format(
                self.best_epoch_idx, self.best_result)
            old_pth = os.path.join(self.output_dir, old_fname)
            Path(old_pth).unlink(missing_ok=True)
            # update the best model
            self.best_epoch_idx = test_metrics['epoch_idx']
            self.best_result = test_metrics['image_auroc']
            torch.save(net.state_dict(),
                       os.path.join(self.output_dir, 'best.ckpt'))
            # also keep a copy whose name records the epoch and AUROC
            save_fname = 'best_epoch{}_auroc{:.4f}.ckpt'.format(
                self.best_epoch_idx, self.best_result)
            save_pth = os.path.join(self.output_dir, save_fname)
            torch.save(net.state_dict(), save_pth)

    def summary(self):
        """Print the best AUROC (%) and its epoch after training completes."""
        print('Training Completed!\n '
              'Best Auroc: {:.4f} at epoch {:d}\n'.format(
                  100.0 * self.best_result, self.best_epoch_idx),
              flush=True)
| 2,143 | 34.733333 | 76 | py |
null | OpenOOD-main/openood/recorders/arpl_recorder.py | import copy
import os
import time
import torch
from .base_recorder import BaseRecorder
class ARPLRecorder(BaseRecorder):
    """Recorder for ARPL training: reports losses of the feature net (and
    the optional GAN components) and checkpoints both the backbone and the
    ARPL criterion."""
    def __init__(self, config) -> None:
        super().__init__(config)

    def report(self, train_metrics, val_metrics):
        """Print one epoch's metrics; includes D/G losses when present."""
        if 'lossD' in train_metrics.keys():
            print('\nEpoch {:03d} | Time {:5d}s | D Loss {:.4f} | '
                  'G Loss {:.4f} | Train Loss {:.4f} | '
                  'Val Loss {:.3f} | Val Acc {:.2f}'.format(
                      (train_metrics['epoch_idx']),
                      int(time.time() - self.begin_time),
                      train_metrics['lossD'], train_metrics['lossG'],
                      train_metrics['loss'], val_metrics['loss'],
                      100.0 * val_metrics['acc']),
                  flush=True)
        else:
            print('\nEpoch {:03d} | Time {:5d}s | Train Loss {:.4f} | '
                  'Val Loss {:.3f} | Val Acc {:.2f}'.format(
                      (train_metrics['epoch_idx']),
                      int(time.time() - self.begin_time),
                      train_metrics['loss'], val_metrics['loss'],
                      100.0 * val_metrics['acc']),
                  flush=True)

    def save_model(self, net, val_metrics):
        """Checkpoint netF and the criterion; keep the best-accuracy pair.

        Fix: per-epoch checkpoints previously wrote to ``self.save_dir``,
        which is never defined anywhere (BaseRecorder only sets
        ``output_dir``), so enabling ``save_all_models`` raised an
        AttributeError; they now go to ``self.output_dir`` like every other
        checkpoint.
        """
        netF = net['netF']
        criterion = net['criterion']
        epoch_idx = val_metrics['epoch_idx']
        # Unwrap DataParallel/DistributedDataParallel modules when present.
        try:
            netF_wts = copy.deepcopy(netF.module.state_dict())
            criterion_wts = copy.deepcopy(criterion.module.state_dict())
        except AttributeError:
            netF_wts = copy.deepcopy(netF.state_dict())
            criterion_wts = copy.deepcopy(criterion.state_dict())
        if self.config.recorder.save_all_models:
            save_pth = os.path.join(self.output_dir,
                                    'epoch-{}_NetF.ckpt'.format(epoch_idx))
            torch.save(netF_wts, save_pth)
            save_pth = os.path.join(
                self.output_dir, 'epoch-{}_criterion.ckpt'.format(epoch_idx))
            torch.save(criterion_wts, save_pth)
        # enter only if better (or equal) accuracy occurs
        if val_metrics['acc'] >= self.best_acc:
            # update the best model
            self.best_epoch_idx = val_metrics['epoch_idx']
            self.best_acc = val_metrics['acc']
            torch.save(netF_wts, os.path.join(self.output_dir,
                                              'best_NetF.ckpt'))
            torch.save(criterion_wts,
                       os.path.join(self.output_dir, 'best_criterion.ckpt'))
| 2,532 | 37.378788 | 76 | py |
null | OpenOOD-main/openood/recorders/base_recorder.py | import os
import time
from pathlib import Path
import torch
class BaseRecorder:
    """Default recorder: prints per-epoch progress and keeps checkpoints,
    tracking the best model by validation accuracy."""

    def __init__(self, config) -> None:
        self.config = config
        self.best_acc = 0.0
        self.best_epoch_idx = 0
        self.begin_time = time.time()
        self.output_dir = config.output_dir

    def report(self, train_metrics, val_metrics):
        """Print one progress line for the finished epoch."""
        elapsed = int(time.time() - self.begin_time)
        print('\nEpoch {:03d} | Time {:5d}s | Train Loss {:.4f} | '
              'Val Loss {:.3f} | Val Acc {:.2f}'.format(
                  (train_metrics['epoch_idx']), elapsed,
                  train_metrics['loss'],
                  val_metrics['loss'], 100.0 * val_metrics['acc']),
              flush=True)

    def save_model(self, net, val_metrics):
        """Checkpoint the network; refresh best/last snapshots as needed."""
        # DataParallel wraps the real model under .module
        try:
            state_dict = net.module.state_dict()
        except AttributeError:
            state_dict = net.state_dict()

        if self.config.recorder.save_all_models:
            per_epoch = 'model_epoch{}.ckpt'.format(val_metrics['epoch_idx'])
            torch.save(state_dict, os.path.join(self.output_dir, per_epoch))

        # a tie (>=) still replaces the stored best
        if val_metrics['acc'] >= self.best_acc:
            # drop the previous descriptive best checkpoint
            stale = 'best_epoch{}_acc{:.4f}.ckpt'.format(
                self.best_epoch_idx, self.best_acc)
            Path(os.path.join(self.output_dir, stale)).unlink(missing_ok=True)

            self.best_epoch_idx = val_metrics['epoch_idx']
            self.best_acc = val_metrics['acc']
            # fixed-name copy plus a descriptive copy
            torch.save(state_dict, os.path.join(self.output_dir, 'best.ckpt'))
            fresh = 'best_epoch{}_acc{:.4f}.ckpt'.format(
                self.best_epoch_idx, self.best_acc)
            torch.save(state_dict, os.path.join(self.output_dir, fresh))

        # keep a descriptive copy of the final-epoch weights
        if val_metrics['epoch_idx'] == self.config.optimizer.num_epochs:
            last = 'last_epoch{}_acc{:.4f}.ckpt'.format(
                val_metrics['epoch_idx'], val_metrics['acc'])
            torch.save(state_dict, os.path.join(self.output_dir, last))

    def summary(self):
        """Print the best accuracy achieved and when it occurred."""
        print('Training Completed! '
              'Best accuracy: {:.2f} '
              'at epoch {:d}'.format(100 * self.best_acc,
                                     self.best_epoch_idx),
              flush=True)
| 2,475 | 34.884058 | 79 | py |
null | OpenOOD-main/openood/recorders/cider_recorder.py | import os
import time
from pathlib import Path
import torch
class CiderRecorder:
    """Recorder for CIDER training: no validation accuracy is produced, so
    the best checkpoint is tracked by (lowest) training loss."""

    def __init__(self, config) -> None:
        self.config = config
        self.best_loss = float('inf')
        self.best_epoch_idx = 0
        self.begin_time = time.time()
        self.output_dir = config.output_dir

    def report(self, train_metrics, val_metrics):
        """Print one progress line (training loss only)."""
        elapsed = int(time.time() - self.begin_time)
        print('\nEpoch {:03d} | Time {:5d}s | Train Loss {:.4f}'.format(
            (train_metrics['epoch_idx']), elapsed,
            train_metrics['loss']),
              flush=True)

    def save_model(self, net, train_metrics):
        """Checkpoint; refresh the best (lowest-loss) and last snapshots."""
        # DataParallel wraps the real model under .module
        try:
            state_dict = net.module.state_dict()
        except AttributeError:
            state_dict = net.state_dict()

        if self.config.recorder.save_all_models:
            per_epoch = 'model_epoch{}.ckpt'.format(train_metrics['epoch_idx'])
            torch.save(state_dict, os.path.join(self.output_dir, per_epoch))

        # a tie (<=) still replaces the stored best
        if train_metrics['loss'] <= self.best_loss:
            stale = 'best_epoch{}_loss{:.4f}.ckpt'.format(
                self.best_epoch_idx, self.best_loss)
            Path(os.path.join(self.output_dir, stale)).unlink(missing_ok=True)

            self.best_epoch_idx = train_metrics['epoch_idx']
            self.best_loss = train_metrics['loss']
            # fixed-name copy plus a descriptive copy
            torch.save(state_dict, os.path.join(self.output_dir, 'best.ckpt'))
            fresh = 'best_epoch{}_loss{:.4f}.ckpt'.format(
                self.best_epoch_idx, self.best_loss)
            torch.save(state_dict, os.path.join(self.output_dir, fresh))

        # keep a descriptive copy of the final-epoch weights
        if train_metrics['epoch_idx'] == self.config.optimizer.num_epochs:
            last = 'last_epoch{}_loss{:.4f}.ckpt'.format(
                train_metrics['epoch_idx'], train_metrics['loss'])
            torch.save(state_dict, os.path.join(self.output_dir, last))

    def summary(self):
        """Print the best (lowest) loss and the epoch it occurred."""
        print('Training Completed! '
              'Best loss: {:.4f} '
              'at epoch {:d}'.format(self.best_loss, self.best_epoch_idx),
              flush=True)
| 2,372 | 34.41791 | 78 | py |
null | OpenOOD-main/openood/recorders/cutpaste_recorder.py | import os
import time
from pathlib import Path
import torch
class CutpasteRecorder:
    """Recorder for CutPaste training: tracks the best image-level AUROC."""

    def __init__(self, config) -> None:
        self.config = config
        self.best_auroc = 0.0
        self.best_epoch_idx = 0
        self.begin_time = time.time()
        self.output_dir = config.output_dir

    def report(self, train_metrics, val_metrics):
        """Print one progress line for the finished epoch."""
        elapsed = int(time.time() - self.begin_time)
        print('\nEpoch {:03d} | Time {:5d}s | Train Loss {:.4f} | '
              'AUROC {:.3f}'.format((val_metrics['epoch_idx']),
                                    elapsed,
                                    train_metrics['loss'],
                                    val_metrics['image_auroc']),
              flush=True)

    def save_model(self, net, val_metrics):
        """Checkpoint the net; keep fixed-name and descriptive best copies."""
        if self.config.recorder.save_all_models:
            per_epoch = 'model_epoch{}.ckpt'.format(val_metrics['epoch_idx'])
            torch.save(net.state_dict(),
                       os.path.join(self.output_dir, per_epoch))

        # a tie (>=) still refreshes the stored best
        if val_metrics['image_auroc'] >= self.best_auroc:
            stale = 'best_epoch{}_auroc{}.ckpt'.format(self.best_epoch_idx,
                                                       self.best_auroc)
            Path(os.path.join(self.output_dir, stale)).unlink(missing_ok=True)

            self.best_epoch_idx = val_metrics['epoch_idx']
            self.best_auroc = val_metrics['image_auroc']
            torch.save(net.state_dict(),
                       os.path.join(self.output_dir, 'best.ckpt'))
            fresh = 'best_epoch{}_auroc{}.ckpt'.format(self.best_epoch_idx,
                                                       self.best_auroc)
            torch.save(net.state_dict(),
                       os.path.join(self.output_dir, fresh))

    def summary(self):
        """Print the best AUROC achieved and the epoch it occurred."""
        print('Training Completed! '
              'Best auroc: {:.2f} '
              'at epoch {:d}'.format(self.best_auroc, self.best_epoch_idx),
              flush=True)
| 2,071 | 34.118644 | 75 | py |
null | OpenOOD-main/openood/recorders/draem_recorder.py | import os
from pathlib import Path
import torch
from .ad_recorder import ADRecorder
class DRAEMRecorder(ADRecorder):
    """Recorder for DRAEM anomaly-detection training.

    DRAEM has two sub-networks -- a reconstructive ('generative') model and
    a discriminative segmentation model -- checkpointed as a pair of files
    sharing one base name: '<name>.ckpt' and '<name>_seg.ckpt'.

    NOTE(review): ``self.best_result``/``self.best_epoch_idx``/``self.output_dir``
    are assumed to be initialized by the inherited ADRecorder -- confirm there.
    """
    def __init__(self, config) -> None:
        super(DRAEMRecorder, self).__init__(config)
        # metric key used to pick the best checkpoint (comes from config;
        # presumably an AUROC-style higher-is-better metric -- see the >=
        # comparison in save_model)
        self.best_model_basis = self.config.recorder.best_model_basis
        # run tag embedded into every checkpoint filename
        self.run_name = ('draem_test_' + str(self.config.optimizer.lr) + '_' +
                         str(self.config.optimizer.num_epochs) + '_bs' +
                         str(self.config.dataset.train.batch_size) + '_' +
                         self.config.dataset.name)

    def save_model(self, net, test_metrics):
        """Save both sub-networks; keep best-so-far and latest snapshots.

        Args:
            net: dict with 'generative' and 'discriminative' modules.
            test_metrics: dict with 'epoch_idx' and the metric named by
                ``self.best_model_basis``.
        """
        if self.config.recorder.save_all_models:
            # optional per-epoch checkpoint pair
            save_fname = self.run_name + '_model_epoch{}'.format(
                test_metrics['epoch_idx'])
            save_pth = os.path.join(self.output_dir, save_fname)
            torch.save(net['generative'].state_dict(), save_pth + '.ckpt')
            torch.save(net['discriminative'].state_dict(),
                       save_pth + '_seg.ckpt')
        # enter only when the tracked metric improves (ties included)
        if test_metrics[self.best_model_basis] >= self.best_result:
            # delete the superseded best checkpoint pair
            old_fname = self.run_name + '_best_epoch{}_loss{:.4f}'.format(
                self.best_epoch_idx, self.best_result)
            old_pth = os.path.join(self.output_dir, old_fname)
            Path(old_pth + '.ckpt').unlink(missing_ok=True)
            Path(old_pth + '_seg.ckpt').unlink(missing_ok=True)
            # update the best model
            self.best_epoch_idx = test_metrics['epoch_idx']
            self.best_result = test_metrics[self.best_model_basis]
            save_fname = self.run_name + '_best_epoch{}_loss{:.4f}'.format(
                self.best_epoch_idx, self.best_result)
            save_pth = os.path.join(self.output_dir, save_fname)
            torch.save(net['generative'].state_dict(), save_pth + '.ckpt')
            torch.save(net['discriminative'].state_dict(),
                       save_pth + '_seg.ckpt')
        # always write a 'latest' snapshot pair at the final epoch
        if test_metrics['epoch_idx'] == self.config.optimizer.num_epochs:
            save_fname = self.run_name + '_latest_checkpoint'
            save_pth = os.path.join(self.output_dir, save_fname)
            torch.save(net['generative'].state_dict(), save_pth + '.ckpt')
            torch.save(net['discriminative'].state_dict(),
                       save_pth + '_seg.ckpt')
| 2,416 | 41.403509 | 78 | py |
null | OpenOOD-main/openood/recorders/dsvdd_recorder.py | import os
import time
from pathlib import Path
import torch
class DCAERecorder:
    """Recorder for the convolutional autoencoder used in DSVDD
    pretraining: keeps only the best-ROC-AUC checkpoint."""

    def __init__(self, config) -> None:
        self.config = config
        self.output_dir = config.output_dir
        self.best_roc_auc = 0.0
        self.best_epoch_idx = 0
        self.begin_time = time.time()

    def report(self, train_metrics, test_metrics):
        """Print one progress line for the finished epoch."""
        elapsed = int(time.time() - self.begin_time)
        print('epoch [{}],time:{:5d}s,loss:{:.4f},roc_auc:{:.2f}'.format(
            train_metrics['epoch_idx'], elapsed,
            train_metrics['epoch_loss'], test_metrics['roc_auc']))

    def save_model(self, net, test_metrics):
        """Keep only the best-ROC-AUC checkpoint (ties refresh it)."""
        if test_metrics['roc_auc'] < self.best_roc_auc:
            return
        # remove the previous best checkpoint
        stale = 'AE_best_epoch{}_roc_auc{}.pth'.format(self.best_epoch_idx,
                                                       self.best_roc_auc)
        Path(os.path.join(self.output_dir, stale)).unlink(missing_ok=True)
        # record the new best and save it
        self.best_epoch_idx = test_metrics['epoch_idx']
        self.best_roc_auc = test_metrics['roc_auc']
        fresh = 'AE_best_epoch{}_roc_auc{}.pth'.format(self.best_epoch_idx,
                                                       self.best_roc_auc)
        torch.save(net.state_dict(), os.path.join(self.output_dir, fresh))

    def summary(self):
        """Print the best ROC-AUC achieved and the epoch it occurred."""
        print('Training Completed! '
              'Best Roc_auc: {:.2f}%,'
              'at epoch {:d}'.format(100 * self.best_roc_auc,
                                     self.best_epoch_idx),
              flush=True)
class DSVDDRecorder:
    """Recorder for Deep SVDD training: keeps only the best-ROC-AUC
    checkpoint."""

    def __init__(self, config) -> None:
        self.config = config
        self.output_dir = config.output_dir
        self.best_roc_auc = 0.0
        self.best_epoch_idx = 0
        self.begin_time = time.time()

    def report(self, train_metrics, test_metrics):
        """Print one progress line for the finished epoch."""
        elapsed = int(time.time() - self.begin_time)
        print('epoch [{}],time:{:5d}s,loss:{:.4f},roc_auc:{:.2f}'.format(
            train_metrics['epoch_idx'], elapsed,
            train_metrics['epoch_loss'], test_metrics['roc_auc']))

    def save_model(self, net, test_metrics):
        """Keep only the best-ROC-AUC checkpoint (ties refresh it)."""
        if test_metrics['roc_auc'] < self.best_roc_auc:
            return
        # remove the previous best checkpoint
        stale = 'DSVDD_best_epoch{}_roc_auc{}.pth'.format(
            self.best_epoch_idx, self.best_roc_auc)
        Path(os.path.join(self.output_dir, stale)).unlink(missing_ok=True)
        # record the new best and save it
        self.best_epoch_idx = test_metrics['epoch_idx']
        self.best_roc_auc = test_metrics['roc_auc']
        fresh = 'DSVDD_best_epoch{}_roc_auc{}.pth'.format(
            self.best_epoch_idx, self.best_roc_auc)
        torch.save(net.state_dict(), os.path.join(self.output_dir, fresh))

    def summary(self):
        """Print the best ROC-AUC achieved and the epoch it occurred."""
        print('Training Completed! '
              'Best Roc_auc: {:.2f}%,'
              'at epoch {:d}'.format(100 * self.best_roc_auc,
                                     self.best_epoch_idx),
              flush=True)
| 3,220 | 36.453488 | 75 | py |
null | OpenOOD-main/openood/recorders/kdad_recorder.py | import os
import time
from pathlib import Path
import torch
class KdadRecorder:
    """Recorder for KDAD (knowledge-distillation anomaly detection).

    Tracks the best test ROC-AUC of the cloner network and saves its
    checkpoints ('Clone_*' files).

    NOTE(review): the config is accessed both attribute-style
    (``self.config.recorder``) and dict-style (``self.config['output_dir']``)
    -- presumably the project's Config supports both; confirm.
    """
    def __init__(self, config) -> None:
        self.config = config
        self.output_dir = config.output_dir
        self.best_roc_auc = 0.0
        self.best_epoch_idx = 0
        self.begin_time = time.time()

    def report(self, train_metrics, test_metrics):
        """Print one progress line for the finished epoch."""
        print('epoch [{}],time:{:5d}s,loss:{:.4f},roc_auc:{:.2f}'.format(
            train_metrics['epoch_idx'], int(time.time() - self.begin_time),
            train_metrics['epoch_loss'], test_metrics['roc_auc']))

    def save_model(self, net, test_metrics):
        """Save the cloner network; keep the best-ROC-AUC checkpoint.

        Args:
            net: dict whose 'model' entry is the cloner module.
            test_metrics: dict with 'epoch_idx' and 'roc_auc'.
        """
        if self.config.recorder.save_all_models:
            # optional per-epoch checkpoint
            torch.save(
                net['model'].state_dict(),
                os.path.join(
                    self.output_dir,
                    'Clone_epoch{}.ckpt'.format(test_metrics['epoch_idx'])))
        # enter only if a better (>=) ROC-AUC occurs
        if test_metrics['roc_auc'] >= self.best_roc_auc:
            # delete the superseded best checkpoint
            old_fname = 'Clone_best_epoch{}_roc_auc{}.pth'.format(
                self.best_epoch_idx, self.best_roc_auc)
            old_pth = os.path.join(self.output_dir, old_fname)
            Path(old_pth).unlink(missing_ok=True)
            # update the best model
            self.best_epoch_idx = test_metrics['epoch_idx']
            self.best_roc_auc = test_metrics['roc_auc']
            save_fname = 'Clone_best_epoch{}_roc_auc{}.pth'.format(
                self.best_epoch_idx, self.best_roc_auc)
            save_pth = os.path.join(self.output_dir, save_fname)
            torch.save(net['model'].state_dict(), save_pth)
        # extra snapshot at the configured final checkpoint epoch;
        # filename embeds the 'normal class' this one-class run was fit on
        if test_metrics['epoch_idx'] == self.config['last_checkpoint']:
            torch.save(
                net['model'].state_dict(),
                '{}/Cloner_{}_epoch_{}.pth'.format(self.config['output_dir'],
                                                   self.config.normal_class,
                                                   test_metrics['epoch_idx']))

    def summary(self):
        """Print the best ROC-AUC achieved and the epoch it occurred."""
        print('Training Completed! '
              'Best Roc_auc: {:.2f}%,'
              'at epoch {:d}'.format(100 * self.best_roc_auc,
                                     self.best_epoch_idx),
              flush=True)
| 2,285 | 38.413793 | 78 | py |
null | OpenOOD-main/openood/recorders/opengan_recorder.py | import copy
import os
import time
import torch
from .base_recorder import BaseRecorder
class OpenGanRecorder(BaseRecorder):
    """Recorder for OpenGAN: tracks the discriminator's validation AUROC
    and checkpoints the generator/discriminator pair."""

    def __init__(self, config) -> None:
        super().__init__(config)
        self.save_dir = self.config.output_dir
        self.best_val_auroc = 0
        self.best_epoch_idx = 0

    def report(self, train_metrics, val_metrics):
        """Print one progress line with the latest G/D losses."""
        elapsed = int(time.time() - self.begin_time)
        print('Epoch [{:03d}/{:03d}] | Time {:5d}s | Loss_G: {:.4f} | '
              'Loss_D: {:.4f} | Val AUROC: {:.2f}\n'.format(
                  train_metrics['epoch_idx'], self.config.optimizer.num_epochs,
                  elapsed,
                  train_metrics['G_losses'][-1], train_metrics['D_losses'][-1],
                  val_metrics['auroc']),
              flush=True)

    def save_model(self, net, val_metrics):
        """Snapshot netG/netD; keep the pair with the best val AUROC."""
        epoch_idx = val_metrics['epoch_idx']
        generator = net['netG']
        discriminator = net['netD']

        # unwrap DataParallel if present; deepcopy detaches the snapshot
        # from the still-training parameters
        try:
            gen_state = copy.deepcopy(generator.module.state_dict())
            dis_state = copy.deepcopy(discriminator.module.state_dict())
        except AttributeError:
            gen_state = copy.deepcopy(generator.state_dict())
            dis_state = copy.deepcopy(discriminator.state_dict())

        if self.config.recorder.save_all_models:
            torch.save(gen_state,
                       os.path.join(self.save_dir,
                                    'epoch-{}_GNet.ckpt'.format(epoch_idx)))
            torch.save(dis_state,
                       os.path.join(self.save_dir,
                                    'epoch-{}_DNet.ckpt'.format(epoch_idx)))

        # a tie (>=) still refreshes the stored best pair
        if val_metrics['auroc'] >= self.best_val_auroc:
            self.best_epoch_idx = epoch_idx
            self.best_val_auroc = val_metrics['auroc']
            torch.save(gen_state,
                       os.path.join(self.output_dir, 'best_GNet.ckpt'))
            torch.save(dis_state,
                       os.path.join(self.output_dir, 'best_DNet.ckpt'))

    def summary(self):
        """Print the best validation AUROC and the epoch it occurred."""
        print('Training Completed! '
              'Best val AUROC on netD: {:.6f} '
              'at epoch {:d}'.format(self.best_val_auroc,
                                     self.best_epoch_idx),
              flush=True)
| 2,222 | 36.05 | 79 | py |
null | OpenOOD-main/openood/recorders/rd4ad_recorder.py | import os
import time
from pathlib import Path
import torch
from .base_recorder import BaseRecorder
class Rd4adRecorder(BaseRecorder):
    """Recorder for RD4AD (reverse distillation): checkpoints the bottleneck
    ('bn') and decoder networks separately, tracking best image AUROC."""

    def __init__(self, config) -> None:
        super(Rd4adRecorder, self).__init__(config)
        self.best_epoch_idx = 0
        self.best_result = 0
        self.begin_time = time.time()

    def report(self, train_metrics, test_metrics):
        """Print one progress line for the finished epoch."""
        elapsed = int(time.time() - self.begin_time)
        print('Epoch {:03d} | Time {:5d}s | Train Loss {:.4f} | '
              'Auroc {:.4f}\n'.format(train_metrics['epoch_idx'],
                                      elapsed,
                                      train_metrics['loss'],
                                      100.0 * test_metrics['image_auroc']),
              flush=True)

    def save_model(self, net, test_metrics):
        """Save 'bn'/'decoder' weights; refresh best snapshots on >= AUROC."""
        bn_state = net['bn'].state_dict()
        dec_state = net['decoder'].state_dict()

        if self.config.recorder.save_all_models:
            per_epoch = 'model_epoch{}.ckpt'.format(test_metrics['epoch_idx'])
            torch.save({'bn': bn_state, 'decoder': dec_state},
                       os.path.join(self.output_dir, per_epoch))

        # a tie (>=) still refreshes the stored best
        if test_metrics['image_auroc'] >= self.best_result:
            # drop the previous pair of descriptive best checkpoints
            for prefix in ('bn', 'decoder'):
                stale = '{}_best_epoch{}_auroc{:.4f}.ckpt'.format(
                    prefix, self.best_epoch_idx, self.best_result)
                Path(os.path.join(self.output_dir,
                                  stale)).unlink(missing_ok=True)

            self.best_epoch_idx = test_metrics['epoch_idx']
            self.best_result = test_metrics['image_auroc']

            # fixed-name copies
            torch.save({'bn': bn_state},
                       os.path.join(self.output_dir, 'bn_best.ckpt'))
            torch.save({'decoder': dec_state},
                       os.path.join(self.output_dir, 'decoder_best.ckpt'))
            # descriptive copies embedding epoch and AUROC
            torch.save(
                {'bn': bn_state},
                os.path.join(
                    self.output_dir,
                    'bn_best_epoch{}_auroc{:.4f}.ckpt'.format(
                        self.best_epoch_idx, self.best_result)))
            torch.save(
                {'decoder': dec_state},
                os.path.join(
                    self.output_dir,
                    'decoder_best_epoch{}_auroc{:.4f}.ckpt'.format(
                        self.best_epoch_idx, self.best_result)))

    def summary(self):
        """Print the best AUROC achieved and the epoch it occurred."""
        print('Training Completed!\n '
              'Best Auroc: {:.4f} at epoch {:d}\n'.format(
                  100.0 * self.best_result, self.best_epoch_idx),
              flush=True)
| 2,961 | 40.138889 | 76 | py |
null | OpenOOD-main/openood/recorders/utils.py | from openood.utils import Config
from .ad_recorder import ADRecorder
from .arpl_recorder import ARPLRecorder
from .base_recorder import BaseRecorder
from .cider_recorder import CiderRecorder
from .cutpaste_recorder import CutpasteRecorder
from .draem_recorder import DRAEMRecorder
from .dsvdd_recorder import DCAERecorder, DSVDDRecorder
from .kdad_recorder import KdadRecorder
from .opengan_recorder import OpenGanRecorder
from .rd4ad_recorder import Rd4adRecorder
def get_recorder(config: Config):
    """Instantiate the recorder selected by ``config.recorder.name``.

    Raises:
        KeyError: if the configured name is not a registered recorder.
    """
    registry = {
        'base': BaseRecorder,
        'cider': CiderRecorder,
        'draem': DRAEMRecorder,
        'opengan': OpenGanRecorder,
        'dcae': DCAERecorder,
        'dsvdd': DSVDDRecorder,
        'kdad': KdadRecorder,
        'arpl': ARPLRecorder,
        'cutpaste': CutpasteRecorder,
        'ad': ADRecorder,
        'rd4ad': Rd4adRecorder,
    }
    recorder_cls = registry[config.recorder.name]
    return recorder_cls(config)
| 926 | 28.903226 | 55 | py |
null | OpenOOD-main/openood/trainers/__init__.py | from .utils import get_trainer
| 31 | 15 | 30 | py |
null | OpenOOD-main/openood/trainers/arpl_gan_trainer.py | import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from tqdm import tqdm
from openood.utils import Config
from .lr_scheduler import cosine_annealing
class ARPLGANTrainer:
    """Trainer for ARPL with confusing samples (ARPL+CS).

    Each step performs a three-phase update: (1) the discriminator on
    real vs. generated images, (2) the generator with a GAN loss plus the
    ARPL criterion's fake loss, and (3) the classifier/criterion with the
    ARPL loss plus a fake-sample term.

    ``net`` is a dict providing 'netF' (backbone), 'netG' (generator),
    'netD' (discriminator) and 'criterion' (ARPL loss module).
    """
    def __init__(self, net: dict, train_loader: DataLoader,
                 config: Config) -> None:
        self.net = net['netF']
        self.netG = net['netG']
        self.netD = net['netD']
        self.train_loader = train_loader
        self.config = config
        self.criterion = net['criterion']
        # fixed latent batch of 64 samples; not used inside train_epoch
        self.fixed_noise = torch.FloatTensor(64, config.network.nz, 1,
                                             1).normal_(0, 1).cuda()
        self.criterionD = nn.BCELoss()
        # backbone and ARPL criterion (reciprocal points) share one SGD
        params_list = [{
            'params': self.net.parameters()
        }, {
            'params': self.criterion.parameters()
        }]
        self.optimizer = torch.optim.SGD(
            params_list,
            config.optimizer.lr,
            momentum=config.optimizer.momentum,
            weight_decay=config.optimizer.weight_decay,
            nesterov=True,
        )
        # per-step cosine annealing from lr down to ~1e-6
        self.scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer,
            lr_lambda=lambda step: cosine_annealing(
                step,
                config.optimizer.num_epochs * len(train_loader),
                1,
                1e-6 / config.optimizer.lr,
            ),
        )
        self.optimizerD = torch.optim.Adam(self.netD.parameters(),
                                           lr=config.optimizer.gan_lr,
                                           betas=(0.5, 0.999))
        self.optimizerG = torch.optim.Adam(self.netG.parameters(),
                                           lr=config.optimizer.gan_lr,
                                           betas=(0.5, 0.999))

    def train_epoch(self, epoch_idx):
        """Run one epoch; returns (component dict, metrics with EMA losses
        'loss', 'lossG', 'lossD' and 'epoch_idx')."""
        self.net.train()
        self.netD.train()
        self.netG.train()
        loss_avg, lossG_avg, lossD_avg = 0.0, 0.0, 0.0
        train_dataiter = iter(self.train_loader)
        real_label, fake_label = 1, 0
        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}: '.format(epoch_idx),
                               position=0,
                               leave=True):
            batch = next(train_dataiter)
            data = batch['data'].cuda()
            target = batch['label'].cuda()
            gan_target = torch.FloatTensor(target.size()).fill_(0).cuda()
            noise = torch.FloatTensor(
                data.size(0), self.config.network.nz, self.config.network.ns,
                self.config.network.ns).normal_(0, 1).cuda()
            noise = noise.cuda()
            noise = Variable(noise)
            fake = self.netG(noise)
            ###########################
            # (1) Update D network    #
            ###########################
            # train with real
            gan_target.fill_(real_label)
            targetv = Variable(gan_target)
            self.optimizerD.zero_grad()
            output = self.netD(data)
            errD_real = self.criterionD(output, targetv)
            errD_real.backward()
            # train with fake (detach: no generator gradients in this phase)
            targetv = Variable(gan_target.fill_(fake_label))
            output = self.netD(fake.detach())
            errD_fake = self.criterionD(output, targetv)
            errD_fake.backward()
            errD = errD_real + errD_fake
            self.optimizerD.step()
            ###########################
            # (2) Update G network    #
            ###########################
            self.optimizerG.zero_grad()
            # Original GAN loss (fool the discriminator)
            targetv = Variable(gan_target.fill_(real_label))
            output = self.netD(fake)
            errG = self.criterionD(output, targetv)
            # minimize the true distribution (ARPL fake loss on generated
            # samples, fed through the backbone with flag 1)
            _, feat = self.net(
                fake, True,
                1 * torch.ones(data.shape[0], dtype=torch.long).cuda())
            errG_F = self.criterion.fake_loss(feat).mean()
            generator_loss = errG + self.config.loss.beta * errG_F
            generator_loss.backward()
            self.optimizerG.step()
            ###########################
            # (3) Update classifier   #
            ###########################
            # cross entropy loss (ARPL criterion on real data, flag 0)
            self.optimizer.zero_grad()
            _, feat = self.net(
                data, True,
                0 * torch.ones(data.shape[0], dtype=torch.long).cuda())
            _, loss = self.criterion(feat, target)
            # KL divergence: regularize with freshly generated fakes
            noise = torch.FloatTensor(
                data.size(0), self.config.network.nz, self.config.network.ns,
                self.config.network.ns).normal_(0, 1).cuda()
            noise = Variable(noise)
            fake = self.netG(noise)
            _, feat = self.net(
                fake, True,
                1 * torch.ones(data.shape[0], dtype=torch.long).cuda())
            F_loss_fake = self.criterion.fake_loss(feat).mean()
            total_loss = loss + self.config.loss.beta * F_loss_fake
            total_loss.backward()
            self.optimizer.step()
            self.scheduler.step()
            # exponential moving average, show smooth values
            with torch.no_grad():
                loss_avg = loss_avg * 0.8 + float(total_loss) * 0.2
                lossG_avg = lossG_avg * 0.8 + float(generator_loss) * 0.2
                lossD_avg = lossD_avg * 0.8 + float(errD) * 0.2
        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = loss_avg
        metrics['lossG'] = lossG_avg
        metrics['lossD'] = lossD_avg
        return {
            'netG': self.netG,
            'netD': self.netD,
            'netF': self.net,
            'criterion': self.criterion
        }, metrics
| 5,926 | 34.921212 | 77 | py |
null | OpenOOD-main/openood/trainers/arpl_trainer.py | import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm import tqdm
import openood.utils.comm as comm
from openood.utils import Config
from .lr_scheduler import cosine_annealing
class ARPLTrainer:
    """Trainer for ARPL (Adversarial Reciprocal Points Learning).

    The classification loss comes from the ARPL criterion
    (``net['criterion']``), whose reciprocal points are optimized jointly
    with the feature backbone ``net['netF']``.
    """
    def __init__(self, net: dict, train_loader: DataLoader,
                 config: Config) -> None:
        self.net = net['netF']
        self.train_loader = train_loader
        self.config = config
        self.criterion = net['criterion']
        # backbone and criterion parameters share one SGD optimizer
        params_list = [{
            'params': self.net.parameters()
        }, {
            'params': self.criterion.parameters()
        }]
        self.optimizer = torch.optim.SGD(
            params_list,
            config.optimizer.lr,
            momentum=config.optimizer.momentum,
            weight_decay=config.optimizer.weight_decay,
            nesterov=True,
        )
        # per-step cosine annealing from lr down to ~1e-6
        self.scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer,
            lr_lambda=lambda step: cosine_annealing(
                step,
                config.optimizer.num_epochs * len(train_loader),
                1,
                1e-6 / config.optimizer.lr,
            ),
        )

    def train_epoch(self, epoch_idx):
        """Run one epoch; returns ({'netF', 'criterion'}, metrics) where
        metrics holds 'epoch_idx' and the EMA-smoothed 'loss'."""
        self.net.train()
        loss_avg = 0.0
        train_dataiter = iter(self.train_loader)
        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}: '.format(epoch_idx),
                               position=0,
                               leave=True,
                               disable=not comm.is_main_process()):
            batch = next(train_dataiter)
            data = batch['data'].cuda()
            target = batch['label'].cuda()
            # forward: features go through the ARPL criterion for the loss
            _, feat = self.net(data, return_feature=True)
            logits, loss = self.criterion(feat, target)
            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            self.scheduler.step()
            # exponential moving average, show smooth values
            with torch.no_grad():
                loss_avg = loss_avg * 0.8 + float(loss) * 0.2
        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = loss_avg
        return {'netF': self.net, 'criterion': self.criterion}, metrics
| 2,400 | 29.0125 | 71 | py |
null | OpenOOD-main/openood/trainers/augmix_trainer.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
import openood.utils.comm as comm
from openood.utils import Config
from .lr_scheduler import cosine_annealing
class AugMixTrainer:
    """Trainer using AugMix augmentation.

    When ``jsd`` is enabled, every batch carries the clean image plus two
    AugMix views ('data_aug1'/'data_aug2'); a Jensen-Shannon consistency
    term weighted by ``lam`` is added to the clean-image cross-entropy.
    Otherwise it is plain cross-entropy training on 'data'.
    """
    def __init__(self, net: nn.Module, train_loader: DataLoader,
                 config: Config) -> None:
        self.net = net
        self.train_loader = train_loader
        self.config = config
        # lam: JSD consistency weight; jsd: whether to use the 3-view loss
        self.lam = config.trainer.trainer_args.lam
        self.jsd = config.trainer.trainer_args.jsd
        self.optimizer = torch.optim.SGD(
            net.parameters(),
            config.optimizer.lr,
            momentum=config.optimizer.momentum,
            weight_decay=config.optimizer.weight_decay,
            nesterov=True,
        )
        # per-step cosine annealing from lr down to ~1e-6
        self.scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer,
            lr_lambda=lambda step: cosine_annealing(
                step,
                config.optimizer.num_epochs * len(train_loader),
                1,
                1e-6 / config.optimizer.lr,
            ),
        )

    def train_epoch(self, epoch_idx):
        """Run one epoch; returns (net, {'epoch_idx', 'loss'}) with the
        EMA-smoothed loss gathered across workers."""
        self.net.train()
        loss_avg = 0.0
        train_dataiter = iter(self.train_loader)
        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}: '.format(epoch_idx),
                               position=0,
                               leave=True,
                               disable=not comm.is_main_process()):
            batch = next(train_dataiter)
            target = batch['label'].cuda()
            if self.jsd:
                orig_data = batch['data'].cuda()
                aug1_data = batch['data_aug1'].cuda()
                aug2_data = batch['data_aug2'].cuda()
                # one forward pass over the concatenated 3N images, split
                # back into the three views afterwards
                data = torch.cat([orig_data, aug1_data, aug2_data])
                # forward
                logits_all = self.net(data)
                logits_clean, logits_aug1, logits_aug2 = torch.split(
                    logits_all, orig_data.size(0))
                # Cross-entropy is only computed on clean images
                loss = F.cross_entropy(logits_clean, target)
                p_clean, p_aug1, p_aug2 = \
                    F.softmax(logits_clean, dim=1), \
                    F.softmax(logits_aug1, dim=1), \
                    F.softmax(logits_aug2, dim=1)
                # Clamp mixture distribution to avoid exploding KL divergence
                p_mixture = torch.clamp((p_clean + p_aug1 + p_aug2) / 3., 1e-7,
                                        1).log()
                # Jensen-Shannon consistency: mean KL of each view to mixture
                loss += self.lam * (
                    F.kl_div(p_mixture, p_clean, reduction='batchmean') +
                    F.kl_div(p_mixture, p_aug1, reduction='batchmean') +
                    F.kl_div(p_mixture, p_aug2, reduction='batchmean')) / 3.
            else:
                data = batch['data'].cuda()
                # forward
                logits = self.net(data)
                loss = F.cross_entropy(logits, target)
            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            self.scheduler.step()
            # exponential moving average, show smooth values
            with torch.no_grad():
                loss_avg = loss_avg * 0.8 + float(loss) * 0.2
        # comm.synchronize()
        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = self.save_metrics(loss_avg)
        return self.net, metrics

    def save_metrics(self, loss_avg):
        """Average the EMA loss across distributed workers."""
        all_loss = comm.gather(loss_avg)
        total_losses_reduced = np.mean([x for x in all_loss])
        return total_losses_reduced
| 3,803 | 32.663717 | 79 | py |
null | OpenOOD-main/openood/trainers/base_trainer.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
import openood.utils.comm as comm
from openood.utils import Config
from .lr_scheduler import cosine_annealing
class BaseTrainer:
    """Standard supervised trainer: SGD with Nesterov momentum and a
    per-step cosine-annealed learning rate, cross-entropy loss."""

    def __init__(self, net: nn.Module, train_loader: DataLoader,
                 config: Config) -> None:
        self.net = net
        self.train_loader = train_loader
        self.config = config

        self.optimizer = torch.optim.SGD(
            net.parameters(),
            config.optimizer.lr,
            momentum=config.optimizer.momentum,
            weight_decay=config.optimizer.weight_decay,
            nesterov=True,
        )

        # anneal from lr down to ~1e-6 over all training steps
        total_steps = config.optimizer.num_epochs * len(train_loader)
        self.scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer,
            lr_lambda=lambda step: cosine_annealing(
                step, total_steps, 1, 1e-6 / config.optimizer.lr),
        )

    def train_epoch(self, epoch_idx):
        """Run one epoch; returns (net, {'epoch_idx', 'loss'}) with the
        EMA-smoothed loss gathered across workers."""
        self.net.train()

        running_loss = 0.0
        data_iter = iter(self.train_loader)
        for _ in tqdm(range(1,
                            len(data_iter) + 1),
                      desc='Epoch {:03d}: '.format(epoch_idx),
                      position=0,
                      leave=True,
                      disable=not comm.is_main_process()):
            batch = next(data_iter)
            inputs = batch['data'].cuda()
            labels = batch['label'].cuda()

            logits = self.net(inputs)
            loss = F.cross_entropy(logits, labels)

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            self.scheduler.step()

            # exponential moving average for a smoother reported loss
            with torch.no_grad():
                running_loss = running_loss * 0.8 + float(loss) * 0.2

        metrics = {
            'epoch_idx': epoch_idx,
            'loss': self.save_metrics(running_loss),
        }
        return self.net, metrics

    def save_metrics(self, loss_avg):
        """Average the EMA loss across distributed workers."""
        gathered = comm.gather(loss_avg)
        return np.mean([v for v in gathered])
| 2,446 | 28.481928 | 71 | py |
null | OpenOOD-main/openood/trainers/cider_trainer.py | import math
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
import openood.utils.comm as comm
from openood.utils import Config
class CIDERTrainer:
    """Trainer for CIDER: jointly optimizes a compactness loss (features to
    class prototypes) and a dispersion loss (prototypes apart), with
    optional LR warm-up for large effective batch sizes.
    """
    def __init__(self, net: nn.Module, train_loader: DataLoader,
                 val_loader: DataLoader, config: Config) -> None:
        self.net = net
        self.train_loader = train_loader
        self.config = config
        # for large-scale data, fine-tune only the last backbone stage
        if 'imagenet' in self.config.dataset.name:
            try:
                for name, p in self.net.backbone.named_parameters():
                    if not name.startswith('layer4'):
                        p.requires_grad = False
            except AttributeError:
                # DataParallel wraps the real model under .module
                for name, p in self.net.module.backbone.named_parameters():
                    if not name.startswith('layer4'):
                        p.requires_grad = False
        self.optimizer = torch.optim.SGD(
            net.parameters(),
            config.optimizer.lr,
            momentum=config.optimizer.momentum,
            weight_decay=config.optimizer.weight_decay,
            nesterov=True,
        )
        # force warm-up when the effective (global) batch size exceeds 256
        if config.dataset.train.batch_size \
                * config.num_gpus * config.num_machines > 256:
            config.optimizer.warm = True
        if config.optimizer.warm:
            self.warmup_from = 0.001
            self.warm_epochs = 10
            if config.optimizer.cosine:
                # warm up to where the cosine schedule would be after
                # warm_epochs epochs
                eta_min = config.optimizer.lr * \
                    (config.optimizer.lr_decay_rate**3)
                self.warmup_to = eta_min + (config.optimizer.lr - eta_min) * (
                    1 + math.cos(math.pi * self.warm_epochs /
                                 config.optimizer.num_epochs)) / 2
            else:
                self.warmup_to = config.optimizer.lr
        self.criterion_comp = CompLoss(
            config.dataset.num_classes,
            temperature=config.trainer.trainer_args.temp).cuda()
        # V2: EMA style prototypes
        self.criterion_dis = DisLoss(
            config.dataset.num_classes,
            config.network.feat_dim,
            config.trainer.trainer_args.proto_m,
            self.net,
            val_loader,
            temperature=config.trainer.trainer_args.temp).cuda()

    def train_epoch(self, epoch_idx):
        """Run one epoch (two augmented views per sample); returns
        (net, {'epoch_idx', 'loss'}) with the EMA-smoothed loss."""
        adjust_learning_rate(self.config, self.optimizer, epoch_idx - 1)
        self.net.train()
        loss_avg = 0.0
        train_dataiter = iter(self.train_loader)
        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}: '.format(epoch_idx),
                               position=0,
                               leave=True,
                               disable=not comm.is_main_process()):
            # linear LR warm-up inside the first warm_epochs epochs
            warmup_learning_rate(self.config, self.warm_epochs,
                                 self.warmup_from,
                                 self.warmup_to, epoch_idx - 1, train_step,
                                 len(train_dataiter), self.optimizer)
            batch = next(train_dataiter)
            data = batch['data']
            target = batch['label']
            # two views per sample are stacked into one 2N batch
            data = torch.cat([data[0], data[1]], dim=0).cuda()
            target = target.repeat(2).cuda()
            # forward
            features = self.net(data)
            dis_loss = self.criterion_dis(features, target)  # V2: EMA style
            comp_loss = self.criterion_comp(features,
                                            self.criterion_dis.prototypes,
                                            target)
            loss = self.config.trainer.trainer_args.w * comp_loss + dis_loss
            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            # exponential moving average, show smooth values
            with torch.no_grad():
                loss_avg = loss_avg * 0.8 + float(loss) * 0.2
        # comm.synchronize()
        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = self.save_metrics(loss_avg)
        return self.net, metrics

    def save_metrics(self, loss_avg):
        """Average the EMA loss across distributed workers."""
        all_loss = comm.gather(loss_avg)
        total_losses_reduced = np.mean([x for x in all_loss])
        return total_losses_reduced
def adjust_learning_rate(config, optimizer, epoch):
    """Set this epoch's learning rate on every param group.

    Uses a cosine schedule when ``config.optimizer.cosine`` is set
    (floor = lr * decay_rate**3), otherwise multiplies the base LR by
    ``lr_decay_rate`` once per passed milestone in ``lr_decay_epochs``.
    """
    base_lr = config.optimizer.lr
    if config.optimizer.cosine:
        eta_min = base_lr * (config.optimizer.lr_decay_rate**3)
        cos_factor = (1 + math.cos(math.pi * epoch /
                                   config.optimizer.num_epochs)) / 2
        new_lr = eta_min + (base_lr - eta_min) * cos_factor
    else:
        n_decays = np.sum(
            epoch > np.asarray(config.optimizer.lr_decay_epochs))
        new_lr = base_lr if n_decays == 0 else \
            base_lr * (config.optimizer.lr_decay_rate**n_decays)
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def warmup_learning_rate(config, warm_epochs, warmup_from, warmup_to, epoch,
                         batch_id, total_batches, optimizer):
    """Linearly ramp the LR from warmup_from to warmup_to during warm-up.

    No-op when warm-up is disabled or the warm-up window has passed.
    """
    if not (config.optimizer.warm and epoch <= warm_epochs):
        return
    # fraction of the warm-up window completed so far (batch granularity)
    progress = (batch_id + (epoch - 1) * total_batches) / (warm_epochs *
                                                           total_batches)
    new_lr = warmup_from + progress * (warmup_to - warmup_from)
    for group in optimizer.param_groups:
        group['lr'] = new_lr
class CompLoss(nn.Module):
    """Compactness loss with class-conditional prototypes (CIDER).

    Cross-entropy over feature-to-prototype similarities: each feature is
    pulled toward the prototype of its own class.
    """
    def __init__(self, n_cls, temperature=0.07, base_temperature=0.07):
        super(CompLoss, self).__init__()
        self.n_cls = n_cls
        self.temperature = temperature
        self.base_temperature = base_temperature

    def forward(self, features, prototypes, labels):
        prototypes = F.normalize(prototypes, dim=1)
        proxy_labels = torch.arange(0, self.n_cls).cuda()
        # (bz, n_cls) one-hot mask: 1 where sample i belongs to class j
        mask = torch.eq(labels.contiguous().view(-1, 1),
                        proxy_labels.T).float().cuda()
        # temperature-scaled feature/prototype similarities
        sims = torch.matmul(features, prototypes.T) / self.temperature
        # subtract the per-row max for numerical stability
        row_max, _ = torch.max(sims, dim=1, keepdim=True)
        logits = sims - row_max.detach()
        # log-softmax over prototypes
        log_prob = logits - torch.log(torch.exp(logits).sum(1, keepdim=True))
        # log-likelihood of each sample's own class prototype
        pos_log_prob = (mask * log_prob).sum(1)
        return -(self.temperature /
                 self.base_temperature) * pos_log_prob.mean()
class DisLoss(nn.Module):
    """Dispersion Loss with EMA prototypes (CIDER).

    Maintains one L2-normalized prototype per class as an exponential
    moving average of training features and penalizes high similarity
    between prototypes of *different* classes.

    Args:
        n_cls: number of classes.
        feat_dim: dimensionality of the feature space.
        proto_m: EMA momentum for prototype updates.
        model: network used to initialize the prototypes.
        loader: data loader yielding {'data', 'label'} batches for init.
        temperature / base_temperature: softmax temperature scaling.
    """
    def __init__(self,
                 n_cls,
                 feat_dim,
                 proto_m,
                 model,
                 loader,
                 temperature=0.1,
                 base_temperature=0.1):
        super(DisLoss, self).__init__()
        self.n_cls = n_cls
        self.feat_dim = feat_dim
        self.proto_m = proto_m
        self.temperature = temperature
        self.base_temperature = base_temperature
        self.register_buffer('prototypes',
                             torch.zeros(self.n_cls, self.feat_dim))
        self.model = model
        self.loader = loader
        self.init_class_prototypes()

    def forward(self, features, labels):
        """EMA-update prototypes with *features*, return dispersion loss."""
        prototypes = self.prototypes
        num_cls = self.n_cls
        # EMA update of the prototype for each sample's class
        for j in range(len(features)):
            prototypes[labels[j].item()] = F.normalize(
                prototypes[labels[j].item()] * self.proto_m + features[j] *
                (1 - self.proto_m),
                dim=0)
        self.prototypes = prototypes.detach()
        labels = torch.arange(0, num_cls).cuda()
        # FIX: the original assigned this view twice (dead duplicate line)
        labels = labels.contiguous().view(-1, 1)
        # select pairs of *different* classes
        mask = (1 - torch.eq(labels, labels.T).float()).cuda()
        logits = torch.div(torch.matmul(prototypes, prototypes.T),
                           self.temperature)
        # additionally zero the diagonal (self-similarity)
        logits_mask = torch.scatter(torch.ones_like(mask), 1,
                                    torch.arange(num_cls).view(-1, 1).cuda(),
                                    0)
        mask = mask * logits_mask
        mean_prob_neg = torch.log(
            (mask * torch.exp(logits)).sum(1) / mask.sum(1))
        # drop NaNs from rows with no negatives (degenerate num_cls == 1)
        mean_prob_neg = mean_prob_neg[~torch.isnan(mean_prob_neg)]
        loss = self.temperature / self.base_temperature * mean_prob_neg.mean()
        return loss

    def init_class_prototypes(self):
        """Initialize prototypes as normalized per-class feature means."""
        self.model.eval()
        start = time.time()
        prototype_counts = [0] * self.n_cls
        with torch.no_grad():
            prototypes = torch.zeros(self.n_cls, self.feat_dim).cuda()
            for i, batch in enumerate(self.loader):
                input = batch['data']
                target = batch['label']
                input, target = input.cuda(), target.cuda()
                features = self.model(input)
                for j, feature in enumerate(features):
                    prototypes[target[j].item()] += feature
                    prototype_counts[target[j].item()] += 1
            # NOTE(review): assumes every class appears at least once in
            # `loader`; a missing class would divide by zero here.
            for cls in range(self.n_cls):
                prototypes[cls] /= prototype_counts[cls]
            # measure elapsed time
            duration = time.time() - start
            print(f'Time to initialize prototypes: {duration:.3f}')
            prototypes = F.normalize(prototypes, dim=1)
            self.prototypes = prototypes
| 9,625 | 36.166023 | 78 | py |
null | OpenOOD-main/openood/trainers/conf_branch_trainer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from tqdm import tqdm
import openood.utils.comm as comm
from openood.utils import Config
from .lr_scheduler import cosine_annealing
class ConfBranchTrainer:
    """Trainer for the confidence-branch method.

    The network predicts class probabilities plus a confidence score; low
    confidence lets the prediction interpolate toward the one-hot label
    ('hints'), while a penalty term weighted by the adaptive ``lmbda``
    discourages asking for hints.
    """
    def __init__(self, net, train_loader, config: Config) -> None:
        self.train_loader = train_loader
        self.config = config
        self.net = net
        # NLL because the network output is turned into log-probs below
        self.prediction_criterion = nn.NLLLoss().cuda()
        self.optimizer = torch.optim.SGD(
            net.parameters(),
            lr=config.optimizer['lr'],
            momentum=config.optimizer['momentum'],
            nesterov=config.optimizer['nesterov'],
            weight_decay=config.optimizer['weight_decay'])
        # cosine annealing from lr down to 1e-6 over all training steps
        self.scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer,
            lr_lambda=lambda step: cosine_annealing(
                step,
                config.optimizer.num_epochs * len(train_loader),
                1,
                1e-6 / config.optimizer.lr,
            ),
        )
        # weight of the confidence penalty; adapted online against `budget`
        self.lmbda = self.config.trainer['lmbda']
    def train_epoch(self, epoch_idx):
        """Run one training epoch and return (net, metrics)."""
        self.net.train()
        correct_count = 0.
        total = 0.
        accuracy = 0.
        train_dataiter = iter(self.train_loader)
        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}'.format(epoch_idx),
                               position=0,
                               leave=True,
                               disable=not comm.is_main_process()):
            batch = next(train_dataiter)
            images = Variable(batch['data']).cuda()
            labels = Variable(batch['label']).cuda()
            labels_onehot = Variable(
                encode_onehot(labels, self.config.num_classes))
            self.net.zero_grad()
            pred_original, confidence = self.net(images,
                                                 return_confidence=True)
            pred_original = F.softmax(pred_original, dim=-1)
            confidence = torch.sigmoid(confidence)
            # clamp away from 0/1 so the logs below stay finite
            eps = self.config.trainer['eps']
            pred_original = torch.clamp(pred_original, 0. + eps, 1. - eps)
            confidence = torch.clamp(confidence, 0. + eps, 1. - eps)
            if not self.config.baseline:
                # Randomly set half of the confidences to 1 (i.e. no hints)
                b = Variable(
                    torch.bernoulli(
                        torch.Tensor(confidence.size()).uniform_(0,
                                                                 1))).cuda()
                conf = confidence * b + (1 - b)
                # interpolate toward the one-hot label proportionally to
                # (1 - confidence): low confidence leans on the hint
                pred_new = pred_original * conf.expand_as(
                    pred_original) + labels_onehot * (
                        1 - conf.expand_as(labels_onehot))
                pred_new = torch.log(pred_new)
            else:
                pred_new = torch.log(pred_original)
            xentropy_loss = self.prediction_criterion(pred_new, labels)
            confidence_loss = torch.mean(-torch.log(confidence))
            if self.config.baseline:
                total_loss = xentropy_loss
            else:
                total_loss = xentropy_loss + (self.lmbda * confidence_loss)
                # keep confidence_loss near the budget by adapting lmbda
                if self.config.trainer['budget'] > confidence_loss.item():
                    self.lmbda = self.lmbda / 1.01
                elif self.config.trainer['budget'] <= confidence_loss.item():
                    self.lmbda = self.lmbda / 0.99
            total_loss.backward()
            self.optimizer.step()
            self.scheduler.step()
            # running training accuracy from the raw (pre-hint) predictions
            pred_idx = torch.max(pred_original.data, 1)[1]
            total += labels.size(0)
            correct_count += (pred_idx == labels.data).sum()
            accuracy = correct_count / total
        metrics = {}
        metrics['train_acc'] = accuracy
        metrics['loss'] = total_loss
        metrics['epoch_idx'] = epoch_idx
        return self.net, metrics
def encode_onehot(labels, n_classes):
    """Return a float (batch, n_classes) one-hot encoding of *labels*."""
    labels = labels.data
    onehot = torch.zeros(labels.size(0), n_classes)  # batchsize * num of class
    if labels.is_cuda:
        onehot = onehot.cuda()
    onehot.scatter_(1, labels.view(-1, 1), 1)
    return onehot
| 4,375 | 37.052174 | 77 | py |
null | OpenOOD-main/openood/trainers/csi_trainer.py | import math
import numbers
import diffdist.functional as distops
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.autograd import Function
from torch.optim.lr_scheduler import ReduceLROnPlateau, _LRScheduler
from torch.utils.data import DataLoader
from tqdm import tqdm
import openood.utils.comm as comm
from openood.utils import Config
class CSITrainer:
    """Trainer for CSI (Contrasting Shifted Instances).

    Two stages, selected via ``config.mode``:
      * ``'step1'``: contrastive (SimCLR-style) training of the backbone and
        projection head over rotated views, plus a linear probe trained on
        detached features.
      * otherwise: supervised training of the linear / rotation / joint
        heads on detached backbone features.

    After each epoch the current weights are copied into a CPU-side
    single-GPU ``dummy_net`` which is returned for checkpointing/eval.

    BUG FIX: the epoch-end sync previously read
    ``self.joint_distribution_layer`` and ``self.shift_cls_layer``, which
    are never assigned in ``__init__`` (the heads are stored as
    ``self.joint_linear`` / ``self.rotation_linear``), raising
    AttributeError at the end of every epoch; see ``_sync_dummy_net``.
    """
    def __init__(self, net: nn.Module, train_loader: DataLoader,
                 config: Config) -> None:
        self.net = net['backbone']
        self.train_loader = train_loader
        self.config = config
        self.mode = config.mode

        # unwrap DDP so the dummy net can be used on a single device
        if self.config.num_gpus > 1:
            self.dummy_net = net['dummy_net'].module
        else:
            self.dummy_net = net['dummy_net']
        self.dummy_net.cpu()

        self.simclr_aug = get_simclr_augmentation(
            config, image_size=config.dataset.image_size).cuda()
        self.linear = net['linear']
        self.linear_optim = torch.optim.Adam(
            self.linear.parameters(),
            lr=1e-3,
            betas=(.9, .999),
            weight_decay=config.optimizer.weight_decay)
        self.criterion = nn.CrossEntropyLoss().cuda()
        self.hflip = HorizontalFlipLayer().cuda()
        self.simclr_layer = net['simclr_layer']
        # NOTE: heads are stored under different names than the `net` keys
        self.rotation_linear = net['shift_cls_layer']
        self.joint_linear = net['joint_distribution_layer']

        if 'step1' in self.mode:
            # contrastive stage: SGD over backbone + projection head with
            # cosine annealing behind a gradual warm-up
            self.optimizer = optim.SGD(
                list(self.net.parameters()) +
                list(self.simclr_layer.parameters()),
                lr=config.optimizer.lr,
                momentum=0.9,
                weight_decay=config.optimizer.weight_decay)
            self.scheduler = lr_scheduler.CosineAnnealingLR(
                self.optimizer, config.optimizer.num_epochs)
            self.scheduler_warmup = GradualWarmupScheduler(
                self.optimizer,
                multiplier=10.0,
                total_epoch=config.optimizer.warmup,
                after_scheduler=self.scheduler)
        else:
            # head-training stage: one optimizer + MultiStepLR per head
            milestones = [
                int(0.6 * config.optimizer.num_epochs),
                int(0.75 * config.optimizer.num_epochs),
                int(0.9 * config.optimizer.num_epochs)
            ]
            self.linear_optim = torch.optim.Adam(
                self.linear.parameters(),
                lr=1e-3,
                betas=(.9, .999),
                weight_decay=config.optimizer.weight_decay)
            self.linear_scheduler = lr_scheduler.MultiStepLR(
                self.linear_optim, gamma=0.1, milestones=milestones)
            self.rotation_linear_optim = torch.optim.SGD(
                self.rotation_linear.parameters(),
                lr=1e-1,
                weight_decay=config.optimizer.weight_decay)
            self.rot_scheduler = lr_scheduler.MultiStepLR(
                self.rotation_linear_optim, gamma=0.1, milestones=milestones)
            self.joint_linear_optim = torch.optim.SGD(
                self.joint_linear.parameters(),
                lr=1e-1,
                weight_decay=config.optimizer.weight_decay)
            self.joint_scheduler = lr_scheduler.MultiStepLR(
                self.joint_linear_optim, gamma=0.1, milestones=milestones)

    def train_epoch(self, epoch_idx):
        """Dispatch to the stage-appropriate epoch routine."""
        if 'step1' in self.mode:
            return self.train_sup_epoch(epoch_idx)
        else:
            return self.train_suplinear_epoch(epoch_idx)

    def _sync_dummy_net(self):
        """Copy the current training weights into the CPU dummy net.

        Uses the attributes actually set in ``__init__``
        (``joint_linear`` / ``rotation_linear``); the original code
        referenced nonexistent ``self.joint_distribution_layer`` /
        ``self.shift_cls_layer`` and crashed.
        """
        if self.config.num_gpus > 1:
            # unwrap DistributedDataParallel before reading state dicts
            backbone_sd = self.net.module.state_dict()
            linear_sd = self.linear.module.state_dict()
            simclr_sd = self.simclr_layer.module.state_dict()
            joint_sd = self.joint_linear.module.state_dict()
            shift_sd = self.rotation_linear.module.state_dict()
        else:
            backbone_sd = self.net.state_dict()
            linear_sd = self.linear.state_dict()
            simclr_sd = self.simclr_layer.state_dict()
            joint_sd = self.joint_linear.state_dict()
            shift_sd = self.rotation_linear.state_dict()
        self.dummy_net.backbone.load_state_dict(backbone_sd)
        self.dummy_net.linear.load_state_dict(linear_sd)
        self.dummy_net.simclr_layer.load_state_dict(simclr_sd)
        self.dummy_net.joint_distribution_layer.load_state_dict(joint_sd)
        self.dummy_net.shift_cls_layer.load_state_dict(shift_sd)

    def train_sup_epoch(self, epoch_idx):
        """One epoch of contrastive training (stage 'step1')."""
        self.net.train()
        train_dataiter = iter(self.train_loader)
        n = 0
        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}: '.format(epoch_idx),
                               position=0,
                               leave=True,
                               disable=not comm.is_main_process()):
            batch = next(train_dataiter)
            images = batch['data'].cuda()
            labels = batch['label'].cuda()
            batch_size = images.size(0)
            # two horizontally-flipped views, each rotated 4 ways -> 8B total
            images1, images2 = self.hflip(images.repeat(2, 1, 1,
                                                        1)).chunk(2)  # hflip
            images1 = torch.cat(
                [torch.rot90(images1, rot, (2, 3)) for rot in range(4)])  # 4B
            images2 = torch.cat(
                [torch.rot90(images2, rot, (2, 3)) for rot in range(4)])  # 4B
            images_pair = torch.cat([images1, images2], dim=0)  # 8B
            # joint (class, rotation) labels for supervised contrast
            rot_sim_labels = torch.cat([
                labels + self.config.dataset.num_classes * i for i in range(4)
            ],
                                       dim=0)
            images_pair = self.simclr_aug(images_pair)  # simclr augment
            _, features = self.net(images_pair, return_feature=True)
            simclr_outputs = self.simclr_layer(features)
            simclr = normalize(simclr_outputs)  # normalize
            sim_matrix = get_similarity_matrix(
                simclr, multi_gpu=self.config.num_gpus > 1)
            loss_sim = Supervised_NT_xent(
                sim_matrix,
                labels=rot_sim_labels,
                temperature=self.config.temperature,
                multi_gpu=self.config.num_gpus > 1) * self.config.sim_lambda
            # total loss
            loss = loss_sim
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            # fractional-epoch cosine schedule
            self.scheduler.step(epoch_idx - 1 + n / len(self.train_loader))
            # linear probe on detached features of the unrotated views only
            penul_1 = features[:batch_size]
            penul_2 = features[4 * batch_size:5 * batch_size]
            features = torch.cat([penul_1,
                                  penul_2])  # only use original rotation
            outputs_linear_eval = self.linear(features.detach())
            loss_linear = self.criterion(outputs_linear_eval, labels.repeat(2))
            self.linear_optim.zero_grad()
            loss_linear.backward()
            self.linear_optim.step()
            n = n + 1
        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = loss
        self._sync_dummy_net()
        return self.dummy_net, metrics

    def train_suplinear_epoch(self, epoch_idx):
        """One epoch of supervised head training (post-'step1' stage)."""
        self.net.train()
        train_dataiter = iter(self.train_loader)
        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}: '.format(epoch_idx),
                               position=0,
                               leave=True,
                               disable=not comm.is_main_process()):
            self.net.eval()
            batch = next(train_dataiter)
            images = batch['data'].cuda()
            labels = batch['label'].cuda()
            batch_size = images.size(0)
            images = self.hflip(images)
            images = torch.cat(
                [torch.rot90(images, rot, (2, 3)) for rot in range(4)])  # 4B
            rot_labels = torch.cat(
                [torch.ones_like(labels) * k for k in range(4)], 0)  # B -> 4B
            joint_labels = torch.cat([
                labels + self.config.dataset.num_classes * i for i in range(4)
            ],
                                     dim=0)
            images = self.simclr_aug(images)  # simclr augmentation
            _, features = self.net(images, return_feature=True)
            penultimate = features.detach()
            outputs = self.linear(
                penultimate[0:batch_size]
            )  # only use 0 degree samples for linear eval
            outputs_rot = self.rotation_linear(penultimate)
            outputs_joint = self.joint_linear(penultimate)
            loss_ce = self.criterion(outputs, labels)
            loss_rot = self.criterion(outputs_rot, rot_labels)
            loss_joint = self.criterion(outputs_joint, joint_labels)
            # CE loss
            self.linear_optim.zero_grad()
            loss_ce.backward()
            self.linear_optim.step()
            # Rot loss
            self.rotation_linear_optim.zero_grad()
            loss_rot.backward()
            self.rotation_linear_optim.step()
            # Joint loss
            self.joint_linear_optim.zero_grad()
            loss_joint.backward()
            self.joint_linear_optim.step()
            # NOTE(review): the epoch-milestone MultiStepLR schedulers are
            # stepped once per batch here, matching the original code;
            # verify this is intended.
            self.linear_scheduler.step()
            self.rot_scheduler.step()
            self.joint_scheduler.step()
        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = loss_ce + loss_rot + loss_joint
        self._sync_dummy_net()
        return self.dummy_net, metrics
def get_similarity_matrix(outputs, chunk=2, multi_gpu=False):
    """Return the (B', B') Gram matrix of *outputs* (B' = B * chunk).

    With multi_gpu, feature chunks are first all-gathered across workers.
    """
    if multi_gpu:
        gathered = []
        for part in outputs.chunk(chunk):
            buffers = [
                torch.empty_like(part) for _ in range(dist.get_world_size())
            ]
            gathered.append(torch.cat(distops.all_gather(buffers, part)))
        outputs = torch.cat(gathered)
    return torch.mm(outputs, outputs.t())  # (B', d), (d, B') -> (B', B')
def Supervised_NT_xent(sim_matrix,
                       labels,
                       temperature=0.5,
                       chunk=2,
                       eps=1e-8,
                       multi_gpu=False):
    """Supervised NT-Xent loss over a (B', B') similarity matrix.

    B' = B * chunk; the first 2B rows are the positive samples. Labels are
    repeated once (two views per sample); same-label pairs are positives.
    """
    device = sim_matrix.device
    if multi_gpu:
        buffers = [
            torch.empty_like(labels) for _ in range(dist.get_world_size())
        ]
        labels = torch.cat(distops.all_gather(buffers, labels))
    labels = labels.repeat(2)
    # numerical stability: subtract the per-row max before exponentiation
    row_max, _ = torch.max(sim_matrix, dim=1, keepdim=True)
    sim_matrix = sim_matrix - row_max.detach()
    B = sim_matrix.size(0) // chunk  # B = B' / chunk
    eye = torch.eye(B * chunk).to(device)  # (B', B')
    # exponentiate and zero the diagonal (self-similarity)
    sim_matrix = torch.exp(sim_matrix / temperature) * (1 - eye)
    denom = torch.sum(sim_matrix, dim=1, keepdim=True)
    sim_matrix = -torch.log(sim_matrix / (denom + eps) + eps)  # loss matrix
    labels = labels.contiguous().view(-1, 1)
    # positive-pair mask, row-normalized to average over positives
    Mask = torch.eq(labels, labels.t()).float().to(device)
    Mask = Mask / (Mask.sum(dim=1, keepdim=True) + eps)
    return torch.sum(Mask * sim_matrix) / (2 * B)
def normalize(x, dim=1, eps=1e-8):
    """Divide *x* by its (eps-stabilized) L2 norm along *dim*."""
    norms = x.norm(dim=dim, keepdim=True)
    return x / (norms + eps)
def get_simclr_augmentation(config, image_size):
    """Assemble the SimCLR augmentation stack as an nn.Sequential."""
    # resize scaling range; pinned to a single value when resize_fix is set
    if config.resize_fix:
        resize_scale = (config.resize_factor, config.resize_factor)
    else:
        resize_scale = (config.resize_factor, 1.0)
    # Align augmentation
    color_jitter = ColorJitterLayer(brightness=0.4,
                                    contrast=0.4,
                                    saturation=0.4,
                                    hue=0.1,
                                    p=0.8)
    color_gray = RandomColorGrayLayer(p=0.2)
    resize_crop = RandomResizedCropLayer(scale=resize_scale, size=image_size)
    # ImageNet already applies RandomResizedCrop at the PIL stage, so the
    # in-network crop layer is omitted there
    if config.dataset.name == 'imagenet':
        return nn.Sequential(color_jitter, color_gray)
    return nn.Sequential(color_jitter, color_gray, resize_crop)
# ----------Warmup Scheduler----------
class GradualWarmupScheduler(_LRScheduler):
    """Gradually warm-up(increasing) learning rate in optimizer. Proposed in
    'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.
    Args:
        optimizer (Optimizer): Wrapped optimizer.
        multiplier: target learning rate = base lr * multiplier
            if multiplier > 1.0. if multiplier = 1.0,
            lr starts from 0 and ends up with the base_lr.
        total_epoch: target learning rate is reached at total_epoch, gradually
        after_scheduler: after target_epoch,
            use this scheduler (eg. ReduceLROnPlateau)
    """
    def __init__(self,
                 optimizer,
                 multiplier,
                 total_epoch,
                 after_scheduler=None):
        self.multiplier = multiplier
        if self.multiplier < 1.:
            raise ValueError(
                'multiplier should be greater than or equal to 1.')
        self.total_epoch = total_epoch
        self.after_scheduler = after_scheduler
        # True once after_scheduler has taken over (its base LRs rescaled)
        self.finished = False
        super(GradualWarmupScheduler, self).__init__(optimizer)
    def get_lr(self):
        # Past warm-up: delegate to after_scheduler (rescaling its base LRs
        # exactly once), or hold at base_lr * multiplier.
        if self.last_epoch > self.total_epoch:
            if self.after_scheduler:
                if not self.finished:
                    self.after_scheduler.base_lrs = [
                        base_lr * self.multiplier for base_lr in self.base_lrs
                    ]
                    self.finished = True
                return self.after_scheduler.get_lr()
            return [base_lr * self.multiplier for base_lr in self.base_lrs]
        # During warm-up: ramp from 0 when multiplier == 1, otherwise from
        # base_lr toward base_lr * multiplier.
        if self.multiplier == 1.0:
            return [
                base_lr * (float(self.last_epoch) / self.total_epoch)
                for base_lr in self.base_lrs
            ]
        else:
            return [
                base_lr *
                ((self.multiplier - 1.) * self.last_epoch / self.total_epoch +
                 1.) for base_lr in self.base_lrs
            ]
    def step_ReduceLROnPlateau(self, metrics, epoch=None):
        """Warm-up-aware step when after_scheduler is ReduceLROnPlateau."""
        if epoch is None:
            epoch = self.last_epoch + 1
        self.last_epoch = epoch if epoch != 0 else 1
        # ReduceLROnPlateau is called at the end of epoch,
        # whereas others are called at beginning
        if self.last_epoch <= self.total_epoch:
            warmup_lr = [
                base_lr *
                ((self.multiplier - 1.) * self.last_epoch / self.total_epoch +
                 1.) for base_lr in self.base_lrs
            ]
            for param_group, lr in zip(self.optimizer.param_groups, warmup_lr):
                param_group['lr'] = lr
        else:
            if epoch is None:
                self.after_scheduler.step(metrics, None)
            else:
                self.after_scheduler.step(metrics, epoch - self.total_epoch)
    def step(self, epoch=None, metrics=None):
        # Route plateau schedulers through the metric-aware path; otherwise
        # shift the epoch count so after_scheduler starts from zero.
        if type(self.after_scheduler) != ReduceLROnPlateau:
            if self.finished and self.after_scheduler:
                if epoch is None:
                    self.after_scheduler.step(None)
                else:
                    self.after_scheduler.step(epoch - self.total_epoch)
            else:
                return super(GradualWarmupScheduler, self).step(epoch)
        else:
            self.step_ReduceLROnPlateau(metrics, epoch)
# ----------transform layers----------
# `align_corners` was added to affine_grid/grid_sample in torch 1.4.
# FIX: the original lexicographic comparison (torch.__version__ >= '1.4.0')
# misclassifies versions such as '1.10.0' as older than 1.4; compare the
# parsed (major, minor) tuple instead.
try:
    _torch_version = tuple(
        int(part) for part in torch.__version__.split('+')[0].split('.')[:2])
except ValueError:
    _torch_version = (1, 4)  # assume a modern build when unparsable
if _torch_version >= (1, 4):
    kwargs = {'align_corners': False}
else:
    kwargs = {}
def rgb2hsv(rgb):
    """Convert a 4-d RGB tensor (N, 3, H, W) to its HSV counterpart.

    Hue is computed with atan2() from the definition in
    https://en.wikipedia.org/wiki/Hue instead of the usual lookup-table
    approach; the two agree at multiples of 30 degrees and differ by at
    most ~1.2 degrees elsewhere.
    """
    r, g, b = rgb[:, 0, :, :], rgb[:, 1, :, :], rgb[:, 2, :, :]
    c_max = rgb.max(1)[0]
    c_min = rgb.min(1)[0]
    delta = c_max - c_min
    # hue from the atan2 formulation, wrapped into [0, 1)
    hue = torch.atan2(math.sqrt(3) * (g - b), 2 * r - g - b)
    hue = (hue % (2 * math.pi)) / (2 * math.pi)
    saturation = delta / c_max
    value = c_max
    hsv = torch.stack([hue, saturation, value], dim=1)
    # zero out NaN/inf produced by division when c_max == 0
    hsv[~torch.isfinite(hsv)] = 0.
    return hsv
def hsv2rgb(hsv):
    """Convert a 4-d HSV tensor (N, 3, H, W) back to RGB.

    Uses the alternative formulation from
    https://en.wikipedia.org/wiki/HSL_and_HSV#HSV_to_RGB_alternative
    """
    h, s, v = hsv[:, [0]], hsv[:, [1]], hsv[:, [2]]
    c = v * s
    # evaluate the piecewise formula at offsets (5, 3, 1) -> (R, G, B)
    n = hsv.new_tensor([5, 3, 1]).view(3, 1, 1)
    k = (n + h * 6) % 6
    t = torch.min(k, 4 - k)
    t = torch.clamp(t, 0, 1)
    return v - c * t
class RandomResizedCropLayer(nn.Module):
    def __init__(self, size=None, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.)):
        """Inception Crop size (tuple): size of forwarding image (C, W, H)
        scale (tuple): range of size of the origin size cropped ratio (tuple):
        range of aspect ratio of the origin aspect ratio cropped.
        """
        super(RandomResizedCropLayer, self).__init__()
        _eye = torch.eye(2, 3)
        self.size = size
        self.register_buffer('_eye', _eye)
        self.scale = scale
        self.ratio = ratio
    def forward(self, inputs, whbias=None):
        # Crop via a batched affine transform: (w, h) scale the sampling grid
        # and (w_bias, h_bias) translate it; sampled when not provided.
        _device = inputs.device
        N = inputs.size(0)
        _theta = self._eye.repeat(N, 1, 1)
        if whbias is None:
            whbias = self._sample_latent(inputs)
        _theta[:, 0, 0] = whbias[:, 0]
        _theta[:, 1, 1] = whbias[:, 1]
        _theta[:, 0, 2] = whbias[:, 2]
        _theta[:, 1, 2] = whbias[:, 3]
        grid = F.affine_grid(_theta, inputs.size(), **kwargs).to(_device)
        output = F.grid_sample(inputs,
                               grid,
                               padding_mode='reflection',
                               **kwargs)
        if self.size is not None:
            # resize the crop to the configured output size
            output = F.adaptive_avg_pool2d(output, self.size)
        return output
    def _clamp(self, whbias):
        # Project sampled (w, h, w_bias, h_bias) back into the valid region
        # defined by the scale/ratio constraints.
        w = whbias[:, 0]
        h = whbias[:, 1]
        w_bias = whbias[:, 2]
        h_bias = whbias[:, 3]
        # Clamp with scale
        w = torch.clamp(w, *self.scale)
        h = torch.clamp(h, *self.scale)
        # Clamp with ratio
        w = self.ratio[0] * h + torch.relu(w - self.ratio[0] * h)
        w = self.ratio[1] * h - torch.relu(self.ratio[1] * h - w)
        # Clamp with bias range: w_bias \in (w - 1, 1 - w),
        # h_bias \in (h - 1, 1 - h)
        w_bias = w - 1 + torch.relu(w_bias - w + 1)
        w_bias = 1 - w - torch.relu(1 - w - w_bias)
        h_bias = h - 1 + torch.relu(h_bias - h + 1)
        h_bias = 1 - h - torch.relu(1 - h - h_bias)
        whbias = torch.stack([w, h, w_bias, h_bias], dim=0).t()
        return whbias
    def _sample_latent(self, inputs):
        # Rejection-sample crop sizes/aspect ratios (N * 10 candidates),
        # falling back to full-size crops when too few candidates are valid.
        _device = inputs.device
        N, _, width, height = inputs.shape
        # N * 10 trial
        area = width * height
        target_area = np.random.uniform(*self.scale, N * 10) * area
        log_ratio = (math.log(self.ratio[0]), math.log(self.ratio[1]))
        aspect_ratio = np.exp(np.random.uniform(*log_ratio, N * 10))
        # If doesn't satisfy ratio condition, then do central crop
        w = np.round(np.sqrt(target_area * aspect_ratio))
        h = np.round(np.sqrt(target_area / aspect_ratio))
        cond = (0 < w) * (w <= width) * (0 < h) * (h <= height)
        w = w[cond]
        h = h[cond]
        cond_len = w.shape[0]
        if cond_len >= N:
            w = w[:N]
            h = h[:N]
        else:
            w = np.concatenate([w, np.ones(N - cond_len) * width])
            h = np.concatenate([h, np.ones(N - cond_len) * height])
        # normalize sizes/offsets into affine-grid coordinates
        w_bias = np.random.randint(w - width, width - w + 1) / width
        h_bias = np.random.randint(h - height, height - h + 1) / height
        w = w / width
        h = h / height
        whbias = np.column_stack([w, h, w_bias, h_bias])
        whbias = torch.tensor(whbias, device=_device)
        return whbias
class HorizontalFlipRandomCrop(nn.Module):
    """Random horizontal flip + random translation (and optional rotation),
    implemented as one batched affine transform."""
    def __init__(self, max_range):
        super(HorizontalFlipRandomCrop, self).__init__()
        # maximum |translation| in normalized grid coordinates
        self.max_range = max_range
        _eye = torch.eye(2, 3)
        self.register_buffer('_eye', _eye)
    def forward(self, input, sign=None, bias=None, rotation=None):
        # sign: per-sample +/-1 flip factor; bias: per-sample (x, y) shift;
        # both sampled when not provided.
        _device = input.device
        N = input.size(0)
        _theta = self._eye.repeat(N, 1, 1)
        if sign is None:
            sign = torch.bernoulli(torch.ones(N, device=_device) * 0.5) * 2 - 1
        if bias is None:
            bias = torch.empty(
                (N, 2), device=_device).uniform_(-self.max_range,
                                                 self.max_range)
        _theta[:, 0, 0] = sign
        _theta[:, :, 2] = bias
        if rotation is not None:
            _theta[:, 0:2, 0:2] = rotation
        grid = F.affine_grid(_theta, input.size(), **kwargs).to(_device)
        output = F.grid_sample(input,
                               grid,
                               padding_mode='reflection',
                               **kwargs)
        return output
    def _sample_latent(self, N, device=None):
        # Sample flip signs and shifts without applying them.
        sign = torch.bernoulli(torch.ones(N, device=device) * 0.5) * 2 - 1
        bias = torch.empty(
            (N, 2), device=device).uniform_(-self.max_range, self.max_range)
        return sign, bias
class Rotation(nn.Module):
    """Rotate images by multiples of 90 degrees.

    With an explicit ``aug_index`` the rotation is deterministic; otherwise
    a random multiple is drawn and each sample keeps its original
    orientation with probability ``prob``.
    """
    def __init__(self, max_range=4):
        super(Rotation, self).__init__()
        self.max_range = max_range
        self.prob = 0.5

    def forward(self, input, aug_index=None):
        _, _, H, W = input.size()
        if aug_index is not None:
            return torch.rot90(input, aug_index % self.max_range, (2, 3))
        k = np.random.randint(4)
        rotated = torch.rot90(input, k, (2, 3))
        keep = torch.bernoulli(input.new_full((input.size(0), ),
                                              self.prob)).view(-1, 1, 1, 1)
        return keep * input + (1 - keep) * rotated
class CutPerm(nn.Module):
    """Permute image quadrants (swap halves) as a shift transformation.

    With an explicit ``aug_index`` the permutation is deterministic;
    otherwise a random index is drawn and each sample keeps its original
    layout with probability ``prob``.
    """
    def __init__(self, max_range=4):
        super(CutPerm, self).__init__()
        self.max_range = max_range
        self.prob = 0.5

    def forward(self, input, aug_index=None):
        _, _, H, W = input.size()
        if aug_index is not None:
            return self._cutperm(input, aug_index % self.max_range)
        idx = np.random.randint(4)
        permuted = self._cutperm(input, idx)
        keep = torch.bernoulli(input.new_full((input.size(0), ),
                                              self.prob)).view(-1, 1, 1, 1)
        return keep * input + (1 - keep) * permuted

    def _cutperm(self, inputs, aug_index):
        # bit 1 of aug_index swaps top/bottom halves, bit 0 swaps left/right
        _, _, H, W = inputs.size()
        h_mid = int(H / 2)
        w_mid = int(W / 2)
        if aug_index // 2 == 1:
            inputs = torch.cat(
                (inputs[:, :, h_mid:, :], inputs[:, :, 0:h_mid, :]), dim=2)
        if aug_index % 2 == 1:
            inputs = torch.cat(
                (inputs[:, :, :, w_mid:], inputs[:, :, :, 0:w_mid]), dim=3)
        return inputs
class HorizontalFlipLayer(nn.Module):
    def __init__(self):
        """
        img_size : (int, int, int)
            Height and width must be powers of 2. E.g. (32, 32, 1) or
            (64, 128, 3). Last number indicates number of channels, e.g. 1 for
            grayscale or 3 for RGB
        """
        super(HorizontalFlipLayer, self).__init__()
        _eye = torch.eye(2, 3)
        self.register_buffer('_eye', _eye)
    def forward(self, inputs):
        # Flip each sample horizontally with probability 0.5 by negating
        # the x-scale of a per-sample affine transform.
        _device = inputs.device
        N = inputs.size(0)
        _theta = self._eye.repeat(N, 1, 1)
        r_sign = torch.bernoulli(torch.ones(N, device=_device) * 0.5) * 2 - 1
        _theta[:, 0, 0] = r_sign
        grid = F.affine_grid(_theta, inputs.size(), **kwargs).to(_device)
        inputs = F.grid_sample(inputs,
                               grid,
                               padding_mode='reflection',
                               **kwargs)
        return inputs
class RandomColorGrayLayer(nn.Module):
    """Random grayscale conversion using ITU-R 601 luma weights.

    With ``aug_index=0`` the input is returned untouched; with any other
    explicit index the grayscale version is returned; with ``aug_index
    is None`` each sample is converted with probability ``p``.
    """
    def __init__(self, p):
        super(RandomColorGrayLayer, self).__init__()
        self.prob = p
        luma = torch.tensor([[0.299, 0.587, 0.114]])
        self.register_buffer('_weight', luma.view(1, 3, 1, 1))

    def forward(self, inputs, aug_index=None):
        if aug_index == 0:
            return inputs  # index 0 means "no augmentation"
        luma = F.conv2d(inputs, self._weight)
        gray = torch.cat([luma, luma, luma], dim=1)
        if aug_index is None:
            mask = torch.bernoulli(inputs.new_full(
                (inputs.size(0), ), self.prob)).view(-1, 1, 1, 1)
            gray = inputs * (1 - mask) + gray * mask
        return gray
class ColorJitterLayer(nn.Module):
    """Batched color jitter: random brightness/contrast/saturation/hue,
    applied per sample with probability ``p``."""
    def __init__(self, p, brightness, contrast, saturation, hue):
        super(ColorJitterLayer, self).__init__()
        self.prob = p
        self.brightness = self._check_input(brightness, 'brightness')
        self.contrast = self._check_input(contrast, 'contrast')
        self.saturation = self._check_input(saturation, 'saturation')
        self.hue = self._check_input(hue,
                                     'hue',
                                     center=0,
                                     bound=(-0.5, 0.5),
                                     clip_first_on_zero=False)
    def _check_input(self,
                     value,
                     name,
                     center=1,
                     bound=(0, float('inf')),
                     clip_first_on_zero=True):
        # Normalize a jitter spec (scalar or (lo, hi) pair) into a range,
        # or None when the spec is a no-op.
        if isinstance(value, numbers.Number):
            if value < 0:
                raise ValueError(
                    'If {} is a single number, it must be non negative.'.
                    format(name))
            value = [center - value, center + value]
            if clip_first_on_zero:
                value[0] = max(value[0], 0)
        elif isinstance(value, (tuple, list)) and len(value) == 2:
            if not bound[0] <= value[0] <= value[1] <= bound[1]:
                raise ValueError('{} values should be between {}'.format(
                    name, bound))
        else:
            raise TypeError(
                '{} should be a single number or a list/tuple with length 2.'.
                format(name))
        # if value is 0 or (1., 1.) for brightness/contrast/saturation
        # or (0., 0.) for hue, do nothing
        if value[0] == value[1] == center:
            value = None
        return value
    def adjust_contrast(self, x):
        # Scale deviation from the per-image mean by a random factor.
        if self.contrast:
            factor = x.new_empty(x.size(0), 1, 1, 1).uniform_(*self.contrast)
            means = torch.mean(x, dim=[2, 3], keepdim=True)
            x = (x - means) * factor + means
        return torch.clamp(x, 0, 1)
    def adjust_hsv(self, x):
        # Random hue shift plus saturation/brightness scaling in HSV space.
        f_h = x.new_zeros(x.size(0), 1, 1)
        f_s = x.new_ones(x.size(0), 1, 1)
        f_v = x.new_ones(x.size(0), 1, 1)
        if self.hue:
            f_h.uniform_(*self.hue)
        if self.saturation:
            f_s = f_s.uniform_(*self.saturation)
        if self.brightness:
            f_v = f_v.uniform_(*self.brightness)
        return RandomHSVFunction.apply(x, f_h, f_s, f_v)
    def transform(self, inputs):
        # Shuffle transform
        if np.random.rand() > 0.5:
            transforms = [self.adjust_contrast, self.adjust_hsv]
        else:
            transforms = [self.adjust_hsv, self.adjust_contrast]
        for t in transforms:
            inputs = t(inputs)
        return inputs
    def forward(self, inputs):
        # Per-sample Bernoulli mask selects jittered vs. original images.
        _prob = inputs.new_full((inputs.size(0), ), self.prob)
        _mask = torch.bernoulli(_prob).view(-1, 1, 1, 1)
        return inputs * (1 - _mask) + self.transform(inputs) * _mask
class RandomHSVFunction(Function):
    """Autograd function applying HSV jitter with a straight-through
    (identity) gradient with respect to the input image."""
    @staticmethod
    def forward(ctx, x, f_h, f_s, f_v):
        # ctx is a context object that can be used to stash information
        # for backward computation
        x = rgb2hsv(x)
        # shift hue (wrapped into [0, 1)), scale saturation and value
        h = x[:, 0, :, :]
        h += (f_h * 255. / 360.)
        h = (h % 1)
        x[:, 0, :, :] = h
        x[:, 1, :, :] = x[:, 1, :, :] * f_s
        x[:, 2, :, :] = x[:, 2, :, :] * f_v
        x = torch.clamp(x, 0, 1)
        x = hsv2rgb(x)
        return x
    @staticmethod
    def backward(ctx, grad_output):
        # We return as many input gradients as there were arguments.
        # Gradients of non-Tensor arguments to forward must be None.
        grad_input = None
        if ctx.needs_input_grad[0]:
            grad_input = grad_output.clone()
        return grad_input, None, None, None
class NormalizeLayer(nn.Module):
    """Map inputs from [0, 1] to [-1, 1] (mean 0.5, scale 0.5).

    In order to certify radii in original coordinates rather than
    standardized coordinates, Gaussian noise is added *before*
    standardizing, which is why standardization is the first layer of the
    classifier rather than part of preprocessing as is typical.
    """
    def __init__(self):
        super(NormalizeLayer, self).__init__()

    def forward(self, inputs):
        return inputs.sub(0.5).div(0.5)
| 32,183 | 34.250821 | 79 | py |
null | OpenOOD-main/openood/trainers/cutmix_trainer.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
import openood.utils.comm as comm
from openood.utils import Config
from .lr_scheduler import cosine_annealing
class CutMixTrainer:
    """Trainer applying CutMix augmentation.

    With probability ``cutmix_prob`` a batch is augmented by pasting a
    random patch from a shuffled copy of the batch, and the loss becomes a
    patch-area-weighted mix of the cross-entropies of both labels.
    """
    def __init__(self, net: nn.Module, train_loader: DataLoader,
                 config: Config) -> None:
        self.net = net
        self.train_loader = train_loader
        self.config = config
        self.args = config.trainer.trainer_args
        self.optimizer = torch.optim.SGD(
            net.parameters(),
            config.optimizer.lr,
            momentum=config.optimizer.momentum,
            weight_decay=config.optimizer.weight_decay,
            nesterov=True,
        )
        # cosine annealing from lr down to 1e-6 over all training steps
        self.scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer,
            lr_lambda=lambda step: cosine_annealing(
                step,
                config.optimizer.num_epochs * len(train_loader),
                1,
                1e-6 / config.optimizer.lr,
            ),
        )
    def train_epoch(self, epoch_idx):
        """Run one training epoch and return (net, metrics)."""
        self.net.train()
        loss_avg = 0.0
        train_dataiter = iter(self.train_loader)
        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}: '.format(epoch_idx),
                               position=0,
                               leave=True,
                               disable=not comm.is_main_process()):
            batch = next(train_dataiter)
            data = batch['data'].cuda()
            target = batch['label'].cuda()
            # perform cutmix augmentation in a batch
            r = np.random.rand(1)
            if self.args.beta > 0 and r < self.args.cutmix_prob:
                # generate mixed sample
                lam = np.random.beta(self.args.beta, self.args.beta)
                rand_index = torch.randperm(data.size()[0]).cuda()
                target_a = target
                target_b = target[rand_index]
                bbx1, bby1, bbx2, bby2 = rand_bbox(data.size(), lam)
                data[:, :, bbx1:bbx2, bby1:bby2] = data[rand_index, :,
                                                        bbx1:bbx2, bby1:bby2]
                # adjust lambda to exactly match pixel ratio
                lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) /
                           (data.size()[-1] * data.size()[-2]))
                # forward
                logits_classifier = self.net(data)
                # loss mixes both labels, weighted by surviving pixel area
                loss = F.cross_entropy(
                    logits_classifier, target_a) * lam + F.cross_entropy(
                        logits_classifier, target_b) * (1. - lam)
            else:
                # forward
                logits_classifier = self.net(data)
                loss = F.cross_entropy(logits_classifier, target)
            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            self.scheduler.step()
            # exponential moving average, show smooth values
            with torch.no_grad():
                loss_avg = loss_avg * 0.8 + float(loss) * 0.2
        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = loss_avg
        return self.net, metrics
def rand_bbox(size, lam):
    """Sample a random box covering roughly (1 - lam) of the image area.

    Args:
        size: input tensor shape (N, C, W, H); only spatial dims are used.
        lam: mixing coefficient; the cut area ratio is 1 - lam.

    Returns:
        (bbx1, bby1, bbx2, bby2): box corners clipped to the image bounds.
    """
    W = size[2]
    H = size[3]
    cut_rat = np.sqrt(1. - lam)
    # BUGFIX: np.int was removed in NumPy 1.24 (deprecated since 1.20);
    # the builtin int is the drop-in replacement.
    cut_w = int(W * cut_rat)
    cut_h = int(H * cut_rat)

    # uniformly random center
    cx = np.random.randint(W)
    cy = np.random.randint(H)

    bbx1 = np.clip(cx - cut_w // 2, 0, W)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbx2 = np.clip(cx + cut_w // 2, 0, W)
    bby2 = np.clip(cy + cut_h // 2, 0, H)

    return bbx1, bby1, bbx2, bby2
| 3,800 | 32.052174 | 77 | py |
null | OpenOOD-main/openood/trainers/cutpaste_trainer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
from openood.utils import Config
from .lr_scheduler import cosine_annealing
class CutPasteTrainer:
    """Trainer for CutPaste-style self-supervised anomaly detection.

    Each batch carries two augmentation views in ``batch['data']``; they are
    concatenated and the network is trained to classify which view (0 or 1)
    each image came from.
    """
    def __init__(self, net: nn.Module, train_loader: DataLoader,
                 config: Config) -> None:

        self.net = net
        self.train_loader = train_loader
        self.config = config

        self.optimizer = torch.optim.SGD(
            net.parameters(),
            config.optimizer.lr,
            momentum=config.optimizer.momentum,
            weight_decay=config.optimizer.weight_decay,
            nesterov=True,
        )

        # Per-step cosine decay down to ~1e-6 over the full run.
        self.scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer,
            lr_lambda=lambda step: cosine_annealing(
                step,
                config.optimizer.num_epochs * len(train_loader),
                1,
                1e-6 / config.optimizer.lr,
            ),
        )

    def train_epoch(self, epoch_idx):
        """Run one training epoch; returns (net, {'epoch_idx', 'loss'})."""
        self.net.train()

        loss_avg = 0.0
        train_dataiter = iter(self.train_loader)
        embeds = []

        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}: '.format(epoch_idx),
                               position=0,
                               leave=True):
            batch = next(train_dataiter)
            # batch['data'] is a list of views; stack them along the batch
            # dimension so both views are processed together.
            data = torch.cat(batch['data'], 0)
            data = data.cuda()
            # label 0 for every image of the first view, 1 for the second
            y = torch.arange(2)
            y = y.repeat_interleave(len(batch['data'][0]))
            y = y.cuda()

            # forward
            embed, logits_classifier = self.net(data)
            loss = F.cross_entropy(logits_classifier, y)
            embeds.append(embed.cuda())

            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            self.scheduler.step()

            # exponential moving average, show smooth values
            with torch.no_grad():
                loss_avg = loss_avg * 0.8 + float(loss) * 0.2

        # NOTE(review): embeds are concatenated and L2-normalized here but
        # never returned or stored — confirm whether this is dead code.
        embeds = torch.cat(embeds)
        embeds = torch.nn.functional.normalize(embeds, p=2, dim=1)

        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = loss_avg

        return self.net, metrics
| 2,379 | 28.382716 | 71 | py |
null | OpenOOD-main/openood/trainers/draem_trainer.py | import torch
from torch import optim
from torch.utils.data import DataLoader
from tqdm import tqdm
from openood.losses.draem_loss import get_draem_losses
from openood.utils import Config
def weights_init(m):
    """DCGAN-style init: conv weights ~ N(0, 0.02); BN weights ~ N(1, 0.02)
    with zero bias. Other module types are left untouched."""
    layer_name = m.__class__.__name__
    if 'Conv' in layer_name:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in layer_name:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
class DRAEMTrainer:
    """Trainer for DRAEM (Zavrtanik et al., 2021).

    Jointly optimizes a reconstructive ('generative') subnet and a
    discriminative segmentation subnet with L2 + SSIM + focal losses on
    simulated anomalies.
    """
    def __init__(self, net, train_loader: DataLoader, config: Config) -> None:
        self.config = config

        self.net = net
        # DCGAN-style weight init for both subnetworks.
        self.net['generative'].apply(weights_init)
        self.net['discriminative'].apply(weights_init)

        self.train_loader = train_loader

        self.optimizer = torch.optim.Adam([{
            'params':
            self.net['generative'].parameters(),
            'lr':
            self.config.optimizer.lr
        }, {
            'params':
            self.net['discriminative'].parameters(),
            'lr':
            self.config.optimizer.lr
        }])

        # config.optimizer.steps holds fractions of the total epoch count;
        # convert them to absolute milestones for the staircase decay.
        steps = []
        for step in self.config.optimizer.steps:
            steps.append(self.config.optimizer.num_epochs * step)
        self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer,
                                                        steps,
                                                        gamma=0.2,
                                                        last_epoch=-1)

        self.losses = get_draem_losses()

    def train_epoch(self, epoch_idx):
        """Run one epoch; returns (net, metrics) with float-valued losses."""
        self.net['generative'].train()
        self.net['discriminative'].train()

        loss_avg = 0.0
        train_dataiter = iter(self.train_loader)

        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}: '.format(epoch_idx),
                               position=0,
                               leave=True):
            sample_batched = next(train_dataiter)
            gray_batch = sample_batched['data']['image'].cuda()
            aug_gray_batch = sample_batched['data']['augmented_image'].cuda()
            anomaly_mask = sample_batched['data']['anomaly_mask'].cuda()

            # forward: reconstruct the augmented image ...
            gray_rec = self.net['generative'](aug_gray_batch)
            # ... then concat reconstruction and augmented input as the
            # discriminative net's input
            joined_in = torch.cat((gray_rec, aug_gray_batch), dim=1)

            out_mask = self.net['discriminative'](joined_in)
            out_mask_sm = torch.softmax(out_mask, dim=1)

            l2_loss = self.losses['l2'](gray_rec, gray_batch)
            ssim_loss = self.losses['ssim'](gray_rec, gray_batch)
            segment_loss = self.losses['focal'](out_mask_sm, anomaly_mask)
            loss = l2_loss + ssim_loss + segment_loss

            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            # exponential moving average, show smooth values
            with torch.no_grad():
                loss_avg = loss_avg * 0.8 + float(loss) * 0.2

        self.scheduler.step()

        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss_smoothed'] = loss_avg
        # BUGFIX: previously returned the raw CUDA tensor of the last batch,
        # which kept graph/device memory alive and broke serialization of the
        # metrics dict; return a plain float like loss_smoothed.
        metrics['loss'] = float(loss)

        return self.net, metrics
| 3,302 | 33.051546 | 78 | py |
null | OpenOOD-main/openood/trainers/dropout_trainer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
from openood.utils import Config
from .lr_scheduler import cosine_annealing
class DropoutTrainer:
    """Cross-entropy trainer whose forward pass applies explicit dropout.

    Calls ``net.forward_with_dropout(data, p)`` with
    ``p = config.trainer.dropout_p``.
    """
    def __init__(self, net: nn.Module, train_loader: DataLoader,
                 config: Config) -> None:

        self.net = net
        self.train_loader = train_loader
        self.config = config
        self.p = config.trainer.dropout_p

        self.optimizer = torch.optim.SGD(
            net.parameters(),
            config.optimizer.lr,
            momentum=config.optimizer.momentum,
            weight_decay=config.optimizer.weight_decay,
            nesterov=True,
        )

        # Per-step cosine decay down to ~1e-6 over the full run.
        self.scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer,
            lr_lambda=lambda step: cosine_annealing(
                step,
                config.optimizer.num_epochs * len(train_loader),
                1,
                1e-6 / config.optimizer.lr,
            ),
        )

    def train_epoch(self, epoch_idx):
        """Run one training epoch; returns (net, {'epoch_idx', 'loss'})."""
        self.net.train()

        loss_avg = 0.0
        train_dataiter = iter(self.train_loader)

        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}: '.format(epoch_idx),
                               position=0,
                               leave=True):
            batch = next(train_dataiter)
            data = batch['data'].cuda()
            target = batch['label'].cuda()

            # forward with dropout probability self.p
            logits_classifier = self.net.forward_with_dropout(data, self.p)
            loss = F.cross_entropy(logits_classifier, target)

            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            self.scheduler.step()

            # exponential moving average, show smooth values
            with torch.no_grad():
                loss_avg = loss_avg * 0.8 + float(loss) * 0.2

        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = loss_avg

        return self.net, metrics
| 2,172 | 28.767123 | 75 | py |
null | OpenOOD-main/openood/trainers/dsvdd_trainer.py | import numpy as np
import torch
import torch.optim as optim
from tqdm import tqdm
from openood.utils import Config
class AETrainer:
    """Autoencoder trainer (squared reconstruction error), used as the
    pre-training stage for Deep SVDD."""
    def __init__(self, net, train_loader, config: Config):
        self.config = config
        self.net = net
        self.train_loader = train_loader
        # NOTE(review): the optimizer is only created when the name is
        # 'adam'; any other value leaves self.optimizer undefined and the
        # scheduler construction / training will raise — confirm intended.
        if config.optimizer.name == 'adam':
            self.optimizer = optim.Adam(
                net.parameters(),
                lr=config.lr,
                weight_decay=config.weight_decay,
                # NOTE(review): inside this branch name == 'adam', so this
                # comparison is always False — looks like a port artifact.
                amsgrad=config.optimizer.name == 'amsgrad')
        self.scheduler = optim.lr_scheduler.MultiStepLR(
            self.optimizer, milestones=config.lr_milestones, gamma=0.1)

    def train_epoch(self, epoch_idx):
        """One epoch of AE training.

        Returns (net, metrics); metrics['loss'] is the SUM of per-batch
        losses over the epoch, not the mean.
        """
        self.net.train()
        epoch_loss = 0
        train_dataiter = iter(self.train_loader)

        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d} '.format(epoch_idx),
                               position=0,
                               leave=True):
            batch = next(train_dataiter)
            inputs = batch['data'].cuda()
            self.optimizer.zero_grad()
            outputs = self.net(inputs)
            # per-sample squared error summed over all non-batch dims
            scores = torch.sum((outputs - inputs)**2,
                               dim=tuple(range(1, outputs.dim())))
            loss = torch.mean(scores)
            loss.backward()
            self.optimizer.step()
            # NOTE(review): stepped once per batch, but MultiStepLR
            # milestones are conventionally epoch counts — confirm.
            self.scheduler.step()

            epoch_loss += loss.item()

        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = epoch_loss

        return self.net, metrics
class DSVDDTrainer:
    """Deep SVDD trainer: minimizes mean squared distance to center ``c``.

    For the 'dcae' network it instead trains autoencoder reconstruction
    (the original paper's pre-training stage).
    """
    def __init__(self, net, train_loader, config: Config) -> None:
        self.config = config
        self.net = net
        self.train_loader = train_loader
        # NOTE(review): optimizer only created for 'adam' (see AETrainer);
        # other names leave self.optimizer undefined.
        if config.optimizer.name == 'adam':
            self.optimizer = optim.Adam(
                net.parameters(),
                lr=config.lr,
                weight_decay=config.weight_decay,
                # always False inside this branch — likely a port artifact
                amsgrad=config.optimizer.name == 'amsgrad')
        self.scheduler = optim.lr_scheduler.MultiStepLR(
            self.optimizer, milestones=config.lr_milestones, gamma=0.1)
        # Initialize the hypersphere center from a forward pass unless one
        # was given in the config (not needed for dcae pre-training).
        if self.config.c == 'None' and self.config.network.name != 'dcae':
            self.config.c = init_center_c(train_loader, net)
        self.c = self.config.c

    def train_epoch(self, epoch_idx):
        """One epoch; metrics['loss'] is the SUM of per-batch losses."""
        self.net.train()
        epoch_loss = 0
        train_dataiter = iter(self.train_loader)

        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}'.format(epoch_idx),
                               position=0,
                               leave=True):
            batch = next(train_dataiter)
            inputs = batch['data'].cuda()
            self.optimizer.zero_grad()
            outputs = self.net(inputs)
            if self.config.network.name != 'dcae':
                # squared distance to the hypersphere center
                scores = torch.sum((outputs - self.c)**2, dim=1)
            # this is for pre-training the dcae network from the original paper
            elif self.config.network.name == 'dcae':
                scores = torch.sum((outputs - inputs)**2,
                                   dim=tuple(range(1, outputs.dim())))
            else:
                raise NotImplementedError
            loss = torch.mean(scores)
            loss.backward()
            self.optimizer.step()
            # NOTE(review): stepped per batch, but MultiStepLR milestones
            # are conventionally epoch counts — confirm.
            self.scheduler.step()

            epoch_loss += loss.item()

        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = epoch_loss

        return self.net, metrics
def init_center_c(train_loader, net, eps=0.1):
    """Initialize hypersphere center c as the mean from an initial forward pass
    on the data."""
    n_samples = 0

    data_iter = iter(train_loader)
    net.eval()
    c = None
    with torch.no_grad():
        for _ in tqdm(range(1,
                            len(data_iter) + 1),
                      desc='Initialize center',
                      position=0,
                      leave=True):
            inputs = next(data_iter)['data'].cuda()
            outputs = net(inputs)
            if c is None:
                # lazily sized from the first batch's output width
                c = torch.zeros(outputs.shape[1]).cuda()
            n_samples += outputs.shape[0]
            c += torch.sum(outputs, dim=0)

    c /= n_samples

    # If c_i is too close to 0, set to +-eps.
    # Reason: a zero unit can be trivially matched with zero weights.
    c[(abs(c) < eps) & (c < 0)] = -eps
    c[(abs(c) < eps) & (c > 0)] = eps

    return c
def get_radius(dist: torch.Tensor, nu: float):
    """Optimally solve for radius R via the (1-nu)-quantile of distances."""
    sq_dists = dist.clone().data.cpu().numpy()
    return np.quantile(np.sqrt(sq_dists), 1 - nu)
| 4,963 | 35.233577 | 79 | py |
null | OpenOOD-main/openood/trainers/godin_trainer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
import openood.utils.comm as comm
from openood.utils import Config
from .lr_scheduler import cosine_annealing
class GodinTrainer:
    """Trainer for G-ODIN (Hsu et al., CVPR 2020).

    The decomposed-confidence head ``h`` is optimized by a separate SGD
    optimizer without weight decay; all other parameters use the standard
    weight-decayed optimizer.  Both use the same cosine schedule.
    """
    def __init__(self, net: nn.Module, train_loader: DataLoader,
                 config: Config) -> None:
        # Split parameters: the h head vs. everything else.
        parameters = []
        h_parameters = []
        for name, parameter in net.named_parameters():
            if name in [
                    'h.h.weight', 'h.h.bias', 'module.h.h.weight',
                    'module.h.h.bias'
            ]:
                h_parameters.append(parameter)
            else:
                parameters.append(parameter)

        self.net = net
        self.train_loader = train_loader
        self.config = config

        self.optimizer = torch.optim.SGD(
            parameters,
            config.optimizer.lr,
            momentum=config.optimizer.momentum,
            weight_decay=config.optimizer.weight_decay,
            nesterov=True,
        )

        self.scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer,
            lr_lambda=lambda step: cosine_annealing(
                step,
                config.optimizer.num_epochs * len(train_loader),
                1,
                1e-6 / config.optimizer.lr,
            ),
        )

        # no weight decay for the h head
        self.h_optimizer = torch.optim.SGD(
            h_parameters,
            config.optimizer.lr,
            momentum=config.optimizer.momentum,
            nesterov=True,
        )

        # same schedule as the main optimizer.
        # BUGFIX: this scheduler previously wrapped self.optimizer, so the
        # h head's learning rate was never annealed and the main
        # optimizer's schedule advanced twice per step.
        self.h_scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.h_optimizer,
            lr_lambda=lambda step: cosine_annealing(
                step,
                config.optimizer.num_epochs * len(train_loader),
                1,
                1e-6 / config.optimizer.lr,
            ),
        )

    def train_epoch(self, epoch_idx):
        """One epoch of CE training, stepping both optimizers/schedulers."""
        self.net.train()

        loss_avg = 0.0
        train_dataiter = iter(self.train_loader)

        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}: '.format(epoch_idx),
                               position=0,
                               leave=True,
                               disable=not comm.is_main_process()):
            batch = next(train_dataiter)
            data = batch['data'].cuda()
            target = batch['label'].cuda()

            # forward
            logits_classifier = self.net(data)
            loss = F.cross_entropy(logits_classifier, target)

            # backward
            self.optimizer.zero_grad()
            self.h_optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            self.h_optimizer.step()
            self.scheduler.step()
            self.h_scheduler.step()

            # exponential moving average, show smooth values
            with torch.no_grad():
                loss_avg = loss_avg * 0.8 + float(loss) * 0.2

        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = loss_avg

        return self.net, metrics
| 3,228 | 29.17757 | 71 | py |
null | OpenOOD-main/openood/trainers/kdad_trainer.py | import torch
from torch.autograd import Variable
from tqdm import tqdm
from openood.losses.kdad_losses import DirectionOnlyLoss, MseDirectionLoss
from openood.utils import Config
class KdadTrainer:
    """Knowledge-distillation anomaly-detection trainer.

    ``net['vgg']`` acts as the teacher (its parameters are not passed to
    the optimizer); only the cloner ``net['model']`` is trained to match
    the teacher's outputs.
    """
    def __init__(self, net, train_loader, config: Config):
        self.vgg = net['vgg']
        self.model = net['model']
        self.train_loader = train_loader
        self.config = config
        # choose loss type
        if self.config['direction_loss_only']:
            self.criterion = DirectionOnlyLoss()
        else:
            self.criterion = MseDirectionLoss(self.config['lamda'])
        self.optimizer = torch.optim.Adam(self.model.parameters(),
                                          lr=float(
                                              self.config['learning_rate']))

    def train_epoch(self, epoch_idx):
        """One epoch; returns ({'vgg', 'model'}, metrics) with summed loss."""
        self.model.train()
        epoch_loss = 0
        train_dataiter = iter(self.train_loader)

        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}'.format(epoch_idx),
                               position=0,
                               leave=True):
            batch = next(train_dataiter)
            X = batch['data']
            # grayscale -> 3 channels for the VGG teacher
            if X.shape[1] == 1:
                X = X.repeat(1, 3, 1, 1)
            X = Variable(X).cuda()

            # compute respective output
            output_pred = self.model.forward(X)
            output_real = self.vgg(X)

            # compute loss
            total_loss = self.criterion(output_pred, output_real)

            # Add loss to the list
            epoch_loss += total_loss.item()
            # Clear the previous gradients
            self.optimizer.zero_grad()
            # Compute gradients
            total_loss.backward()
            # Adjust weights
            self.optimizer.step()

        net = {}
        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['epoch_loss'] = epoch_loss
        net['vgg'] = self.vgg
        net['model'] = self.model
        return net, metrics
| 2,103 | 30.878788 | 76 | py |
null | OpenOOD-main/openood/trainers/logitnorm_trainer.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
import openood.utils.comm as comm
from openood.utils import Config
from .lr_scheduler import cosine_annealing
class LogitNormTrainer:
    """Trainer for Logit Normalization training.

    Same pipeline as the standard CE trainer, but the loss normalizes
    logits to unit L2 norm and divides by temperature tau
    (see LogitNormLoss below).
    """
    def __init__(self, net: nn.Module, train_loader: DataLoader,
                 config: Config) -> None:

        self.net = net
        self.train_loader = train_loader
        self.config = config

        self.optimizer = torch.optim.SGD(
            net.parameters(),
            config.optimizer.lr,
            momentum=config.optimizer.momentum,
            weight_decay=config.optimizer.weight_decay,
            nesterov=True,
        )

        # Per-step cosine decay down to ~1e-6 over the full run.
        self.scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer,
            lr_lambda=lambda step: cosine_annealing(
                step,
                config.optimizer.num_epochs * len(train_loader),
                1,
                1e-6 / config.optimizer.lr,
            ),
        )

        self.loss_fn = LogitNormLoss(tau=config.trainer.trainer_args.tau)

    def train_epoch(self, epoch_idx):
        """Run one training epoch; returns (net, {'epoch_idx', 'loss'})."""
        self.net.train()

        loss_avg = 0.0
        train_dataiter = iter(self.train_loader)

        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}: '.format(epoch_idx),
                               position=0,
                               leave=True,
                               disable=not comm.is_main_process()):
            batch = next(train_dataiter)
            data = batch['data'].cuda()
            target = batch['label'].cuda()

            # forward
            logits_classifier = self.net(data)
            loss = self.loss_fn(logits_classifier, target)

            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            self.scheduler.step()

            # exponential moving average, show smooth values
            with torch.no_grad():
                loss_avg = loss_avg * 0.8 + float(loss) * 0.2

        # comm.synchronize()
        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = self.save_metrics(loss_avg)

        return self.net, metrics

    def save_metrics(self, loss_avg):
        # average the smoothed loss across distributed workers
        all_loss = comm.gather(loss_avg)
        total_losses_reduced = np.mean([x for x in all_loss])

        return total_losses_reduced
class LogitNormLoss(nn.Module):
    """Cross-entropy on L2-normalized logits scaled by temperature tau."""
    def __init__(self, tau=0.04):
        super(LogitNormLoss, self).__init__()
        self.tau = tau

    def forward(self, x, target):
        # Normalize each logit vector to unit L2 norm (eps avoids /0),
        # then sharpen by 1/tau before the usual cross-entropy.
        unit_logits = x / (x.norm(p=2, dim=-1, keepdim=True) + 1e-7)
        return F.cross_entropy(unit_logits / self.tau, target)
| 2,862 | 28.822917 | 73 | py |
null | OpenOOD-main/openood/trainers/lr_scheduler.py | import numpy as np
def cosine_annealing(step, total_steps, lr_max, lr_min):
    """Cosine decay: lr_max at step 0 down to lr_min at step == total_steps."""
    progress = step / total_steps
    return lr_min + 0.5 * (lr_max - lr_min) * (1 + np.cos(np.pi * progress))
| 183 | 25.285714 | 56 | py |
null | OpenOOD-main/openood/trainers/mcd_trainer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
import openood.utils.comm as comm
from openood.utils import Config
from .base_trainer import BaseTrainer
class MCDTrainer(BaseTrainer):
    """Trainer for Maximum Classifier Discrepancy-style OOD training.

    Before ``start_epoch_ft`` both classifier heads are trained with plain
    cross-entropy; afterwards unlabeled data is appended to each batch and
    a hinge on the entropy discrepancy between the two heads is added.
    """
    def __init__(
        self,
        net: nn.Module,
        train_loader: DataLoader,
        train_unlabeled_loader: DataLoader,
        config: Config,
    ) -> None:
        super().__init__(net, train_loader, config)

        self.train_unlabeled_loader = train_unlabeled_loader
        self.lambda_oe = config.trainer.lambda_oe  # weight of the OE term
        self.margin = config.trainer.margin  # discrepancy hinge margin
        self.epoch_ft = config.trainer.start_epoch_ft  # fine-tuning start

    def train_epoch(self, epoch_idx):
        """Run one training epoch; returns (net, {'epoch_idx', 'loss'})."""
        self.net.train()  # enter train mode

        loss_avg = 0.0
        train_dataiter = iter(self.train_loader)
        if self.train_unlabeled_loader:
            unlabeled_dataiter = iter(self.train_unlabeled_loader)

        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}: '.format(epoch_idx),
                               position=0,
                               leave=True,
                               disable=not comm.is_main_process()):
            batch = next(train_dataiter)
            data = batch['data'].cuda()

            if epoch_idx < self.epoch_ft:
                # warm-up phase: supervised CE on both heads
                logits1, logits2 = self.net(data, return_double=True)
                loss = F.cross_entropy(logits1, batch['label'].cuda()) \
                    + F.cross_entropy(logits2, batch['label'].cuda())
            elif self.train_unlabeled_loader and epoch_idx >= self.epoch_ft:
                try:
                    unlabeled_batch = next(unlabeled_dataiter)
                except StopIteration:
                    # restart the (typically shorter) unlabeled loader
                    unlabeled_dataiter = iter(self.train_unlabeled_loader)
                    unlabeled_batch = next(unlabeled_dataiter)

                id_bs = data.size(0)

                unlabeled_data = unlabeled_batch['data'].cuda()
                all_data = torch.cat([data, unlabeled_data])

                logits1, logits2 = self.net(all_data, return_double=True)
                logits1_id, logits2_id = logits1[:id_bs], logits2[:id_bs]
                logits1_ood, logits2_ood = logits1[id_bs:], logits2[id_bs:]

                loss = F.cross_entropy(logits1_id, batch['label'].cuda()) \
                    + F.cross_entropy(logits2_id, batch['label'].cuda())

                # push the two heads apart on OOD data, up to the margin
                ent = torch.mean(entropy(logits1_ood) - entropy(logits2_ood))
                loss_oe = F.relu(self.margin - ent)
                loss += self.lambda_oe * loss_oe

            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            self.scheduler.step()

            # exponential moving average, show smooth values
            with torch.no_grad():
                loss_avg = loss_avg * 0.8 + float(loss) * 0.2

        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = self.save_metrics(loss_avg)

        return self.net, metrics
def entropy(logits):
    """Shannon entropy of softmax(logits), both taken along dim 0.

    NOTE(review): for (N, C) logits dim=0 is the batch axis; per-sample
    entropy over classes would use dim=1 — confirm against the MCD
    reference implementation before changing.
    """
    probs = torch.softmax(logits, dim=0)
    return -(probs * torch.log(probs)).sum(dim=0)
| 3,326 | 33.65625 | 77 | py |
null | OpenOOD-main/openood/trainers/mixoe_trainer.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
import openood.utils.comm as comm
from openood.utils import Config
from .base_trainer import BaseTrainer
class MixOETrainer(BaseTrainer):
    """Trainer for MixOE: mixing ID samples with outlier-exposure data.

    Each ID batch is mixed with an OE batch via mixup or cutmix; the mixed
    samples are trained against soft labels interpolating the one-hot ID
    labels and a uniform distribution over classes.
    """
    def __init__(
        self,
        net: nn.Module,
        train_loader: DataLoader,
        train_unlabeled_loader: DataLoader,
        config: Config,
    ) -> None:
        super().__init__(net, train_loader, config)

        self.train_unlabeled_loader = train_unlabeled_loader
        self.lambda_oe = config.trainer.lambda_oe  # weight of the mixed loss
        self.alpha = config.trainer.alpha  # Beta(alpha, beta) for lam
        self.beta = config.trainer.beta
        self.mix_op = config.trainer.mix_op  # 'mixup' or 'cutmix'

        self.num_classes = config.dataset.num_classes
        self.criterion = SoftCE()

    def train_epoch(self, epoch_idx):
        """Run one training epoch; returns (net, {'epoch_idx', 'loss'})."""
        self.net.train()  # enter train mode

        loss_avg = 0.0
        train_dataiter = iter(self.train_loader)
        if self.train_unlabeled_loader:
            unlabeled_dataiter = iter(self.train_unlabeled_loader)

        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}: '.format(epoch_idx),
                               position=0,
                               leave=True,
                               disable=not comm.is_main_process()):
            # manually drop last batch to avoid batch size mismatch
            if train_step == len(train_dataiter):
                continue

            batch = next(train_dataiter)
            try:
                unlabeled_batch = next(unlabeled_dataiter)
            except StopIteration:
                unlabeled_dataiter = iter(self.train_unlabeled_loader)
                unlabeled_batch = next(unlabeled_dataiter)

            # re-draw if the OE batch is short so shapes line up for mixing
            if len(unlabeled_batch['data']) < len(batch['data']):
                unlabeled_dataiter = iter(self.train_unlabeled_loader)
                unlabeled_batch = next(unlabeled_dataiter)

            x, y = batch['data'].cuda(), batch['label'].cuda()
            oe_x = unlabeled_batch['data'].cuda()

            bs = x.size(0)
            one_hot_y = torch.zeros(bs, self.num_classes).cuda()
            one_hot_y.scatter_(1, y.view(-1, 1), 1)

            # ID loss
            logits = self.net(x)
            id_loss = F.cross_entropy(logits, y)

            # MixOE loss
            # build mixed samples
            lam = np.random.beta(self.alpha, self.beta)
            if self.mix_op == 'cutmix':
                mixed_x = x.clone().detach()
                bbx1, bby1, bbx2, bby2 = rand_bbox(x.size(), lam)
                # adjust lambda to exactly match pixel ratio
                lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) /
                           (x.size()[-1] * x.size()[-2]))
                # we empirically find that pasting outlier patch into ID data performs better
                # than pasting ID patch into outlier data
                mixed_x[:, :, bbx1:bbx2, bby1:bby2] = oe_x[:, :, bbx1:bbx2,
                                                           bby1:bby2]
            elif self.mix_op == 'mixup':
                mixed_x = lam * x + (1 - lam) * oe_x

            # construct soft labels and compute loss
            oe_y = torch.ones(oe_x.size(0),
                              self.num_classes).cuda() / self.num_classes
            soft_labels = lam * one_hot_y + (1 - lam) * oe_y
            mixed_loss = self.criterion(self.net(mixed_x), soft_labels)

            # Total loss
            loss = id_loss + self.lambda_oe * mixed_loss

            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            self.scheduler.step()

            # exponential moving average, show smooth values
            with torch.no_grad():
                loss_avg = loss_avg * 0.8 + float(loss) * 0.2

        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = self.save_metrics(loss_avg)

        return self.net, metrics
class SoftCE(nn.Module):
    """Cross-entropy against soft (probability-distribution) targets."""
    def __init__(self, reduction='mean'):
        super(SoftCE, self).__init__()
        self.reduction = reduction

    def forward(self, logits, soft_targets):
        log_probs = logits.log_softmax(dim=-1)
        assert log_probs.shape == soft_targets.shape

        # per-sample CE: -sum_c p_c * log q_c
        per_sample = -(soft_targets * log_probs).sum(dim=-1)

        if self.reduction == 'mean':
            return per_sample.mean()
        if self.reduction == 'sum':
            return per_sample.sum()
        if self.reduction == 'none':
            return per_sample
        raise ValueError("Reduction type '{:s}' is not supported!".format(
            self.reduction))
def rand_bbox(size, lam):
    """Sample a random box whose area ratio is roughly (1 - lam).

    ``size`` is the (N, C, W, H) shape of the batch; returns the clipped
    corners (x1, y1, x2, y2).
    """
    width, height = size[2], size[3]
    cut_ratio = np.sqrt(1. - lam)
    cut_w = int(width * cut_ratio)
    cut_h = int(height * cut_ratio)

    # uniformly random center
    center_x = np.random.randint(width)
    center_y = np.random.randint(height)

    x1 = np.clip(center_x - cut_w // 2, 0, width)
    y1 = np.clip(center_y - cut_h // 2, 0, height)
    x2 = np.clip(center_x + cut_w // 2, 0, width)
    y2 = np.clip(center_y + cut_h // 2, 0, height)
    return x1, y1, x2, y2
| 5,219 | 32.677419 | 93 | py |
null | OpenOOD-main/openood/trainers/mixup_trainer.py | import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm import tqdm
import openood.utils.comm as comm
from openood.losses import soft_cross_entropy
from openood.utils import Config
from .lr_scheduler import cosine_annealing
def prepare_mixup(batch, alpha=1.0, use_cuda=True):
    """Returns mixed inputs, pairs of targets, and lambda."""
    # alpha <= 0 disables mixing (lam = 1 keeps the original samples)
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1

    batch_size = batch['data'].size()[0]
    index = torch.randperm(batch_size)
    if use_cuda:
        index = index.cuda()

    return index, lam
def mixing(data, index, lam):
    """Convex combination of a batch with its ``index``-permuted copy."""
    shuffled = data[index]
    return lam * data + (1 - lam) * shuffled
class MixupTrainer:
    """Trainer applying mixup to inputs and their soft labels.

    Each batch must provide 'data' and 'soft_label'; both are mixed with
    the same permutation/lambda and trained with soft cross-entropy.
    """
    def __init__(self, net: nn.Module, train_loader: DataLoader,
                 config: Config) -> None:

        self.net = net
        self.train_loader = train_loader
        self.config = config

        self.alpha = self.config.trainer.trainer_args.alpha

        self.optimizer = torch.optim.SGD(
            net.parameters(),
            config.optimizer.lr,
            momentum=config.optimizer.momentum,
            weight_decay=config.optimizer.weight_decay,
            nesterov=True,
        )

        # Per-step cosine decay down to ~1e-6 over the full run.
        self.scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer,
            lr_lambda=lambda step: cosine_annealing(
                step,
                config.optimizer.num_epochs * len(train_loader),
                1,
                1e-6 / config.optimizer.lr,
            ),
        )

    def train_epoch(self, epoch_idx):
        """Run one training epoch; returns (net, {'epoch_idx', 'loss'})."""
        self.net.train()

        loss_avg = 0.0
        train_dataiter = iter(self.train_loader)

        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}: '.format(epoch_idx),
                               position=0,
                               leave=True,
                               disable=not comm.is_main_process()):
            batch = next(train_dataiter)

            # mixup operation: same permutation and lambda for data and labels
            index, lam = prepare_mixup(batch, self.alpha)
            data_mix = mixing(batch['data'].cuda(), index, lam)
            soft_label_mix = mixing(batch['soft_label'].cuda(), index, lam)

            # forward
            logits_classifier = self.net(data_mix)
            loss = soft_cross_entropy(logits_classifier, soft_label_mix)

            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            self.scheduler.step()

            # exponential moving average, show smooth values
            with torch.no_grad():
                loss_avg = loss_avg * 0.8 + float(loss) * 0.2

        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = loss_avg

        return self.net, metrics
| 2,922 | 28.525253 | 75 | py |
null | OpenOOD-main/openood/trainers/mos_trainer.py | from copy import deepcopy
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm import tqdm
import openood.utils.comm as comm
from openood.utils import Config
def get_mixup(dataset_size):
    """Mixup strength by dataset size: disabled below 20k samples."""
    if dataset_size < 20_000:
        return 0.0
    return 0.1
def get_group_slices(classes_per_group):
    """Per-group [start, end) logit slices; each group gets one extra
    slot for its 'others' class."""
    group_slices = []
    cursor = 0
    for num_cls in classes_per_group:
        group_slices.append([cursor, cursor + num_cls + 1])
        cursor += num_cls + 1
    return torch.LongTensor(group_slices)
def get_schedule(dataset_size):
    """Step schedule by dataset size: [warmup end, decay points..., final]."""
    if dataset_size < 20_000:
        return [100, 200, 300, 400, 500]
    if dataset_size < 500_000:
        return [500, 3000, 6000, 9000, 10_000]
    return [500, 6000, 12_000, 18_000, 20_000]
def get_lr(step, dataset_size, base_lr=0.003):
    """Returns learning-rate for `step` or None at the end."""
    supports = get_schedule(dataset_size)
    if step < supports[0]:
        # Linear warmup
        return base_lr * step / supports[0]
    if step >= supports[-1]:
        # End of training
        return None
    # Staircase: divide by 10 for each support boundary already passed
    lr = base_lr
    for boundary in supports[1:]:
        if boundary < step:
            lr /= 10
    return lr
def mixup_data(x, y, lam):
    """Returns mixed inputs, pairs of targets, and lambda."""
    perm = torch.randperm(x.shape[0]).to(x.device)

    mixed = lam * x + (1 - lam) * x[perm]
    return mixed, y, y[perm]
def mixup_criterion_group(criterion, pred, y_a, y_b, lam, group_slices):
    """Mixup loss: lam-weighted group-softmax losses of the two targets."""
    loss_a = calc_group_softmax_loss(criterion, pred, y_a, group_slices)
    loss_b = calc_group_softmax_loss(criterion, pred, y_b, group_slices)
    return lam * loss_a + (1 - lam) * loss_b
def calc_group_softmax_loss(criterion, logits, labels, group_slices):
    """Sum of `criterion` over per-group logit slices.

    labels[:, i] holds the within-group target index for group i.
    """
    total = 0
    num_groups = group_slices.shape[0]
    for i in range(num_groups):
        start, end = group_slices[i][0], group_slices[i][1]
        total += criterion(logits[:, start:end], labels[:, i])
    return total
def calc_group_softmax_acc(logits, labels, group_slices):
    """Per-sample group-softmax loss and top-1 correctness (MOS-style).

    Each group's logit slice gets its own softmax; column 0 (the group's
    'others' entry) is discarded when picking the best class.  A sample is
    correct iff the group with the highest class score equals the ground-
    truth group AND the class within it matches.

    Returns:
        loss: per-sample sum of group CE losses (reduction='none').
        pred_acc: bool tensor on CUDA, True where the prediction is correct.
    """
    num_groups = group_slices.shape[0]
    loss = 0
    num_samples = logits.shape[0]

    all_group_max_score, all_group_max_class = [], []

    smax = torch.nn.Softmax(dim=-1).cuda()
    cri = torch.nn.CrossEntropyLoss(reduction='none').cuda()

    for i in range(num_groups):
        group_logit = logits[:, group_slices[i][0]:group_slices[i][1]]
        group_label = labels[:, i]
        loss += cri(group_logit, group_label)

        group_softmax = smax(group_logit)
        group_softmax = group_softmax[:, 1:]  # disregard others category
        group_max_score, group_max_class = torch.max(group_softmax, dim=1)
        group_max_class += 1  # shift the class index by 1

        all_group_max_score.append(group_max_score)
        all_group_max_class.append(group_max_class)

    all_group_max_score = torch.stack(all_group_max_score, dim=1)
    all_group_max_class = torch.stack(all_group_max_class, dim=1)

    # winning group per sample and the predicted class inside it
    final_max_score, max_group = torch.max(all_group_max_score, dim=1)

    pred_cls_within_group = all_group_max_class[torch.arange(num_samples),
                                                max_group]

    # labels hold class+1 in the GT group's column and 0 elsewhere, so the
    # row-wise max recovers (gt class+... , gt group) in one call
    gt_class, gt_group = torch.max(labels, dim=1)

    selected_groups = (max_group == gt_group)

    pred_acc = torch.zeros(logits.shape[0]).bool().cuda()

    pred_acc[selected_groups] = (
        pred_cls_within_group[selected_groups] == gt_class[selected_groups])

    return loss, pred_acc
def topk(output, target, ks=(1, )):
    """Returns one boolean vector for each k, whether the target is within the
    output's top-k."""
    largest_k = max(ks)
    _, pred = output.topk(largest_k, 1, True, True)
    # (batch, k) -> (k, batch): row j holds every sample's j-th guess
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    results = []
    for k in ks:
        results.append(correct[:k].max(0)[0])
    return results
def run_eval(model, data_loader, step, group_slices, num_group):
    """Evaluate group-softmax loss and top-1 accuracy over data_loader.

    Per-sample labels are built as a (num_group,) vector with class+1 in
    the ground-truth group's slot and 0 elsewhere.  Restores train mode
    before returning.

    Returns:
        (all_c, all_top1): lists of per-sample losses and correctness flags.
    """
    # switch to evaluate mode
    model.eval()

    all_c, all_top1 = [], []

    train_dataiter = iter(data_loader)
    for train_step in tqdm(range(1,
                                 len(train_dataiter) + 1),
                           desc='Test : ',
                           position=0,
                           leave=True,
                           disable=not comm.is_main_process()):
        batch = next(train_dataiter)
        data = batch['data'].cuda()
        group_label = batch['group_label'].cuda()
        class_label = batch['class_label'].cuda()
        labels = []
        for i in range(len(group_label)):
            label = torch.zeros(num_group, dtype=torch.int64)
            label[group_label[i]] = class_label[i] + 1
            labels.append(label.unsqueeze(0))
        labels = torch.cat(labels, dim=0).cuda()
        with torch.no_grad():
            x = data
            y = labels

            # compute output, measure accuracy and record loss.
            logits = model(x)
            if group_slices is not None:
                c, top1 = calc_group_softmax_acc(logits, y, group_slices)
            else:
                # flat-softmax fallback when no group layout is given
                c = torch.nn.CrossEntropyLoss(reduction='none')(logits, y)
                top1 = topk(logits, y, ks=(1, ))[0]

            all_c.extend(c.cpu())  # Also ensures a sync point.
            all_top1.extend(top1.cpu())

    model.train()
    # print(f'Validation@{step} loss {np.mean(all_c):.5f}, '
    #       f'top1 {np.mean(all_top1):.2%}')
    # writer.add_scalar('Val/loss', np.mean(all_c), step)
    # writer.add_scalar('Val/top1', np.mean(all_top1), step)
    return all_c, all_top1
class MOSTrainer:
    def __init__(self, net: nn.Module, train_loader: DataLoader,
                 config: Config) -> None:
        """Set up SGD, BiT-style mixup, and the group-softmax layout."""
        self.net = net.cuda()
        self.train_loader = train_loader
        self.config = config
        self.lr = config.optimizer.lr

        trainable_params = filter(lambda p: p.requires_grad, net.parameters())
        self.optim = torch.optim.SGD(trainable_params,
                                     lr=self.lr,
                                     momentum=0.9)
        self.optim.zero_grad()
        self.net.train()

        # train_set len (batch_size * number of batches)
        self.train_set_len = config.dataset.train.batch_size * len(
            train_loader)
        # mixup enabled only for datasets >= 20k samples (see get_mixup)
        self.mixup = get_mixup(self.train_set_len)
        self.cri = torch.nn.CrossEntropyLoss().cuda()

        self.accum_steps = 0
        # one mixup lambda drawn here for the whole run
        self.mixup_l = np.random.beta(self.mixup,
                                      self.mixup) if self.mixup > 0 else 1

        # if specified group_config, load classes-per-group from file ...
        if (config.trainer.group_config.endswith('npy')):
            self.classes_per_group = np.load(config.trainer.group_config)
        elif (config.trainer.group_config.endswith('txt')):
            self.classes_per_group = np.loadtxt(config.trainer.group_config,
                                                dtype=int)
        else:
            # ... otherwise derive it by scanning the training labels
            self.cal_group_slices(self.train_loader)

        self.num_group = len(self.classes_per_group)
        self.group_slices = get_group_slices(self.classes_per_group)
        self.group_slices = self.group_slices.cuda()

        self.step = 0
        self.batch_split = 1
def cal_group_slices(self, train_loader):
# cal group config
group = {}
train_dataiter = iter(self.train_loader)
for train_step in tqdm(range(1,
len(train_dataiter) + 1),
desc='cal group_config',
position=0,
leave=True,
disable=not comm.is_main_process()):
batch = next(train_dataiter)
group_label = deepcopy(batch['group_label'])
class_label = deepcopy(batch['class_label'])
for i in range(len(class_label)):
gl = group_label[i].item()
cl = class_label[i].item()
try:
group[str(gl)]
except:
group[str(gl)] = []
if cl not in group[str(gl)]:
group[str(gl)].append(cl)
self.classes_per_group = []
for i in range(len(group)):
self.classes_per_group.append(max(group[str(i)]) + 1)
def train_epoch(self, epoch_idx):
total_loss = 0
train_dataiter = iter(self.train_loader)
for train_step in tqdm(range(1,
len(train_dataiter) + 1),
desc='Epoch {:03d}: '.format(epoch_idx),
position=0,
leave=True,
disable=not comm.is_main_process()):
batch = next(train_dataiter)
data = batch['data'].cuda()
group_label = batch['group_label'].cuda()
class_label = batch['class_label'].cuda()
labels = []
for i in range(len(group_label)):
label = torch.zeros(self.num_group, dtype=torch.int64)
label[group_label[i]] = class_label[i] + 1
labels.append(label.unsqueeze(0))
labels = torch.cat(labels, dim=0).cuda()
# Update learning-rate, including stop training if over.
lr = get_lr(self.step, self.train_set_len, self.lr)
if lr is None:
break
for param_group in self.optim.param_groups:
param_group['lr'] = lr
if self.mixup > 0.0:
x, y_a, y_b = mixup_data(data, labels, self.mixup_l)
logits = self.net(data)
y_a = y_a.cuda()
y_b = y_b.cuda()
if self.mixup > 0.0:
c = mixup_criterion_group(self.cri, logits, y_a, y_b,
self.mixup_l, self.group_slices)
else:
c = calc_group_softmax_loss(self.cri, logits, labels,
self.group_slices)
c_num = float(c.data.cpu().numpy()) # Also ensures a sync point.
# # Accumulate grads
(c / self.batch_split).backward()
self.accum_steps += 1
# accstep = f' ({self.accum_steps}/{self.batch_split})' \
# if self.batch_split > 1 else ''
# print(
# f'[step {self.step}{accstep}]: loss={c_num:.5f} (lr={lr:.1e})')
total_loss += c_num
# Update params
# if self.accum_steps == self.batch_split:
self.optim.step()
self.optim.zero_grad()
self.step += 1
self.accum_steps = 0
# Sample new mixup ratio for next batch
self.mixup_l = np.random.beta(self.mixup,
self.mixup) if self.mixup > 0 else 1
# torch.save(self.net.state_dict(),
# os.path.join(self.config.output_dir, 'mos_epoch_latest.ckpt'))
# step, all_top1 = run_eval(self.net, self.train_loader, self.step, self.group_slices,
# self.num_group)
loss_avg = total_loss / len(train_dataiter)
metrics = {}
metrics['epoch_idx'] = epoch_idx
metrics['loss'] = loss_avg
# metrics['acc'] = np.mean(all_top1) # the acc used in there is the top1 acc
return self.net, metrics, self.num_group, self.group_slices
| 11,494 | 33.623494 | 94 | py |
null | OpenOOD-main/openood/trainers/npos_trainer.py | import faiss.contrib.torch_utils
import math
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import MultivariateNormal
from torch.utils.data import DataLoader
from tqdm import tqdm
import openood.utils.comm as comm
from openood.utils import Config
class NPOSTrainer:
    """Trainer for NPOS (Non-Parametric Outlier Synthesis).

    Maintains a per-class feature queue; once every queue is full and the
    epoch reaches `start_epoch_KNN`, synthesizes boundary outliers via
    KNN-based sampling (`generate_outliers`) and trains an MLP to separate
    ID features from the synthesized ones, in addition to the
    dispersion/compactness contrastive losses.
    """
    def __init__(self, net: nn.Module, train_loader: DataLoader,
                 val_loader: DataLoader, config: Config) -> None:
        self.net = net
        self.train_loader = train_loader
        self.config = config
        # a bunch of constants or hyperparams
        self.n_cls = config.dataset.num_classes
        self.sample_number = config.trainer.trainer_args.sample_number
        self.sample_from = config.trainer.trainer_args.sample_from
        # DataParallel wraps the backbone in .module; fall back accordingly.
        try:
            self.penultimate_dim = net.backbone.feature_size
        except AttributeError:
            self.penultimate_dim = net.backbone.module.feature_size
        self.start_epoch_KNN = config.trainer.trainer_args.start_epoch_KNN
        self.K = config.trainer.trainer_args.K
        self.select = config.trainer.trainer_args.select
        self.cov_mat = config.trainer.trainer_args.cov_mat
        self.pick_nums = config.trainer.trainer_args.pick_nums
        self.w_disp = config.trainer.trainer_args.w_disp
        self.w_comp = config.trainer.trainer_args.w_comp
        self.loss_weight = config.trainer.trainer_args.loss_weight
        self.temp = config.trainer.trainer_args.temp
        self.ID_points_num = config.trainer.trainer_args.ID_points_num
        # GPU faiss index used for all KNN searches during outlier synthesis.
        res = faiss.StandardGpuResources()
        self.KNN_index = faiss.GpuIndexFlatL2(res, self.penultimate_dim)
        # number_dict[c] counts how many features of class c are queued so far.
        self.number_dict = {}
        for i in range(self.n_cls):
            self.number_dict[i] = 0
        # The MLP gets a scaled learning rate via mlp_decay_rate.
        if self.config.num_gpus > 1:
            params = [{
                'params': net.module.backbone.parameters()
            }, {
                'params': net.module.head.parameters()
            }, {
                'params':
                net.module.mlp.parameters(),
                'lr':
                config.optimizer.lr * config.optimizer.mlp_decay_rate
            }]
        else:
            params = [{
                'params': net.backbone.parameters()
            }, {
                'params': net.head.parameters()
            }, {
                'params':
                net.mlp.parameters(),
                'lr':
                config.optimizer.lr * config.optimizer.mlp_decay_rate
            }]
        self.optimizer = torch.optim.SGD(
            params,
            lr=config.optimizer.lr,
            momentum=config.optimizer.momentum,
            weight_decay=config.optimizer.weight_decay,
            nesterov=True,
        )
        # Large-batch training forces a warmup phase.
        if config.dataset.train.batch_size \
                * config.num_gpus * config.num_machines > 256:
            config.optimizer.warm = True
        # NOTE(review): warmup_from / warm_epochs / warmup_to are only set
        # when optimizer.warm is true, yet train_epoch always reads them —
        # confirm configs always enable warm.
        if config.optimizer.warm:
            self.warmup_from = 0.001
            self.warm_epochs = 10
            if config.optimizer.cosine:
                eta_min = config.optimizer.lr * \
                    (config.optimizer.lr_decay_rate**3)
                self.warmup_to = eta_min + (config.optimizer.lr - eta_min) * (
                    1 + math.cos(math.pi * self.warm_epochs /
                                 config.optimizer.num_epochs)) / 2
            else:
                self.warmup_to = config.optimizer.lr
        self.criterion_comp = CompLoss(self.n_cls,
                                       temperature=self.temp).cuda()
        # V2: EMA style prototypes
        self.criterion_disp = DispLoss(self.n_cls,
                                       config.network.feat_dim,
                                       config.trainer.trainer_args.proto_m,
                                       self.net,
                                       val_loader,
                                       temperature=self.temp).cuda()
    def train_epoch(self, epoch_idx):
        """One epoch of NPOS training; returns (net, metrics)."""
        adjust_learning_rate(self.config, self.optimizer, epoch_idx - 1)
        self.net.train()
        loss_avg = 0.0
        train_dataiter = iter(self.train_loader)
        # Per-class FIFO queue of the last `sample_number` penultimate features.
        data_dict = torch.zeros(self.n_cls, self.sample_number,
                                self.penultimate_dim).cuda()
        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}: '.format(epoch_idx),
                               position=0,
                               leave=True,
                               disable=not comm.is_main_process()):
            warmup_learning_rate(self.config, self.warm_epochs,
                                 self.warmup_from,
                                 self.warmup_to, epoch_idx - 1, train_step,
                                 len(train_dataiter), self.optimizer)
            batch = next(train_dataiter)
            data = batch['data']
            target = batch['label']
            # Two augmented views per sample are concatenated into one batch.
            data = torch.cat([data[0], data[1]], dim=0).cuda()
            target = target.repeat(2).cuda()
            # forward
            penultimate = self.net.backbone(data)
            features = self.net.head(penultimate)
            sum_temp = 0
            for index in range(self.n_cls):
                sum_temp += self.number_dict[index]
            lr_reg_loss = torch.zeros(1).cuda()[0]
            if sum_temp == self.n_cls * self.sample_number \
                    and epoch_idx < self.start_epoch_KNN:
                # maintaining an ID data queue for each class.
                target_numpy = target.cpu().data.numpy()
                for index in range(len(target)):
                    dict_key = target_numpy[index]
                    # FIFO update: drop the oldest, append the newest feature.
                    data_dict[dict_key] = torch.cat(
                        (data_dict[dict_key][1:],
                         penultimate[index].detach().view(1, -1)), 0)
            elif sum_temp == self.n_cls * self.sample_number \
                    and epoch_idx >= self.start_epoch_KNN:
                # Queues full and KNN phase reached: also synthesize outliers.
                target_numpy = target.cpu().data.numpy()
                for index in range(len(target)):
                    dict_key = target_numpy[index]
                    data_dict[dict_key] = torch.cat(
                        (data_dict[dict_key][1:],
                         penultimate[index].detach().view(1, -1)), 0)
                # Standard Gaussian distribution
                new_dis = MultivariateNormal(
                    torch.zeros(self.penultimate_dim).cuda(),
                    torch.eye(self.penultimate_dim).cuda())
                negative_samples = new_dis.rsample((self.sample_from, ))
                for index in range(self.n_cls):
                    ID = data_dict[index]
                    sample_point = generate_outliers(
                        ID,
                        input_index=self.KNN_index,
                        negative_samples=negative_samples,
                        ID_points_num=self.ID_points_num,
                        K=self.K,
                        select=self.select,
                        cov_mat=self.cov_mat,
                        sampling_ratio=1.0,
                        pic_nums=self.pick_nums,
                        depth=self.penultimate_dim)
                    if index == 0:
                        ood_samples = sample_point
                    else:
                        ood_samples = torch.cat((ood_samples, sample_point), 0)
                if len(ood_samples) != 0:
                    # Binary ID-vs-synthetic-OOD discrimination on the MLP head.
                    energy_score_for_fg = self.net.mlp(penultimate)
                    energy_score_for_bg = self.net.mlp(ood_samples)
                    input_for_lr = torch.cat(
                        (energy_score_for_fg, energy_score_for_bg),
                        0).squeeze()
                    labels_for_lr = torch.cat(
                        (torch.ones(len(energy_score_for_fg)).cuda(),
                         torch.zeros(len(energy_score_for_bg)).cuda()), -1)
                    criterion_BCE = torch.nn.BCEWithLogitsLoss()
                    lr_reg_loss = criterion_BCE(input_for_lr.view(-1),
                                                labels_for_lr)
            else:
                # Queues not full yet: keep filling them in arrival order.
                target_numpy = target.cpu().data.numpy()
                for index in range(len(target)):
                    dict_key = target_numpy[index]
                    if self.number_dict[dict_key] < self.sample_number:
                        data_dict[dict_key][self.number_dict[
                            dict_key]] = penultimate[index].detach()
                        self.number_dict[dict_key] += 1
            normed_features = F.normalize(features, dim=1)
            disp_loss = self.criterion_disp(normed_features, target)
            comp_loss = self.criterion_comp(normed_features,
                                            self.criterion_disp.prototypes,
                                            target)
            loss = self.w_disp * disp_loss + self.w_comp * comp_loss
            loss = self.loss_weight * lr_reg_loss + loss
            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            # exponential moving average, show smooth values
            with torch.no_grad():
                loss_avg = loss_avg * 0.8 + float(loss) * 0.2
        # comm.synchronize()
        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = self.save_metrics(loss_avg)
        return self.net, metrics
    def save_metrics(self, loss_avg):
        """Average the running loss across distributed workers."""
        all_loss = comm.gather(loss_avg)
        total_losses_reduced = np.mean([x for x in all_loss])
        return total_losses_reduced
def adjust_learning_rate(config, optimizer, epoch):
    """Set every param group's learning rate for the given epoch.

    Uses cosine annealing when ``config.optimizer.cosine`` is set,
    otherwise multiplies the base lr by ``lr_decay_rate`` once per
    milestone in ``lr_decay_epochs`` that the epoch has passed.
    """
    opt_cfg = config.optimizer
    base_lr = opt_cfg.lr
    if opt_cfg.cosine:
        # Anneal from base_lr down to base_lr * decay_rate^3.
        floor = base_lr * (opt_cfg.lr_decay_rate**3)
        cos_term = math.cos(math.pi * epoch / opt_cfg.num_epochs)
        new_lr = floor + 0.5 * (base_lr - floor) * (1 + cos_term)
    else:
        milestones = np.asarray(opt_cfg.lr_decay_epochs)
        n_decays = int(np.sum(epoch > milestones))
        new_lr = base_lr * (opt_cfg.lr_decay_rate**n_decays) \
            if n_decays > 0 else base_lr
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def warmup_learning_rate(config, warm_epochs, warmup_from, warmup_to, epoch,
                         batch_id, total_batches, optimizer):
    """Linearly ramp the lr from warmup_from to warmup_to during warmup.

    Interpolates on a per-batch granularity; does nothing once the warmup
    window has passed or when warmup is disabled in the config.
    """
    if not (config.optimizer.warm and epoch <= warm_epochs):
        return
    # Fraction of the total warmup schedule completed so far.
    batches_done = batch_id + (epoch - 1) * total_batches
    warmup_span = warm_epochs * total_batches
    fraction = batches_done / warmup_span
    lr = warmup_from + fraction * (warmup_to - warmup_from)
    for group in optimizer.param_groups:
        group['lr'] = lr
class CompLoss(nn.Module):
    """Compactness loss: pulls each feature toward its class prototype.

    A softmax over feature-prototype similarities is maximized at the
    true-class prototype.  Assumes `features` are already L2-normalized
    by the caller; prototypes are normalized here.
    """
    def __init__(self, n_cls, temperature=0.07, base_temperature=0.07):
        super(CompLoss, self).__init__()
        self.n_cls = n_cls
        self.temperature = temperature
        self.base_temperature = base_temperature
    def forward(self, features, prototypes, labels):
        """Return the scalar compactness loss for a batch.

        features: (batch, feat_dim); prototypes: (n_cls, feat_dim);
        labels: (batch,) class indices.  Runs on CUDA (device hard-coded).
        """
        device = torch.device('cuda')
        proxy_labels = torch.arange(0, self.n_cls).to(device)
        batch_size = features.shape[0]
        labels = labels.contiguous().view(-1, 1)
        if labels.shape[0] != batch_size:
            raise ValueError('Num of labels does not match num of features')
        # mask[i, c] == 1 iff sample i belongs to class c.
        mask = torch.eq(labels, proxy_labels.T).float().to(device)
        # compute logits
        anchor_feature = features
        contrast_feature = prototypes / prototypes.norm(dim=-1, keepdim=True)
        anchor_dot_contrast = torch.div(
            torch.matmul(anchor_feature, contrast_feature.T), self.temperature)
        # for numerical stability
        logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)
        logits = anchor_dot_contrast - logits_max.detach()
        # compute log_prob
        exp_logits = torch.exp(logits)
        log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))
        # compute mean of log-likelihood over positive
        mean_log_prob_pos = (mask * log_prob).sum(1)
        loss = -(self.temperature /
                 self.base_temperature) * mean_log_prob_pos.mean()
        return loss
class DispLoss(nn.Module):
    """Dispersion loss with EMA-updated class prototypes.

    Prototypes are initialized from a full pass over `loader` and then
    updated by exponential moving average (`proto_m`) on every forward
    call; the loss pushes distinct class prototypes apart.
    """
    def __init__(self,
                 n_cls,
                 feat_dim,
                 proto_m,
                 model,
                 loader,
                 temperature=0.1,
                 base_temperature=0.1):
        super(DispLoss, self).__init__()
        self.n_cls = n_cls
        self.feat_dim = feat_dim
        self.proto_m = proto_m
        self.temperature = temperature
        self.base_temperature = base_temperature
        # Buffer so prototypes move with the module (.cuda()) and are saved
        # in state_dict without being optimized.
        self.register_buffer('prototypes',
                             torch.zeros(self.n_cls, self.feat_dim))
        self.model = model
        self.loader = loader
        self.init_class_prototypes()
    def forward(self, features, labels):
        """EMA-update prototypes with the batch, then return dispersion loss."""
        prototypes = self.prototypes
        num_cls = self.n_cls
        # EMA update of each sample's class prototype, renormalized per update.
        for j in range(len(features)):
            prototypes[labels[j].item()] = F.normalize(
                prototypes[labels[j].item()] * self.proto_m + features[j] *
                (1 - self.proto_m),
                dim=0)
        self.prototypes = prototypes.detach()
        labels = torch.arange(0, num_cls).cuda()
        labels = labels.contiguous().view(-1, 1)
        # mask selects pairs of *different* prototypes.
        mask = (1 - torch.eq(labels, labels.T).float()).cuda()
        logits = torch.div(torch.matmul(prototypes, prototypes.T),
                           self.temperature)
        # Remove self-similarity entries on the diagonal.
        logits_mask = torch.scatter(torch.ones_like(mask), 1,
                                    torch.arange(num_cls).view(-1, 1).cuda(),
                                    0)
        mask = mask * logits_mask
        mean_prob_neg = torch.log(
            (mask * torch.exp(logits)).sum(1) / mask.sum(1))
        # Drop NaNs (classes with no valid negatives) before averaging.
        mean_prob_neg = mean_prob_neg[~torch.isnan(mean_prob_neg)]
        loss = self.temperature / self.base_temperature * mean_prob_neg.mean()
        return loss
    def init_class_prototypes(self):
        """Initialize class prototypes as normalized per-class feature means."""
        self.model.eval()
        start = time.time()
        prototype_counts = [0] * self.n_cls
        with torch.no_grad():
            prototypes = torch.zeros(self.n_cls, self.feat_dim).cuda()
            for i, batch in enumerate(self.loader):
                input = batch['data']
                target = batch['label']
                input, target = input.cuda(), target.cuda()
                features = self.model(input)
                for j, feature in enumerate(features):
                    prototypes[target[j].item()] += feature
                    prototype_counts[target[j].item()] += 1
            for cls in range(self.n_cls):
                prototypes[cls] /= prototype_counts[cls]
            # measure elapsed time
            duration = time.time() - start
            print(f'Time to initialize prototypes: {duration:.3f}')
            prototypes = F.normalize(prototypes, dim=1)
            self.prototypes = prototypes
def generate_outliers(ID,
                      input_index,
                      negative_samples,
                      ID_points_num=2,
                      K=20,
                      select=1,
                      cov_mat=0.1,
                      sampling_ratio=1.0,
                      pic_nums=30,
                      depth=342):
    """Synthesize outlier features around the boundary of one class's ID set.

    ID: (queue_len, depth) queued features for a single class.
    input_index: faiss index (populated here and reset before returning).
    negative_samples: (sample_from, depth) standard-Gaussian noise.
    Returns the selected outlier feature points.
    """
    length = negative_samples.shape[0]
    data_norm = torch.norm(ID, p=2, dim=1, keepdim=True)
    normed_data = ID / data_norm
    rand_ind = np.random.choice(normed_data.shape[0],
                                int(normed_data.shape[0] * sampling_ratio),
                                replace=False)
    index = input_index
    index.add(normed_data[rand_ind])
    # Pick the `select` boundary points (largest K-th NN distance), then
    # randomly keep `pic_nums` of them as seeds.
    minD_idx, k_th = KNN_dis_search_decrease(ID, index, K, select)
    minD_idx = minD_idx[np.random.choice(select, int(pic_nums), replace=False)]
    data_point_list = torch.cat(
        [ID[i:i + 1].repeat(length, 1) for i in minD_idx])
    # Perturb each seed with scaled Gaussian noise to form candidates.
    negative_sample_cov = cov_mat * negative_samples.cuda().repeat(pic_nums, 1)
    negative_sample_list = negative_sample_cov + data_point_list
    # Keep the candidates farthest from the ID data per seed.
    point = KNN_dis_search_distance(negative_sample_list, index, K,
                                    ID_points_num, length, depth)
    index.reset()
    return point
def KNN_dis_search_distance(target,
                            index,
                            K=50,
                            num_points=10,
                            length=2000,
                            depth=342):
    '''
    Select, per seed group, the `num_points` candidates in `target` whose
    K-th nearest-neighbour distance to the indexed ID data is largest.

    target: (pic_nums * length, depth) candidate outlier points; every
        consecutive `length` rows belong to one seed.
    index: populated faiss index over normalized ID features.
    Returns the chosen rows of `target` (num_points * pic_nums, depth).
    '''
    # Normalize the features
    target_norm = torch.norm(target, p=2, dim=1, keepdim=True)
    normed_target = target / target_norm
    distance, output_index = index.search(normed_target, K)
    # Distance to the K-th neighbour, regrouped as (length, pic_nums).
    k_th_distance = distance[:, -1]
    k_th = k_th_distance.view(length, -1)
    # target_new = target.view(length, -1, depth)
    k_th_distance, minD_idx = torch.topk(k_th, num_points, dim=0)
    minD_idx = minD_idx.squeeze()
    point_list = []
    # Convert per-group row indices back to flat indices into `target`.
    for i in range(minD_idx.shape[1]):
        point_list.append(i * length + minD_idx[:, i])
    return target[torch.cat(point_list)]
def KNN_dis_search_decrease(
    target,
    index,
    K=50,
    select=1,
):
    """Find the `select` rows of `target` farthest from their K-th neighbour.

    Each row is L2-normalized before querying `index`; the score of a row
    is its distance to the K-th nearest indexed point.  Returns a tuple
    (indices, distances) of the top-`select` scoring rows.
    """
    # L2-normalize each query row.
    row_norms = torch.norm(target, p=2, dim=1, keepdim=True)
    queries = target / row_norms
    # Distance to the K-th nearest neighbour, one value per query.
    distances, _ = index.search(queries, K)
    kth_dist = distances[:, -1]
    top_vals, top_idx = torch.topk(kth_dist, select)
    return top_idx, top_vals
| 17,725 | 38.391111 | 79 | py |
null | OpenOOD-main/openood/trainers/oe_trainer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
import openood.utils.comm as comm
from openood.utils import Config
from .base_trainer import BaseTrainer
class OETrainer(BaseTrainer):
    """Outlier Exposure trainer.

    Extends BaseTrainer with an auxiliary unlabeled-OOD loader; each step
    adds an OE loss that drives the softmax over outlier samples toward
    the uniform distribution, weighted by `lambda_oe`.
    """
    def __init__(
        self,
        net: nn.Module,
        train_loader: DataLoader,
        train_unlabeled_loader: DataLoader,
        config: Config,
    ) -> None:
        super().__init__(net, train_loader, config)
        self.train_unlabeled_loader = train_unlabeled_loader
        self.lambda_oe = config.trainer.lambda_oe
    def train_epoch(self, epoch_idx):
        """One epoch over the ID loader; outlier loader cycles as needed."""
        self.net.train()  # enter train mode
        loss_avg = 0.0
        train_dataiter = iter(self.train_loader)
        if self.train_unlabeled_loader:
            unlabeled_dataiter = iter(self.train_unlabeled_loader)
        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}: '.format(epoch_idx),
                               position=0,
                               leave=True,
                               disable=not comm.is_main_process()):
            batch = next(train_dataiter)
            # Restart the (typically shorter) unlabeled iterator on exhaustion.
            try:
                unlabeled_batch = next(unlabeled_dataiter)
            except StopIteration:
                unlabeled_dataiter = iter(self.train_unlabeled_loader)
                unlabeled_batch = next(unlabeled_dataiter)
            # ID samples first, then unlabeled OOD samples, in one batch.
            data = torch.cat((batch['data'], unlabeled_batch['data'])).cuda()
            batch_size = batch['data'].size(0)
            # forward
            logits_classifier = self.net(data)
            loss = F.cross_entropy(logits_classifier[:batch_size],
                                   batch['label'].cuda())
            # OE term: cross-entropy to the uniform distribution (up to a
            # constant) on the outlier part of the batch.
            loss_oe = -(
                logits_classifier[batch_size:].mean(1) -
                torch.logsumexp(logits_classifier[batch_size:], dim=1)).mean()
            loss += self.lambda_oe * loss_oe
            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            self.scheduler.step()
            # exponential moving average, show smooth values
            with torch.no_grad():
                loss_avg = loss_avg * 0.8 + float(loss) * 0.2
        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = self.save_metrics(loss_avg)
        return self.net, metrics
| 2,477 | 31.605263 | 78 | py |
null | OpenOOD-main/openood/trainers/opengan_trainer.py | import random
import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
import openood.utils.comm as comm
def weights_init(m):
    """DCGAN-style weight initializer; apply via ``net.apply(weights_init)``.

    Conv layers get zero-mean Gaussian weights (std 0.02); BatchNorm layers
    get Gaussian weights around 1 and zero bias.  Other layers are untouched.
    """
    layer_type = type(m).__name__
    if 'Conv' in layer_type:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in layer_type:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
class OpenGanTrainer:
    """Adversarial trainer for OpenGAN: a GAN over penultimate features.

    `feat_loader` yields pre-extracted feature batches; the discriminator
    learns real-vs-generated features and is later used as an OOD scorer.
    """
    def __init__(self, net, feat_loader, config) -> None:
        # Fixed seed for reproducible GAN training.
        manualSeed = 999
        print('Random Seed: ', manualSeed)
        random.seed(manualSeed)
        torch.manual_seed(manualSeed)
        self.config = config
        self.netG = net['netG']
        self.netD = net['netD']
        self.netG.apply(weights_init)
        self.netD.apply(weights_init)
        self.feat_loader = feat_loader
        # Latent vector dimensionality for the generator.
        self.nz = self.config.network.nz
        self.real_label = 1
        self.fake_label = 0
        optimizer_config = self.config.optimizer
        # D uses a slightly smaller lr than G (lr / 1.5).
        self.optimizerD = optim.Adam(self.netD.parameters(),
                                     lr=optimizer_config.lr / 1.5,
                                     betas=(optimizer_config.beta1, 0.999))
        self.optimizerG = optim.Adam(self.netG.parameters(),
                                     lr=optimizer_config.lr,
                                     betas=(optimizer_config.beta1, 0.999))
        self.criterion = nn.BCELoss()
        self.G_losses = []
        self.D_losses = []
    def train_epoch(self, epoch_idx):
        """One epoch of alternating D/G updates over the feature loader."""
        feat_dataiter = iter(self.feat_loader)
        for train_step in tqdm(range(1,
                                     len(feat_dataiter) + 1),
                               desc='Epoch {:03d}: '.format(epoch_idx),
                               position=0,
                               leave=True,
                               disable=not comm.is_main_process()):
            data = next(feat_dataiter)['data']
            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            # Train with all-real batch
            self.netD.zero_grad()
            # Format batch
            loaded_data = data.cuda()
            b_size = loaded_data.size(0)
            label = torch.full((b_size, ), self.real_label).cuda()
            label = label.to(torch.float32)
            # Forward pass real batch through D
            output = self.netD(loaded_data).view(-1)
            # import pdb
            # pdb.set_trace()
            # Calculate loss on all-real batch
            errD_real = self.criterion(output, label)
            # Calculate gradients for D in backward pass
            errD_real.backward()
            # D_x / D_G_z1 / D_G_z2 are unused diagnostics (mean D outputs).
            D_x = output.mean().item()
            # Train with all-fake batch
            # Generate batch of latent vectors
            noise = torch.randn(b_size, self.nz, 1, 1).cuda()
            # Generate fake image batch with G
            fake = self.netG(noise)
            label.fill_(self.fake_label)
            # Classify all fake batch with D
            output = self.netD(fake.detach()).view(-1)
            # Calculate D's loss on the all-fake batch
            errD_fake = self.criterion(output, label)
            # Calculate the gradients for this batch
            errD_fake.backward()
            D_G_z1 = output.mean().item()
            # Add the gradients from the all-real and all-fake batches
            errD = errD_real + errD_fake
            # Update D
            self.optimizerD.step()
            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            self.netG.zero_grad()
            label.fill_(
                self.real_label)  # fake labels are real for generator cost
            # Since we just updated D,
            # perform another forward pass of all-fake batch through D
            output = self.netD(fake).view(-1)
            # Calculate G's loss based on this output
            errG = self.criterion(output, label)
            # Calculate gradients for G
            errG.backward()
            D_G_z2 = output.mean().item()
            # Update G
            self.optimizerG.step()
            # Save Losses for plotting later, if needed
            self.G_losses.append(errG.item())
            self.D_losses.append(errD.item())
        return {
            'netG': self.netG,
            'netD': self.netD
        }, {
            'G_losses': self.G_losses,
            'D_losses': self.D_losses,
            'epoch_idx': epoch_idx
        }
| 4,636 | 34.128788 | 75 | py |
null | OpenOOD-main/openood/trainers/rd4ad_trainer.py | import torch
from torchvision.datasets import ImageFolder
import numpy as np
import random
import os
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
import argparse
from torch.nn import functional as F
from tqdm import tqdm
from openood.utils import Config
from openood.losses.rd4ad_loss import loss_function
class Rd4adTrainer:
    """Trainer for RD4AD (reverse-distillation anomaly detection).

    The encoder is frozen (eval mode); only the bottleneck (`bn`) and the
    reverse decoder are optimized to reconstruct the encoder's multi-scale
    features via `loss_function`.
    """
    def __init__(self, net, train_loader, config: Config):
        self.config = config
        self.train_loader = train_loader
        # `net` is a dict of the three sub-networks.
        self.encoder = net['encoder']
        self.bn = net['bn']
        self.decoder = net['decoder']
        # NOTE(review): if optimizer.name != 'adam', self.optimizer is never
        # set and train_epoch will raise AttributeError — confirm configs.
        if config.optimizer.name == 'adam':
            self.optimizer=torch.optim.Adam(list(self.decoder.parameters())+list(self.bn.parameters()), lr=config.optimizer.lr, betas=config.optimizer.betas)
    def train_epoch(self, epoch_idx):
        """One epoch of feature-reconstruction training; encoder stays frozen."""
        self.encoder.eval()
        self.bn.train()
        self.decoder.train()
        train_dataiter = iter(self.train_loader)
        epoch_loss = 0
        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d} '.format(epoch_idx),
                               position=0,
                               leave=True):
            batch = next(train_dataiter)
            img = batch['data'].cuda()
            # Use intermediate encoder feature maps (stages 1-3) as targets.
            feature_list = self.encoder.forward(img,return_feature_list=True)[1]
            inputs = feature_list[1:4]
            outputs = self.decoder(self.bn(inputs))
            loss = loss_function(inputs, outputs)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            epoch_loss += loss.item()
        metrics = {}
        net = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = epoch_loss
        net['encoder'] = self.encoder
        net['bn'] = self.bn
        net['decoder'] = self.decoder
        return net, metrics
| 1,938 | 34.907407 | 157 | py |
null | OpenOOD-main/openood/trainers/regmixup_trainer.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
import openood.utils.comm as comm
from openood.utils import Config
from .lr_scheduler import cosine_annealing
# https://github.com/FrancescoPinto/RegMixup/blob/main/models/regmixup.py
def mixup_data(x, y, alpha=1.0):
    """Returns mixed inputs, pairs of targets, and lambda.

    Samples lam ~ Beta(alpha, alpha) (lam = 1 when alpha <= 0, i.e. no
    mixing) and blends each sample with a randomly paired one from the
    same batch.  Returns (mixed_x, y, y_permuted, lam).
    """
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1
    # Random pairing of samples within the batch.
    perm = torch.randperm(x.size()[0]).cuda()
    mixed_x = lam * x + (1 - lam) * x[perm]
    return mixed_x, y, y[perm], lam
def regmixup_criterion(criterion, pred, y_a, y_b, lam):
    """Convex combination of the criterion against both mixup target sets."""
    loss_a = criterion(pred, y_a)
    loss_b = criterion(pred, y_b)
    return lam * loss_a + (1 - lam) * loss_b
class RegMixupTrainer:
    """RegMixup trainer: mixup as a regularizer on top of standard CE.

    Each step trains on the clean batch AND a mixed copy of it
    concatenated together, combining both losses via regmixup_criterion.
    """
    def __init__(self, net: nn.Module, train_loader: DataLoader,
                 config: Config) -> None:
        self.net = net
        self.train_loader = train_loader
        self.config = config
        # Beta-distribution parameter for mixup sampling.
        self.alpha = self.config.trainer.trainer_args.alpha
        self.optimizer = torch.optim.SGD(
            net.parameters(),
            config.optimizer.lr,
            momentum=config.optimizer.momentum,
            weight_decay=config.optimizer.weight_decay,
            nesterov=True,
        )
        # Per-step cosine annealing down to 1e-6.
        self.scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer,
            lr_lambda=lambda step: cosine_annealing(
                step,
                config.optimizer.num_epochs * len(train_loader),
                1,
                1e-6 / config.optimizer.lr,
            ),
        )
    def train_epoch(self, epoch_idx):
        """One training epoch; returns (net, metrics)."""
        self.net.train()
        loss_avg = 0.0
        train_dataiter = iter(self.train_loader)
        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}: '.format(epoch_idx),
                               position=0,
                               leave=True,
                               disable=not comm.is_main_process()):
            batch = next(train_dataiter)
            x, y = batch['data'].cuda(), batch['label'].cuda()
            # mixup operation
            mixup_x, part_y_a, part_y_b, lam = mixup_data(x, y, self.alpha)
            # Clean targets are prepended so the clean half of the batch is
            # scored against y under both target sets.
            targets_a = torch.cat([y, part_y_a])
            targets_b = torch.cat([y, part_y_b])
            x = torch.cat([x, mixup_x], dim=0)
            # forward
            logits = self.net(x)
            loss = regmixup_criterion(F.cross_entropy, logits, targets_a,
                                      targets_b, lam)
            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            self.scheduler.step()
            # exponential moving average, show smooth values
            with torch.no_grad():
                loss_avg = loss_avg * 0.8 + float(loss) * 0.2
        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = loss_avg
        return self.net, metrics
| 3,137 | 30.069307 | 75 | py |
null | OpenOOD-main/openood/trainers/rotpred_trainer.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
import openood.utils.comm as comm
from openood.utils import Config
from .lr_scheduler import cosine_annealing
class RotPredTrainer:
    """Trainer with an auxiliary rotation-prediction (0/90/180/270) head.

    Every image is replicated at four rotations; the network must both
    classify the upright images and predict each copy's rotation.
    """
    def __init__(self, net: nn.Module, train_loader: DataLoader,
                 config: Config) -> None:
        self.net = net
        self.train_loader = train_loader
        self.config = config
        self.optimizer = torch.optim.SGD(
            net.parameters(),
            config.optimizer.lr,
            momentum=config.optimizer.momentum,
            weight_decay=config.optimizer.weight_decay,
            nesterov=True,
        )
        # Per-step cosine annealing down to 1e-6.
        self.scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer,
            lr_lambda=lambda step: cosine_annealing(
                step,
                config.optimizer.num_epochs * len(train_loader),
                1,
                1e-6 / config.optimizer.lr,
            ),
        )
    def train_epoch(self, epoch_idx):
        """One training epoch; returns (net, metrics)."""
        self.net.train()
        loss_avg = 0.0
        train_dataiter = iter(self.train_loader)
        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}: '.format(epoch_idx),
                               position=0,
                               leave=True,
                               disable=not comm.is_main_process()):
            batch = next(train_dataiter)
            data = batch['data'].cuda()
            target = batch['label'].cuda()
            batch_size = len(data)
            # Rotations in 90-degree steps over the spatial dims (H, W).
            x_90 = torch.rot90(data, 1, [2, 3])
            x_180 = torch.rot90(data, 2, [2, 3])
            x_270 = torch.rot90(data, 3, [2, 3])
            x_rot = torch.cat([data, x_90, x_180, x_270])
            # Rotation labels 0..3 matching the concatenation order above.
            y_rot = torch.cat([
                torch.zeros(batch_size),
                torch.ones(batch_size),
                2 * torch.ones(batch_size),
                3 * torch.ones(batch_size),
            ]).long().cuda()
            # forward
            logits, logits_rot = self.net(x_rot, return_rot_logits=True)
            # Classification loss only on the un-rotated first quarter.
            loss_cls = F.cross_entropy(logits[:batch_size], target)
            loss_rot = F.cross_entropy(logits_rot, y_rot)
            loss = loss_cls + loss_rot
            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            self.scheduler.step()
            # exponential moving average, show smooth values
            with torch.no_grad():
                loss_avg = loss_avg * 0.8 + float(loss) * 0.2
        # comm.synchronize()
        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = self.save_metrics(loss_avg)
        return self.net, metrics
    def save_metrics(self, loss_avg):
        """Average the running loss across distributed workers."""
        all_loss = comm.gather(loss_avg)
        total_losses_reduced = np.mean([x for x in all_loss])
        return total_losses_reduced
| 3,049 | 30.122449 | 72 | py |
null | OpenOOD-main/openood/trainers/rts_trainer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
from openood.utils import Config
from .lr_scheduler import cosine_annealing
class RTSTrainer:
    """Trainer for RTS: classification with a learned per-sample temperature.

    The network predicts a variance vector alongside the logits; a
    stochastic temperature built from it scales the logits, and a KL-style
    regularizer keeps the variance near 1 (weighted by network.kl_scale).
    """
    def __init__(self, net: nn.Module, train_loader: DataLoader,
                 config: Config) -> None:
        self.net = net
        self.train_loader = train_loader
        self.config = config
        self.optimizer = torch.optim.SGD(
            net.parameters(),
            config.optimizer.lr,
            momentum=config.optimizer.momentum,
            weight_decay=config.optimizer.weight_decay,
            nesterov=True,
        )
        # Per-step cosine annealing down to 1e-6.
        self.scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer,
            lr_lambda=lambda step: cosine_annealing(
                step,
                config.optimizer.num_epochs * len(train_loader),
                1,
                1e-6 / config.optimizer.lr,
            ),
        )
    def train_epoch(self, epoch_idx):
        """One training epoch; returns (net, metrics)."""
        self.net.train()
        loss_avg = 0.0
        train_dataiter = iter(self.train_loader)
        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}: '.format(epoch_idx),
                               position=0,
                               leave=True):
            batch = next(train_dataiter)
            data = batch['data'].cuda()
            target = batch['label'].cuda()
            # forward
            logits_classifier, variance = self.net(data, return_var=True)
            # Sample a chi-square-like temperature from the predicted
            # variance, normalized by (dof - 2).
            epsilon = torch.randn_like(variance)
            temperature = torch.sum(variance * epsilon * epsilon, dim=1, keepdim=True) / (self.config.network.dof - 2)
            # KL-style penalty: minimized at variance == 1 (eps for log safety).
            loss_kl = ((variance - torch.log(variance + 1e-8) - 1) * 0.5).mean()
            loss_head = F.cross_entropy(logits_classifier / temperature, target)
            loss = loss_head + self.config.network.kl_scale * loss_kl
            # backward
            self.optimizer.zero_grad()
            loss.backward()
            # Clip total grad norm to stabilize the stochastic temperature.
            nn.utils.clip_grad_norm_(parameters=self.net.parameters(), max_norm=2.5, norm_type=2)
            self.optimizer.step()
            self.scheduler.step()
            # exponential moving average, show smooth values
            with torch.no_grad():
                loss_avg = loss_avg * 0.8 + float(loss) * 0.2
        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = loss_avg
        return self.net, metrics
| 2,575 | 31.2 | 118 | py |
null | OpenOOD-main/openood/trainers/sae_trainer.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.mixture import GaussianMixture
from torch.utils.data import DataLoader
from tqdm import tqdm
from openood.losses import soft_cross_entropy
from openood.postprocessors.gmm_postprocessor import compute_single_GMM_score
from openood.postprocessors.mds_ensemble_postprocessor import (
process_feature_type, reduce_feature_dim, tensor2list)
from openood.utils import Config
from .lr_scheduler import cosine_annealing
from .mixup_trainer import mixing, prepare_mixup
class SAETrainer:
    """Source-Awareness-Enhancement trainer.

    setup() fits a class-agnostic GMM over (dim-reduced) backbone features;
    train_epoch() then combines standard CE, mixup soft-label CE, and two
    SAE terms that push ID samples toward high GMM likelihood and mixed
    (pseudo-OOD) samples toward low likelihood.
    """
    def __init__(self, net: nn.Module, train_loader: DataLoader,
                 config: Config) -> None:
        self.net = net
        self.train_loader = train_loader
        self.config = config
        self.trainer_args = self.config.trainer.trainer_args
        self.optimizer = torch.optim.SGD(
            net.parameters(),
            config.optimizer.lr,
            momentum=config.optimizer.momentum,
            weight_decay=config.optimizer.weight_decay,
            nesterov=True,
        )
        # Per-step cosine annealing down to 1e-6.
        self.scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer,
            lr_lambda=lambda step: cosine_annealing(
                step,
                config.optimizer.num_epochs * len(train_loader),
                1,
                1e-6 / config.optimizer.lr,
            ),
        )
    @torch.no_grad()
    def setup(self):
        """Fit the GMM statistics used by the SAE losses (no gradients)."""
        feature_all = None
        label_all = []
        # collect features
        for batch in tqdm(self.train_loader,
                          desc='Compute GMM Stats [Collecting]'):
            data = batch['data_aux'].cuda()
            label = batch['label']
            _, feature_list = self.net(data, return_feature_list=True)
            label_all.extend(tensor2list(label))
            feature_processed = process_feature_type(
                feature_list[0], self.trainer_args.feature_type)
            if isinstance(feature_all, type(None)):
                feature_all = tensor2list(feature_processed)
            else:
                feature_all.extend(tensor2list(feature_processed))
        label_all = np.array(label_all)
        # reduce feature dim and perform gmm estimation
        feature_all = np.array(feature_all)
        transform_matrix = reduce_feature_dim(feature_all, label_all,
                                              self.trainer_args.reduce_dim)
        feature_all = np.dot(feature_all, transform_matrix)
        # GMM estimation
        gm = GaussianMixture(n_components=self.trainer_args.num_clusters,
                             random_state=0,
                             covariance_type='tied').fit(feature_all)
        feature_mean = gm.means_
        feature_prec = gm.precisions_
        component_weight = gm.weights_
        # Cache everything on GPU for use inside train_epoch.
        self.feature_mean = torch.Tensor(feature_mean).cuda()
        self.feature_prec = torch.Tensor(feature_prec).cuda()
        self.component_weight = torch.Tensor(component_weight).cuda()
        self.transform_matrix = torch.Tensor(transform_matrix).cuda()
    def train_epoch(self, epoch_idx):
        """One training epoch; requires setup() to have been run first."""
        self.net.train()
        loss_avg = 0.0
        train_dataiter = iter(self.train_loader)
        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}: '.format(epoch_idx),
                               position=0,
                               leave=True):
            batch = next(train_dataiter)
            data = batch['data'].cuda()
            target = batch['label'].cuda()
            # mixup operation
            index, lam = prepare_mixup(batch, self.trainer_args.alpha)
            data_mix = mixing(batch['data'].cuda(), index, lam)
            soft_label_mix = mixing(batch['soft_label'].cuda(), index, lam)
            # classfication loss
            logits_cls = self.net(data)
            loss_clsstd = F.cross_entropy(logits_cls, target)  # standard cls
            logits_mix = self.net(data_mix)
            loss_clsmix = soft_cross_entropy(logits_mix, soft_label_mix)
            # source awareness enhancement
            prob_id = compute_single_GMM_score(self.net, data,
                                               self.feature_mean,
                                               self.feature_prec,
                                               self.component_weight,
                                               self.transform_matrix, 0,
                                               self.trainer_args.feature_type)
            prob_ood = compute_single_GMM_score(self.net, data_mix,
                                                self.feature_mean,
                                                self.feature_prec,
                                                self.component_weight,
                                                self.transform_matrix, 0,
                                                self.trainer_args.feature_type)
            # ID should score high (1 - mean), mixed pseudo-OOD low.
            loss_sae_id = 1 - torch.mean(prob_id)
            loss_sae_ood = torch.mean(prob_ood)
            # loss
            loss = self.trainer_args.loss_weight[0] * loss_clsstd \
                + self.trainer_args.loss_weight[1] * loss_clsmix \
                + self.trainer_args.loss_weight[2] * loss_sae_id \
                + self.trainer_args.loss_weight[3] * loss_sae_ood
            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            self.scheduler.step()
            # exponential moving average, show smooth values
            with torch.no_grad():
                loss_avg = loss_avg * 0.8 + float(loss) * 0.2
        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = loss_avg
        return self.net, metrics
# === openood/trainers/udg_trainer.py ===
import time
import faiss
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
import openood.utils.comm as comm
from openood.utils import Config
from ..losses import rew_ce, rew_sce
from .base_trainer import BaseTrainer
class UDGTrainer(BaseTrainer):
    """Unsupervised Dual Grouping (UDG) trainer.

    Extends BaseTrainer with an unlabeled auxiliary loader. Each epoch:
      1. ``_run_clustering`` — k-means over features of labeled +
         unlabeled data; assigns cluster ids and inverse-frequency
         reweights, promotes unlabeled samples in pure clusters to
         in-distribution (IDF) and boosts the OE confidence of clusters
         dominated by known-OOD samples (OLE);
      2. ``_compute_loss`` — classification + outlier-exposure +
         auxiliary cluster-prediction losses.
    """
    def __init__(
        self,
        net: nn.Module,
        train_loader: DataLoader,
        train_unlabeled_loader: DataLoader,
        config: Config,
    ) -> None:
        super().__init__(net, train_loader, config)

        self.train_unlabeled_loader = train_unlabeled_loader
        # clustering / purity-filtering hyperparameters
        self.num_clusters = config.trainer.num_clusters
        self.purity_ind_thresh = config.trainer.purity_ind_thresh
        self.purity_ood_thresh = config.trainer.purity_ood_thresh
        self.oe_enhance_ratio = config.trainer.oe_enhance_ratio
        # loss weights for the OE and auxiliary (cluster) heads
        self.lambda_oe = config.trainer.lambda_oe
        self.lambda_aux = config.trainer.lambda_aux
        # Init clustering algorithm
        self.k_means = KMeans(k=config.trainer.num_clusters,
                              pca_dim=config.trainer.pca_dim)

    def train_epoch(self, epoch_idx):
        """Re-cluster, then run one optimization epoch."""
        self._run_clustering(epoch_idx)
        metrics = self._compute_loss(epoch_idx)

        return self.net, metrics

    def _compute_loss(self, epoch_idx):
        """One pass over the labeled loader.

        The unlabeled loader is cycled (re-created on StopIteration) so a
        batch of unlabeled data accompanies every labeled batch.
        """
        self.net.train()  # enter train mode

        loss_avg, loss_cls_avg, loss_oe_avg, loss_aux_avg = 0.0, 0.0, 0.0, 0.0
        train_dataiter = iter(self.train_loader)
        unlabeled_dataiter = iter(self.train_unlabeled_loader)

        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}: '.format(epoch_idx),
                               position=0,
                               leave=True,
                               disable=not comm.is_main_process()):
            batch = next(train_dataiter)
            try:
                unlabeled_batch = next(unlabeled_dataiter)
            except StopIteration:
                unlabeled_dataiter = iter(self.train_unlabeled_loader)
                unlabeled_batch = next(unlabeled_dataiter)

            data = batch['data'].cuda()
            unlabeled_data = unlabeled_batch['data'].cuda()

            # concat labeled and unlabeled data
            logits_cls, logits_aux = self.net(data, return_aux=True)
            logits_oe_cls, logits_oe_aux = self.net(unlabeled_data,
                                                    return_aux=True)

            # classification loss: only samples whose (pseudo) label is
            # not -1, i.e. labeled data plus unlabeled samples promoted
            # to ID by the purity filter.
            concat_logits_cls = torch.cat([logits_cls, logits_oe_cls])
            concat_label = torch.cat([
                batch['label'],
                unlabeled_batch['pseudo_label'].type_as(batch['label']),
            ])
            loss_cls = F.cross_entropy(
                concat_logits_cls[concat_label != -1],
                concat_label[concat_label != -1].cuda(),
            )

            # oe loss: soft-label CE on samples still considered OOD
            # (label == -1), reweighted by per-sample ood confidence.
            concat_softlabel = torch.cat(
                [batch['soft_label'], unlabeled_batch['pseudo_softlabel']])
            concat_conf = torch.cat(
                [batch['ood_conf'], unlabeled_batch['ood_conf']])
            loss_oe = rew_sce(
                concat_logits_cls[concat_label == -1],
                concat_softlabel[concat_label == -1].cuda(),
                concat_conf[concat_label == -1].cuda(),
            )

            # aux loss: predict the k-means cluster id, reweighted by
            # inverse cluster frequency (set in _run_clustering).
            concat_logits_aux = torch.cat([logits_aux, logits_oe_aux])
            concat_cluster_id = torch.cat(
                [batch['cluster_id'], unlabeled_batch['cluster_id']])
            concat_cluster_reweight = torch.cat([
                batch['cluster_reweight'], unlabeled_batch['cluster_reweight']
            ])
            loss_aux = rew_ce(
                concat_logits_aux,
                concat_cluster_id.cuda(),
                concat_cluster_reweight.cuda(),
            )

            # loss addition
            loss = loss_cls + self.lambda_oe * loss_oe \
                + self.lambda_aux * loss_aux

            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            self.scheduler.step()

            with torch.no_grad():
                # exponential moving average, show smooth values
                loss_cls_avg = loss_cls_avg * 0.8 + float(loss_cls) * 0.2
                loss_oe_avg = loss_oe_avg * 0.8 + float(
                    self.lambda_oe * loss_oe) * 0.2
                loss_aux_avg = (loss_aux_avg * 0.8 +
                                float(self.lambda_aux * loss_aux) * 0.2)
                loss_avg = loss_avg * 0.8 + float(loss) * 0.2

        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['train_cls_loss'] = loss_cls_avg
        metrics['train_oe_loss'] = loss_oe_avg
        metrics['train_aux_loss'] = loss_aux_avg
        metrics['loss'] = loss_avg

        return metrics

    def _run_clustering(self, epoch_idx):
        """Re-cluster features and refresh dataset-side bookkeeping.

        Side effects on both loaders' datasets: ``cluster_id`` and
        ``cluster_reweight`` are overwritten; ``pseudo_label`` and
        ``ood_conf`` of the unlabeled dataset are refreshed. The
        auxiliary head (or ``fc`` as fallback) is re-initialized so it
        fits the fresh cluster assignment.
        """
        self.net.eval()
        start_time = time.time()

        # get data from train loader
        print('Clustering: starting gather training features...', flush=True)

        # gather train image feature
        train_idx_list, unlabeled_idx_list, feature_list, train_label_list = (
            [],
            [],
            [],
            [],
        )

        train_dataiter = iter(self.train_loader)
        for step in tqdm(range(1,
                               len(train_dataiter) + 1),
                         desc='Epoch {:03d} ID Clustering: '.format(epoch_idx),
                         position=0,
                         leave=True,
                         disable=not comm.is_main_process()):
            batch = next(train_dataiter)
            index = batch['index']
            label = batch['label']

            # we use no augmented image for clustering
            data = batch['data_aux'].cuda()
            _, feature = self.net(data, return_feature=True)
            feature = feature.detach()

            # evaluation
            for idx in range(len(data)):
                train_idx_list.append(index[idx].tolist())
                train_label_list.append(label[idx].tolist())
                feature_list.append(feature[idx].cpu().tolist())
        num_train_data = len(feature_list)

        train_idx_list = np.array(train_idx_list, dtype=int)
        train_label_list = np.array(train_label_list, dtype=int)
        # sort_array undoes loader shuffling: value i lands at its
        # original dataset index.
        train_label_list = sort_array(train_label_list, train_idx_list)
        # in-distribution samples always have pseudo labels == actual labels
        self.train_loader.dataset.pseudo_label = train_label_list
        torch.cuda.empty_cache()

        # gather unlabeled image feature in order
        unlabeled_conf_list, unlabeled_pseudo_list = [], []
        unlabeled_dataiter = iter(self.train_unlabeled_loader)
        for step in tqdm(range(1,
                               len(unlabeled_dataiter) + 1),
                         desc='Epoch {:03d} OE Clustering: '.format(epoch_idx),
                         position=0,
                         leave=True,
                         disable=not comm.is_main_process()):
            batch = next(unlabeled_dataiter)
            index = batch['index']

            # we use no augmented image for clustering
            data = batch['data_aux'].cuda()
            logit, feature = self.net(data, return_feature=True)
            feature = feature.detach()
            logit = logit.detach()
            # network predictions provide pseudo labels / confidences for
            # unlabeled samples that may later be promoted to ID
            score = torch.softmax(logit, dim=1)
            conf, pseudo = torch.max(score, dim=1)

            # evaluation
            for idx in range(len(data)):
                unlabeled_idx_list.append(index[idx].tolist())
                feature_list.append(feature[idx].cpu().tolist())
                unlabeled_conf_list.append(conf[idx].cpu().tolist())
                unlabeled_pseudo_list.append(pseudo[idx].cpu().tolist())

        feature_list = np.array(feature_list)
        unlabeled_idx_list = np.array(unlabeled_idx_list, dtype=int)
        unlabeled_conf_list = np.array(unlabeled_conf_list)
        unlabeled_pseudo_list = np.array(unlabeled_pseudo_list)
        unlabeled_conf_list = sort_array(unlabeled_conf_list,
                                         unlabeled_idx_list)
        unlabeled_pseudo_list = sort_array(unlabeled_pseudo_list,
                                           unlabeled_idx_list)
        torch.cuda.empty_cache()

        print('\nAssigning Cluster Labels...', flush=True)
        # joint k-means over labeled + unlabeled features; the first
        # num_train_data rows belong to the labeled set
        cluster_id = self.k_means.cluster(feature_list)
        train_cluster_id = cluster_id[:num_train_data]
        unlabeled_cluster_id = cluster_id[num_train_data:]

        # assign cluster id to samples. Sorted by shuffle-recording index.
        train_cluster_id = sort_array(train_cluster_id, train_idx_list)
        unlabeled_cluster_id = sort_array(unlabeled_cluster_id,
                                          unlabeled_idx_list)
        self.train_loader.dataset.cluster_id = train_cluster_id
        self.train_unlabeled_loader.dataset.cluster_id = unlabeled_cluster_id
        cluster_id = np.concatenate([train_cluster_id, unlabeled_cluster_id])

        # reweighting based on samples in clusters:
        # inverse square-root cluster frequency, normalized to mean 1
        cluster_stat = np.zeros(self.num_clusters)
        cluster_id_list, cluster_id_counts = np.unique(cluster_id,
                                                       return_counts=True)
        for cluster_idx, counts in zip(cluster_id_list, cluster_id_counts):
            cluster_stat[cluster_idx] = counts
        inv_class_freq = 1 / (cluster_stat + 1e-10)
        sample_weight = np.power(inv_class_freq, 0.5)
        sample_weight *= 1 / sample_weight.mean()
        sample_weight_list = np.array([sample_weight[i] for i in cluster_id])
        self.train_loader.dataset.cluster_reweight \
            = sample_weight_list[:num_train_data]
        self.train_unlabeled_loader.dataset.cluster_reweight \
            = sample_weight_list[num_train_data:]

        print('In-Distribution Filtering (with OOD Enhancement)...',
              flush=True)
        old_train_pseudo_label \
            = self.train_loader.dataset.pseudo_label
        old_unlabeled_pseudo_label \
            = self.train_unlabeled_loader.dataset.pseudo_label
        old_pseudo_label = np.append(old_train_pseudo_label,
                                     old_unlabeled_pseudo_label).astype(int)
        # default: every sample is OOD (-1) unless its cluster is pure
        new_pseudo_label = (-1 * np.ones_like(old_pseudo_label)).astype(int)
        # process ood confidence for oe loss enhancement (ole)
        new_ood_conf = np.ones_like(old_pseudo_label).astype(float)
        total_num_to_filter = 0
        purity_ind_thresh = self.purity_ind_thresh
        purity_ood_thresh = self.purity_ood_thresh

        # pick out clusters with purity over threshold
        for cluster_idx in range(self.num_clusters):
            label_in_cluster, label_counts = np.unique(
                old_pseudo_label[cluster_id == cluster_idx],
                return_counts=True)
            cluster_size = len(old_pseudo_label[cluster_id == cluster_idx])
            purity = label_counts / cluster_size  # purity list for each label

            # idf
            if np.any(purity > purity_ind_thresh):
                majority_label = label_in_cluster[purity > purity_ind_thresh][
                    0]  # first element in the list
                new_pseudo_label[cluster_id == cluster_idx] = majority_label
                # this might also change some ID but nvm
                # NOTE(review): 'majority_label > 0' excludes class 0 from
                # being counted as an ID cluster — '>= 0' looks intended.
                # Also len(label_in_cluster == -1) is the number of DISTINCT
                # labels in the cluster, not the count of -1 samples;
                # (label_in_cluster == -1).sum() or the matching entry of
                # label_counts was probably meant. Both only affect the
                # log line below — verify against the UDG paper code.
                if majority_label > 0:  # ID cluster
                    num_to_filter = len(label_in_cluster == -1)
                    total_num_to_filter += num_to_filter
            # ole
            elif np.any(purity > purity_ood_thresh):
                majority_label = label_in_cluster[
                    purity > purity_ood_thresh][0]
                if majority_label == -1:
                    new_ood_conf[cluster_id ==
                                 cluster_idx] = self.oe_enhance_ratio

        print(f'{total_num_to_filter} sample(s) filtered!', flush=True)
        self.train_unlabeled_loader.dataset.pseudo_label = new_pseudo_label[
            num_train_data:]
        self.train_unlabeled_loader.dataset.ood_conf = new_ood_conf[
            num_train_data:]

        print('Randomize Auxiliary Head...', flush=True)
        if hasattr(self.net, 'fc_aux'):
            # reset auxiliary branch
            self.net.fc_aux.weight.data.normal_(mean=0.0, std=0.01)
            self.net.fc_aux.bias.data.zero_()
        else:
            # reset fc for unsupervised learning (baseline)
            self.net.fc.weight.data.normal_(mean=0.0, std=0.01)
            self.net.fc.bias.data.zero_()

        print(
            '# Online Clustering Completed! Duration: {:.2f}s #'.format(
                time.time() - start_time),
            flush=True,
        )
def preprocess_features(npdata, pca=256):
    """PCA-whiten and L2-normalize a feature matrix.

    Args:
        npdata (np.array N * ndim): features to preprocess
        pca (int): dim of output

    Returns:
        np.array of dim N * pca: data PCA-reduced, whitened and L2-normalized
    """
    ndim = npdata.shape[1]
    feats = npdata.astype('float32')

    # PCA-whitening with Faiss (eigen_power=-0.5 applies the whitening).
    pca_matrix = faiss.PCAMatrix(ndim, pca, eigen_power=-0.5)
    pca_matrix.train(feats)
    assert pca_matrix.is_trained
    feats = pca_matrix.apply_py(feats)

    # Row-wise L2 normalization.
    norms = np.linalg.norm(feats, axis=1)
    return feats / norms[:, np.newaxis]
def run_kmeans(x, nmb_clusters, verbose=False):
    """Run k-means on a single GPU with Faiss.

    Args:
        x: data array of shape (n, d)
        nmb_clusters (int): number of clusters

    Returns:
        list: ids of data in each cluster
    """
    n_data, d = x.shape

    # Configure the Faiss clustering object. The seed is redrawn on every
    # call so successive epochs do not start from identical centroids.
    clus = faiss.Clustering(d, nmb_clusters)
    clus.seed = np.random.randint(1234)
    clus.niter = 20
    clus.max_points_per_centroid = 10000000

    # Flat (exact) L2 index on GPU 0.
    res = faiss.StandardGpuResources()
    flat_config = faiss.GpuIndexFlatConfig()
    flat_config.useFloat16 = False
    flat_config.device = 0
    index = faiss.GpuIndexFlatL2(res, d, flat_config)

    # Train k-means, then assign every point to its nearest centroid.
    clus.train(x, index)
    _, assignments = index.search(x, 1)
    return assignments.reshape(-1, )
class KMeans(object):
    """PCA-whitening preprocessing followed by GPU k-means clustering."""

    def __init__(self, k, pca_dim):
        self.k = k
        self.pca_dim = pca_dim

    def cluster(self, data, verbose=True):
        """Performs k-means clustering.

        Args:
            data (np.array N * dim): data to cluster
        """
        # PCA-reducing, whitening and L2-normalization
        xb = preprocess_features(data, pca=self.pca_dim)

        if not np.isnan(xb).any():
            # cluster the whitened data
            return run_kmeans(xb, self.k, verbose)

        # Whitening produced NaNs; fall back to the un-whitened data.
        # NOTE(review): the branch below clusters data_norm exactly when it
        # CONTAINS NaNs and the raw data otherwise — the condition looks
        # inverted, but the original control flow is preserved here; verify.
        row_sums = np.linalg.norm(data, axis=1)
        data_norm = data / row_sums[:, np.newaxis]
        if np.isnan(data_norm).any():
            return run_kmeans(data_norm, self.k, verbose)
        return run_kmeans(data, self.k, verbose)
def sort_array(old_array, index_array):
    """Scatter *old_array* back into its original ordering.

    Element ``old_array[i]`` is placed at position ``index_array[i]``,
    undoing the shuffling recorded in ``index_array`` (assumed to be a
    full permutation of ``range(len(old_array))``).

    Args:
        old_array (np.ndarray): values gathered in shuffled order.
        index_array (np.ndarray): original dataset index of each value.

    Returns:
        np.ndarray: values restored to dataset order.
    """
    # empty_like avoids the pointless fill pass of the previous ones_like
    # (and no longer masks partial index coverage with silent 1s).
    restored = np.empty_like(old_array)
    restored[index_array] = old_array
    return restored
# === openood/trainers/utils.py ===
from torch.utils.data import DataLoader
from openood.utils import Config
from .arpl_gan_trainer import ARPLGANTrainer
from .arpl_trainer import ARPLTrainer
from .augmix_trainer import AugMixTrainer
from .base_trainer import BaseTrainer
from .cider_trainer import CIDERTrainer
from .conf_branch_trainer import ConfBranchTrainer
from .csi_trainer import CSITrainer
from .cutmix_trainer import CutMixTrainer
from .cutpaste_trainer import CutPasteTrainer
from .draem_trainer import DRAEMTrainer
from .dropout_trainer import DropoutTrainer
from .dsvdd_trainer import AETrainer, DSVDDTrainer
from .godin_trainer import GodinTrainer
from .kdad_trainer import KdadTrainer
from .logitnorm_trainer import LogitNormTrainer
from .mcd_trainer import MCDTrainer
from .mixup_trainer import MixupTrainer
from .mos_trainer import MOSTrainer
from .npos_trainer import NPOSTrainer
from .oe_trainer import OETrainer
from .opengan_trainer import OpenGanTrainer
from .rd4ad_trainer import Rd4adTrainer
from .sae_trainer import SAETrainer
from .udg_trainer import UDGTrainer
from .vos_trainer import VOSTrainer
from .rts_trainer import RTSTrainer
from .rotpred_trainer import RotPredTrainer
from .regmixup_trainer import RegMixupTrainer
from .mixoe_trainer import MixOETrainer
def get_trainer(net, train_loader: DataLoader, val_loader: DataLoader,
                config: Config):
    """Instantiate the trainer selected by ``config.trainer.name``.

    Args:
        net: the network to train.
        train_loader: a single DataLoader for standard trainers, or a
            sequence of two loaders (labeled, unlabeled) for trainers
            that consume auxiliary unlabeled data (oe / mcd / udg /
            mixoe).
        val_loader: validation loader (only consumed by 'cider'/'npos').
        config: experiment configuration.

    Returns:
        A trainer instance.

    Raises:
        KeyError: if ``config.trainer.name`` is unknown for the given
            loader type.
    """
    # isinstance (rather than an exact `type(...) is` check) also routes
    # DataLoader subclasses to the single-loader branch.
    if isinstance(train_loader, DataLoader):
        trainers = {
            'base': BaseTrainer,
            'augmix': AugMixTrainer,
            'mixup': MixupTrainer,
            'regmixup': RegMixupTrainer,
            'sae': SAETrainer,
            'draem': DRAEMTrainer,
            # 'kdad' was listed twice; the duplicate key has been removed.
            'kdad': KdadTrainer,
            'conf_branch': ConfBranchTrainer,
            'dcae': AETrainer,
            'dsvdd': DSVDDTrainer,
            'npos': NPOSTrainer,
            'opengan': OpenGanTrainer,
            'godin': GodinTrainer,
            'arpl': ARPLTrainer,
            'arpl_gan': ARPLGANTrainer,
            'mos': MOSTrainer,
            'vos': VOSTrainer,
            'cider': CIDERTrainer,
            'cutpaste': CutPasteTrainer,
            'cutmix': CutMixTrainer,
            'dropout': DropoutTrainer,
            'csi': CSITrainer,
            'logitnorm': LogitNormTrainer,
            'rd4ad': Rd4adTrainer,
            'rts': RTSTrainer,
            'rotpred': RotPredTrainer
        }
        # cider/npos additionally require the validation loader.
        if config.trainer.name in ['cider', 'npos']:
            return trainers[config.trainer.name](net, train_loader, val_loader,
                                                 config)
        else:
            return trainers[config.trainer.name](net, train_loader, config)
    else:
        # Two loaders: (labeled, unlabeled).
        trainers = {
            'oe': OETrainer,
            'mcd': MCDTrainer,
            'udg': UDGTrainer,
            'mixoe': MixOETrainer
        }
        return trainers[config.trainer.name](net, train_loader[0],
                                             train_loader[1], config)
# === openood/trainers/vos_trainer.py ===
import numpy as np
import torch
import torch.nn.functional as F
from torch.distributions.multivariate_normal import MultivariateNormal
from tqdm import tqdm
import openood.utils.comm as comm
from openood.utils import Config
def cosine_annealing(step, total_steps, lr_max, lr_min):
    """Cosine interpolation from lr_max (at step 0) to lr_min (at total_steps)."""
    cos_factor = 0.5 * (1 + np.cos(step / total_steps * np.pi))
    return lr_min + (lr_max - lr_min) * cos_factor
class VOSTrainer:
    """Trainer that synthesizes virtual outliers in feature space.

    Per-class FIFO feature queues (``data_dict``) are filled first; once
    every queue holds ``sample_number`` features and ``start_epoch`` is
    reached, class-conditional Gaussians with a shared covariance are
    estimated, low-likelihood samples are drawn as virtual outliers, and
    a small logistic head on the energy score separates them from
    in-distribution features (regularization term ``lr_reg_loss``).
    """
    def __init__(self, net, train_loader, config: Config):
        self.train_loader = train_loader
        self.config = config
        self.net = net
        # NOTE(review): weight_energy is registered with the optimizer but
        # never referenced again — log_sum_exp() below constructs its own
        # fresh Linear on every call, so these weights are optimized but
        # unused. Verify against the reference VOS implementation.
        weight_energy = torch.nn.Linear(config.num_classes, 1).cuda()
        torch.nn.init.uniform_(weight_energy.weight)
        # binary head: scalar energy score -> {virtual OOD, ID} logits
        self.logistic_regression = torch.nn.Linear(1, 2).cuda()
        self.optimizer = torch.optim.SGD(
            list(net.parameters()) + list(weight_energy.parameters()) +
            list(self.logistic_regression.parameters()),
            config.optimizer['lr'],
            momentum=config.optimizer['momentum'],
            weight_decay=config.optimizer['weight_decay'],
            nesterov=True)
        self.scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer,
            lr_lambda=lambda step: cosine_annealing(
                step, config.optimizer['num_epochs'] * len(train_loader), 1,
                1e-6 / config.optimizer['lr']))
        # number_dict[c]: how many features are queued for class c;
        # data_dict[c]: the last `sample_number` features seen for class c.
        self.number_dict = {}
        for i in range(self.config['num_classes']):
            self.number_dict[i] = 0
        self.data_dict = torch.zeros(self.config['num_classes'],
                                     self.config['sample_number'],
                                     self.config['feature_dim']).cuda()

    def train_epoch(self, epoch_idx):
        """Train one epoch; returns (net, metrics with EMA-smoothed loss)."""
        self.net.train()
        loss_avg = 0.0
        sample_number = self.config['sample_number']
        num_classes = self.config['num_classes']
        train_dataiter = iter(self.train_loader)
        # identity used to regularize the shared covariance estimate
        eye_matrix = torch.eye(self.config['feature_dim'], device='cuda')
        for train_step in tqdm(range(1,
                                     len(train_dataiter) + 1),
                               desc='Epoch {:03d}'.format(epoch_idx),
                               position=0,
                               leave=True,
                               disable=not comm.is_main_process()):
            batch = next(train_dataiter)
            images = batch['data'].cuda()
            labels = batch['label'].cuda()
            # x: logits, output: penultimate features
            x, output = self.net.forward(images, return_feature=True)
            # total number of features currently queued across all classes
            sum_temp = 0
            for index in range(num_classes):
                sum_temp += self.number_dict[index]
            lr_reg_loss = torch.zeros(1).cuda()[0]
            # queues full but still warming up: only refresh the FIFO queues
            if (sum_temp == num_classes * sample_number
                    and epoch_idx < self.config['start_epoch']):
                target_numpy = labels.cpu().data.numpy()
                for index in range(len(labels)):
                    dict_key = target_numpy[index]
                    self.data_dict[dict_key] = torch.cat(
                        (self.data_dict[dict_key][1:],
                         output[index].detach().view(1, -1)), 0)
            # queues full and past start_epoch: refresh queues, then fit
            # Gaussians and synthesize virtual outliers
            elif (sum_temp == num_classes * sample_number
                    and epoch_idx >= self.config['start_epoch']):
                target_numpy = labels.cpu().data.numpy()
                for index in range(len(labels)):
                    dict_key = target_numpy[index]
                    self.data_dict[dict_key] = torch.cat(
                        (self.data_dict[dict_key][1:],
                         output[index].detach().view(1, -1)), 0)
                # X: centered features of all classes; mean_embed_id: the
                # per-class mean embeddings
                for index in range(num_classes):
                    if index == 0:
                        X = self.data_dict[index] - self.data_dict[index].mean(
                            0)
                        mean_embed_id = self.data_dict[index].mean(0).view(
                            1, -1)
                    else:
                        X = torch.cat((X, self.data_dict[index] -
                                       self.data_dict[index].mean(0)), 0)
                        mean_embed_id = torch.cat(
                            (mean_embed_id, self.data_dict[index].mean(0).view(
                                1, -1)), 0)
                # shared ('tied') covariance across classes, regularized
                temp_precision = torch.mm(X.t(), X) / len(X)
                temp_precision += 0.0001 * eye_matrix
                for index in range(num_classes):
                    new_dis = MultivariateNormal(
                        loc=mean_embed_id[index],
                        covariance_matrix=temp_precision)
                    negative_samples = new_dis.rsample(
                        (self.config['sample_from'], ))
                    prob_density = new_dis.log_prob(negative_samples)
                    # keep the `select` lowest-likelihood draws as outliers
                    cur_samples, index_prob = torch.topk(
                        -prob_density, self.config['select'])
                    if index == 0:
                        ood_samples = negative_samples[index_prob]
                    else:
                        ood_samples = torch.cat(
                            (ood_samples, negative_samples[index_prob]), 0)
                if len(ood_samples) != 0:
                    # logistic head on energy scores: ID (1) vs virtual
                    # OOD (0)
                    energy_score_for_fg = log_sum_exp(x,
                                                      num_classes=num_classes,
                                                      dim=1)
                    try:
                        predictions_ood = self.net.fc(ood_samples)
                    except AttributeError:
                        # DataParallel-wrapped network
                        predictions_ood = self.net.module.fc(ood_samples)
                    energy_score_for_bg = log_sum_exp(predictions_ood,
                                                      num_classes=num_classes,
                                                      dim=1)
                    input_for_lr = torch.cat(
                        (energy_score_for_fg, energy_score_for_bg), -1)
                    labels_for_lr = torch.cat(
                        (torch.ones(len(output)).cuda(),
                         torch.zeros(len(ood_samples)).cuda()), -1)
                    output1 = self.logistic_regression(input_for_lr.view(
                        -1, 1))
                    lr_reg_loss = F.cross_entropy(output1,
                                                  labels_for_lr.long())
            # queues not yet full: append features until every class has
            # sample_number entries
            else:
                target_numpy = labels.cpu().data.numpy()
                for index in range(len(labels)):
                    dict_key = target_numpy[index]
                    if self.number_dict[dict_key] < sample_number:
                        self.data_dict[dict_key][self.number_dict[
                            dict_key]] = output[index].detach()
                        self.number_dict[dict_key] += 1
            self.optimizer.zero_grad()
            loss = F.cross_entropy(x, labels)
            loss += self.config.trainer['loss_weight'] * lr_reg_loss
            loss.backward()
            self.optimizer.step()
            self.scheduler.step()
            # exponential moving average, show smooth values
            loss_avg = loss_avg * 0.8 + float(loss) * 0.2
        metrics = {}
        metrics['loss'] = loss_avg
        metrics['epoch_idx'] = epoch_idx
        return self.net, metrics
def log_sum_exp(value, num_classes=10, dim=None, keepdim=False):
    """Numerically stable weighted log-sum-exp (VOS energy score).

    With ``dim`` given, computes ``m + log(sum(relu(w) * exp(value - m)))``
    along that dimension (``m`` is the per-row max, ``w`` the weights of a
    freshly initialized ``Linear``), clamped below at -1. With ``dim=None``,
    computes a plain unweighted log-sum-exp over all elements.

    NOTE(review): ``weight_energy`` is re-created (randomly initialized) on
    every call, so the learnable energy weights built in
    ``VOSTrainer.__init__`` are never used here — verify against the
    reference VOS implementation.

    Args:
        value: logits tensor (CUDA; the weight layer is placed on GPU).
        num_classes: width of the energy weight vector.
        dim: dimension to reduce, or None for a global reduction.
        keepdim: keep the reduced dimension.

    Returns:
        Tensor of energy scores.
    """
    # (A previous dead statement computed value.exp().sum(...).log() and
    # discarded the result; it has been removed.)
    # TODO: torch.max(value, dim=None) threw an error at time of writing
    weight_energy = torch.nn.Linear(num_classes, 1).cuda()
    if dim is not None:
        # subtract the row max for numerical stability
        m, _ = torch.max(value, dim=dim, keepdim=True)
        value0 = value - m
        if keepdim is False:
            m = m.squeeze(dim)
        output = m + torch.log(
            torch.sum(F.relu(weight_energy.weight) * torch.exp(value0),
                      dim=dim,
                      keepdim=keepdim))
        # Lower-bound the energy at -1. torch.clamp produces the same
        # values as the previous CPU round-trip through a Python list, but
        # stays on-device and no longer detaches the score from the
        # autograd graph (the round-trip silently cut gradient flow into
        # lr_reg_loss).
        return torch.clamp(output, min=-1)
    else:
        m = torch.max(value)
        sum_exp = torch.sum(torch.exp(value - m))
        return m + torch.log(sum_exp)
# === openood/utils/__init__.py ===
from .config import Config, setup_config
from .launch import launch
from .logger import setup_logger
# === openood/utils/comm.py ===
# Copyright (c) Facebook, Inc. and its affiliates.
"""This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import functools
import numpy as np
import torch
import torch.distributed as dist
_LOCAL_PROCESS_GROUP = None
"""
A torch process group which only includes processes
that on the same machine as the current process.
This variable is set when processes are spawned
by `launch()` in "engine/launch.py".
"""
def get_world_size() -> int:
    """Number of processes in the default group (1 when not distributed)."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
def get_rank() -> int:
    """Global rank of this process (0 when not distributed)."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
def get_local_rank() -> int:
    """
    Returns:
        The rank of the current process
        within the local (per-machine) process group.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return 0
    assert (
        _LOCAL_PROCESS_GROUP is not None
    ), 'Local process group is not created! '\
        'Please use launch() to spawn processes!'
    return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
def get_local_size() -> int:
    """
    Returns:
        The size of the per-machine process group,
        i.e. the number of processes per machine.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return 1
    return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
def is_main_process() -> bool:
    """Return True iff this process has global rank 0."""
    rank = get_rank()
    return rank == 0
def synchronize():
    """Barrier across all processes (no-op when not distributed)."""
    if not dist.is_available() or not dist.is_initialized():
        return
    if dist.get_world_size() == 1:
        return
    if dist.get_backend() == dist.Backend.NCCL:
        # device_ids is needed to avoid warnings; valid only for NCCL.
        dist.barrier(device_ids=[torch.cuda.current_device()])
    else:
        dist.barrier()
@functools.lru_cache()
def _get_global_gloo_group():
    """Return (and cache) a gloo-backend group containing all ranks."""
    if dist.get_backend() == 'nccl':
        return dist.new_group(backend='gloo')
    return dist.group.WORLD
def all_gather(data, group=None):
    """Run all_gather on arbitrary picklable data (not necessarily tensors).

    Args:
        data: any picklable object
        group: a torch process group. By default, will use a group which
            contains all ranks on gloo backend.

    Returns:
        list[data]: list of data gathered from each rank
    """
    if get_world_size() == 1:
        return [data]
    if group is None:
        # Use the CPU (gloo) group by default, to reduce GPU RAM usage.
        group = _get_global_gloo_group()
    world_size = dist.get_world_size(group)
    if world_size == 1:
        return [data]

    gathered = [None] * world_size
    dist.all_gather_object(gathered, data, group=group)
    return gathered
def gather(data, dst=0, group=None):
    """Run gather on arbitrary picklable data (not necessarily tensors).

    Args:
        data: any picklable object
        dst (int): destination rank
        group: a torch process group. By default, will use a group which
            contains all ranks on gloo backend.

    Returns:
        list[data]: on dst, a list of data gathered from each rank.
            Otherwise, an empty list.
    """
    if get_world_size() == 1:
        return [data]
    if group is None:
        group = _get_global_gloo_group()
    world_size = dist.get_world_size(group=group)
    if world_size == 1:
        return [data]

    if dist.get_rank(group=group) == dst:
        collected = [None] * world_size
        dist.gather_object(data, collected, dst=dst, group=group)
        return collected
    # non-destination ranks contribute their data and receive nothing
    dist.gather_object(data, None, dst=dst, group=group)
    return []
def shared_random_seed():
    """
    Returns:
        int: a random number that is the same across all workers.
        If workers need a shared RNG, they can use this shared seed to
        create one.

    All workers must call this function, otherwise it will deadlock.
    """
    local_seed = np.random.randint(2**31)
    # Everyone adopts rank 0's draw.
    return all_gather(local_seed)[0]
def reduce_dict(input_dict, average=True):
    """Reduce the values in the dictionary from all processes so that process
    with rank 0 has the reduced results.

    Args:
        input_dict (dict): inputs to be reduced.
            All the values must be scalar CUDA Tensor.
        average (bool): whether to do average or sum

    Returns:
        a dict with the same keys as input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        # Sorting the keys keeps the reduce order consistent across ranks.
        keys = sorted(input_dict.keys())
        stacked = torch.stack([input_dict[k] for k in keys], dim=0)
        dist.reduce(stacked, dst=0)
        if dist.get_rank() == 0 and average:
            # only the destination rank holds the accumulated sum, so only
            # divide by world_size there
            stacked /= world_size
        return {k: v for k, v in zip(keys, stacked)}
# === openood/utils/config.py ===
import argparse
import os
import re
import yaml
def setup_config(config_process_order=('merge', 'parse_args', 'parse_refs')):
    """Build the experiment Config from the command line.

    Reads ``--config`` (one or more YAML paths) plus any extra
    ``--key value`` overrides, then applies the processing steps in
    ``config_process_order``:

    * ``merge``: stack the YAML files into a single Config;
    * ``parse_args``: overwrite default settings with the remaining
      command line arguments;
    * ``parse_refs``: replace '@{xxx.yyy}'-like values with referenced
      values.

    Merging before ``parse_refs`` lets later configs re-use references
    defined in earlier ones. For example, if
        config1.key1 = jkyang
        config1.key2 = '@{key1}'
        config2.key1 = yzang
        config3 = merge_configs(config1, config2)
        config3.parse_refs()
    then config3.key2 will be yzang rather than jkyang.

    Return:
        An object of <class 'openood.utils.config.Config'>.
        Can be understanded as a dictionary.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', dest='config', nargs='+', required=True)
    opt, unknown_args = parser.parse_known_args()
    config = [Config(path) for path in opt.config]

    for process in config_process_order:
        if process == 'merge':
            config = merge_configs(*config)
        elif process == 'parse_args':
            cfgs = [config] if isinstance(config, Config) else config
            for cfg in cfgs:
                cfg.parse_args(unknown_args)
        elif process == 'parse_refs':
            cfgs = [config] if isinstance(config, Config) else config
            for cfg in cfgs:
                cfg.parse_refs()
        else:
            raise ValueError('unknown config process name: {}'.format(process))

    # manually modify 'output_dir'
    config.output_dir = os.path.join(config.output_dir, config.exp_name)
    return config
def parse_config(config):
    """Merge a list of Config objects and resolve '@{...}' references.

    Like setup_config(), but without any command line parsing; also
    appends exp_name to output_dir.
    """
    for process in ('merge', 'parse_refs'):
        if process == 'merge':
            config = merge_configs(*config)
        elif process == 'parse_refs':
            if isinstance(config, Config):
                config.parse_refs()
            else:
                for cfg in config:
                    cfg.parse_refs()
        else:
            raise ValueError('unknown config process name: {}'.format(process))

    # manually modify 'output_dir'
    config.output_dir = os.path.join(config.output_dir, config.exp_name)
    return config
class Config(dict):
def __init__(self, *args, **kwargs):
super(Config, self).__init__()
for arg in args:
if arg == ' ':
continue # hard code remove white space in config file list
if isinstance(arg, str):
if arg.endswith('.yml'):
with open(arg, 'r') as f:
raw_dict = yaml.safe_load(f)
else:
raise Exception('unknown file format %s' % arg)
init_assign(self, raw_dict)
elif isinstance(arg, dict):
init_assign(self, arg)
else:
raise TypeError('arg should be an instance of <str> or <dict>')
if kwargs:
init_assign(self, kwargs)
def __call__(self, *args, **kwargs):
return Config(self, *args, **kwargs)
def __repr__(self, indent=4, prefix=''):
r = []
for key, value in sorted(self.items()):
if isinstance(value, Config):
r.append('{}{}:'.format(prefix, key))
r.append(value.__repr__(indent, prefix + ' ' * indent))
else:
r.append('{}{}: {}'.format(prefix, key, value))
return '\n'.join(r)
def __setstate__(self, state):
init_assign(self, state)
def __getstate__(self):
d = dict()
for key, value in self.items():
if type(value) is Config:
value = value.__getstate__()
d[key] = value
return d
# access by '.' -> access by '[]'
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
del self[key]
# access by '[]'
def __getitem__(self, key):
sub_cfg, sub_key = consume_dots(self, key, create_default=False)
return dict.__getitem__(sub_cfg, sub_key)
def __setitem__(self, key, value):
sub_cfg, sub_key = consume_dots(self, key, create_default=True)
if sub_cfg.__contains__(sub_key) and value == '_DELETE_CONFIG_':
dict.__delitem__(sub_cfg, sub_key)
else:
dict.__setitem__(sub_cfg, sub_key, value)
def __delitem__(self, key):
sub_cfg, sub_key = consume_dots(self, key, create_default=False)
dict.__delitem__(sub_cfg, sub_key)
# access by 'in'
def __contains__(self, key):
try:
sub_cfg, sub_key = consume_dots(self, key, create_default=False)
except KeyError:
return False
return dict.__contains__(sub_cfg, sub_key)
# traverse keys / values/ items
def all_keys(self, only_leaf=True):
for key in traverse_dfs(self,
'key',
continue_type=Config,
only_leaf=only_leaf):
yield key
def all_values(self, only_leaf=True):
for value in traverse_dfs(self,
'value',
continue_type=Config,
only_leaf=only_leaf):
yield value
def all_items(self, only_leaf=True):
for key, value in traverse_dfs(self,
'item',
continue_type=Config,
only_leaf=only_leaf):
yield key, value
# for command line arguments
def parse_args(self, cmd_args=None, strict=True):
unknown_args = []
if cmd_args is None:
import sys
cmd_args = sys.argv[1:]
index = 0
while index < len(cmd_args):
arg = cmd_args[index]
err_msg = 'invalid command line argument pattern: %s' % arg
assert arg.startswith('--'), err_msg
assert len(arg) > 2, err_msg
assert arg[2] != '-', err_msg
arg = arg[2:]
if '=' in arg:
key, full_value_str = arg.split('=')
index += 1
else:
assert len(
cmd_args) > index + 1, 'incomplete command line arguments'
key = arg
full_value_str = cmd_args[index + 1]
index += 2
if ':' in full_value_str:
value_str, value_type_str = full_value_str.split(':')
value_type = eval(value_type_str)
else:
value_str = full_value_str
value_type = None
if key not in self:
if strict:
raise KeyError(key)
else:
unknown_args.extend(['--' + key, full_value_str])
continue
if value_type is None:
value_type = type(self[key])
if value_type is bool:
self[key] = {
'true': True,
'True': True,
'1': True,
'false': False,
'False': False,
'0': False,
}[value_str]
else:
self[key] = value_type(value_str)
return unknown_args
# for key reference
def parse_refs(self, subconf=None, stack_depth=1, max_stack_depth=10):
if stack_depth > max_stack_depth:
raise Exception(
('Recursively calling `parse_refs` too many times'
'with stack depth > {}. '
'A circular reference may exists in your config.\n'
'If deeper calling stack is really needed,'
'please call `parse_refs` with extra argument like: '
'`parse_refs(max_stack_depth=9999)`').format(max_stack_depth))
if subconf is None:
subconf = self
for key in subconf.keys():
value = subconf[key]
if type(value) is str and '@' in value:
if value.count('@') == 1 and value.startswith(
'@{') and value.endswith('}'):
# pure reference
ref_key = value[2:-1]
ref_value = self[ref_key]
subconf[key] = ref_value
else:
# compositional references
ref_key_list = re.findall("'@{(.+?)}'", value)
ref_key_list = list(set(ref_key_list))
ref_value_list = [
self[ref_key] for ref_key in ref_key_list
]
origin_ref_key_list = [
"'@{" + ref_key + "}'" for ref_key in ref_key_list
]
for origin_ref_key, ref_value in zip(
origin_ref_key_list, ref_value_list):
value = value.replace(origin_ref_key, str(ref_value))
subconf[key] = value
for key in subconf.keys():
value = subconf[key]
if type(value) is Config:
self.parse_refs(value, stack_depth + 1)
def merge_configs(*configs):
    """Merge any number of Config objects left-to-right into a new Config.

    Later configs override earlier ones key-by-key (leaf items are copied
    via `all_items`, so nested sections are merged, not replaced).

    Raises:
        TypeError: if any positional argument is not a Config.
    """
    final_config = Config()
    # Idiom fix: iterate the varargs directly instead of indexing with
    # range(len(...)).
    for config in configs:
        if not isinstance(config, Config):
            raise TypeError(
                'config.merge_configs expect `Config` type inputs, '
                'but got `{}`.\n'
                'Correct usage: merge_configs(config1, config2, ...)\n'
                'Incorrect usage: merge_configs([configs1, configs2, ...])'.
                format(type(config)))
        final_config = final_config(dict(config.all_items()))
    return final_config
def consume_dots(config, key, create_default):
    """Resolve a dotted key against `config`.

    Returns (container, leaf_key) where `container` is the Config that
    directly holds the final path component. With create_default=True,
    missing (or non-Config) intermediate nodes are replaced by fresh
    Config objects; otherwise KeyError is raised.
    """
    # Split off only the first component; the tail is handled recursively.
    sub_keys = key.split('.', 1)
    sub_key = sub_keys[0]
    if sub_key in Config.__dict__:
        # Guard the class API: keys like 'parse_args' would shadow methods.
        raise KeyError(
            '"{}" is a preserved API name, '
            'which should not be used as normal dictionary key'.format(
                sub_key))
    if not dict.__contains__(config, sub_key) and len(sub_keys) == 2:
        # Intermediate node missing but more path remains.
        if create_default:
            dict.__setitem__(config, sub_key, Config())
        else:
            raise KeyError(key)
    if len(sub_keys) == 1:
        # Base case: this config is the direct container of the key.
        return config, sub_key
    else:
        sub_config = dict.__getitem__(config, sub_key)
        if type(sub_config) != Config:
            # Intermediate exists but is a plain value; only overwrite it
            # when the caller asked for default creation.
            if create_default:
                sub_config = Config()
                dict.__setitem__(config, sub_key, sub_config)
            else:
                raise KeyError(key)
        return consume_dots(sub_config, sub_keys[1], create_default)
def traverse_dfs(root, mode, continue_type, only_leaf, key_prefix=''):
    """Depth-first walk over a nested mapping.

    Yields keys (dotted paths), values, or (key, value) items depending on
    `mode`. When `only_leaf` is True, internal nodes of type
    `continue_type` that have children are skipped and only their
    descendants are yielded.
    """
    for key, value in root.items():
        dotted = '.'.join([key_prefix, key]).strip('.')
        descendants = []
        if type(value) == continue_type:
            descendants = list(
                traverse_dfs(value, mode, continue_type, only_leaf, dotted))
        # Emit the current node unless it is a non-empty container and the
        # caller asked for leaves only (De Morgan of the original check).
        hidden_internal = (bool(descendants) and type(value) == continue_type
                           and only_leaf)
        if not hidden_internal:
            yield {
                'key': dotted,
                'value': value,
                'item': (dotted, value)
            }[mode]
        yield from descendants
def init_assign(config, d):
    """Copy every leaf of nested plain-dict `d` into `config`, creating
    intermediate Config nodes for dotted paths as needed."""
    flat_items = traverse_dfs(d, 'item', continue_type=dict, only_leaf=True)
    for dotted_key, value in flat_items:
        container, leaf = consume_dots(config, dotted_key,
                                       create_default=True)
        container[leaf] = value
| 12,782 | 34.706704 | 79 | py |
null | OpenOOD-main/openood/utils/launch.py | # Copyright (c) Facebook, Inc. and its affiliates.
import logging
from datetime import timedelta
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from openood.utils import comm
__all__ = ['DEFAULT_TIMEOUT', 'launch']
DEFAULT_TIMEOUT = timedelta(minutes=30)
def _find_free_port():
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Binding to port 0 will cause the OS to find an available port for us
sock.bind(('', 0))
port = sock.getsockname()[1]
sock.close()
# NOTE: there is still a chance the port could be taken by other processes.
return port
def launch(
    main_func,
    num_gpus_per_machine,
    num_machines=1,
    machine_rank=0,
    dist_url=None,
    args=(),
    timeout=DEFAULT_TIMEOUT,
):
    """Launch multi-gpu or distributed training. This function must be called
    on all machines involved in the training. It will spawn child processes
    (defined by ``num_gpus_per_machine``) on each machine.

    Args:
        main_func: a function that will be called by `main_func(*args)`
        num_gpus_per_machine (int): number of GPUs per machine
        num_machines (int): the total number of machines
        machine_rank (int): the rank of this machine
        dist_url (str): url to connect to for distributed jobs,
            including protocol e.g. "tcp://127.0.0.1:8686".
            Can be set to "auto" to automatically select a free port on
            localhost
        timeout (timedelta): timeout of the distributed workers
        args (tuple): arguments passed to main_func
    """
    world_size = num_machines * num_gpus_per_machine
    if world_size > 1:
        # https://github.com/pytorch/pytorch/pull/14391
        # TODO prctl in spawned processes
        if dist_url == 'auto':
            # Free-port discovery only works on a single machine: remote
            # workers could not learn the port chosen here.
            assert num_machines == 1, \
                'dist_url=auto not supported in multi-machine jobs.'
            port = _find_free_port()
            dist_url = f'tcp://127.0.0.1:{port}'
        if num_machines > 1 and dist_url.startswith('file://'):
            logger = logging.getLogger(__name__)
            logger.warning(
                'file:// is not a reliable init_method in multi-machine jobs.'
                'Prefer tcp://'
            )
        # One child process per local GPU; each runs _distributed_worker,
        # which joins the process group and then calls main_func.
        mp.spawn(
            _distributed_worker,
            nprocs=num_gpus_per_machine,
            args=(
                main_func,
                world_size,
                num_gpus_per_machine,
                machine_rank,
                dist_url,
                args,
                timeout,
            ),
            daemon=False,
        )
    else:
        # Single-process fallback: run the target directly in this process.
        main_func(*args)
def _distributed_worker(
    local_rank,
    main_func,
    world_size,
    num_gpus_per_machine,
    machine_rank,
    dist_url,
    args,
    timeout=DEFAULT_TIMEOUT,
):
    """Entry point of each spawned child process.

    Joins the NCCL process group, registers the per-machine local group,
    pins this process's GPU, then runs `main_func(*args)`.
    """
    assert torch.cuda.is_available(
    ), 'cuda is not available. Please check your installation.'
    # Global rank = machine offset plus this process's local GPU index.
    global_rank = machine_rank * num_gpus_per_machine + local_rank
    try:
        dist.init_process_group(
            backend='NCCL',
            init_method=dist_url,
            world_size=world_size,
            rank=global_rank,
            timeout=timeout,
        )
    except Exception as e:
        # Log the rendezvous URL to ease debugging, then re-raise.
        logger = logging.getLogger(__name__)
        logger.error('Process group URL: {}'.format(dist_url))
        raise e
    # Setup the local process group
    # (which contains ranks within the same machine)
    assert comm._LOCAL_PROCESS_GROUP is None
    num_machines = world_size // num_gpus_per_machine
    for i in range(num_machines):
        ranks_on_i = list(
            range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine))
        # dist.new_group must be called by every rank for every group, even
        # by ranks that are not members; only the matching machine keeps it.
        pg = dist.new_group(ranks_on_i)
        if i == machine_rank:
            comm._LOCAL_PROCESS_GROUP = pg
    assert num_gpus_per_machine <= torch.cuda.device_count()
    torch.cuda.set_device(local_rank)
    # synchronize is needed here to prevent a possible timeout
    # after calling init_process_group
    # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
    comm.synchronize()
    main_func(*args)
| 4,142 | 30.150376 | 79 | py |
null | OpenOOD-main/openood/utils/logger.py | import errno
import os
import os.path as osp
import sys
import yaml
import openood.utils.comm as comm
def mkdir_if_missing(dirname):
    """Create `dirname` (including parents) if it does not already exist.

    Uses ``os.makedirs(..., exist_ok=True)`` so the operation is race-free:
    a directory created by another process between the exists() check and
    the makedirs call no longer raises, replacing the manual EEXIST dance.
    """
    if not osp.exists(dirname):
        os.makedirs(dirname, exist_ok=True)
class Logger:
    """Write console output to external text file.

    Imported from
    `<https://github.com/Cysu/open-reid/blob/master/reid/utils/logging.py>`

    Args:
        fpath (str): directory to save logging file.

    Examples:
       >>> import sys
       >>> import os.path as osp
       >>> save_dir = 'output/experiment-1'
       >>> log_name = 'train.log'
       >>> sys.stdout = Logger(osp.join(save_dir, log_name))
    """
    def __init__(self, fpath=None):
        # Tee target: whatever stdout is at construction time.
        self.console = sys.stdout
        self.file = None
        if fpath is not None:
            mkdir_if_missing(osp.dirname(fpath))
            self.file = open(fpath, 'w')

    def __del__(self):
        self.close()

    def __enter__(self):
        # BUGFIX: return self so `with Logger(...) as log:` binds the
        # logger (previously returned None, making the `as` clause useless).
        return self

    def __exit__(self, *args):
        self.close()

    def write(self, msg):
        # Mirror every message to both the console and the log file.
        self.console.write(msg)
        if self.file is not None:
            self.file.write(msg)

    def flush(self):
        self.console.flush()
        if self.file is not None:
            self.file.flush()
            # fsync pushes the log to disk so it survives hard crashes.
            os.fsync(self.file.fileno())

    def close(self):
        # NOTE: this also closes the console stream (typically the real
        # sys.stdout) — do not call close() if stdout is still needed.
        self.console.close()
        if self.file is not None:
            self.file.close()
def setup_logger(config):
    """generate exp directory to save configs, logger, checkpoints, etc.

    Args:
        config: all configs of the experiment
    """
    print('------------------ Config --------------------------', flush=True)
    print(config, flush=True)
    print(u'\u2500' * 70, flush=True)
    output = config.output_dir
    # Only the main process touches the output directory; other ranks wait
    # at the synchronize() below.
    if config.save_output and comm.is_main_process():
        print('Output dir: {}'.format(output), flush=True)
        if osp.isdir(output):
            if config.merge_option == 'default':
                # Interactive prompt before reusing an existing exp dir.
                ans = input('Exp dir already exists, merge it? (y/n)')
                if ans in ['yes', 'Yes', 'YES', 'y', 'Y', 'can']:
                    save_logger(config, output)
                elif ans in ['no', 'No', 'NO', 'n', 'N']:
                    print('Quitting the process...', flush=True)
                    quit()
                else:
                    raise ValueError('Unexpected Input.')
            elif config.merge_option == 'merge':
                save_logger(config, output)
            elif config.merge_option == 'pass':
                # Skip runs whose results already exist.
                # NOTE(review): this joins config.save_output (used as a
                # boolean above) with 'ood.csv' — looks like it should be
                # `output`; confirm before relying on the 'pass' option.
                if os.path.exists(os.path.join(config.save_output, 'ood.csv')):
                    print('Exp dir already exists, quitting the process...',
                          flush=True)
                    quit()
                else:
                    save_logger(config, output)
        else:
            save_logger(config, output)
    else:
        print('No output directory.', flush=True)
    # Keep all ranks in sync before training proceeds.
    comm.synchronize()
def save_logger(config, output):
    """Persist the experiment config into `output` and tee stdout to a log
    file inside it."""
    print('Output directory path: {}'.format(output), flush=True)
    os.makedirs(output, exist_ok=True)
    # Save config
    # FIXME: saved config file is not beautified.
    with open(osp.join(output, 'config.yml'), 'w') as f:
        yaml.dump(config,
                  f,
                  default_flow_style=False,
                  sort_keys=False,
                  indent=2)
    # save log file: from here on, everything printed is mirrored there.
    sys.stdout = Logger(osp.join(output, 'log.txt'))
| 3,614 | 27.690476 | 79 | py |
null | OpenOOD-main/scripts/eval_ood.py | import os, sys
ROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
sys.path.append(ROOT_DIR)
import numpy as np
import pandas as pd
import argparse
import pickle
import collections
from glob import glob
import torch
import torch.nn as nn
import torch.nn.functional as F
from openood.evaluation_api import Evaluator
from openood.networks import ResNet18_32x32, ResNet18_224x224, ResNet50
from openood.networks.conf_branch_net import ConfBranchNet
from openood.networks.godin_net import GodinNet
from openood.networks.rot_net import RotNet
from openood.networks.csi_net import CSINet
from openood.networks.udg_net import UDGNet
from openood.networks.cider_net import CIDERNet
from openood.networks.npos_net import NPOSNet
def update(d, u):
    """Recursively merge mapping `u` into dict `d` in place; return `d`.

    Nested mappings are merged key-by-key instead of being overwritten
    wholesale.
    """
    for key, new_value in u.items():
        nested = isinstance(new_value, collections.abc.Mapping)
        d[key] = update(d.get(key, {}), new_value) if nested else new_value
    return d
# ---- command-line interface ----
parser = argparse.ArgumentParser()
parser.add_argument('--root', required=True)
parser.add_argument('--postprocessor', default='msp')
parser.add_argument(
    '--id-data',
    type=str,
    default='cifar10',
    choices=['cifar10', 'cifar100', 'aircraft', 'cub', 'imagenet200'])
parser.add_argument('--batch-size', type=int, default=200)
parser.add_argument('--save-csv', action='store_true')
parser.add_argument('--save-score', action='store_true')
parser.add_argument('--fsood', action='store_true')
args = parser.parse_args()

root = args.root

# specify an implemented postprocessor
# 'openmax', 'msp', 'temp_scaling', 'odin'...
postprocessor_name = args.postprocessor

# Per-dataset class counts and backbone architectures.
NUM_CLASSES = {'cifar10': 10, 'cifar100': 100, 'imagenet200': 200}
MODEL = {
    'cifar10': ResNet18_32x32,
    'cifar100': ResNet18_32x32,
    'imagenet200': ResNet18_224x224,
}

try:
    num_classes = NUM_CLASSES[args.id_data]
    model_arch = MODEL[args.id_data]
except KeyError:
    raise NotImplementedError(f'ID dataset {args.id_data} is not supported.')

# assume that the root folder contains subfolders each corresponding to
# a training run, e.g., s0, s1, s2
# this structure is automatically created if you use OpenOOD for train
if len(glob(os.path.join(root, 's*'))) == 0:
    raise ValueError(f'No subfolders found in {root}')

# iterate through training runs
all_metrics = []
for subfolder in sorted(glob(os.path.join(root, 's*'))):
    # load pre-setup postprocessor if exists
    if os.path.isfile(
            os.path.join(subfolder, 'postprocessors',
                         f'{postprocessor_name}.pkl')):
        with open(
                os.path.join(subfolder, 'postprocessors',
                             f'{postprocessor_name}.pkl'), 'rb') as f:
            postprocessor = pickle.load(f)
    else:
        postprocessor = None

    # load the pretrained model provided by the user
    # (several methods wrap the backbone in a method-specific head)
    if postprocessor_name == 'conf_branch':
        net = ConfBranchNet(backbone=model_arch(num_classes=num_classes),
                            num_classes=num_classes)
    elif postprocessor_name == 'godin':
        backbone = model_arch(num_classes=num_classes)
        net = GodinNet(backbone=backbone,
                       feature_size=backbone.feature_size,
                       num_classes=num_classes)
    elif postprocessor_name == 'rotpred':
        net = RotNet(backbone=model_arch(num_classes=num_classes),
                     num_classes=num_classes)
    elif 'csi' in root:
        backbone = model_arch(num_classes=num_classes)
        net = CSINet(backbone=backbone,
                     feature_size=backbone.feature_size,
                     num_classes=num_classes)
    elif 'udg' in root:
        backbone = model_arch(num_classes=num_classes)
        net = UDGNet(backbone=backbone,
                     num_classes=num_classes,
                     num_clusters=1000)
    elif postprocessor_name == 'cider':
        backbone = model_arch(num_classes=num_classes)
        net = CIDERNet(backbone,
                       head='mlp',
                       feat_dim=128,
                       num_classes=num_classes)
    elif postprocessor_name == 'npos':
        backbone = model_arch(num_classes=num_classes)
        net = NPOSNet(backbone,
                      head='mlp',
                      feat_dim=128,
                      num_classes=num_classes)
    else:
        net = model_arch(num_classes=num_classes)

    net.load_state_dict(
        torch.load(os.path.join(subfolder, 'best.ckpt'), map_location='cpu'))
    net.cuda()
    net.eval()

    evaluator = Evaluator(
        net,
        id_name=args.id_data,  # the target ID dataset
        data_root=os.path.join(ROOT_DIR, 'data'),
        config_root=os.path.join(ROOT_DIR, 'configs'),
        preprocessor=None,  # default preprocessing
        postprocessor_name=postprocessor_name,
        postprocessor=
        postprocessor,  # the user can pass his own postprocessor as well
        batch_size=args.
        batch_size,  # for certain methods the results can be slightly affected by batch size
        shuffle=False,
        num_workers=8)

    # load pre-computed scores if exist
    if os.path.isfile(
            os.path.join(subfolder, 'scores', f'{postprocessor_name}.pkl')):
        with open(
                os.path.join(subfolder, 'scores', f'{postprocessor_name}.pkl'),
                'rb') as f:
            scores = pickle.load(f)
        update(evaluator.scores, scores)
        print('Loaded pre-computed scores from file.')

    # save the postprocessor for future reuse
    if hasattr(evaluator.postprocessor, 'setup_flag'
               ) or evaluator.postprocessor.hyperparam_search_done is True:
        pp_save_root = os.path.join(subfolder, 'postprocessors')
        if not os.path.exists(pp_save_root):
            os.makedirs(pp_save_root)

        if not os.path.isfile(
                os.path.join(pp_save_root, f'{postprocessor_name}.pkl')):
            with open(os.path.join(pp_save_root, f'{postprocessor_name}.pkl'),
                      'wb') as f:
                pickle.dump(evaluator.postprocessor, f,
                            pickle.HIGHEST_PROTOCOL)

    metrics = evaluator.eval_ood(fsood=args.fsood)
    all_metrics.append(metrics.to_numpy())

    # save computed scores
    if args.save_score:
        score_save_root = os.path.join(subfolder, 'scores')
        if not os.path.exists(score_save_root):
            os.makedirs(score_save_root)
        with open(os.path.join(score_save_root, f'{postprocessor_name}.pkl'),
                  'wb') as f:
            pickle.dump(evaluator.scores, f, pickle.HIGHEST_PROTOCOL)

# compute mean metrics over training runs
# (mean/std across the s* seeds, formatted as 'mean ± std' strings)
all_metrics = np.stack(all_metrics, axis=0)
metrics_mean = np.mean(all_metrics, axis=0)
metrics_std = np.std(all_metrics, axis=0)

final_metrics = []
for i in range(len(metrics_mean)):
    temp = []
    for j in range(metrics_mean.shape[1]):
        temp.append(u'{:.2f} \u00B1 {:.2f}'.format(metrics_mean[i, j],
                                                   metrics_std[i, j]))
    final_metrics.append(temp)
df = pd.DataFrame(final_metrics, index=metrics.index, columns=metrics.columns)

if args.save_csv:
    saving_root = os.path.join(root, 'ood' if not args.fsood else 'fsood')
    if not os.path.exists(saving_root):
        os.makedirs(saving_root)
    df.to_csv(os.path.join(saving_root, f'{postprocessor_name}.csv'))
else:
    print(df)
| 7,384 | 35.559406 | 93 | py |
null | OpenOOD-main/scripts/eval_ood_imagenet.py | import collections
import os, sys
ROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
sys.path.append(ROOT_DIR)
import numpy as np
import pandas as pd
import argparse
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import ResNet50_Weights, Swin_T_Weights, ViT_B_16_Weights
from torchvision import transforms as trn
from torch.hub import load_state_dict_from_url
from openood.evaluation_api import Evaluator
from openood.networks import ResNet50, Swin_T, ViT_B_16
from openood.networks.conf_branch_net import ConfBranchNet
from openood.networks.godin_net import GodinNet
from openood.networks.rot_net import RotNet
from openood.networks.cider_net import CIDERNet
def update(d, u):
    """Recursively merge mapping `u` into dict `d` in place; return `d`.

    Nested mappings are merged key-by-key instead of being overwritten
    wholesale.
    """
    for key, new_value in u.items():
        nested = isinstance(new_value, collections.abc.Mapping)
        d[key] = update(d.get(key, {}), new_value) if nested else new_value
    return d
# ---- command-line interface ----
parser = argparse.ArgumentParser()
parser.add_argument('--arch',
                    default='resnet50',
                    choices=['resnet50', 'swin-t', 'vit-b-16'])
parser.add_argument('--tvs-version', default=1, choices=[1, 2])
parser.add_argument('--ckpt-path', default=None)
parser.add_argument('--tvs-pretrained', action='store_true')
parser.add_argument('--postprocessor', default='msp')
parser.add_argument('--save-csv', action='store_true')
parser.add_argument('--save-score', action='store_true')
parser.add_argument('--fsood', action='store_true')
parser.add_argument('--batch-size', default=200, type=int)
args = parser.parse_args()

# A checkpoint path is mandatory unless a torchvision-pretrained model is
# requested; the output root is derived from whichever source is used.
if not args.tvs_pretrained:
    assert args.ckpt_path is not None
    root = '/'.join(args.ckpt_path.split('/')[:-1])
else:
    root = os.path.join(
        ROOT_DIR, 'results',
        f'imagenet_{args.arch}_tvsv{args.tvs_version}_base_default')
    if not os.path.exists(root):
        os.makedirs(root)

# specify an implemented postprocessor
# 'openmax', 'msp', 'temp_scaling', 'odin'...
postprocessor_name = args.postprocessor

# load pre-setup postprocessor if exists
if os.path.isfile(
        os.path.join(root, 'postprocessors', f'{postprocessor_name}.pkl')):
    with open(
            os.path.join(root, 'postprocessors', f'{postprocessor_name}.pkl'),
            'rb') as f:
        postprocessor = pickle.load(f)
else:
    postprocessor = None

# assuming the model is either
# 1) torchvision pre-trained; or
# 2) a specified checkpoint
if args.tvs_pretrained:
    if args.arch == 'resnet50':
        net = ResNet50()
        weights = eval(f'ResNet50_Weights.IMAGENET1K_V{args.tvs_version}')
        net.load_state_dict(load_state_dict_from_url(weights.url))
        preprocessor = weights.transforms()
    elif args.arch == 'swin-t':
        net = Swin_T()
        weights = eval(f'Swin_T_Weights.IMAGENET1K_V{args.tvs_version}')
        net.load_state_dict(load_state_dict_from_url(weights.url))
        preprocessor = weights.transforms()
    elif args.arch == 'vit-b-16':
        net = ViT_B_16()
        weights = eval(f'ViT_B_16_Weights.IMAGENET1K_V{args.tvs_version}')
        net.load_state_dict(load_state_dict_from_url(weights.url))
        preprocessor = weights.transforms()
    else:
        raise NotImplementedError
else:
    if args.arch == 'resnet50':
        # Method-specific wrappers around the ResNet-50 backbone.
        if postprocessor_name == 'conf_branch':
            net = ConfBranchNet(backbone=ResNet50(), num_classes=1000)
        elif postprocessor_name == 'godin':
            backbone = ResNet50()
            net = GodinNet(backbone=backbone,
                           feature_size=backbone.feature_size,
                           num_classes=1000)
        elif postprocessor_name == 'rotpred':
            net = RotNet(backbone=ResNet50(), num_classes=1000)
        elif postprocessor_name == 'cider':
            net = CIDERNet(backbone=ResNet50(),
                           head='mlp',
                           feat_dim=128,
                           num_classes=1000)
        else:
            net = ResNet50()

        ckpt = torch.load(args.ckpt_path, map_location='cpu')
        net.load_state_dict(ckpt)
        # Standard ImageNet eval preprocessing.
        preprocessor = trn.Compose([
            trn.Resize(256),
            trn.CenterCrop(224),
            trn.ToTensor(),
            trn.Normalize(mean=[0.485, 0.456, 0.406],
                          std=[0.229, 0.224, 0.225])
        ])
    else:
        raise NotImplementedError

net.cuda()
net.eval()

# a unified evaluator
evaluator = Evaluator(
    net,
    id_name='imagenet',  # the target ID dataset
    data_root=os.path.join(ROOT_DIR, 'data'),
    config_root=os.path.join(ROOT_DIR, 'configs'),
    preprocessor=preprocessor,  # default preprocessing
    postprocessor_name=postprocessor_name,
    postprocessor=postprocessor,
    batch_size=args.
    batch_size,  # for certain methods the results can be slightly affected by batch size
    shuffle=False,
    num_workers=8)

# load pre-computed scores if exists
if os.path.isfile(os.path.join(root, 'scores', f'{postprocessor_name}.pkl')):
    with open(os.path.join(root, 'scores', f'{postprocessor_name}.pkl'),
              'rb') as f:
        scores = pickle.load(f)
    update(evaluator.scores, scores)
    print('Loaded pre-computed scores from file.')

# save postprocessor for future reuse
if hasattr(evaluator.postprocessor, 'setup_flag'
           ) or evaluator.postprocessor.hyperparam_search_done is True:
    pp_save_root = os.path.join(root, 'postprocessors')
    if not os.path.exists(pp_save_root):
        os.makedirs(pp_save_root)

    if not os.path.isfile(
            os.path.join(pp_save_root, f'{postprocessor_name}.pkl')):
        with open(os.path.join(pp_save_root, f'{postprocessor_name}.pkl'),
                  'wb') as f:
            pickle.dump(evaluator.postprocessor, f, pickle.HIGHEST_PROTOCOL)

# the metrics is a dataframe
metrics = evaluator.eval_ood(fsood=args.fsood)

# saving and recording
if args.save_csv:
    saving_root = os.path.join(root, 'ood' if not args.fsood else 'fsood')
    if not os.path.exists(saving_root):
        os.makedirs(saving_root)
    if not os.path.isfile(
            os.path.join(saving_root, f'{postprocessor_name}.csv')):
        metrics.to_csv(os.path.join(saving_root, f'{postprocessor_name}.csv'),
                       float_format='{:.2f}'.format)

if args.save_score:
    score_save_root = os.path.join(root, 'scores')
    if not os.path.exists(score_save_root):
        os.makedirs(score_save_root)
    with open(os.path.join(score_save_root, f'{postprocessor_name}.pkl'),
              'wb') as f:
        pickle.dump(evaluator.scores, f, pickle.HIGHEST_PROTOCOL)
| 6,599 | 34.869565 | 89 | py |
null | OpenOOD-main/scripts/ad/cutpaste/bottle_test_cutpaste.sh | #!/bin/bash
# sh scripts/a_anomaly/2_cutpaste_test.sh
# Evaluate a trained CutPaste anomaly-detection model on the 'bottle' class.

# SLURM variables/prefix kept for reference (disabled):
# GPU=1
# CPU=1
# node=68
# jobname=openood
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \

# FIX: the PYTHONPATH assignment previously ended in a backslash that
# continued into a comment line, detaching the env var from the python
# command; prefix the command directly instead.
PYTHONPATH='.':$PYTHONPATH \
python main.py \
--config configs/datasets/draem/bottle.yml \
configs/networks/cutpaste.yml \
configs/pipelines/test/test_cutpaste.yml \
configs/postprocessors/cutpaste.yml \
configs/preprocessors/cutpaste_preprocessor.yml \
--network.checkpoint "results/bottle_projectionNet_cutpaste_e100_lr0.03/best_epoch35_auroc93.08219178082192.ckpt" \
--evaluator.name ad
| 655 | 30.238095 | 115 | sh |
null | OpenOOD-main/scripts/ad/cutpaste/cifar100_test_ood_cutpaste.sh | #!/bin/bash
# sh scripts/ad/cutpaste/cifar100_test_ood_cutpaste.sh
# Evaluate a trained CutPaste model for OOD detection on CIFAR-100 (SLURM).

GPU=1
CPU=1
jobname=openood

PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} \
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/datasets/cifar100/cifar100_ood.yml \
configs/networks/cutpaste.yml \
configs/pipelines/test/test_cutpaste.yml \
configs/preprocessors/cutpaste_preprocessor.yml \
configs/postprocessors/cutpaste.yml \
--network.backbone.name resnet18_32x32 \
--num_workers 8 \
--network.pretrained True \
--network.checkpoint "results/cifar100_projectionNet_cutpaste_e100_lr0.03/best.ckpt"
| 716 | 30.173913 | 84 | sh |
null | OpenOOD-main/scripts/ad/cutpaste/cifar100_train_cutpaste.sh | #!/bin/bash
# sh scripts/ad/cutpaste/cifar100_train_cutpaste.sh
# Train a CutPaste projection head on CIFAR-100 from a pretrained backbone
# (SLURM).

GPU=1
CPU=1
node=68
jobname=openood

PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} \
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/datasets/cifar100/cifar100_ood.yml \
configs/networks/cutpaste.yml \
configs/pipelines/train/train_cutpaste.yml \
configs/preprocessors/cutpaste_preprocessor.yml \
configs/postprocessors/cutpaste.yml \
--network.pretrained False \
--network.backbone.name resnet18_32x32 \
--network.backbone.pretrained True \
--network.backbone.checkpoint 'results/checkpoints/cifar100_res18_acc78.20.ckpt' \
--num_workers 8 \
--optimizer.num_epochs 100 \
--merge_option merge
| 809 | 29 | 82 | sh |
null | OpenOOD-main/scripts/ad/cutpaste/cifar10_test_ood_cutpaste.sh | #!/bin/bash
# sh scripts/ad/cutpaste/cifar10_test_ood_cutpaste.sh
# Evaluate a trained CutPaste model for OOD detection on CIFAR-10.

# SLURM variables/prefix kept for reference (disabled):
# GPU=1
# CPU=1
# node=68
# jobname=openood
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \

# FIX: drop the dangling backslash after the PYTHONPATH assignment so the
# env var is applied directly to the python process.
PYTHONPATH='.':$PYTHONPATH \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/datasets/cifar10/cifar10_ood.yml \
configs/networks/cutpaste.yml \
configs/pipelines/test/test_cutpaste.yml \
configs/preprocessors/cutpaste_preprocessor.yml \
configs/postprocessors/cutpaste.yml \
--network.backbone.name resnet18_32x32 \
--num_workers 8 \
--network.checkpoint "results/cifar10_projectionNet_cutpaste_e1_lr0.03/best_epoch1_auroc0.5146.ckpt"
| 746 | 30.125 | 100 | sh |
null | OpenOOD-main/scripts/ad/cutpaste/cifar10_train_cutpaste.sh | #!/bin/bash
# sh scripts/ad/cutpaste/cifar10_train_cutpaste.sh
# Train a CutPaste projection head on CIFAR-10 from a pretrained backbone
# (SLURM).

GPU=1
CPU=1
jobname=openood

PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/datasets/cifar10/cifar10_ood.yml \
configs/networks/cutpaste.yml \
configs/pipelines/train/train_cutpaste.yml \
configs/preprocessors/cutpaste_preprocessor.yml \
configs/postprocessors/cutpaste.yml \
--network.pretrained False \
--network.backbone.name resnet18_32x32 \
--network.backbone.pretrained True \
--network.backbone.checkpoint 'results/checkpoints/cifar10_res18_acc94.30.ckpt' \
--num_workers 8 \
--optimizer.num_epochs 100 \
--merge_option merge
| 795 | 29.615385 | 81 | sh |
null | OpenOOD-main/scripts/ad/cutpaste/mnist_test_osr_cutpaste.sh | #!/bin/bash
# sh scripts/ad/cutpaste/mnist_test_osr_cutpaste.sh
# Evaluate a trained CutPaste model for open-set recognition on MNIST-6.

# SLURM variables/prefix kept for reference (disabled):
# GPU=1
# CPU=1
# node=68
# jobname=openood
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \

# FIX: drop the dangling backslash after the PYTHONPATH assignment so the
# env var is applied directly to the python process.
PYTHONPATH='.':$PYTHONPATH \
python main.py \
--config configs/datasets/osr_mnist6/mnist6_seed1.yml \
configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \
configs/networks/cutpaste.yml \
configs/pipelines/test/test_cutpaste.yml \
configs/preprocessors/cutpaste_preprocessor.yml \
configs/postprocessors/cutpaste.yml \
--network.backbone.name lenet \
--num_workers 8 \
--network.checkpoint 'results/osr_mnist6_seed1_projectionNet_cutpaste_e100_lr0.03/best.ckpt' \
--network.backbone.pretrained False \
--evaluator.name osr
| 804 | 29.961538 | 94 | sh |
null | OpenOOD-main/scripts/ad/cutpaste/mnist_train_cutpaste.sh | #!/bin/bash
# sh scripts/ad/cutpaste/mnist_train_cutpaste.sh
# Train a CutPaste projection head on MNIST from a pretrained LeNet (SLURM).

GPU=1
CPU=1
jobname=openood

PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} \
python main.py \
--config configs/datasets/mnist/mnist.yml \
configs/datasets/mnist/mnist_ood.yml \
configs/networks/cutpaste.yml \
configs/pipelines/train/train_cutpaste.yml \
configs/preprocessors/cutpaste_preprocessor.yml \
configs/postprocessors/cutpaste.yml \
--network.pretrained False \
--network.backbone.name lenet \
--network.backbone.pretrained True \
--network.backbone.checkpoint 'results/checkpoints/mnist_lenet_acc99.30.ckpt' \
--num_workers 8 \
--optimizer.num_epochs 100 \
--merge_option merge
| 774 | 28.807692 | 79 | sh |
null | OpenOOD-main/scripts/ad/cutpaste/mnist_train_osr_cutpaste.sh | #!/bin/bash
# sh scripts/ad/cutpaste/mnist_train_osr_cutpaste.sh
# Train a CutPaste projection head for open-set recognition on MNIST-6.

GPU=1
CPU=1
node=68
jobname=openood

# SLURM launch prefix kept for reference (disabled):
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \

# FIX: drop the dangling backslash after the PYTHONPATH assignment so the
# env var is applied directly to the python process.
PYTHONPATH='.':$PYTHONPATH \
python main.py \
--config configs/datasets/osr_mnist6/mnist6_seed1.yml \
configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \
configs/networks/cutpaste.yml \
configs/pipelines/train/train_cutpaste.yml \
configs/preprocessors/cutpaste_preprocessor.yml \
configs/postprocessors/cutpaste.yml \
--network.pretrained False \
--network.backbone.name lenet \
--network.backbone.pretrained True \
--network.backbone.checkpoint 'results/checkpoints/osr/mnist6_seed1.ckpt' \
--num_workers 8 \
--optimizer.num_epochs 100
| 816 | 30.423077 | 75 | sh |
null | OpenOOD-main/scripts/ad/cutpaste/osr_cifar50_train_cutpaste.sh | #!/bin/bash
# sh scripts/ad/cutpaste/cifar50_train_cutpaste.sh
# Train a CutPaste projection head for open-set recognition on CIFAR-50
# (SLURM).

GPU=1
CPU=1
node=68
jobname=openood

PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} \
python main.py \
--config configs/datasets/osr_cifar50/cifar50_seed1.yml \
configs/datasets/osr_cifar50/cifar50_seed1_ood.yml \
configs/networks/cutpaste.yml \
configs/pipelines/train/train_cutpaste.yml \
configs/preprocessors/cutpaste_preprocessor.yml \
configs/postprocessors/cutpaste.yml \
--network.pretrained False \
--network.backbone.name resnet18_32x32 \
--network.backbone.pretrained True \
--network.backbone.checkpoint 'results/checkpoints/osr/cifar50_seed1_acc80.24.ckpt' \
--num_workers 8 \
--optimizer.num_epochs 100 \
--merge_option merge
| 827 | 29.666667 | 85 | sh |
null | OpenOOD-main/scripts/ad/cutpaste/osr_cifar6_train_cutpaste.sh | #!/bin/bash
# sh scripts/ad/cutpaste/cifar6_train_cutpaste.sh
# Train a CutPaste projection head for open-set recognition on CIFAR-6
# (SLURM).

GPU=1
CPU=1
node=68
jobname=openood

PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} \
python main.py \
--config configs/datasets/osr_cifar6/cifar6_seed1.yml \
configs/datasets/osr_cifar6/cifar6_seed1_ood.yml \
configs/networks/cutpaste.yml \
configs/pipelines/train/train_cutpaste.yml \
configs/preprocessors/cutpaste_preprocessor.yml \
configs/postprocessors/cutpaste.yml \
--network.pretrained False \
--network.backbone.name resnet18_32x32 \
--network.backbone.pretrained True \
--network.backbone.checkpoint 'results/checkpoints/osr/cifar6_seed1_acc97.57.ckpt' \
--num_workers 8 \
--optimizer.num_epochs 100 \
--merge_option merge
| 821 | 29.444444 | 84 | sh |
null | OpenOOD-main/scripts/ad/cutpaste/osr_test_ood_cutpaste.sh | #!/bin/bash
# sh scripts/ad/cutpaste/osr_test_ood_cutpaste.sh
# Evaluate a trained CutPaste model for OOD detection on MNIST (SLURM).

GPU=1
CPU=1
jobname=openood

PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} \
python main.py \
--config configs/datasets/mnist/mnist.yml \
configs/datasets/mnist/mnist_ood.yml \
configs/networks/cutpaste.yml \
configs/pipelines/test/test_cutpaste.yml \
configs/preprocessors/cutpaste_preprocessor.yml \
configs/postprocessors/cutpaste.yml \
--network.backbone.name lenet \
--num_workers 8 \
--network.pretrained True \
--network.checkpoint "results/mnist_projectionNet_cutpaste_e100_lr0.03/best.ckpt"
| 687 | 28.913043 | 81 | sh |
null | OpenOOD-main/scripts/ad/cutpaste/osr_tin20_train_cutpaste.sh | #!/bin/bash
# sh scripts/ad/cutpaste/tin20_train_cutpaste.sh
# Train a CutPaste projection head for open-set recognition on TinyImageNet-20
# (SLURM).

GPU=1
CPU=1
node=68
jobname=openood

PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} \
python main.py \
--config configs/datasets/osr_tin20/tin20_seed1.yml \
configs/datasets/osr_tin20/tin20_seed1_ood.yml \
configs/networks/cutpaste.yml \
configs/pipelines/train/train_cutpaste.yml \
configs/preprocessors/cutpaste_preprocessor.yml \
configs/postprocessors/cutpaste.yml \
--network.pretrained False \
--network.backbone.name resnet18_64x64 \
--network.backbone.pretrained True \
--network.backbone.checkpoint 'results/checkpoints/osr/tin20_seed1_acc77.23.ckpt' \
--num_workers 8 \
--optimizer.num_epochs 100 \
--merge_option merge
| 815 | 29.222222 | 83 | sh |
null | OpenOOD-main/scripts/ad/draem/bottle_test_draem.sh | #!/bin/bash
# sh scripts/ad/draem/bottle_test_draem.sh
# Evaluate a trained DRAEM model on the 'bottle' class.

# SLURM variables/prefix kept for reference (disabled):
# GPU=1
# CPU=1
# node=30
# jobname=openood
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \

# FIX: drop the dangling backslash after the PYTHONPATH assignment so the
# env var is applied directly to the python process.
PYTHONPATH='.':$PYTHONPATH \
python main.py \
--config configs/datasets/draem/bottle.yml \
configs/networks/draem.yml \
configs/pipelines/test/test_draem.yml \
configs/preprocessors/draem_preprocessor.yml \
configs/postprocessors/msp.yml \
--evaluator.name ood
| 535 | 25.8 | 72 | sh |
null | OpenOOD-main/scripts/ad/draem/bottle_train_draem.sh | #!/bin/bash
# sh scripts/ad/draem/bottle_train_draem.sh
# GPU=1
# CPU=1
# node=30
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/mvtec/bottle.yml \
configs/networks/draem.yml \
configs/pipelines/train/train_draem.yml \
configs/preprocessors/draem_preprocessor.yml \
configs/postprocessors/msp.yml \
--evaluator.name ad \
--optimizer.num_epochs 2
| 564 | 25.904762 | 72 | sh |
null | OpenOOD-main/scripts/ad/draem/cifar100_test_ood_draem.sh | #!/bin/bash
# sh scripts/ad/draem/cifar100_test_ood_draem.sh
# GPU=1
# CPU=1
# node=30
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/datasets/cifar100/cifar100_ood.yml \
configs/networks/draem.yml \
configs/pipelines/test/test_draem.yml \
configs/preprocessors/draem_preprocessor.yml \
configs/postprocessors/draem.yml \
--network.pretrained True \
| 601 | 26.363636 | 72 | sh |
null | OpenOOD-main/scripts/ad/draem/cifar100_train_draem.sh | #!/bin/bash
# sh scripts/ad/draem/cifar100_train_draem.sh
# GPU=1
# CPU=1
# node=30
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/datasets/cifar100/cifar100_ood.yml \
configs/networks/draem.yml \
configs/pipelines/train/train_draem.yml \
configs/preprocessors/draem_preprocessor.yml \
configs/postprocessors/draem.yml \
--evaluator.name ad \
--dataset.train.batch_size 64 \
--num_workers 8 \
--optimizer.num_epochs 100 \
--recorder.name draem
| 694 | 26.8 | 72 | sh |
null | OpenOOD-main/scripts/ad/draem/cifar10_test_ood_draem.sh | #!/bin/bash
# sh scripts/ad/draem/cifar10_test_ood_draem.sh
# GPU=1
# CPU=1
# node=30
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/datasets/cifar10/cifar10_ood.yml \
configs/networks/draem.yml \
configs/pipelines/test/test_draem.yml \
configs/preprocessors/draem_preprocessor.yml \
configs/postprocessors/draem.yml \
--network.pretrained True
| 593 | 27.285714 | 72 | sh |
null | OpenOOD-main/scripts/ad/draem/cifar10_train_draem.sh | #!/bin/bash
# sh scripts/ad/draem/cifar10_train_draem.sh
# GPU=1
# CPU=1
# node=30
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/datasets/cifar10/cifar10_ood.yml \
configs/networks/draem.yml \
configs/pipelines/train/train_draem.yml \
configs/preprocessors/draem_preprocessor.yml \
configs/postprocessors/draem.yml \
--evaluator.name ad \
--dataset.train.batch_size 64 \
--num_workers 8 \
--optimizer.num_epochs 2
| 663 | 26.666667 | 72 | sh |
null | OpenOOD-main/scripts/ad/draem/mnist_test_osr_draem.sh | #!/bin/bash
# sh scripts/ad/draem/mnist_test_osr_draem.sh
# GPU=1
# CPU=1
# node=30
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/osr_mnist6/mnist6_seed1.yml \
configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \
configs/networks/draem.yml \
configs/pipelines/test/test_draem.yml \
configs/preprocessors/draem_preprocessor.yml \
configs/postprocessors/draem.yml \
--network.pretrained True \
--evaluator.name osr
| 630 | 27.681818 | 72 | sh |
null | OpenOOD-main/scripts/ad/draem/mnist_train_draem.sh | #!/bin/bash
# sh scripts/ad/draem/mnist_train_draem.sh
GPU=1
CPU=1
node=30
jobname=openood
PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} \
python main.py \
--config configs/datasets/osr_cifar6/cifar6_seed1.yml \
configs/datasets/osr_cifar6/cifar6_seed1_ood.yml \
configs/networks/draem.yml \
configs/pipelines/train/train_draem.yml \
configs/preprocessors/draem_preprocessor.yml \
configs/postprocessors/draem.yml \
--evaluator.name ad \
--dataset.train.batch_size 64 \
--num_workers 8 \
--optimizer.num_epochs 100
| 641 | 25.75 | 55 | sh |
null | OpenOOD-main/scripts/ad/draem/mnist_train_osr_draem.sh | #!/bin/bash
# sh scripts/ad/draem/mnist_train_osr_draem.sh
# GPU=1
# CPU=1
# node=30
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/osr_mnist6/mnist6_seed1.yml \
configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \
configs/networks/draem.yml \
configs/pipelines/train/train_draem.yml \
configs/preprocessors/draem_preprocessor.yml \
configs/postprocessors/draem.yml \
--evaluator.name ad \
--dataset.train.batch_size 32 \
--num_workers 8 \
--optimizer.num_epochs 100 \
--recorder.name draem
| 708 | 26.269231 | 72 | sh |
null | OpenOOD-main/scripts/ad/draem/osr_cifar50_train_draem.sh | #!/bin/bash
# sh scripts/ad/draem/osr_cifar50_train_draem.sh
GPU=1
CPU=1
node=30
jobname=openood
PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} \
python main.py \
--config configs/datasets/osr_tin20/tin20_seed1.yml \
configs/datasets/osr_tin20/tin20_seed1_ood.yml \
configs/networks/draem.yml \
configs/pipelines/train/train_draem.yml \
configs/preprocessors/draem_preprocessor.yml \
configs/postprocessors/draem.yml \
--evaluator.name ad \
--dataset.train.batch_size 64 \
--num_workers 8 \
--optimizer.num_epochs 2 &
| 643 | 25.833333 | 53 | sh |
null | OpenOOD-main/scripts/ad/draem/osr_cifar6_train_draem.sh | #!/bin/bash
# sh scripts/ad/draem/osr_cifar6_train_draem.sh
GPU=1
CPU=1
node=30
jobname=openood
PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} \
python main.py \
--config configs/datasets/osr_cifar6/cifar6_seed1.yml \
configs/datasets/osr_cifar6/cifar6_seed1_ood.yml \
configs/networks/draem.yml \
configs/pipelines/train/train_draem.yml \
configs/preprocessors/draem_preprocessor.yml \
configs/postprocessors/draem.yml \
--evaluator.name ad \
--dataset.train.batch_size 64 \
--num_workers 8 \
--optimizer.num_epochs 2 &
| 646 | 25.958333 | 55 | sh |
null | OpenOOD-main/scripts/ad/draem/osr_tin20_train_draem.sh | #!/bin/bash
# sh scripts/ad/draem/osr_tin20_train_draem.sh
GPU=1
CPU=1
node=30
jobname=openood
PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} \
python main.py \
--config configs/datasets/osr_cifar50/cifar50_seed1.yml \
configs/datasets/osr_cifar50/cifar50_seed1_ood.yml \
configs/networks/draem.yml \
configs/pipelines/train/train_draem.yml \
configs/preprocessors/draem_preprocessor.yml \
configs/postprocessors/draem.yml \
--evaluator.name ad \
--dataset.train.batch_size 64 \
--num_workers 8 \
--optimizer.num_epochs 2 &
| 649 | 26.083333 | 57 | sh |
null | OpenOOD-main/scripts/ad/dsvdd/0_dcae_pretrain.sh | #!/bin/bash
# sh scripts/ad/dsvdd/0_dcae_pretrain.sh
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/objects/cifar10.yml \
configs/datasets/objects/cifar10_ood.yml \
configs/pipelines/train/train_dcae.yml \
configs/networks/dcae.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/msp.yml \
--pipeline.name train_ad \
--postprocessor.name dsvdd \
--evaluator.name ad \
--recorder.name ad \
--optimizer.num_epochs 2 \
--trainer.name dsvdd
| 662 | 26.625 | 72 | sh |
null | OpenOOD-main/scripts/ad/dsvdd/0_dsvdd_test.sh | #!/bin/bash
# sh scripts/a_anomaly/0_dsvdd_test.sh
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/objects/cifar10.yml \
configs/datasets/objects/cifar10_ood.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/test/test_dsvdd.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/msp.yml \
--pipeline.name test_ad \
--postprocessor.name dsvdd \
--evaluator.name ood \
--network.pretrained True \
--network.checkpoint 'results/cifar10_resnet18_32x32_dsvdd_e3/DSVDD_best_epoch2_roc_auc0.719908611111111.pth'
| 736 | 34.095238 | 109 | sh |
null | OpenOOD-main/scripts/ad/dsvdd/0_dsvdd_train_dcae.sh | #!/bin/bash
# sh scripts/a_anomaly/0_dsvdd_train_dcae.sh
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/objects/cifar10.yml \
configs/datasets/objects/cifar10_ood.yml \
configs/pipelines/train/train_dsvdd.yml \
configs/networks/dsvdd.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/msp.yml \
--pipeline.name train_ad \
--postprocessor.name dsvdd \
--evaluator.name ad \
--recorder.name ad \
--optimizer.num_epochs 2 \
--network.pretrained True \
--network.checkpoint 'results/cifar10_dcae_dsvdd_e3/best.ckpt'
| 737 | 29.75 | 72 | sh |
null | OpenOOD-main/scripts/ad/dsvdd/cifar100_test_ood_dsvdd.sh | #!/bin/bash
# sh scripts/ad/dsvdd/cifar100_test_ood_dsvdd.sh
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/datasets/cifar100/cifar100_ood.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/test/test_dsvdd.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/dsvdd.yml \
--network.pretrained True \
--network.checkpoint 'results/cifar100_resnet18_32x32_dsvdd_e100/best.ckpt'
| 641 | 32.789474 | 75 | sh |
null | OpenOOD-main/scripts/ad/dsvdd/cifar100_train_dsvdd.sh | #!/bin/bash
# sh scripts/ad/dsvdd/cifar100_train_dsvdd.sh
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/datasets/cifar100/cifar100_ood.yml \
configs/pipelines/train/train_dsvdd.yml \
configs/networks/resnet18_32x32.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/dsvdd.yml \
--optimizer.num_epochs 100 \
--network.pretrained True \
--network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1/best.ckpt'
| 674 | 32.75 | 80 | sh |
null | OpenOOD-main/scripts/ad/dsvdd/cifar10_test_ood_dsvdd.sh | #!/bin/bash
# sh scripts/ad/dsvdd/cifar10_test_ood_dsvdd.sh
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/datasets/cifar10/cifar10_ood.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/test/test_dsvdd.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/dsvdd.yml \
--network.pretrained True \
--network.checkpoint 'results/cifar10_resnet18_32x32_dsvdd_e2/best.ckpt'
| 633 | 32.368421 | 72 | sh |
null | OpenOOD-main/scripts/ad/dsvdd/cifar10_train_dsvdd.sh | #!/bin/bash
# sh scripts/ad/dsvdd/cifar10_train_dsvdd.sh
PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} \
-w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/datasets/cifar10/cifar10_ood.yml \
configs/pipelines/train/train_dsvdd.yml \
configs/networks/resnet18_32x32.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/dsvdd.yml \
--optimizer.num_epochs 100 \
--network.pretrained True \
--network.checkpoint 'results/checkpoints/cifar10_res18_acc94.30.ckpt'
| 657 | 31.9 | 70 | sh |
null | OpenOOD-main/scripts/ad/dsvdd/mnist_test_osr_dsvdd.sh | #!/bin/bash
# sh scripts/ad/dsvdd/mnist_test_osr_dsvdd.sh
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/osr_mnist6/mnist6_seed1.yml \
configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \
configs/networks/lenet.yml \
configs/pipelines/test/test_dsvdd.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/dsvdd.yml \
--network.pretrained True \
--network.checkpoint 'results/osr_mnist6_seed1_lenet_dsvdd_e100/best.ckpt' \
--evaluator.name osr
| 663 | 32.2 | 76 | sh |
null | OpenOOD-main/scripts/ad/dsvdd/mnist_train_osr_dsvdd.sh | #!/bin/bash
# sh scripts/ad/dsvdd/mnist_train_osr_dsvdd.sh
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/osr_mnist6/mnist6_seed1.yml \
configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \
configs/networks/lenet.yml \
configs/pipelines/train/train_dsvdd.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/dsvdd.yml \
--num_workers 8 \
--network.checkpoint 'results/checkpoints/cifar10_res18_acc94.30.ckpt' \
--optimizer.num_epochs 100
| 658 | 31.95 | 72 | sh |
null | OpenOOD-main/scripts/ad/dsvdd/osr_cifar50_train_dsvdd.sh | #!/bin/bash
# sh scripts/ad/dsvdd/osr_cifar50_train_dsvdd.sh
GPU=1
CPU=1
node=30
jobname=openood
PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} \
python main.py \
--config configs/datasets/osr_cifar50/cifar50_seed1.yml \
configs/datasets/osr_cifar50/cifar50_seed1_ood.yml \
configs/pipelines/train/train_dsvdd.yml \
configs/networks/resnet18_32x32.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/dsvdd.yml \
--optimizer.num_epochs 100 \
--network.pretrained True \
--network.checkpoint 'results/checkpoints/osr/cifar50_seed1_acc80.24.ckpt' &
| 694 | 29.217391 | 76 | sh |
null | OpenOOD-main/scripts/ad/dsvdd/osr_cifar6_train_dsvdd.sh | #!/bin/bash
# sh scripts/ad/dsvdd/osr_cifar6_train_dsvdd.sh
GPU=1
CPU=1
node=30
jobname=openood
PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} \
python main.py \
--config configs/datasets/osr_cifar6/cifar6_seed1.yml \
configs/datasets/osr_cifar6/cifar6_seed1_ood.yml \
configs/pipelines/train/train_dsvdd.yml \
configs/networks/resnet18_32x32.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/dsvdd.yml \
--optimizer.num_epochs 100 \
--network.pretrained True \
--network.checkpoint 'results/checkpoints/osr/cifar6_seed1_acc97.57.ckpt' &
| 688 | 28.956522 | 75 | sh |
null | OpenOOD-main/scripts/ad/dsvdd/osr_mnist6_train_dsvdd.sh | #!/bin/bash
# sh scripts/ad/dsvdd/osr_cifar6_train_dsvdd.sh
GPU=1
CPU=1
node=30
jobname=openood
PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} \
python main.py \
--config configs/datasets/osr_cifar6/cifar6_seed1.yml \
configs/datasets/osr_cifar6/cifar6_seed1_ood.yml \
configs/pipelines/train/train_dsvdd.yml \
configs/networks/resnet18_32x32.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/dsvdd.yml \
--optimizer.num_epochs 100 \
--network.pretrained True \
--network.checkpoint 'results/checkpoints/osr/cifar6_seed1_acc97.57.ckpt' &
| 688 | 28.956522 | 75 | sh |
null | OpenOOD-main/scripts/ad/dsvdd/osr_tin20_train_dsvdd.sh | #!/bin/bash
# sh scripts/ad/dsvdd/osr_tin20_train_dsvdd.sh
GPU=1
CPU=1
node=30
jobname=openood
PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} \
python main.py \
--config configs/datasets/osr_tin20/tin20_seed1.yml \
configs/datasets/osr_tin20/tin20_seed1_ood.yml \
configs/pipelines/train/train_dsvdd.yml \
configs/networks/resnet18_32x32.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/dsvdd.yml \
--optimizer.num_epochs 100 \
--network.pretrained True \
--network.checkpoint 'results/checkpoints/osr/tin20_seed1_acc77.23.ckpt' \
--merge_option merge &
| 705 | 28.416667 | 74 | sh |
null | OpenOOD-main/scripts/ad/dsvdd/sweep_osr.py | # python scripts/uncertainty/temp_scaling/sweep_osr.py
import os
config = [
[
'osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml',
'resnet18_32x32', 'results/checkpoints/osr/cifar6_seed1.ckpt'
],
[
'osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml',
'resnet18_32x32', 'results/checkpoints/osr/cifar50_seed1.ckpt'
],
[
'osr_tin20/tin20_seed1.yml', 'osr_tin20/tin20_seed1_ood.yml',
'resnet18_64x64', 'results/checkpoints/osr/tin20_seed1.ckpt'
],
[
'osr_mnist6/mnist6_seed1.yml', 'osr_mnist6/mnist6_seed1_ood.yml',
'lenet', 'results/checkpoints/osr/mnist6_seed1.ckpt'
],
[
'mnist/mnist.yml', 'mnist/mnist_ood.yml', 'lenet',
'results/checkpoints/osr/mnist6_seed1.ckpt'
],
[
'osr_mnist6/mnist6_seed1.yml', 'osr_mnist6/mnist6_seed1_ood.yml',
'lenet', 'results/checkpoints/osr/mnist6_seed1.ckpt'
],
]
for [dataset, ood_dataset, network, pth] in config:
command = (f"PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \
--cpus-per-task=1 --ntasks-per-node=1 \
--kill-on-bad-exit=1 --job-name=openood \
python main.py \
--config configs/datasets/{dataset} \
configs/datasets/{ood_dataset} \
configs/networks/{network}.yml \
configs/pipelines/test/test_osr.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/temp_scaling.yml \
--network.checkpoint {pth} \
--num_workers 8 \
--merge_option merge &")
os.system(command)
| 1,588 | 32.808511 | 77 | py |
null | OpenOOD-main/scripts/ad/kdad/kdad_test.sh | #!/bin/bash
# sh scripts/a_anomaly/1_kdad_test_det.sh
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/KDAD/kdad_cifar10.yml \
configs/datasets/KDAD/kdad_cifar10_ood.yml \
configs/pipelines/test/test_kdad.yml \
configs/networks/kdad_vgg.yml \
configs/preprocessors/base_preprocessor.yml
| 485 | 31.4 | 72 | sh |
null | OpenOOD-main/scripts/ad/kdad/kdad_train.sh | #!/bin/bash
# sh scripts/a_anomaly/1_kdad_train.sh
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/KDAD/kdad_cifar10.yml \
configs/datasets/KDAD/kdad_cifar10_ood.yml \
configs/pipelines/train/train_kdad.yml \
configs/networks/kdad_vgg.yml \
configs/preprocessors/base_preprocessor.yml
| 484 | 31.333333 | 72 | sh |
null | OpenOOD-main/scripts/ad/patchcore/bottle_test_ood_patchcore.sh | #!/bin/bash
# sh scripts/ad/patchcore/patchcore.sh
# GPU=1
# CPU=1
# node=30
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
python main.py \
--config configs/datasets/mnist/mnist.yml \
configs/datasets/mnist/mnist_ood.yml \
configs/networks/patchcorenet.yml \
configs/pipelines/test/test_patchcore.yml \
configs/postprocessors/patch.yml \
configs/preprocessors/base_preprocessor.yml \
--evaluator.name ad \
--num_workers 8 \
--merge_option merge
| 449 | 20.428571 | 45 | sh |