    Evaluate on CIFAR10/100-C
    '''
    test_seen_c_loader_list = []
    for corruption in CORRUPTIONS:
        test_c_loader = cifar_c_testloader(corruption=corruption, data_dir=args.data_root_path, num_classes=num_classes,
                                           test_batch_size=args.test_batch_size, num_workers=args.cpus)
        test_seen_c_loader_list.append(test_c_loader)
    # evaluate on each corruption type:
    print('evaluating corruptions...')
    test_c_losses, test_c_accs = [], []
    for corruption, test_c_loader in zip(CORRUPTIONS, test_seen_c_loader_list):
        test_c_batch_num = len(test_c_loader)
        print(test_c_batch_num)  # each corruption has 10k * 5 images (10k per severity level)
        ts = time.time()
        test_c_loss_meter, test_c_acc_meter = AverageMeter(), AverageMeter()
        with torch.no_grad():
            for batch_idx, (images, targets) in enumerate(test_c_loader):
                images, targets = images.cuda(), targets.cuda()
                logits = model(images)
                loss = F.cross_entropy(logits, targets)
                pred = logits.data.max(1)[1]
                acc = pred.eq(targets.data).float().mean()
                # record loss and accuracy:
                test_c_loss_meter.append(loss.item())
                test_c_acc_meter.append(acc.item())
        print('%s test time: %.2fs' % (corruption, time.time() - ts))
        # mean test loss and acc for this corruption type:
        test_c_losses.append(test_c_loss_meter.avg)
        test_c_accs.append(test_c_acc_meter.avg)
        # print and log:
        corruption_str = '%s: %.4f' % (corruption, test_c_accs[-1])
        print(corruption_str)
        fp.write(corruption_str + '\n')
        fp.flush()
    # mean over all corruption types:
    test_c_loss = np.mean(test_c_losses)
    test_c_acc = np.mean(test_c_accs)
    # print and log:
    avg_str = 'corruption acc: (mean) %.4f' % (test_c_acc)
    print(avg_str)
    fp.write(avg_str + '\n')
    fp.flush()
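
# NOTE: AverageMeter is assumed to be provided by the repo's utilities. A minimal sketch of the
# interface this script relies on (append() records a per-batch value, .avg returns their mean)
# would look like this:
#
# class AverageMeter():
#     def __init__(self):
#         self.values = []
#     def append(self, v):
#         self.values.append(v)
#     @property
#     def avg(self):
#         return sum(self.values) / len(self.values) if self.values else 0.0
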
def val_cifar10_1():
    '''
    Evaluate on CIFAR-10.1
    '''
    test_v2_loader = cifar10_1_testloader(data_dir=args.data_root_path)
    model.eval()
    ts = time.time()
    test_loss_meter, test_acc_meter = AverageMeter(), AverageMeter()
    with torch.no_grad():
        for images, targets in test_v2_loader:
            images, targets = images.cuda(), targets.cuda()
            logits = model(images)
            loss = F.cross_entropy(logits, targets)
            pred = logits.data.max(1)[1]
            acc = pred.eq(targets.data).float().mean()
            # record loss and accuracy:
            test_loss_meter.append(loss.item())
            test_acc_meter.append(acc.item())
    print('cifar10.1 test time: %.2fs' % (time.time() - ts))
    # test loss and acc on CIFAR-10.1:
    test_loss = test_loss_meter.avg
    test_acc = test_acc_meter.avg
    # print and log:
    clean_str = 'cifar10.1 test acc: %.4f' % test_acc
    print(clean_str)
    fp.write(clean_str + '\n')
    fp.flush()
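
# NOTE: cifar10_1_testloader is assumed to come from the repo's data utilities. A minimal sketch,
# assuming the CIFAR-10.1 v6 numpy release (file names, batch size, and preprocessing here are
# assumptions, not the repo's actual implementation):
#
# def cifar10_1_testloader(data_dir, test_batch_size=100, num_workers=2):
#     data = np.load(os.path.join(data_dir, 'cifar10.1_v6_data.npy'))      # (N, 32, 32, 3) uint8
#     labels = np.load(os.path.join(data_dir, 'cifar10.1_v6_labels.npy'))  # (N,)
#     images = torch.from_numpy(data).permute(0, 3, 1, 2).float() / 255.0
#     targets = torch.from_numpy(labels).long()
#     return torch.utils.data.DataLoader(torch.utils.data.TensorDataset(images, targets),
#                                        batch_size=test_batch_size, shuffle=False,
#                                        num_workers=num_workers)
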
## Test on Tiny-ImageNet:
# per-corruption classification errors (CE) of the anchor model (a normally trained ResNet18), one entry per corruption type:
ResNet18_c_CE_list = [
0.8037, 0.7597, 0.7758, 0.8426, 0.8274,
0.7907, 0.8212, 0.7497, 0.7381, 0.7433,
0.6800, 0.8939, 0.7308, 0.6121, 0.6452
]
def find_mCE(target_model_c_CE, anchor_model_c_CE):
    '''
    Args:
        target_model_c_CE: np.ndarray. shape=(15,). CE on each corruption type for the target model.
        anchor_model_c_CE: np.ndarray. shape=(15,). CE on each corruption type for the anchor model (a normally trained ResNet18 by default).
    '''
    assert len(target_model_c_CE) == 15  # a total of 15 corruption types
    mCE = 0
    for target_model_CE, anchor_model_CE in zip(target_model_c_CE, anchor_model_c_CE):
        mCE += target_model_CE / anchor_model_CE
    mCE /= len(target_model_c_CE)
    return mCE
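
# Example usage (a sketch; target_c_CE and tin_c_accs are hypothetical names for the evaluated
# model's per-corruption errors/accuracies on the same 15 corruption types as the anchor):
#
# target_c_CE = np.array([1.0 - acc for acc in tin_c_accs])  # CE = 1 - accuracy per corruption
# mCE = find_mCE(target_c_CE, ResNet18_c_CE_list)
# print('mCE: %.4f' % mCE)
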
def val_tin():
    '''
    Evaluate on Tiny ImageNet