ts = time.time()
test_loss_meter, test_acc_meter = AverageMeter(), AverageMeter()
with torch.no_grad():
    for images, targets in val_loader:
        images, targets = images.cuda(), targets.cuda()
        logits = model(images)
        loss = F.cross_entropy(logits, targets)
        pred = logits.data.max(1)[1]
        acc = pred.eq(targets.data).float().mean()
        # append loss and acc:
        test_loss_meter.append(loss.item())
        test_acc_meter.append(acc.item())
print('clean test time: %.2fs' % (time.time()-ts))
# test loss and acc of this epoch:
test_loss = test_loss_meter.avg
test_acc = test_acc_meter.avg
# print:
clean_str = 'clean acc: %.4f' % test_acc
print(clean_str)
fp.write(clean_str + '\n')
fp.flush()
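
# NOTE: AverageMeter is used above but not defined in this fragment; it is
# presumably imported from a utils module elsewhere in the repo. The minimal
# sketch below is an assumption that matches the .append() / .avg usage in this
# file, not necessarily the repo's actual implementation.
class AverageMeter():
    '''Tracks appended scalar values and exposes their running average.'''
    def __init__(self):
        self.values = []

    def append(self, value):
        self.values.append(value)

    @property
    def avg(self):
        # average of everything appended so far (0 if nothing has been appended):
        return sum(self.values) / len(self.values) if self.values else 0.0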

AlexNet_ERR = [
    0.886428, 0.894468, 0.922640, 0.819880, 0.826268, 0.785948, 0.798360,
    0.866816, 0.826572, 0.819324, 0.564592, 0.853204, 0.646056, 0.717840,
    0.606500
]
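
# NOTE: CORRUPTIONS and find_mCE are used in val_IN_c() below but are not defined
# in this fragment (they presumably come from a shared utils module). The
# definitions below are assumed sketches, not the repo's actual code: CORRUPTIONS
# lists the 15 standard ImageNet-C corruption types, and find_mCE computes the
# mean Corruption Error normalized by the anchor (AlexNet) errors, following
# Hendrycks & Dietterich (2019). The ordering of CORRUPTIONS is assumed to match
# the ordering of AlexNet_ERR above.
CORRUPTIONS = [
    'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur', 'glass_blur',
    'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog', 'brightness', 'contrast',
    'elastic_transform', 'pixelate', 'jpeg_compression'
]

def find_mCE(CE_list, anchor_model_c_CE):
    '''
    CE_list: per-corruption error rates of the evaluated model
             (already averaged over the 5 severities, as in val_IN_c()).
    anchor_model_c_CE: per-corruption error rates of the anchor model, e.g. AlexNet_ERR.
    Returns the mean of the anchor-normalized corruption errors (mCE).
    '''
    assert len(CE_list) == len(anchor_model_c_CE)
    normalized_CE = [ce / anchor_ce for ce, anchor_ce in zip(CE_list, anchor_model_c_CE)]
    return sum(normalized_CE) / len(normalized_CE)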

def val_IN_c():
    '''
    Evaluate on ImageNet-C
    '''
    test_seen_c_loader_list = []
    for corruption in CORRUPTIONS:
        test_seen_c_loader_list_c = []
        for severity in range(1, 6):
            test_c_loader_c_s = imagenet_c_testloader(corruption=corruption, severity=severity,
                data_dir=os.path.join(args.data_root_path, 'ImageNet-C'),
                test_batch_size=args.test_batch_size, num_workers=args.cpus)
            test_seen_c_loader_list_c.append(test_c_loader_c_s)
        test_seen_c_loader_list.append(test_seen_c_loader_list_c)

    model.eval()
    # val corruption:
    print('evaluating corruptions...')
    test_CE_c_list = []
    for corruption, test_seen_c_loader_list_c in zip(CORRUPTIONS, test_seen_c_loader_list):
        test_c_CE_c_s_list = []
        ts = time.time()
        for severity in range(1, 6):
            test_c_loader_c_s = test_seen_c_loader_list_c[severity-1]
            test_c_batch_num = len(test_c_loader_c_s)
            # print(test_c_batch_num) # each corruption has 10k * 5 images, each severity has 10k images
            test_c_loss_meter, test_c_CE_meter = AverageMeter(), AverageMeter()
            with torch.no_grad():
                for batch_idx, (images, targets) in enumerate(test_c_loader_c_s):
                    images, targets = images.cuda(), targets.cuda()
                    logits = model(images)
                    loss = F.cross_entropy(logits, targets)
                    pred = logits.data.max(1)[1]
                    CE = (~pred.eq(targets.data)).float().mean()
                    # append loss and CE:
                    test_c_loss_meter.append(loss.item())
                    test_c_CE_meter.append(CE.item())
            # CE of this corruption at this severity:
            test_c_CE_c_s = test_c_CE_meter.avg
            test_c_CE_c_s_list.append(test_c_CE_c_s)
        test_CE_c = np.mean(test_c_CE_c_s_list)
        test_CE_c_list.append(test_CE_c)
        # print:
        print('%s test time: %.2fs' % (corruption, time.time()-ts))
        corruption_str = '%s CE: %.4f' % (corruption, test_CE_c)
        print(corruption_str)
        fp.write(corruption_str + '\n')
        fp.flush()
    # mean accuracy over all corruption types:
    test_c_acc = 1 - np.mean(test_CE_c_list)
    # AlexNet-normalized mean error over all corruption types (mCE):
    test_mCE = find_mCE(test_CE_c_list, anchor_model_c_CE=AlexNet_ERR)
    # print:
    avg_str = 'corruption acc: %.4f' % (test_c_acc)
    print(avg_str)
    fp.write(avg_str + '\n')
    mCE_str = 'mCE: %.4f' % test_mCE
    print(mCE_str)
    fp.write(mCE_str + '\n')
    fp.flush()
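
# NOTE: imagenet_c_testloader is called in val_IN_c() but not defined in this
# fragment. ImageNet-C is typically distributed as
# <data_dir>/<corruption>/<severity>/<class>/*.JPEG with images already sized
# 224x224, so a plausible loader wraps one corruption/severity folder in an
# ImageFolder. This is an assumed sketch; the normalization constants are the
# usual ImageNet ones and should match whatever the evaluated model was trained with.
def imagenet_c_testloader(corruption, severity, data_dir, test_batch_size, num_workers):
    from torch.utils.data import DataLoader
    from torchvision import datasets, transforms
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    dataset = datasets.ImageFolder(os.path.join(data_dir, corruption, str(severity)),
                                   transform=transform)
    return DataLoader(dataset, batch_size=test_batch_size, shuffle=False,
                      num_workers=num_workers, pin_memory=True)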

if __name__ == '__main__':
    model.apply(lambda m: setattr(m, 'route', 'M'))
    if args.dataset in ['cifar10', 'cifar100']:
        if args.mode in ['clean', 'all']:
            val_cifar()
        if args.mode in ['c', 'all']: