text
stringlengths 1
93.6k
|
|---|
'''
|
_, val_data = tiny_imagenet_dataloaders(data_dir=os.path.join(args.data_root_path, 'tiny-imagenet-200'), AugMax=None)
|
val_loader = DataLoader(val_data, batch_size=args.test_batch_size, shuffle=False, num_workers=args.cpus, pin_memory=True)
|
model.eval()
|
ts = time.time()
|
test_loss_meter, test_acc_meter = AverageMeter(), AverageMeter()
|
with torch.no_grad():
|
for images, targets in val_loader:
|
images, targets = images.cuda(), targets.cuda()
|
logits = model(images)
|
loss = F.cross_entropy(logits, targets)
|
pred = logits.data.max(1)[1]
|
acc = pred.eq(targets.data).float().mean()
|
# append loss:
|
test_loss_meter.append(loss.item())
|
test_acc_meter.append(acc.item())
|
print('clean test time: %.2fs' % (time.time()-ts))
|
# test loss and acc of this epoch:
|
test_loss = test_loss_meter.avg
|
test_acc = test_acc_meter.avg
|
# print:
|
clean_str = 'clean acc: %.4f' % test_acc
|
print(clean_str)
|
fp.write(clean_str + '\n')
|
fp.flush()
|
def val_tin_c():
    """
    Evaluate the model on Tiny ImageNet-C.

    For every corruption type in ``CORRUPTIONS`` and each severity level
    1-5, computes the classification error (CE = 1 - accuracy), averages
    it over the five severities, then reports:
      * per-corruption CE,
      * overall corruption accuracy (1 - mean CE over corruption types),
      * mCE (mean CE weighted against an anchor ResNet18's per-corruption
        errors via ``find_mCE``).
    Results are printed to stdout and appended to the open log file ``fp``.

    Relies on module-level state: ``args``, ``model``, ``fp``,
    ``CORRUPTIONS``, ``ResNet18_c_CE_list``, and the helpers
    ``tiny_imagenet_c_testloader``, ``AverageMeter``, ``find_mCE``.
    Assumes a CUDA device is available (tensors are moved with .cuda()).
    """
    # Build one test loader per (corruption, severity) pair:
    test_seen_c_loader_list = []
    for corruption in CORRUPTIONS:
        test_seen_c_loader_list_c = []
        for severity in range(1, 6):
            test_c_loader_c_s = tiny_imagenet_c_testloader(
                data_dir=os.path.join(args.data_root_path, 'TinyImageNet-C/Tiny-ImageNet-C'),
                corruption=corruption, severity=severity,
                test_batch_size=args.test_batch_size, num_workers=args.cpus)
            test_seen_c_loader_list_c.append(test_c_loader_c_s)
        test_seen_c_loader_list.append(test_seen_c_loader_list_c)

    model.eval()

    # val corruption:
    print('evaluating corruptions...')
    test_CE_c_list = []
    for corruption, test_seen_c_loader_list_c in zip(CORRUPTIONS, test_seen_c_loader_list):
        test_c_CE_c_s_list = []
        ts = time.time()
        for severity in range(1, 6):
            test_c_loader_c_s = test_seen_c_loader_list_c[severity - 1]
            # NOTE(review): the original also accumulated F.cross_entropy
            # into a loss meter that was never read; that dead computation
            # and an unused batch-count local have been removed.
            test_c_CE_meter = AverageMeter()
            with torch.no_grad():
                for images, targets in test_c_loader_c_s:
                    images, targets = images.cuda(), targets.cuda()
                    logits = model(images)
                    pred = logits.data.max(1)[1]
                    # per-batch classification error = fraction mispredicted
                    CE = (~pred.eq(targets.data)).float().mean()
                    test_c_CE_meter.append(CE.item())
            # mean CE over all batches at this severity:
            test_c_CE_c_s_list.append(test_c_CE_meter.avg)
        # mean CE over the 5 severities of this corruption type:
        test_CE_c = np.mean(test_c_CE_c_s_list)
        test_CE_c_list.append(test_CE_c)
        # print
        print('%s test time: %.2fs' % (corruption, time.time() - ts))
        corruption_str = '%s CE: %.4f' % (corruption, test_CE_c)
        print(corruption_str)
        fp.write(corruption_str + '\n')
        fp.flush()

    # mean over all types of corruptions:
    test_c_acc = 1 - np.mean(test_CE_c_list)
    # weighted mean over all types of corruptions (normalized by anchor model):
    test_mCE = find_mCE(test_CE_c_list, anchor_model_c_CE=ResNet18_c_CE_list)
    # print
    avg_str = 'corruption acc: %.4f' % (test_c_acc)
    print(avg_str)
    fp.write(avg_str + '\n')
    mCE_str = 'mCE: %.4f' % test_mCE
    print(mCE_str)
    fp.write(mCE_str + '\n')
    fp.flush()
|
## Test on ImageNet:
|
def val_IN():
|
'''
|
Evaluate on ImageNet
|
'''
|
_, val_data = imagenet_dataloaders(data_dir=os.path.join(args.data_root_path, 'imagenet'), AugMax=None)
|
val_loader = DataLoader(val_data, batch_size=args.test_batch_size, shuffle=False, num_workers=args.cpus, pin_memory=True)
|
model.eval()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.