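# ---------------------------------------------------------------------------
# NOTE: this is an excerpt of the test script; it assumes the parsed `args`
# namespace is already available, together with the imports below. The
# standard/torch imports follow directly from usage; the project-specific
# helpers (ResNeXt29_DuBIN, cifar_dataloaders, cifar_random_affine_test_set,
# AverageMeter) come from the repo's own modules, whose paths are not shown
# here and are therefore only listed, not imported.
#
#   import os, random, time
#   import torch
#   import torch.nn.functional as F
#   from torch.utils.data import DataLoader
# ---------------------------------------------------------------------------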
if args.model == 'ResNeXt29_DuBIN':
    model_fn = ResNeXt29_DuBIN

if args.dataset in ['cifar10', 'cifar100']:
    num_classes = 10 if args.dataset == 'cifar10' else 100
    init_stride = 1
elif args.dataset == 'tin':
    num_classes, init_stride = 200, 2
elif args.dataset == 'IN':
    num_classes, init_stride = 1000, None

if args.dataset == 'IN':
    model = model_fn().cuda()
else:
    model = model_fn(num_classes=num_classes, init_stride=init_stride).cuda()
model = torch.nn.DataParallel(model)
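# The DataParallel wrapper is applied before loading so that a checkpoint saved
# from a DataParallel model (state-dict keys prefixed with 'module.') loads
# without key remapping (assumption inferred from the load order below).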
# load model:
ckpt = torch.load(os.path.join(args.save_root_path, 'AugMax_results', args.ckpt_path, 'best_SA.pth'))
model.load_state_dict(ckpt)
# log file:
fp = open(os.path.join(args.save_root_path, 'AugMax_results', args.ckpt_path, 'test_results.txt'), 'a+')
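
# Each evaluation function below runs the loaded model and appends one result
# line to test_results.txt. A minimal, hypothetical driver (not part of the
# original script) might call them as:
#
#   val_cifar()                        # clean CIFAR10/100 accuracy
#   val_cifar_worst_of_k_affine(10)    # worst-of-K affine robustness, e.g. K=10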
## Test on CIFAR:
def val_cifar():
    '''
    Evaluate on the clean CIFAR10/100 test set.
    '''
    _, val_data = cifar_dataloaders(data_dir=args.data_root_path, num_classes=num_classes, train_batch_size=256, test_batch_size=args.test_batch_size, num_workers=args.cpus, AugMax=None)
    test_loader = DataLoader(val_data, batch_size=args.test_batch_size, shuffle=False, num_workers=args.cpus, pin_memory=True)
    model.eval()
    ts = time.time()
    test_loss_meter, test_acc_meter = AverageMeter(), AverageMeter()
    with torch.no_grad():
        for images, targets in test_loader:
            images, targets = images.cuda(), targets.cuda()
            logits = model(images)
            loss = F.cross_entropy(logits, targets)
            pred = logits.data.max(1)[1]
            acc = pred.eq(targets.data).float().mean()
            # record loss and accuracy for this batch:
            test_loss_meter.append(loss.item())
            test_acc_meter.append(acc.item())
    print('clean test time: %.2fs' % (time.time()-ts))
    # average test loss and acc over the whole test set:
    test_loss = test_loss_meter.avg
    test_acc = test_acc_meter.avg
    # print and log:
    clean_str = 'clean: %.4f' % test_acc
    print(clean_str)
    fp.write(clean_str + '\n')
    fp.flush()
def val_cifar_worst_of_k_affine(K):
    '''
    Test model robustness against spatial (affine) transform attacks on CIFAR10/100
    using the worst-of-K method: each sample is scored on whichever of K random
    affine transforms maximizes its loss.
    '''
    model.eval()
    ts = time.time()
    test_loss_meter, test_acc_meter = AverageMeter(), AverageMeter()
    with torch.no_grad():
        K_loss = torch.zeros((K, args.test_batch_size)).cuda()
        K_logits = torch.zeros((K, args.test_batch_size, num_classes)).cuda()
        for k in range(K):
            random.seed(k+1)
            val_data = cifar_random_affine_test_set(data_dir=args.data_root_path, num_classes=num_classes)
            test_loader = DataLoader(val_data, batch_size=args.test_batch_size, shuffle=False, num_workers=args.cpus, pin_memory=True)
            images, targets = next(iter(test_loader))
            images, targets = images.cuda(), targets.cuda()
            logits = model(images)
            loss = F.cross_entropy(logits, targets, reduction='none')
            # stack per-sample losses and logits for this transform:
            K_loss[k, :] = loss  # shape=(K,N)
            K_logits[k, ...] = logits
        # print('K_loss:', K_loss[:,0:3], K_loss.shape)
        # for each sample, pick the transform that maximizes its loss:
        adv_idx = torch.max(K_loss, dim=0).indices
        logits_adv = torch.zeros_like(logits).to(logits.device)
        for n in range(images.shape[0]):
            logits_adv[n] = K_logits[adv_idx[n], n, :]
        print('logits_adv:', logits_adv.shape)
        pred = logits_adv.data.max(1)[1]
        print('pred:', pred.shape)
        acc = pred.eq(targets.data).float().mean()
        # record accuracy:
        test_acc_meter.append(acc.item())
    print('worst of %d test time: %.2fs' % (K, time.time()-ts))
    # worst-of-K test accuracy:
    test_acc = test_acc_meter.avg
    # print and log:
    worst_str = 'worst of %d: %.4f' % (K, test_acc)
    print(worst_str)
    fp.write(worst_str + '\n')
    fp.flush()
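
# A minimal sketch (not part of the original script): the per-sample Python
# loop above that assembles `logits_adv` can be expressed as one advanced-
# indexing call, assuming K_logits has shape (K, N, C) and adv_idx shape (N,):
#
#   N = K_logits.shape[1]  # = args.test_batch_size
#   logits_adv = K_logits[adv_idx, torch.arange(N, device=adv_idx.device)]
#   # row n equals K_logits[adv_idx[n], n, :]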
def val_cifar_c():
    '''