else:
    sampler_train = torch.utils.data.RandomSampler(dataset_train)

sampler_val = torch.utils.data.SequentialSampler(dataset_val)

if global_rank == 0 and args.log_dir is not None and not args.eval:
    os.makedirs(args.log_dir, exist_ok=True)
    log_writer = SummaryWriter(log_dir=args.log_dir)
else:
    log_writer = None
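
# --- Illustrative sketch, not part of the original script: how the rank-0 TensorBoard
# writer created above is typically consumed later in training. The tag names and the
# metrics dict are assumptions; SummaryWriter comes from the script's existing imports.
def _log_scalars_sketch(writer, step, scalars):
    """Write a dict of scalar metrics to TensorBoard; no-op when writer is None (non-zero ranks)."""
    if writer is None:
        return
    for tag, value in scalars.items():
        writer.add_scalar(tag, value, step)  # standard SummaryWriter API
    writer.flush()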
data_loader_train = torch.utils.data.DataLoader(
    dataset_train, sampler=sampler_train,
    batch_size=args.batch_size,
    num_workers=args.num_workers,
    pin_memory=args.pin_mem,
    drop_last=True,
)

data_loader_val = torch.utils.data.DataLoader(
    dataset_val, sampler=sampler_val,
    batch_size=args.batch_size,
    num_workers=args.num_workers,
    pin_memory=args.pin_mem,
    drop_last=False
)

mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
    print("Mixup is activated!")
    mixup_fn = Mixup(
        mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
        prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
        label_smoothing=args.smoothing, num_classes=args.nb_classes)
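
# --- Illustrative sketch, not part of the original script: how `mixup_fn` is applied to
# a batch inside the training loop. timm's Mixup object is callable on (images, integer
# targets) and returns mixed images plus soft targets; the variable names are assumptions.
def _apply_mixup_sketch(mixup_fn, samples, targets):
    if mixup_fn is not None:
        samples, targets = mixup_fn(samples, targets)  # targets become soft label vectors
    return samples, targets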
model = models_vit.__dict__[args.model](
    num_classes=args.nb_classes,
    drop_path_rate=args.drop_path,
    global_pool=args.global_pool,
    lp_num_layers=1,
)

if args.finetune and not args.eval:
    checkpoint = torch.load(args.finetune, map_location='cpu')
    print("Load pre-trained checkpoint from: %s" % args.finetune)
    checkpoint_model = checkpoint['model']
    state_dict = model.state_dict()
    for k in ['head.weight', 'head.bias']:
        if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
            print(f"Removing key {k} from pretrained checkpoint")
            del checkpoint_model[k]

    # interpolate position embedding
    interpolate_pos_embed(model, checkpoint_model)

    # load pre-trained model
    msg = model.load_state_dict(checkpoint_model, strict=False)
    print(msg)

    if args.global_pool:
        assert set(msg.missing_keys) == {'head.weight', 'head.bias', 'fc_norm.weight', 'fc_norm.bias'}
    else:
        assert set(msg.missing_keys) == {'head.weight', 'head.bias'}

    # manually initialize fc layer
    trunc_normal_(model.head.weight, std=2e-5)
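
# --- Illustrative sketch, not the repository's util: the general idea behind
# `interpolate_pos_embed` used above. When the fine-tuning patch grid differs from the
# pre-training grid, the patch position embeddings are resized with bicubic
# interpolation while class/extra tokens are left unchanged.
def _resize_patch_pos_embed_sketch(pos_embed, num_extra_tokens, new_grid_size):
    # pos_embed: (1, num_extra_tokens + old_grid**2, dim)
    extra, grid = pos_embed[:, :num_extra_tokens], pos_embed[:, num_extra_tokens:]
    dim = grid.shape[-1]
    old_grid_size = int(grid.shape[1] ** 0.5)
    grid = grid.reshape(1, old_grid_size, old_grid_size, dim).permute(0, 3, 1, 2)
    grid = torch.nn.functional.interpolate(
        grid, size=(new_grid_size, new_grid_size), mode='bicubic', align_corners=False)
    grid = grid.permute(0, 2, 3, 1).reshape(1, new_grid_size * new_grid_size, dim)
    return torch.cat((extra, grid), dim=1)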
model.to(device)

model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)

print("Model = %s" % str(model_without_ddp))
print('number of params (M): %.2f' % (n_parameters / 1.e6))

eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size()

if args.lr is None:  # only base_lr is specified
    args.lr = args.blr * eff_batch_size / 256

print("base lr: %.2e" % (args.lr * 256 / eff_batch_size))
print("actual lr: %.2e" % args.lr)
print("accumulate grad iterations: %d" % args.accum_iter)
print("effective batch size: %d" % eff_batch_size)
if args.distributed:
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
    model_without_ddp = model.module

# build optimizer with layer-wise lr decay (lrd)
param_groups = lrd.param_groups_lrd(model_without_ddp, args.weight_decay,
    no_weight_decay_list=model_without_ddp.no_weight_decay(),
    layer_decay=args.layer_decay
)
optimizer = torch.optim.AdamW(param_groups, lr=args.lr)
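
# --- Illustrative sketch, not the repository's lr_decay util: the geometric scaling
# behind layer-wise lr decay. Parameter groups near the output keep most of the base lr,
# while earlier blocks are decayed progressively; `num_layers` counts transformer blocks.
def _layer_lr_scales_sketch(num_layers, layer_decay):
    # index 0 = patch/pos embedding, index num_layers + 1 = classification head
    return [layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2)]

# e.g. _layer_lr_scales_sketch(12, 0.75) gives 1.0 for the head and
# 0.75 ** 13 ≈ 0.024 for the patch embedding.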
loss_scaler = NativeScaler()
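
# --- Illustrative sketch, not the repository's util: NativeScaler wraps mixed-precision
# gradient scaling. A minimal equivalent step written directly against torch.cuda.amp
# (model, criterion, samples, targets, scaler are placeholders for the real training loop,
# with scaler a torch.cuda.amp.GradScaler):
def _amp_step_sketch(model, criterion, optimizer, scaler, samples, targets):
    with torch.cuda.amp.autocast():
        loss = criterion(model(samples), targets)
    scaler.scale(loss).backward()  # backward on the scaled loss
    scaler.step(optimizer)         # unscales gradients, then optimizer.step()
    scaler.update()
    optimizer.zero_grad()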
if mixup_fn is not None: