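# Maintain an exponential moving average (EMA) copy of the model weights; the EMA
# copy is updated alongside the optimizer steps and can be evaluated separately.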
model_ema = ModelEma(
    model,
    decay=args.model_ema_decay,
    device='cpu' if args.model_ema_force_cpu else '',
    resume='')
print("Using EMA with decay = %.8f" % args.model_ema_decay)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params:', n_parameters)
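
# The effective batch size combines the per-GPU batch size, the gradient-accumulation
# factor (update_freq), and the number of distributed processes.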
total_batch_size = args.batch_size * args.update_freq * utils.get_world_size()
num_training_steps_per_epoch = len(dataset_train) // total_batch_size
print("LR = %.8f" % args.lr)
print("Batch size = %d" % total_batch_size)
print("Update frequency = %d" % args.update_freq)
print("Number of training examples = %d" % len(dataset_train))
print("Number of training steps per epoch = %d" % num_training_steps_per_epoch)
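
# Layer-wise learning-rate decay: the topmost layers keep the base LR while earlier
# layers are scaled down geometrically (scale = layer_decay ** depth_from_top).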
if args.layer_decay < 1.0 or args.layer_decay > 1.0:
    num_layers = 12  # convnext layers divided into 12 parts, each with a different decayed lr value.
    assert args.model in ['convnext_small', 'convnext_base', 'convnext_large', 'convnext_xlarge'], \
        "Layer Decay impl only supports convnext_small/base/large/xlarge"
    assigner = LayerDecayValueAssigner(
        list(args.layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2)))
else:
    assigner = None
if assigner is not None:
    print("Assigned values = %s" % str(assigner.values))
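
# Wrap the model in DistributedDataParallel for multi-GPU training; keep an
# unwrapped reference (model_without_ddp) for the optimizer and checkpointing.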
if args.distributed:
    model = torch.nn.parallel.DistributedDataParallel(
        model, device_ids=[args.gpu], find_unused_parameters=True)
    model_without_ddp = model.module
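
# Build the optimizer on the unwrapped model; when layer decay is enabled, the
# assigner supplies per-parameter-group layer ids and LR scale factors.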
optimizer = create_optimizer(
    args, model_without_ddp, skip_list=None,
    get_num_layer=assigner.get_layer_id if assigner is not None else None,
    get_layer_scale=assigner.get_scale if assigner is not None else None)
loss_scaler = NativeScaler()  # if args.use_amp is False, this won't be used

print("Use Cosine LR scheduler")
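# Per-iteration cosine schedules for the learning rate (with warmup) and for weight decay.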
lr_schedule_values = utils.cosine_scheduler(
    args.lr, args.min_lr, args.epochs, num_training_steps_per_epoch,
    warmup_epochs=args.warmup_epochs, warmup_steps=args.warmup_steps,
)
if args.weight_decay_end is None:
    args.weight_decay_end = args.weight_decay
wd_schedule_values = utils.cosine_scheduler(
    args.weight_decay, args.weight_decay_end, args.epochs, num_training_steps_per_epoch)
print("Max WD = %.7f, Min WD = %.7f" % (max(wd_schedule_values), min(wd_schedule_values)))
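
# Pick the training loss: soft targets when mixup/cutmix is active, label
# smoothing when requested, plain cross-entropy otherwise.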
if mixup_fn is not None:
    # smoothing is handled with mixup label transform
    criterion = SoftTargetCrossEntropy()
elif args.smoothing > 0.:
    criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
    criterion = torch.nn.CrossEntropyLoss()
print("criterion = %s" % str(criterion))
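
# Optionally report GFLOPs per image with fvcore, registering a custom FLOP handler
# for the depthwise oriented conv op, which fvcore does not count out of the box.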
if args.show_flops:
    from fvcore.nn import FlopCountAnalysis
    from collections import Counter
    import numpy as np

    input = (torch.zeros((args.batch_size, 3, 224, 224)).cuda(),)
    flops = FlopCountAnalysis(model_without_ddp, input)

    def get_shape(val):
        if val.isCompleteTensor():
            return val.type().sizes()
        else:
            return None

    def conv_flop_jit(inputs, outputs):
        x, w = inputs[:2]
        x_shape, w_shape, out_shape = (get_shape(x), get_shape(w), get_shape(outputs[0]))
        flop = np.prod(w_shape) * np.prod(out_shape) // w_shape[0]
        return Counter({"conv": flop})

    flops.set_op_handle('prim::PythonOp._DepthwiseOrientedConv1d', conv_flop_jit)
    flops = flops.total() / 1e9 / args.batch_size
    del input
    print(f'flops: {flops}')
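
# Resume model, optimizer, loss-scaler, and EMA state from a checkpoint if one is available.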
utils.auto_load_model(
    args=args, model=model, model_without_ddp=model_without_ddp,
    optimizer=optimizer, loss_scaler=loss_scaler, model_ema=model_ema)
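
# Evaluation-only mode: run validation on either the EMA weights or the raw model
# weights, skipping training.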
if args.eval:
    print("Eval only mode")
    if args.model_ema_eval:
        print('Eval ema')
        test_stats = evaluate(data_loader_val, model_ema.ema, device, use_amp=args.use_amp)
    else:
        print('Eval non-ema')
        test_stats = evaluate(data_loader_val, model, device, use_amp=args.use_amp)