text
stringlengths 1
93.6k
|
|---|
print(f"Accuracy of the network on {len(dataset_val)} test images: {test_stats['acc1']:.5f}%")
|
return
|
# --- Training loop (tail of main) ---
# Runs for args.epochs epochs; after each epoch it optionally checkpoints,
# evaluates (plain and EMA weights), and writes metrics to TensorBoard-style
# log_writer, wandb, and a JSON-lines log.txt.
# NOTE(review): reconstructed from a whitespace-mangled paste; indentation was
# inferred from Python semantics and the surrounding control flow — confirm
# against the original file.
max_accuracy = 0.0
# Track a separate best accuracy for the EMA weights when EMA evaluation is on.
if args.model_ema and args.model_ema_eval:
    max_accuracy_ema = 0.0

print("Start training for %d epochs" % args.epochs)
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
    if args.distributed:
        # Reshuffle the DistributedSampler so every epoch sees a new ordering.
        data_loader_train.sampler.set_epoch(epoch)
    if log_writer is not None:
        # Global step accounts for gradient accumulation (update_freq).
        log_writer.set_step(epoch * num_training_steps_per_epoch * args.update_freq)
    if wandb_logger:
        wandb_logger.set_steps()
    # One full pass over the training data; returns a dict of averaged metrics.
    train_stats = train_one_epoch(
        model, criterion, data_loader_train, optimizer,
        device, epoch, loss_scaler, args.clip_grad, model_ema, mixup_fn,
        log_writer=log_writer, wandb_logger=wandb_logger, start_steps=epoch * num_training_steps_per_epoch,
        lr_schedule_values=lr_schedule_values, wd_schedule_values=wd_schedule_values,
        num_training_steps_per_epoch=num_training_steps_per_epoch, update_freq=args.update_freq,
        use_amp=args.use_amp
    )
    # Periodic checkpoint: every save_ckpt_freq epochs, plus the final epoch.
    if args.output_dir and args.save_ckpt:
        if (epoch + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs:
            utils.save_model(
                args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
                loss_scaler=loss_scaler, epoch=epoch, model_ema=model_ema)
    if data_loader_val is not None:
        test_stats = evaluate(data_loader_val, model, device, use_amp=args.use_amp)
        print(f"Accuracy of the model on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
        # Best-checkpoint tracking is restricted to the last 50 epochs
        # (epoch+1 >= args.epochs-50) — presumably to ignore early noisy peaks.
        if max_accuracy < test_stats["acc1"] and epoch+1 >= args.epochs-50:
            max_accuracy = test_stats["acc1"]
            if args.output_dir and args.save_ckpt:
                # Overwrites the "best" checkpoint each time a new maximum is hit.
                utils.save_model(
                    args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
                    loss_scaler=loss_scaler, epoch=epoch, name="best", model_ema=model_ema)
        print(f'Max accuracy: {max_accuracy:.2f}%, FLOPS={flops:.2f}G, PARAMS={n_parameters*1e-6:.2f}M')
        if log_writer is not None:
            log_writer.update(test_acc1=test_stats['acc1'], head="perf", step=epoch)
            log_writer.update(test_acc5=test_stats['acc5'], head="perf", step=epoch)
            log_writer.update(test_loss=test_stats['loss'], head="perf", step=epoch)
        # Flat per-epoch record: train_* and test_* metrics plus model stats.
        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                     **{f'test_{k}': v for k, v in test_stats.items()},
                     'epoch': epoch,
                     'n_parameters': n_parameters,
                     'flops': flops
                     }
        # repeat testing routines for EMA, if ema eval is turned on
        if args.model_ema and args.model_ema_eval:
            test_stats_ema = evaluate(data_loader_val, model_ema.ema, device, use_amp=args.use_amp)
            print(f"Accuracy of the model EMA on {len(dataset_val)} test images: {test_stats_ema['acc1']:.1f}%")
            # Same last-50-epochs window as the non-EMA best tracking above.
            if max_accuracy_ema < test_stats_ema["acc1"] and epoch+1 >= args.epochs-50:
                max_accuracy_ema = test_stats_ema["acc1"]
                if args.output_dir and args.save_ckpt:
                    utils.save_model(
                        args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
                        loss_scaler=loss_scaler, epoch=epoch,name="best-ema", model_ema=model_ema)
            print(f'Max EMA accuracy: {max_accuracy_ema:.2f}%')
            if log_writer is not None:
                log_writer.update(test_acc1_ema=test_stats_ema['acc1'], head="perf", step=epoch)
            # Merge EMA metrics into the same per-epoch record under test_*_ema keys.
            log_stats.update({**{f'test_{k}_ema': v for k, v in test_stats_ema.items()}})
    else:
        # No validation loader: record training metrics only.
        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                     'epoch': epoch,
                     'n_parameters': n_parameters,
                     'flops': flops}
    # Rank-0-only file logging: one JSON object per line appended to log.txt.
    if args.output_dir and utils.is_main_process():
        if log_writer is not None:
            log_writer.flush()
        with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
            f.write(json.dumps(log_stats) + "\n")
    if wandb_logger:
        wandb_logger.log_epoch_metrics(log_stats)

# After training: optionally upload checkpoints to wandb, then report wall time.
if wandb_logger and args.wandb_ckpt and args.save_ckpt and args.output_dir:
    wandb_logger.log_checkpoints()

total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
|
if __name__ == '__main__':
    # Script entry point: build the CLI from the shared argument parser,
    # make sure the output directory exists, then hand off to main().
    cli = argparse.ArgumentParser(
        'ConvNeXt training and evaluation script', parents=[get_args_parser()])
    args = cli.parse_args()
    if args.output_dir:
        Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    main(args)
|
# <FILESEP>
|
#!/usr/bin/env python3
|
"""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.