if mixup_fn is not None:
    # smoothing is handled with mixup label transform
    criterion = SoftTargetCrossEntropy()
elif args.smoothing > 0.:
    criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
    criterion = torch.nn.CrossEntropyLoss()
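
# Note (illustrative, not part of the original script): mixup blends pairs of
# one-hot labels into soft target vectors, e.g.
#   soft_target = lam * one_hot(y_a) + (1 - lam) * one_hot(y_b)
# and timm's Mixup can also fold label smoothing into those targets, which is
# why the soft-target branch needs no separate smoothing term.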

print("criterion = %s" % str(criterion))

misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler)

if args.eval:
    test_stats = evaluate(data_loader_val, model, device)
    print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
    exit(0)
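
# Usage note: with --eval the script runs the single evaluation pass above on
# the loaded checkpoint and exits; everything below is the training path.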

print(f"Start training for {args.epochs} epochs")
start_time = time.time()
max_accuracy = 0.0

for epoch in range(args.start_epoch, args.epochs):
    if args.distributed:
        # reshuffle the DistributedSampler with a per-epoch seed
        data_loader_train.sampler.set_epoch(epoch)

    train_stats = train_one_epoch(
        model, criterion, data_loader_train,
        optimizer, device, epoch, loss_scaler,
        args.clip_grad, mixup_fn,
        log_writer=log_writer,
        args=args
    )
    if args.output_dir:
        misc.save_model(
            args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
            loss_scaler=loss_scaler, epoch=epoch)

    test_stats = evaluate(data_loader_val, model, device)
    print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
    max_accuracy = max(max_accuracy, test_stats["acc1"])
    print(f'Max accuracy: {max_accuracy:.2f}%')

    if log_writer is not None:
        log_writer.add_scalar('perf/test_acc1', test_stats['acc1'], epoch)
        log_writer.add_scalar('perf/test_acc5', test_stats['acc5'], epoch)
        log_writer.add_scalar('perf/test_loss', test_stats['loss'], epoch)

    log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                 **{f'test_{k}': v for k, v in test_stats.items()},
                 'epoch': epoch,
                 'n_parameters': n_parameters}

    if args.output_dir and misc.is_main_process():
        if log_writer is not None:
            log_writer.flush()
        with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
            f.write(json.dumps(log_stats) + "\n")
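        # Illustrative note: each epoch appends one JSON object per line to
        # log.txt, with keys such as "train_loss", "test_acc1", "epoch", and
        # "n_parameters", as assembled in log_stats above.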

total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))

if __name__ == '__main__':
    args = get_args_parser()
    args = args.parse_args()
    if args.output_dir:
        Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    main(args)

# <FILESEP>

'''
Software for the tracking of eddies in OFAM model output,
following Chelton et al., Progress in Oceanography, 2011.
'''

# Load required modules
import numpy as np
import matplotlib
# Force matplotlib to skip the Xwindows backend; turn this on when running
# headless (e.g. on storm).
matplotlib.use('Agg')
from matplotlib import pyplot as plt

import eddy_functions as eddy

# Load parameters
from params import *

# Load latitude and longitude vectors and restrict to domain of interest
lon, lat = eddy.load_lonlat(run)

# Chris' workaround for not having Eric's find_nearest function:
# hardcode the index bounds of the domain of interest.
#i1 = 0
#i2 = 2000
#j1 = 0
#j2 = 2000
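
# A minimal sketch of the missing find_nearest helper (an assumption about its
# intent, not Eric's original): return the index of the element of a coordinate
# vector closest to a target value, so bounds like i1/i2 could be derived from
# lon/lat instead of hardcoded, e.g. i1 = find_nearest(lon, lon_west) for a
# hypothetical western bound lon_west.
def find_nearest(vec, val):
    # index of the element of vec closest to val
    return int(np.abs(np.asarray(vec) - val).argmin())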