text
stringlengths
1
93.6k
# NOTE(review): indentation was lost in this chunk; the nesting below is
# inferred from the code and must be re-verified against the original file.
# These lines are the tail of a training routine whose `def` is outside this
# view (the bare `return` at the end confirms a function body).
# Append this epoch's validation/test values for the tracked metrics —
# presumably executed once per epoch inside the training loop (confirm).
for metric in ['loss1', 'new_mae']:
valid_losses[metric].append(epoch_val_losses[metric])
test_losses[metric].append(epoch_test_losses[metric])
# Save networks: checkpoint model weights + optimizer state, filename keyed
# by network name and 1-based epoch number.
save_checkpoint({
'epoch': epoch + 1,
'state_dict': network.state_dict(),
'optimizer': network_functions.optimizers.state_dict(),
}, snapshot_path, get_filename(network.name, epoch + 1))
print ('saving graphs...')
# Persist the raw loss histories; protocol=2 keeps the pickle readable from
# Python 2 — presumably for legacy tooling, confirm this is still required.
with open(os.path.join(snapshot_path, 'losses.pkl'), 'wb') as lossfile:
pickle.dump((train_losses, valid_losses, test_losses), lossfile, protocol=2)
# Plot training-loss curves: metrics whose per-epoch entries are lists get one
# PNG per component (train_<metric>_<i>.png); scalar histories get a single
# train_<metric>.png. Metrics containing "maxima_split" are skipped here.
for metric in train_losses.keys():
if "maxima_split" not in metric:
if isinstance(train_losses[metric][0], list):
for i in range(len(train_losses[metric][0])):
plt.plot([a[i] for a in train_losses[metric]])
plt.savefig(os.path.join(snapshot_path, 'train_%s_%d.png' % (metric, i)))
plt.clf()
plt.close()
print(metric, "METRIC", train_losses[metric])
plt.plot(train_losses[metric])
plt.savefig(os.path.join(snapshot_path, 'train_%s.png' % metric))
plt.clf()
plt.close()
# Same plotting scheme for validation curves (no "maxima_split" filter here —
# NOTE(review): asymmetry with the train loop above; confirm it is intended).
for metric in valid_losses.keys():
if isinstance(valid_losses[metric][0], list):
for i in range(len(valid_losses[metric][0])):
plt.plot([a[i] for a in valid_losses[metric]])
plt.savefig(os.path.join(snapshot_path, 'valid_%s_%d.png' % (metric, i)))
plt.clf()
plt.close()
plt.plot(valid_losses[metric])
plt.savefig(os.path.join(snapshot_path, 'valid_%s.png' % metric))
plt.clf()
plt.close()
# Same plotting scheme for test curves.
for metric in test_losses.keys():
if isinstance(test_losses[metric][0], list):
for i in range(len(test_losses[metric][0])):
plt.plot([a[i] for a in test_losses[metric]])
plt.savefig(os.path.join(snapshot_path, 'test_%s_%d.png' % (metric, i)))
plt.clf()
plt.close()
plt.plot(test_losses[metric])
plt.savefig(os.path.join(snapshot_path, 'test_%s.png' % metric))
plt.clf()
plt.close()
# -- Finding best NMS Threshold: -1 means "search for it"; any other value is
# used as-is from the command-line arguments.
if args.threshold == -1:
threshold = find_class_threshold(f, dataset, 1, test_funcs, network)
log(f, "Best Threshold is", threshold)
else:
threshold = args.threshold
# Test the latest model and the best model.
# NOTE(review): the first argmin below is dead code — its result is
# immediately overwritten on the next line. It would also raise on Python 3
# (np.argmin over a `map` object), which the bare except silently hides.
# The bare `except: pass` swallows *all* errors here, including KeyError if
# 'new_mae' is missing; consider narrowing it.
try:
min_epoch = np.argmin(map(sum, valid_losses['mae']))
min_epoch = np.argmin(valid_losses['new_mae'])
log(f, 'Done Training.\n Minimum loss %s at epoch %s' % (valid_losses['new_mae'][min_epoch], min_epoch))
except:
pass
# Final evaluation on the test split with the chosen NMS threshold; `txt` is
# the printable result summary returned by test_lsccnn.
log(f, '\nTesting ...')
_, txt = test_lsccnn(test_funcs, dataset, 'test', network, './models/dump_test', thresh=threshold)
log(f, 'TEST epoch: ' + str(num_epochs - 1) + ' ' + txt)
log(f, 'Exiting train...')
# Close the log file handle before leaving.
f.close()
return
"""
This method dumps dataset (if not created yet) and calls
`train_networks` which consists of training, validation
and testing steps.
Basically, this is a wrapper around the main training stage.
"""
# Entry point: prepare the on-disk dataset if needed, then (in the
# continuation of this function beyond this chunk) run the training stage.
# NOTE(review): indentation was lost in this chunk and the function appears
# truncated here — `exit(0)` presumably belongs inside the dataset-creation
# branch so later runs proceed to training; confirm against the original file.
def train():
global dataset_paths, model_save_dir, batch_size, crop_size, dataset, args
print(dataset_paths, dataset)
# Build the dataset files only on the first run.
if not dataset.dataset_ready:
print ('CREATING DATASET...')
# UCF-QNRF uses image scale factor 2; all other datasets keep factor 1.
if args.dataset == "ucfqnrf":
image_scale_factor = 2
else:
image_scale_factor = 1
# ROI size/stride, RGB usage and test batch size are fixed here; crop size,
# downscale factor and validation-set size come from module-level config.
dataset.create_dataset_files(dataset_paths,
image_crop_size=crop_size,
image_roi_size=80,
image_roi_stride=72,
image_scale_factor=image_scale_factor,
prediction_downscale_factor=output_downscale,
valid_set_size=validation_set,
use_rgb=True,
test_batch_size=4)
# Stop after the (lengthy) dataset dump so it can be inspected; rerun to
# continue to training once dataset.dataset_ready is True.
exit(0)