# NOTE: dataset-viewer artifact ("text stringlengths 1 93.6k | --- |") removed;
# this file is a Python training-loop fragment.
# -- Paths and log-file setup
snapshot_path = os.path.join(log_path, 'snapshots')
f = open(os.path.join(log_path, 'train0.log'), 'w')

# -- Logging Parameters
log(f, 'args: ' + str(args))
log(f, 'model: ' + str(network), False)
log(f, 'Training0...')
log(f, 'LR: %.12f.' % (args.lr))
log(f, 'Classification Model')

# -- Get train, test functions
train_funcs, test_funcs = network_functions.create_network_functions(network)

start_epoch = args.start_epoch
num_epochs = args.epochs

# Per-epoch loss histories, keyed by metric name.
valid_losses = {metric: [] for metric in ('loss1', 'new_mae')}
test_losses = {metric: [] for metric in ('loss1', 'new_mae')}
train_losses = {metric: [] for metric in ('loss1',)}

# Batches per epoch = total training patches / batch size (integer division).
batch_size = args.batch_size
num_train_images = len(dataset.dataset_files['train'])
num_patches_per_image = args.patches
num_batches_per_epoch = num_patches_per_image * num_train_images // batch_size
# -- Resume from a previous snapshot when start_epoch > 0.
if start_epoch > 0:
    # Restore the saved loss histories; encoding='latin1' keeps
    # Python2-era pickles loadable under Python 3.
    with open(os.path.join(snapshot_path, 'losses.pkl'), 'rb') as lossfile:
        train_losses, valid_losses, test_losses = pickle.load(lossfile, encoding='latin1')
    print('loaded prev losses')

    # Truncate every history to start_epoch so resumed epochs do not
    # append duplicates.
    # NOTE(review): `metrics` is defined outside this fragment — presumably
    # the list of tracked metric names; verify against the enclosing module.
    for metric in metrics:
        try:
            valid_losses[metric] = valid_losses[metric][:start_epoch]
        except KeyError:
            # An older snapshot may lack this metric in valid_losses; skip it
            # instead of aborting the resume. (Narrowed from a bare `except`.)
            pass
        test_losses[metric] = test_losses[metric][:start_epoch]
    for metric in train_losses:
        train_losses[metric] = train_losses[metric][:start_epoch]

    # Reload the network weights from the snapshot for start_epoch.
    network, _ = load_net(network,
                          network_functions, 0,
                          snapshot_path,
                          get_filename(network.name,
                                       start_epoch))
# -- Main Training Loop
# Per-box loss weights: reuse the precomputed weights if a saved copy
# exists; otherwise start uniform — the first epoch below then computes
# the real weights and exits so the run can be restarted with them.
global loss_weights
if os.path.isfile("loss_weights.npy"):
    loss_weights = np.load('loss_weights.npy')
else:
    loss_weights = np.ones((4, 4))

# Accumulates ground-truth box-size histograms across epochs/batches.
HIST_GT = []
# One iteration per remaining epoch.
for e_i, epoch in enumerate(range(start_epoch, num_epochs)):
    avg_loss = [0.0 for _ in range(1)]   # running loss sum per scale (single scale here)
    hist_boxes = np.zeros((16,))         # predicted box histogram, for logging
    hist_boxes_gt = np.zeros((16,))      # ground-truth box histogram, for logging

    # b_i - batch index
    for b_i in range(num_batches_per_epoch):
        # Generate next training sample
        Xs, Ys, _ = dataset.train_get_batch()
        losses, hist_boxes, hist_boxes_gt = train_funcs[0](
            Xs, Ys, hist_boxes, hist_boxes_gt, loss_weights, network)
        for scale_idx in range(1):
            avg_loss[scale_idx] = avg_loss[scale_idx] + losses[scale_idx]

        # Logging losses after 1k iterations.
        if b_i % 1000 == 0:
            log(f, 'Epoch %d [%d]: %s loss: %s.' % (epoch, b_i, [network.name], losses))
            log(f, 'hist_boxes %s.' % (np.array_str(np.int32(hist_boxes))))
            log(f, 'hist_boxes_gt %s.' % (np.array_str(np.int32(hist_boxes_gt))))
            hist_boxes = np.zeros((16,))
            hist_boxes_gt = np.zeros((16,))

    HIST_GT.append(hist_boxes_gt)

    # Bootstrap pass: while the weights are still all-ones, fold the
    # accumulated GT histograms into a 4x4 table, derive the per-box loss
    # weights, persist them, and stop — the run must be restarted so
    # training actually uses the computed weights.
    if np.all(loss_weights == 1):
        HIST_GT = np.asarray(HIST_GT)
        HIST_GT = np.sum(HIST_GT, axis=0)
        HIST_GT = np.reshape(HIST_GT, (4, 4))
        loss_weights = compute_box_weights(HIST_GT)
        np.save('loss_weights.npy', loss_weights)
        print("Saving loss weights!! PLEASE re-run the code for training/testing")
        exit()

    # -- Stats update
    avg_loss = [al / num_batches_per_epoch for al in avg_loss]
    avg_loss = [av for av in avg_loss]
    train_losses['loss1'].append(avg_loss)

    # Evaluate on the test and validation splits and log the summaries.
    epoch_test_losses, txt = test_lsccnn(test_funcs, dataset, 'test', network, True)
    log(f, 'TEST epoch: ' + str(epoch) + ' ' + txt)
    epoch_val_losses, txt = test_lsccnn(test_funcs, dataset, 'test_valid', network, True)
    log(f, 'TEST valid epoch: ' + str(epoch) + ' ' + txt)
# NOTE: trailing dataset-viewer artifact ("Subsets and Splits / No community
# queries yet ...") removed — not part of the training script.