# Compute the total number of network parameters
nb_param = 0
for param in net.parameters():
    nb_param += np.prod(list(param.data.size()))
print('Number of parameters:', nb_param)
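# Optional sanity check: summing numel() over the parameters should give the same count.
assert nb_param == sum(p.numel() for p in net.parameters())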
# Create log directory and save the experiment configuration
log_dir = f"./logs/{config.expt_name}/"
os.makedirs(log_dir, exist_ok=True)
with open(f"{log_dir}/config.json", "w") as f:
    json.dump(config, f, indent=4)
writer = SummaryWriter(log_dir)  # Define Tensorboard writer
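# The dumped config.json can later be reloaded to reproduce or evaluate a run,
# e.g. (assuming a plain dict is acceptable wherever `config` is used):
#   with open(f"{log_dir}/config.json") as f:
#       loaded_config = json.load(f)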
# Training parameters
num_nodes = config.num_nodes
num_neighbors = config.num_neighbors
max_epochs = config.max_epochs
val_every = config.val_every
test_every = config.test_every
batch_size = config.batch_size
batches_per_epoch = config.batches_per_epoch
accumulation_steps = config.accumulation_steps
learning_rate = config.learning_rate
decay_rate = config.decay_rate
val_loss_old = 1e6 # For decaying LR based on validation loss
best_pred_tour_len = 1e6 # For saving checkpoints
best_val_loss = 1e6 # For saving checkpoints
# Define optimizer
optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
print(optimizer)
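# Note: the manual decay further below (dividing learning_rate by decay_rate when the
# validation loss stops improving) could roughly be expressed with PyTorch's built-in
# scheduler; a sketch only, not used here:
#   scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
#       optimizer, mode='min', factor=1.0 / decay_rate, threshold=0.01)
#   # then call scheduler.step(val_loss) after each validation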
dataset = DataReader(
    config.num_nodes, config.num_neighbors, config.batch_size,
    config.train_filepath, config.train_filepath_solution,
    do_prep=False
)
if 'resume_from_dir' in config:
    # Resume from the latest training checkpoint
    if torch.cuda.is_available():
        checkpoint = torch.load(os.path.join(config.resume_from_dir, "last_train_checkpoint.tar"))
    else:
        checkpoint = torch.load(os.path.join(config.resume_from_dir, "last_train_checkpoint.tar"), map_location='cpu')
    # Load network state
    net.load_state_dict(checkpoint['model_state_dict'])
    # Load optimizer state
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    # Load other training parameters
    epoch = checkpoint['epoch']
    train_loss = checkpoint['train_loss']
    val_loss = checkpoint['val_loss']
    # Note: the learning rate was already restored by optimizer.load_state_dict;
    # this only keeps the local variable in sync for logging
    for param_group in optimizer.param_groups:
        learning_rate = param_group['lr']
    print(f"Loaded checkpoint from epoch {epoch}")
else:
    epoch = -1
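# The resume logic above assumes 'last_train_checkpoint.tar' was written with the same
# keys used by the torch.save(...) calls below: epoch, model_state_dict,
# optimizer_state_dict, train_loss, val_loss.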
epoch_bar = master_bar(range(epoch + 1, max_epochs))
for epoch in epoch_bar:
    # Log to Tensorboard
    writer.add_scalar('learning_rate', learning_rate, epoch)

    # Train
    train_time, train_loss, train_err_edges, train_err_tour, train_err_tsp, train_pred_tour_len, train_gt_tour_len = train_one_epoch(net, optimizer, config, epoch_bar, dataset=dataset)
    epoch_bar.write('t: ' + metrics_to_str(epoch, train_time, learning_rate, train_loss, train_err_edges, train_err_tour, train_err_tsp, train_pred_tour_len, train_gt_tour_len))
    writer.add_scalar('loss/train_loss', train_loss, epoch)
    writer.add_scalar('pred_tour_len/train_pred_tour_len', train_pred_tour_len, epoch)
    writer.add_scalar('optimality_gap/train_opt_gap', train_pred_tour_len / train_gt_tour_len - 1, epoch)

    if epoch % val_every == 0 or epoch == max_epochs - 1:
        # Validate
        val_time, val_loss, val_err_edges, val_err_tour, val_err_tsp, val_pred_tour_len, val_gt_tour_len = test(net, config, epoch_bar, mode='val')
        epoch_bar.write('v: ' + metrics_to_str(epoch, val_time, learning_rate, val_loss, val_err_edges, val_err_tour, val_err_tsp, val_pred_tour_len, val_gt_tour_len))
        writer.add_scalar('loss/val_loss', val_loss, epoch)
        writer.add_scalar('pred_tour_len/val_pred_tour_len', val_pred_tour_len, epoch)
        writer.add_scalar('optimality_gap/val_opt_gap', val_pred_tour_len / val_gt_tour_len - 1, epoch)

        # Save checkpoint
        if val_pred_tour_len < best_pred_tour_len:
            best_pred_tour_len = val_pred_tour_len  # Update best val predicted tour length
            torch.save({
                'epoch': epoch,
                'model_state_dict': net.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'train_loss': train_loss,
                'val_loss': val_loss,
            }, log_dir + "best_val_tourlen_checkpoint.tar")

        if val_loss < best_val_loss:
            best_val_loss = val_loss  # Update best val loss
            torch.save({
                'epoch': epoch,
                'model_state_dict': net.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'train_loss': train_loss,
                'val_loss': val_loss,
            }, log_dir + "best_val_loss_checkpoint.tar")

        # Update learning rate
        if val_loss > 0.99 * val_loss_old:
            learning_rate /= decay_rate
            for param_group in optimizer.param_groups: