    running_loss = 0.0
    running_pred_tour_len = 0.0
    running_gt_tour_len = 0.0
    running_nb_data = 0
    running_nb_batch = 0

    with torch.no_grad():
        start_test = time.time()
        for batch_num in progress_bar(range(batches_per_epoch), parent=master_bar):
            # Generate a batch of TSPs
            try:
                batch = next(dataset)
            except StopIteration:
                break
            # Convert batch to torch Variables
            # x_edges = Variable(torch.LongTensor(batch.edges).type(dtypeLong), requires_grad=False)
            # x_edges_values = Variable(torch.FloatTensor(batch.edges_values).type(dtypeFloat), requires_grad=False)
            # x_nodes = Variable(torch.LongTensor(batch.nodes).type(dtypeLong), requires_grad=False)
            x_nodes_coord = Variable(torch.FloatTensor(batch.nodes_coord).type(dtypeFloat), requires_grad=False)
            x_nodes_timew = Variable(torch.FloatTensor(batch.nodes_timew).type(dtypeFloat), requires_grad=False) if is_tsptw else None
            # y_edges = Variable(torch.LongTensor(batch.edges_target).type(dtypeLong), requires_grad=False)
            # y_nodes = Variable(torch.LongTensor(batch.nodes_target).type(dtypeLong), requires_grad=False)
            y_tour = Variable(torch.LongTensor(batch.tour_nodes).type(dtypeLong), requires_grad=False)
            # Compute class weights (if not yet computed)
            if type(edge_cw) != torch.Tensor:
                # edge_labels = y_edges.cpu().numpy().flatten()
                # edge_cw = compute_class_weight("balanced", classes=np.unique(edge_labels), y=edge_labels)
                # edge_cw = len(y_edges) / (num_edge_classes * edge_label_bincount)
                num_nodes = x_nodes_coord.size(1)
                num_edges = num_nodes * num_nodes
                num_edge_classes = 2
                # Keep this as a NumPy array rather than a tensor: it is a parameter, not a model
                # input, and passing it as a tensor would confuse DataParallel.
                edge_label_bincount = np.array([num_edges - 2 * num_nodes, 2 * num_nodes])
                edge_cw = num_edges / (num_edge_classes * edge_label_bincount)
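                # Rationale: in the dense num_nodes x num_nodes edge-label matrix, each node has
                # exactly two incident tour edges, so 2 * num_nodes entries belong to the positive
                # class and the remaining num_nodes**2 - 2 * num_nodes to the negative class; the
                # weights above up-weight the rare positive ("edge on tour") class accordingly.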
            # Forward pass
            # y_preds, loss = net.forward(x_edges, x_edges_values, x_nodes, x_nodes_coord, y_edges, edge_cw)
            y_preds, loss, x_edges_values = net.forward(x_nodes_coord, x_nodes_timew, y_tour, edge_cw)
            loss = loss.mean()  # Take mean of loss across multiple GPUs
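            # Note that, unlike the commented-out signature above, the wrapped model receives only
            # node-level inputs and the ground-truth tour; it builds its edge inputs internally and
            # returns the computed edge values alongside the predictions and the loss.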
            # Compute error metrics
            # err_edges, err_tour, err_tsp, tour_err_idx, tsp_err_idx = edge_error(y_preds, y_edges, x_edges)

            # Get batch beamsearch tour prediction
            # if mode == 'val':  # Validation: faster 'vanilla' beamsearch
            #     bs_nodes = beamsearch_tour_nodes(
            #         y_preds, beam_size, batch_size, num_nodes, dtypeFloat, dtypeLong, probs_type='logits')
            # elif mode == 'test':  # Testing: beamsearch with shortest tour heuristic
            #     bs_nodes = beamsearch_tour_nodes_shortest(
            #         y_preds, x_edges_values, beam_size, batch_size, num_nodes, dtypeFloat, dtypeLong, probs_type='logits')

            # Compute mean tour length
            # pred_tour_len = mean_tour_len_nodes(x_edges_values, bs_nodes)
            gt_tour_len = np.mean(batch.tour_len)
            # Update running data
            running_nb_data += batch_size
            running_loss += batch_size * loss.data.item()
            # running_err_edges += batch_size * err_edges
            # running_err_tour += batch_size * err_tour
            # running_err_tsp += batch_size * err_tsp
            # running_pred_tour_len += batch_size * pred_tour_len
            running_gt_tour_len += batch_size * gt_tour_len
            running_nb_batch += 1
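            # The running sums are weighted by batch_size, so the averages reported below are
            # per TSP instance rather than per batch.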
            # Log intermediate statistics
            # result = ('loss:{loss:.4f} pred_tour_len:{pred_tour_len:.3f} gt_tour_len:{gt_tour_len:.3f}'.format(
            result = ('loss:{loss:.4f} gt_tour_len:{gt_tour_len:.3f}'.format(
                loss=running_loss / running_nb_data,
                # pred_tour_len=running_pred_tour_len / running_nb_data,
                gt_tour_len=running_gt_tour_len / running_nb_data))
            master_bar.child.comment = result
    # Compute statistics for full epoch
    loss = running_loss / running_nb_data
    err_edges = 0  # running_err_edges / running_nb_data
    err_tour = 0  # running_err_tour / running_nb_data
    err_tsp = 0  # running_err_tsp / running_nb_data
    pred_tour_len = running_pred_tour_len / running_nb_data
    gt_tour_len = running_gt_tour_len / running_nb_data

    return time.time() - start_test, loss, err_edges, err_tour, err_tsp, pred_tour_len, gt_tour_len
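
# Illustrative sketch (an assumed helper, not one of the repository's own utilities): how a tour
# length can be computed from node coordinates and a visiting order, which is the quantity the
# pred_tour_len / gt_tour_len statistics above measure. Relies on the module's existing
# `import numpy as np`.
def _tour_length(nodes_coord, tour_nodes):
    """Euclidean length of the closed tour that visits `tour_nodes` in order."""
    ordered = nodes_coord[tour_nodes]                     # (num_nodes, 2) coords in visiting order
    diffs = ordered - np.roll(ordered, shift=-1, axis=0)  # edge vectors, wrapping back to the start
    return float(np.linalg.norm(diffs, axis=1).sum())

# Example on hypothetical data: _tour_length(np.random.rand(20, 2), np.random.permutation(20))
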
def main(config):
    # Instantiate the network
    assert config.num_neighbors == -1, "KNN features is deprecated due to PrepWrap"
    model = ResidualGatedGCNModel(config, dtypeFloat, dtypeLong)
    if 'sparse' in config and config.sparse is not None:
        model = wrap_sparse(model, config.sparse)
    model = PrepWrapResidualGatedGCNModel(model)
    net = nn.DataParallel(model)
    if torch.cuda.is_available():
        net.cuda()
    print(net)

    # Compute number of network parameters
    nb_param = 0
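    # A typical way to complete the parameter count (a sketch; the original continuation is
    # not shown here) would be:
    #     for param in net.parameters():
    #         nb_param += np.prod(list(param.data.size()))
    #     print('Number of parameters:', nb_param)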