print("CUDA available, using {} GPUs".format(torch.cuda.device_count()))
dtypeFloat = torch.cuda.FloatTensor
dtypeLong = torch.cuda.LongTensor
torch.cuda.manual_seed(1)
else:
print("CUDA not available")
dtypeFloat = torch.FloatTensor
dtypeLong = torch.LongTensor
torch.manual_seed(1)
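
# Note (sketch, not this repo's convention): the typed-tensor aliases above are
# PyTorch's legacy API; a device-based equivalent would be, e.g.:
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   x = torch.as_tensor(data, dtype=torch.float32, device=device)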


def mean_tour_len_edges(x_edges_values, y_pred_edges):
    """
    Computes the mean tour length for a batch of predictions given as edge
    adjacency matrices (PyTorch tensors).

    Args:
        x_edges_values: Edge values (distance) matrix (batch_size, num_nodes, num_nodes)
        y_pred_edges: Edge predictions (batch_size, num_nodes, num_nodes, voc_edges)

    Returns:
        mean_tour_len: Mean tour length over the batch
    """
    y = F.softmax(y_pred_edges, dim=-1)  # B x V x V x voc_edges
    y = y.argmax(dim=3)  # B x V x V
    # Sum the distances over all predicted tour edges; divide by 2 because
    # x_edges_values is symmetric, so every edge is counted twice.
    tour_lens = (y.float() * x_edges_values.float()).sum(dim=1).sum(dim=1) / 2
    mean_tour_len = tour_lens.sum().to(dtype=torch.float).item() / tour_lens.numel()
    return mean_tour_len
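

# Hedged usage sketch (illustrative helper, not part of the original code):
# calling mean_tour_len_edges on random inputs with the documented shapes.
def _demo_mean_tour_len_edges():
    x_edges_values = torch.rand(2, 5, 5)  # pairwise distances, B x V x V
    x_edges_values = (x_edges_values + x_edges_values.transpose(1, 2)) / 2  # symmetrize
    y_pred_edges = torch.rand(2, 5, 5, 2)  # raw edge scores, B x V x V x voc_edges
    return mean_tour_len_edges(x_edges_values, y_pred_edges)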


def train_one_epoch(net, optimizer, config, master_bar, dataset=None):
    # Set training mode
    net.train()

    # Assign parameters
    num_nodes = config.num_nodes
    num_neighbors = config.num_neighbors
    batch_size = config.batch_size
    batches_per_epoch = config.batches_per_epoch
    accumulation_steps = config.accumulation_steps
    train_filepath = config.train_filepath
    train_target_filepath = config.train_filepath_solution

    if dataset is None:
        dataset = DataReader(num_nodes, num_neighbors, batch_size, train_filepath,
                             train_target_filepath, do_shuffle=True, do_prep=False)
    else:
        dataset.shuffle()

    if batches_per_epoch != -1:
        batches_per_epoch = min(batches_per_epoch, dataset.max_iter)
    else:
        batches_per_epoch = dataset.max_iter

    # Convert dataset to an iterator
    dataset = iter(dataset)

    # Initially set loss class weights as None
    edge_cw = None

    # Initialize running data
    running_loss = 0.0
    # running_err_edges = 0.0
    # running_err_tour = 0.0
    # running_err_tsp = 0.0
    running_pred_tour_len = 0.0
    running_gt_tour_len = 0.0
    running_nb_data = 0
    running_nb_batch = 0

    start_epoch = time.time()
    for batch_num in progress_bar(range(batches_per_epoch), parent=master_bar):
        # Generate a batch of TSPs
        try:
            batch = next(dataset)
        except StopIteration:
            break

        # Convert batch to torch tensors (Variable is a no-op wrapper in modern PyTorch)
        # x_edges = Variable(torch.LongTensor(batch.edges).type(dtypeLong), requires_grad=False)
        # x_edges_values = Variable(torch.FloatTensor(batch.edges_values).type(dtypeFloat), requires_grad=False)
        # x_nodes = Variable(torch.LongTensor(batch.nodes).type(dtypeLong), requires_grad=False)
        x_nodes_coord = Variable(torch.FloatTensor(batch.nodes_coord).type(dtypeFloat), requires_grad=False)
        x_nodes_timew = Variable(torch.FloatTensor(batch.nodes_timew).type(dtypeFloat), requires_grad=False) if is_tsptw else None
        # y_edges = Variable(torch.LongTensor(batch.edges_target).type(dtypeLong), requires_grad=False)
        # y_nodes = Variable(torch.LongTensor(batch.nodes_target).type(dtypeLong), requires_grad=False)
        y_tour = Variable(torch.LongTensor(batch.tour_nodes).type(dtypeLong), requires_grad=False)

        # Compute class weights (if not yet computed)
        if not isinstance(edge_cw, torch.Tensor):
            # edge_labels = y_edges.cpu().numpy().flatten()
            # edge_cw = compute_class_weight("balanced", classes=np.unique(edge_labels), y=edge_labels)
            # edge_cw = len(y_edges) / (num_edge_classes * edge_label_bincount)
            num_nodes = x_nodes_coord.size(1)
            num_edges = num_nodes * num_nodes
            num_edge_classes = 2
            # Don't make this a tensor since that would mess up DataParallel;
            # it is a parameter, not an input!
            edge_label_bincount = np.array([num_edges - 2 * num_nodes, 2 * num_nodes])
            edge_cw = num_edges / (num_edge_classes * edge_label_bincount)
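            # Worked example (sketch): for num_nodes = 20, num_edges = 400 and
            # edge_label_bincount = [360, 40], so
            # edge_cw = 400 / (2 * [360, 40]) ~= [0.56, 5.0]:
            # the rare "edge in tour" class is weighted ~9x the "no edge" class.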