# Grid Winner-Take-All (GWTA): split the prediction and ground truth into a
# grid of non-overlapping cells and keep only the maximum per-cell loss.
patch_size_h = int(pred.size(2) / grid_factor)
patch_size_w = int(pred.size(3) / grid_factor)
# Unfold into (batch, channels, grid_h, grid_w, patch_h, patch_w) patches.
pred_re = pred.unfold(2, patch_size_h, patch_size_h).unfold(3, patch_size_w, patch_size_w).contiguous()
gt_re = gt.unfold(1, patch_size_h, patch_size_h).unfold(2, patch_size_w, patch_size_w).contiguous()
# Merge the two grid dimensions into a single "cell" dimension.
pred_re_merged = pred_re.view(pred_re.size(0), pred_re.size(1), -1, pred_re.size(-2), pred_re.size(-1))
gt_re_merged = gt_re.view(gt_re.size(0), -1, gt_re.size(-2), gt_re.size(-1))
grids_in_each_column = pred.shape[2] // patch_size_h
grids_in_each_row = pred.shape[3] // patch_size_w
num_grids = grids_in_each_column * grids_in_each_row
assert num_grids == pred_re_merged.size(2)
assert num_grids == gt_re_merged.size(1)
# Winner-take-all: only the grid cell with the highest loss contributes.
max_loss = -float("inf")
for ng in range(num_grids):
    out = pred_re_merged[:, :, ng]
    yss = gt_re_merged[:, ng]
    curr_loss = criterion(out, yss)
    if curr_loss > max_loss:
        max_loss = curr_loss
return max_loss
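
# The block above implements the grid winner-take-all (GWTA) idea: only the
# grid cell with the highest loss contributes to training. Below is a minimal,
# self-contained sketch of the same idea on dummy tensors. The helper name
# `gwta_cross_entropy` and all shapes are illustrative assumptions, not part of
# the original code; it relies on the module's existing `torch` / `nn` imports.
def gwta_cross_entropy(pred, gt, grid_factor=4):
    """pred: (B, C, H, W) logits; gt: (B, H, W) integer class labels."""
    criterion = nn.CrossEntropyLoss()
    ph, pw = pred.size(2) // grid_factor, pred.size(3) // grid_factor
    max_loss = None
    for i in range(grid_factor):
        for j in range(grid_factor):
            # Crop one grid cell from the logits and the labels.
            cell_pred = pred[:, :, i * ph:(i + 1) * ph, j * pw:(j + 1) * pw]
            cell_gt = gt[:, i * ph:(i + 1) * ph, j * pw:(j + 1) * pw]
            loss = criterion(cell_pred, cell_gt)
            if max_loss is None or loss > max_loss:
                max_loss = loss
    return max_loss

# Example usage (commented out): gradients flow only through the worst cell.
#   pred = torch.randn(2, 4, 32, 32, requires_grad=True)
#   gt = torch.randint(0, 4, (2, 32, 32))
#   gwta_cross_entropy(pred, gt).backward()
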
'''
Create network functions, i.e., the train and test functions
for LSC-CNN.

Parameters
----------
network: (torch model) torch model to train.
         Here len(network) == 1, i.e., a single model.

Returns
-------
train_funcs: list of train functions, one for each network in `network`.
test_funcs: list of test functions, one for each network in `network`.
'''
def create_network_functions(self, network):
    self.optimizers = optim.SGD(filter(lambda p: p.requires_grad, network.parameters()),
                                lr=args.lr, momentum=args.momentum,
                                weight_decay=args.weight_decay)

    '''
    Train function for LSC-CNN, with GWTA loss
    and scale-wise weighting.

    Parameters
    ----------
    Xs: (ndarray) batched input images.
    Ys: (ndarray) batched ground truth at the largest scale.

    Returns
    -------
    losses: (list of float) loss value for each scale.
    hist_boxes: (list) histogram of predicted boxes.
    hist_boxes_gt: (list) histogram of ground-truth boxes.
    '''
    def train_function(Xs, Ys, hist_boxes, hist_boxes_gt, loss_weights, network):
        # Binarise the ground truth: any positive value marks an annotated head.
        Ys = (Ys > 0).astype(np.float32)
        network = network.cuda()
        self.optimizers.zero_grad()
        if torch.cuda.is_available():
            X = torch.autograd.Variable(torch.from_numpy(Xs)).cuda()
            Y = torch.autograd.Variable(torch.FloatTensor(Ys)).cuda()
            Yss = [Y]
        else:
            assert 0, "Training LSC-CNN requires a CUDA-capable GPU."
        # Build the ground-truth pyramid: each 2x2 average pooling is scaled by 4
        # so that the total head count is preserved at every scale.
        for s in range(0, 3):
            Yss.append(torch.nn.functional.avg_pool2d(Yss[s], (2, 2)) * 4)

        output_vars = [network(X, None)]
        outputs_1 = [out for out in output_vars[0]]

        Yss_out = self.get_box_gt(Yss)  # Make the 4-channel ground truth.
        Yss = Yss[::-1]          # Reverse GT so the lowest scale comes first.
        Yss_out = Yss_out[::-1]  # Reverse box GT to match the same ordering.

        # Put outputs in a list.
        outputs = [out for out in output_vars[0]]

        losses = []
        sums = []
        # Class indices per scale (argmax over the box channels).
        Yss_argmax = [torch.argmax(yss, dim=1) for yss in Yss_out]

        # Per-scale, per-class weights for the cross-entropy losses.
        alpha1 = torch.cuda.FloatTensor(loss_weights[3])  # 1/16 scale
        alpha2 = torch.cuda.FloatTensor(loss_weights[2])  # 1/8 scale
        alpha3 = torch.cuda.FloatTensor(loss_weights[1])  # 1/4 scale
        alpha4 = torch.cuda.FloatTensor(loss_weights[0])  # 1/2 scale

        m_1 = nn.CrossEntropyLoss(reduction='mean', weight=alpha1)
        m_2 = nn.CrossEntropyLoss(reduction='mean', weight=alpha2)
        m_3 = nn.CrossEntropyLoss(reduction='mean', weight=alpha3)
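
        # A rough, hypothetical sketch (not the original code) of how the
        # per-scale weighted losses could be accumulated from criteria like
        # m_1, m_2, m_3 above. It assumes `outputs` is ordered lowest scale
        # first (matching `Yss_argmax`) and `per_scale_criteria` is an assumed
        # list holding one criterion per scale:
        #
        #   for out, target, criterion in zip(outputs, Yss_argmax, per_scale_criteria):
        #       losses.append(criterion(out, target))
        #   sum(losses).backward()
        #   self.optimizers.step()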