text stringlengths 1 93.6k |
|---|
m_4 = nn.CrossEntropyLoss(size_average=True, weight=alpha4)
|
loss = 0.0
|
'''
|
GWTA Loss
|
'''
|
for idx, (m, out, yss) in enumerate(zip([m_1, m_2, m_3, m_4], outputs, Yss_argmax)):
|
if idx != 0:
|
loss_ = self.gwta_loss(out, yss, m, grid_factor=np.power(2, idx))
|
else:
|
loss_ = m(out, yss)
|
loss += loss_
|
losses.append(loss_.item())
|
loss.backward()
|
self.optimizers.step()
|
# -- Histogram of boxes for weighting --
|
for out_idx, (out, yss) in enumerate(zip(outputs[::-1], Yss_out[::-1])):
|
out_argmax = torch.argmax(out, dim=1)
|
bin_ = np.bincount(out_argmax.cpu().data.numpy().flatten())
|
ii = np.nonzero(bin_)[0]
|
hist_boxes[ii+4*out_idx] += bin_[ii]
|
Yss_argmax = torch.argmax(yss, dim=1)
|
bin_gt = np.bincount(Yss_argmax.cpu().data.numpy().flatten())
|
ii_gt = np.nonzero(bin_gt)[0]
|
hist_boxes_gt[ii_gt+4*out_idx] += bin_gt[ii_gt]
|
return losses, hist_boxes, hist_boxes_gt
|
'''
|
Test function for LSC-CNN.
|
Parameters
|
-----------
|
X - (np.ndarray) Image patches (Bx3xHxW)
|
Y - (np.ndarray) Ground truth in highest scale (Bx1xHxW)
|
Returns
|
---------
|
losses: (list of float) list of loss values of each scale.
|
upsample_pred: (list) list of torch tensor predictions for each scale ([Bx4xHxW] * number of scales)
|
upscaled to the prediction scale
|
upsample_gt: (list) list of torch tensor gt for each scale ([Bx4xHxW] * number of scales)
|
upscaled to the prediction scale
|
NOTE: Here 4 denotes the number of channels in prediction. In LSC-CNN 4 represents
|
[b_1, b_2, b_3, z] where b_i are boxes and z is the background.
|
'''
|
def test_function(X, Y, loss_weights, network):
|
Y = (Y>0).astype(np.float32)
|
if torch.cuda.is_available():
|
X = torch.autograd.Variable(torch.from_numpy(X)).cuda()
|
X_clone = X.clone()
|
Y = torch.autograd.Variable(torch.from_numpy(Y)).cuda()
|
Yss = [Y]
|
else:
|
assert(0)
|
network = network.cuda()
|
output = network(X, None)
|
for s in range(0, 3):
|
Yss.append(torch.nn.functional.avg_pool2d(Yss[s], (2, 2)) * 4)
|
assert(torch.sum(Yss[0]) == torch.sum(Yss[1]))
|
# Making 4 channel ground truth
|
Yss_out = self.get_box_gt(Yss)
|
Yss = Yss[::-1]
|
Yss_out = Yss_out[::-1]
|
Yss_argmax = [torch.argmax(yss, dim=1) for yss in Yss_out]
|
alpha1 = torch.cuda.FloatTensor(loss_weights[3]) # 1/16 scale
|
alpha2 = torch.cuda.FloatTensor(loss_weights[2]) # 1/8 scale
|
alpha3 = torch.cuda.FloatTensor(loss_weights[1]) # 1/4 scale
|
alpha4 = torch.cuda.FloatTensor(loss_weights[0]) # 1/2 scale
|
m_1 = nn.CrossEntropyLoss(size_average=True, weight=alpha1)
|
m_2 = nn.CrossEntropyLoss(size_average=True, weight=alpha2)
|
m_3 = nn.CrossEntropyLoss(size_average=True, weight=alpha3)
|
m_4 = nn.CrossEntropyLoss(size_average=True, weight=alpha4)
|
loss = 0.0
|
for (out, yss, m) in zip(output, Yss_argmax, [m_1, m_2, m_3, m_4]):
|
loss += m(out, yss)
|
out_softmax = [nn.functional.softmax(o, dim=1) for o in output]
|
out_argmax = [torch.argmax(o, dim=1) for o in out_softmax]
|
upsample_max = int(np.log2(16 // output_downscale))
|
upsample_gt = []
|
upsample_pred = []
|
for idx, (yss_out, out) in enumerate(zip(Yss_out, output)):
|
out = nn.functional.softmax(out, dim=1)
|
upsample_yss_out = yss_out
|
upsample_out = out
|
for n in range(upsample_max-idx):
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.