text stringlengths 1 93.6k |
|---|
threshold = config.threshold,
|
threshold_mode = config.threshold_mode,
|
cooldown = config.cooldown,
|
min_lr = config.min_lr,
|
eps = config.eps
|
)
|
elif config.sch == 'CosineAnnealingWarmRestarts':
|
scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
|
optimizer,
|
T_0 = config.T_0,
|
T_mult = config.T_mult,
|
eta_min = config.eta_min,
|
last_epoch = config.last_epoch
|
)
|
elif config.sch == 'WP_MultiStepLR':
|
lr_func = lambda epoch: epoch / config.warm_up_epochs if epoch <= config.warm_up_epochs else config.gamma**len(
|
[m for m in config.milestones if m <= epoch])
|
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_func)
|
elif config.sch == 'WP_CosineLR':
|
lr_func = lambda epoch: epoch / config.warm_up_epochs if epoch <= config.warm_up_epochs else 0.5 * (
|
math.cos((epoch - config.warm_up_epochs) / (config.epochs - config.warm_up_epochs) * math.pi) + 1)
|
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_func)
|
return scheduler
|
def save_imgs(img, msk, msk_pred, i, save_path, datasets, threshold=0.5, test_data_name=None):
    """Save a 3-row figure (input image, ground-truth mask, predicted mask) as a PNG.

    Args:
        img: input image tensor, shape (1, C, H, W) — squeezed and moved to HWC numpy.
        msk: ground-truth mask array with a leading singleton axis.
        msk_pred: predicted mask array with a leading singleton axis.
        i: sample index used in the output filename.
        save_path: path prefix the filename is appended to.
        datasets: dataset name; 'retinal' masks are saved as-is, others are binarized.
        threshold: binarization threshold for the predicted mask (non-retinal only).
        test_data_name: optional tag inserted into the filename.
    """
    image = img.squeeze(0).permute(1, 2, 0).detach().cpu().numpy()
    # Heuristic: values above 1.1 indicate a 0-255 image; rescale to 0-1 for imshow.
    if image.max() > 1.1:
        image = image / 255.

    gt_mask = np.squeeze(msk, axis=0)
    pred_mask = np.squeeze(msk_pred, axis=0)
    if datasets != 'retinal':
        # Binarize: ground truth at a fixed 0.5, prediction at the caller's threshold.
        gt_mask = np.where(gt_mask > 0.5, 1, 0)
        pred_mask = np.where(pred_mask > threshold, 1, 0)

    plt.figure(figsize=(7, 15))

    plt.subplot(3, 1, 1)
    plt.imshow(image)
    plt.axis('off')

    plt.subplot(3, 1, 2)
    plt.imshow(gt_mask, cmap='gray')
    plt.axis('off')

    plt.subplot(3, 1, 3)
    plt.imshow(pred_mask, cmap='gray')
    plt.axis('off')

    prefix = save_path if test_data_name is None else save_path + test_data_name + '_'
    plt.savefig(prefix + str(i) + '.png')
    plt.close()
|
class BCELoss(nn.Module):
    """Binary cross-entropy loss applied to per-sample flattened tensors.

    Wraps ``nn.BCELoss`` (mean reduction); expects ``pred`` to already hold
    probabilities in [0, 1].
    """

    def __init__(self):
        super().__init__()
        self.bceloss = nn.BCELoss()

    def forward(self, pred, target):
        """Flatten each sample to a vector and return the mean BCE."""
        batch = pred.size(0)
        flat_pred = pred.view(batch, -1)
        flat_target = target.view(batch, -1)
        return self.bceloss(flat_pred, flat_target)
|
class DiceLoss(nn.Module):
    """Soft Dice loss with additive smoothing.

    Computes a per-sample Dice score on flattened predictions/targets and
    returns ``1 - mean(dice)``; ``smooth = 1`` avoids division by zero on
    empty masks.
    """

    def __init__(self):
        super().__init__()

    def forward(self, pred, target):
        """Return the batch-averaged soft Dice loss."""
        smooth = 1
        batch = pred.size(0)
        flat_pred = pred.view(batch, -1)
        flat_target = target.view(batch, -1)
        overlap = (flat_pred * flat_target).sum(1)
        denom = flat_pred.sum(1) + flat_target.sum(1)
        dice_per_sample = (2 * overlap + smooth) / (denom + smooth)
        return 1 - dice_per_sample.sum() / batch
|
class nDiceLoss(nn.Module):
|
def __init__(self, n_classes):
|
super(nDiceLoss, self).__init__()
|
self.n_classes = n_classes
|
def _one_hot_encoder(self, input_tensor):
|
tensor_list = []
|
for i in range(self.n_classes):
|
temp_prob = input_tensor == i # * torch.ones_like(input_tensor)
|
tensor_list.append(temp_prob.unsqueeze(1))
|
output_tensor = torch.cat(tensor_list, dim=1)
|
return output_tensor.float()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.