text stringlengths 1 93.6k |
|---|
image, mask = data
|
if random.random() < self.p: return TF.vflip(image), TF.vflip(mask)
|
else: return image, mask
|
class myRandomRotation:
    """Randomly rotate an (image, mask) pair by one shared random angle.

    Bug fix: the original sampled the rotation angle once in ``__init__``,
    so every call of the transform rotated by the *same* angle for the
    whole training run.  The angle is now drawn per call, which is the
    expected behavior of a random-rotation augmentation.

    Parameters
    ----------
    p : float
        Probability of applying the rotation on a given call.
    degree : sequence of two numbers
        ``[low, high]`` range (in degrees) the angle is sampled from.
    """

    def __init__(self, p=0.5, degree=(0, 360)):
        self.p = p
        self.degree = degree
        # Kept for backward compatibility with code that reads ``.angle``;
        # refreshed with the most recently used angle on every rotated call.
        self.angle = random.uniform(degree[0], degree[1])

    def __call__(self, data):
        """Return (image, mask) rotated by the same fresh angle with prob. p."""
        image, mask = data
        if random.random() < self.p:
            # Resample so successive samples receive different rotations.
            self.angle = random.uniform(self.degree[0], self.degree[1])
            return TF.rotate(image, self.angle), TF.rotate(mask, self.angle)
        else:
            return image, mask
|
class myNormalize:
    """Per-dataset z-score normalization followed by min-max rescaling to [0, 255].

    The mean/std statistics were precomputed offline for each supported
    dataset split.  Improvements over the original:

    * an unknown ``data_name`` raises ``ValueError`` immediately instead of
      leaving ``mean``/``std`` unset and failing later with AttributeError;
    * a constant image no longer divides by zero in the min-max step.

    The mask in the (image, mask) pair passes through untouched.
    """

    # Precomputed (mean, std), keyed by (data_name, train-split flag).
    _STATS = {
        ('isic18', True): (157.561, 26.706),
        ('isic18', False): (149.034, 32.022),
        ('isic17', True): (159.922, 28.871),
        ('isic17', False): (148.429, 25.748),
        ('isic18_82', True): (156.2899, 26.5457),
        ('isic18_82', False): (149.8485, 35.3346),
    }

    def __init__(self, data_name, train=True):
        try:
            self.mean, self.std = self._STATS[(data_name, bool(train))]
        except KeyError:
            raise ValueError(f'Unknown dataset name: {data_name!r}') from None

    def __call__(self, data):
        """Return (normalized_image, mask); image rescaled to [0, 255]."""
        img, msk = data
        img_normalized = (img - self.mean) / self.std
        lo = np.min(img_normalized)
        hi = np.max(img_normalized)
        if hi == lo:
            # Constant image: min-max scaling is undefined; return zeros
            # rather than dividing by zero and producing NaNs.
            return np.zeros_like(img_normalized), msk
        img_normalized = (img_normalized - lo) / (hi - lo) * 255.
        return img_normalized, msk
|
from thop import profile ## 导入thop模块
|
def cal_params_flops(model, size, logger):
    """Report FLOPs and parameter counts of `model` for a (1, 3, size, size) input.

    Prints GFLOPs and millions-of-parameters to stdout and records the same
    figures through `logger`.  Requires a CUDA device (the dummy input is
    created on GPU) and the ``thop`` package for FLOP counting.

    Parameters
    ----------
    model : torch.nn.Module
        Model to profile; assumed to already live on the GPU.
    size : int
        Spatial height/width of the square dummy input.
    logger : logging.Logger
        Destination for the summary line.
    """
    # `dummy_input` replaces the original local name `input`,
    # which shadowed the builtin.
    dummy_input = torch.randn(1, 3, size, size).cuda()
    flops, params = profile(model, inputs=(dummy_input,))
    print('flops', flops / 1e9)    # computation in GFLOPs
    print('params', params / 1e6)  # thop-reported parameters, in millions
    # thop can miss unregistered ops; also count every parameter directly.
    total = sum(p.numel() for p in model.parameters())
    print("Total params: %.2fM" % (total / 1e6))
    # Fixed the duplicated colon ("Total params: :") in the original message.
    logger.info(f'flops: {flops/1e9}, params: {params/1e6}, Total params: {total/1e6:.4f}')
|
def calculate_metric_percase(pred, gt):
    """Binarize `pred` and `gt` in place, then return (dice, hd95).

    Edge conventions: a non-empty prediction against an empty ground truth
    scores (1, 0); an empty prediction scores (0, 0); only when both are
    non-empty are the medpy Dice and 95th-percentile Hausdorff computed.
    """
    # Collapse all positive labels to the single foreground class (in place).
    pred[pred > 0] = 1
    gt[gt > 0] = 1
    has_pred = pred.sum() > 0
    has_gt = gt.sum() > 0
    if has_pred and has_gt:
        return metric.binary.dc(pred, gt), metric.binary.hd95(pred, gt)
    if has_pred:
        # Prediction exists but ground truth is empty.
        return 1, 0
    return 0, 0
|
def test_single_volume(image, label, net, classes, patch_size=[256, 256],
|
test_save_path=None, case=None, z_spacing=1, val_or_test=False):
|
image, label = image.squeeze(0).cpu().detach().numpy(), label.squeeze(0).cpu().detach().numpy()
|
if len(image.shape) == 3:
|
prediction = np.zeros_like(label)
|
for ind in range(image.shape[0]):
|
slice = image[ind, :, :]
|
x, y = slice.shape[0], slice.shape[1]
|
if x != patch_size[0] or y != patch_size[1]:
|
slice = zoom(slice, (patch_size[0] / x, patch_size[1] / y), order=3) # previous using 0
|
input = torch.from_numpy(slice).unsqueeze(0).unsqueeze(0).float().cuda()
|
net.eval()
|
with torch.no_grad():
|
outputs = net(input)
|
out = torch.argmax(torch.softmax(outputs, dim=1), dim=1).squeeze(0)
|
out = out.cpu().detach().numpy()
|
if x != patch_size[0] or y != patch_size[1]:
|
pred = zoom(out, (x / patch_size[0], y / patch_size[1]), order=0)
|
else:
|
pred = out
|
prediction[ind] = pred
|
else:
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.